dirKeys = new ArrayList<>();
@@ -516,7 +473,7 @@ public void testCreateFile() throws Exception {
outputStream.close();
OmKeyInfo omKeyInfo = omMgr.getKeyTable(getBucketLayout()).get(openFileKey);
- assertNotNull("Invalid Key!", omKeyInfo);
+ assertNotNull(omKeyInfo, "Invalid Key!");
verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
// wait for DB updates
@@ -571,11 +528,10 @@ public void testFSDeleteLogWarnNoExist() throws Exception {
private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
long parentID) {
- assertEquals("Wrong keyName", fileName, omKeyInfo.getKeyName());
- assertEquals("Wrong parentID", parentID,
- omKeyInfo.getParentObjectID());
+ assertEquals(fileName, omKeyInfo.getKeyName(), "Wrong keyName");
+ assertEquals(parentID, omKeyInfo.getParentObjectID(), "Wrong parentID");
String dbKey = parentID + OzoneConsts.OM_KEY_PREFIX + fileName;
- assertEquals("Wrong path format", dbKey, omKeyInfo.getPath());
+ assertEquals(dbKey, omKeyInfo.getPath(), "Wrong path format");
}
long verifyDirKey(long volumeId, long bucketId, long parentId,
@@ -586,21 +542,13 @@ long verifyDirKey(long volumeId, long bucketId, long parentId,
parentId + "/" + dirKey;
dirKeys.add(dbKey);
OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
- assertNotNull("Failed to find " + absolutePath +
- " using dbKey: " + dbKey, dirInfo);
- assertEquals("Parent Id mismatches", parentId,
- dirInfo.getParentObjectID());
- assertEquals("Mismatches directory name", dirKey,
- dirInfo.getName());
- assertTrue("Mismatches directory creation time param",
- dirInfo.getCreationTime() > 0);
- assertEquals("Mismatches directory modification time param",
- dirInfo.getCreationTime(), dirInfo.getModificationTime());
+ assertNotNull(dirInfo, "Failed to find " + absolutePath +
+ " using dbKey: " + dbKey);
+ assertEquals(parentId, dirInfo.getParentObjectID(), "Parent Id mismatches");
+ assertEquals(dirKey, dirInfo.getName(), "Mismatches directory name");
+ assertTrue(dirInfo.getCreationTime() > 0, "Mismatches directory creation time param");
+ assertEquals(dirInfo.getCreationTime(), dirInfo.getModificationTime(),
+ "Mismatches directory modification time param");
return dirInfo.getObjectID();
}
- @Override
- public BucketLayout getBucketLayout() {
- return BucketLayout.FILE_SYSTEM_OPTIMIZED;
- }
}
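
The hunks above all apply one mechanical rule: JUnit 4 takes the optional failure message as the first argument, JUnit 5 takes it as the last. The swap matters because, with String values, passing the message first still compiles in JUnit 5 and silently becomes the expected value. A minimal sketch of the target form, using only stock JUnit 5 APIs (the class and values are hypothetical):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.Test;

class MessageOrderSketch {
  @Test
  void messageIsTheTrailingParameter() {
    String actual = "value";
    // JUnit 4: assertEquals("Wrong value", "value", actual);
    // JUnit 5: the failure message moves to the end.
    assertEquals("value", actual, "Wrong value");
    assertNotNull(actual, "Invalid value!");
  }
}
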
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FS.java
new file mode 100644
index 00000000000..5fdab6fe95d
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FS.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.junit.jupiter.api.TestInstance;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestO3FS extends AbstractOzoneFileSystemTest {
+ TestO3FS() {
+ super(false, false, BucketLayout.LEGACY);
+ }
+}
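
Each new driver class also opts into @TestInstance(Lifecycle.PER_CLASS). Presumably (the base class is not shown here) this is what lets AbstractOzoneFileSystemTest run @BeforeAll/@AfterAll as instance methods, so one expensive cluster can be built per subclass using the constructor flags. A hypothetical sketch of that interaction:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;

// PER_CLASS creates a single test instance for the whole class, which makes
// a non-static @BeforeAll legal and able to read instance state.
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class PerClassLifecycleSketch {
  private final boolean flagFromConstructor = true; // stands in for super(...) args
  private String fixture;

  @BeforeAll
  void setUpOnce() {
    // Under the default PER_METHOD lifecycle this method would have to be
    // static and could not see flagFromConstructor.
    fixture = flagFromConstructor ? "configured" : "default";
  }

  @Test
  void fixtureIsShared() {
    assertEquals("configured", fixture);
  }
}
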
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSO.java
new file mode 100644
index 00000000000..0d6be62b4fc
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSO.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.junit.jupiter.api.TestInstance;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestO3FSWithFSO extends AbstractOzoneFileSystemTestWithFSO {
+ TestO3FSWithFSO() {
+ super(false);
+ }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java
new file mode 100644
index 00000000000..d616d08e328
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.junit.jupiter.api.TestInstance;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestO3FSWithFSOAndOMRatis extends AbstractOzoneFileSystemTestWithFSO {
+ TestO3FSWithFSOAndOMRatis() {
+ super(true);
+ }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java
new file mode 100644
index 00000000000..5fffd9df7f4
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.junit.jupiter.api.TestInstance;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestO3FSWithFSPaths extends AbstractOzoneFileSystemTest {
+ TestO3FSWithFSPaths() {
+ super(true, false, BucketLayout.LEGACY);
+ }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java
new file mode 100644
index 00000000000..461961c3e73
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.junit.jupiter.api.TestInstance;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestO3FSWithFSPathsAndOMRatis extends AbstractOzoneFileSystemTest {
+ TestO3FSWithFSPathsAndOMRatis() {
+ super(true, true, BucketLayout.LEGACY);
+ }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java
new file mode 100644
index 00000000000..a02f3812e04
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.junit.jupiter.api.TestInstance;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestO3FSWithOMRatis extends AbstractOzoneFileSystemTest {
+ TestO3FSWithOMRatis() {
+ super(false, true, BucketLayout.LEGACY);
+ }
+}
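
Taken together, the six new classes replace a JUnit 4 @Parameterized matrix with one concrete subclass per flag combination, each independently discoverable and reported under its own name. A sketch of the pattern; the field and parameter names are assumptions inferred from the super(...) call sites, not the real signature:

// Hypothetical reconstruction; only the super(...) calls above are real.
abstract class AbstractFsTestSketch {
  private final boolean enableFileSystemPaths; // assumed name
  private final boolean omRatisEnabled;        // assumed name

  AbstractFsTestSketch(boolean enableFileSystemPaths, boolean omRatisEnabled) {
    this.enableFileSystemPaths = enableFileSystemPaths;
    this.omRatisEnabled = omRatisEnabled;
  }

  // @Test methods declared here run once per concrete subclass, so each
  // flag combination appears as its own test class in reports.
}

class WithRatisSketch extends AbstractFsTestSketch {
  WithRatisSketch() {
    super(false, true);
  }
}

Combined with the PER_CLASS lifecycle above, each configuration can share one fixture across all of its test methods.
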
From d83f434274b9c2d3fe4b7bdcff0b1862a8291c7d Mon Sep 17 00:00:00 2001
From: Hemant Kumar
Date: Mon, 18 Dec 2023 23:17:40 -0800
Subject: [PATCH 02/28] HDDS-9423. Throw appropriate error messages when
deleting a file in .snapshot path (#5814)
---
.../java/org/apache/hadoop/ozone/OmUtils.java | 31 +++++++--
.../om/request/key/OMKeyDeleteRequest.java | 4 +-
.../request/file/TestOMFileCreateRequest.java | 64 +++++++------------
.../request/key/TestOMKeyDeleteRequest.java | 40 +++++++++---
.../BasicRootedOzoneClientAdapterImpl.java | 7 +-
.../ozone/shell/keys/DeleteKeyHandler.java | 9 +++
6 files changed, 95 insertions(+), 60 deletions(-)
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index babeb305487..f23a703bd0d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -629,15 +629,36 @@ public static void verifyKeyNameWithSnapshotReservedWord(String keyName)
if (keyName.substring(OM_SNAPSHOT_INDICATOR.length())
.startsWith(OM_KEY_PREFIX)) {
throw new OMException(
- "Cannot create key under path reserved for "
- + "snapshot: " + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX,
+ "Cannot create key under path reserved for snapshot: " + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX,
OMException.ResultCodes.INVALID_KEY_NAME);
}
} else {
- // We checked for startsWith OM_SNAPSHOT_INDICATOR and the length is
+ // We checked for startsWith OM_SNAPSHOT_INDICATOR, and the length is
// the same, so it must equal OM_SNAPSHOT_INDICATOR.
- throw new OMException(
- "Cannot create key with reserved name: " + OM_SNAPSHOT_INDICATOR,
+ throw new OMException("Cannot create key with reserved name: " + OM_SNAPSHOT_INDICATOR,
+ OMException.ResultCodes.INVALID_KEY_NAME);
+ }
+ }
+ }
+
+ /**
+ * Verify if key name contains snapshot reserved word.
+ * This is similar to verifyKeyNameWithSnapshotReservedWord. The only difference is the exception message.
+ */
+ public static void verifyKeyNameWithSnapshotReservedWordForDeletion(String keyName) throws OMException {
+ if (keyName != null &&
+ keyName.startsWith(OM_SNAPSHOT_INDICATOR)) {
+ if (keyName.length() > OM_SNAPSHOT_INDICATOR.length()) {
+ if (keyName.substring(OM_SNAPSHOT_INDICATOR.length())
+ .startsWith(OM_KEY_PREFIX)) {
+ throw new OMException(
+ "Cannot delete key under path reserved for snapshot: " + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX,
+ OMException.ResultCodes.INVALID_KEY_NAME);
+ }
+ } else {
+ // We checked for startsWith OM_SNAPSHOT_INDICATOR, and the length is
+ // the same, so it must equal OM_SNAPSHOT_INDICATOR.
+ throw new OMException("Cannot delete key with reserved name: " + OM_SNAPSHOT_INDICATOR,
OMException.ResultCodes.INVALID_KEY_NAME);
}
}
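
For reference, OM_SNAPSHOT_INDICATOR is ".snapshot" and OM_KEY_PREFIX is "/", so the new check rejects exactly ".snapshot" and anything under ".snapshot/", while keys that merely begin with those characters (".snapshotFoo") remain deletable. A standalone sketch of the decision logic with the constants inlined:

// Self-contained restatement of verifyKeyNameWithSnapshotReservedWordForDeletion,
// returning a boolean instead of throwing OMException.
final class SnapshotNameCheckSketch {
  static boolean isReservedForDeletion(String keyName) {
    if (keyName == null || !keyName.startsWith(".snapshot")) {
      return false; // ordinary keys pass through
    }
    // ".snapshot" alone, or ".snapshot/<anything>", is reserved;
    // names like ".snapshotFoo" fall through and stay deletable.
    return keyName.length() == ".snapshot".length()
        || keyName.charAt(".snapshot".length()) == '/';
  }

  public static void main(String[] args) {
    System.out.println(isReservedForDeletion(".snapshot"));       // true
    System.out.println(isReservedForDeletion(".snapshot/s/key")); // true
    System.out.println(isReservedForDeletion(".snapshotFoo"));    // false
    System.out.println(isReservedForDeletion("a/.snapshot/key")); // false
  }
}
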
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 9fefd70a2da..0998d001756 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -22,6 +22,7 @@
import java.nio.file.InvalidPathException;
import java.util.Map;
+import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -76,8 +77,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
Preconditions.checkNotNull(deleteKeyRequest);
OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs();
-
String keyPath = keyArgs.getKeyName();
+
+ OmUtils.verifyKeyNameWithSnapshotReservedWordForDeletion(keyPath);
keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(),
keyPath, getBucketLayout());
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index dbd2a80964b..0a7a352b382 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -23,8 +23,6 @@
import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;
-import java.util.Map;
-import java.util.HashMap;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.OzoneAcl;
@@ -41,15 +39,12 @@
import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .CreateFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS;
@@ -481,40 +476,25 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo,
}
}
- @Test
- public void testPreExecuteWithInvalidKeyPrefix() throws Exception {
- Map<String, String> invalidKeyScenarios = new HashMap<String, String>() {
- {
- put(OM_SNAPSHOT_INDICATOR + "/" + keyName,
- "Cannot create key under path reserved for snapshot: "
- + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX);
- put(OM_SNAPSHOT_INDICATOR + "/a/" + keyName,
- "Cannot create key under path reserved for snapshot: "
- + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX);
- put(OM_SNAPSHOT_INDICATOR + "/a/b" + keyName,
- "Cannot create key under path reserved for snapshot: "
- + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX);
- put(OM_SNAPSHOT_INDICATOR,
- "Cannot create key with reserved name: " + OM_SNAPSHOT_INDICATOR);
- }
- };
-
- for (Map.Entry<String, String> entry : invalidKeyScenarios.entrySet()) {
- String invalidKeyName = entry.getKey();
- String expectedErrorMessage = entry.getValue();
+ @ParameterizedTest
+ @CsvSource(value = {
+ ".snapshot/keyName,Cannot create key under path reserved for snapshot: .snapshot/",
+ ".snapshot/a/keyName,Cannot create key under path reserved for snapshot: .snapshot/",
+ ".snapshot/a/b/keyName,Cannot create key under path reserved for snapshot: .snapshot/",
+ ".snapshot,Cannot create key with reserved name: .snapshot"})
+ public void testPreExecuteWithInvalidKeyPrefix(String invalidKeyName,
+ String expectedErrorMessage) {
- OMRequest omRequest = createFileRequest(volumeName, bucketName,
- invalidKeyName, HddsProtos.ReplicationFactor.ONE,
- HddsProtos.ReplicationType.RATIS, false, false);
-
- OMFileCreateRequest omFileCreateRequest =
- getOMFileCreateRequest(omRequest);
+ OMRequest omRequest = createFileRequest(volumeName, bucketName,
+ invalidKeyName, HddsProtos.ReplicationFactor.ONE,
+ HddsProtos.ReplicationType.RATIS, false, false);
- OMException ex = Assertions.assertThrows(OMException.class,
- () -> omFileCreateRequest.preExecute(ozoneManager));
+ OMFileCreateRequest omFileCreateRequest =
+ getOMFileCreateRequest(omRequest);
- Assertions.assertTrue(ex.getMessage().contains(expectedErrorMessage));
- }
+ OMException ex = Assertions.assertThrows(OMException.class,
+ () -> omFileCreateRequest.preExecute(ozoneManager));
+ Assertions.assertTrue(ex.getMessage().contains(expectedErrorMessage));
}
protected void testNonRecursivePath(String key,
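
@CsvSource splits each entry on commas and binds the resulting columns to the method parameters in order, which is also why the expected messages above must not themselves contain commas. A toy sketch of the binding, unrelated to the Ozone classes:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;

class CsvSourceSketch {
  @ParameterizedTest
  @CsvSource({
      ".snapshot/key,true",
      "normal/key,false"})
  void bindsColumnsInOrder(String keyName, boolean reserved) {
    // Column 1 -> keyName, column 2 -> reserved (auto-converted to boolean).
    assertEquals(reserved, keyName.startsWith(".snapshot"));
  }
}
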
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index fe84e3cfbe7..907022ceddb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -20,6 +20,7 @@
import java.util.UUID;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.junit.jupiter.api.Assertions;
@@ -28,21 +29,36 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .DeleteKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
- .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.junit.jupiter.params.provider.ValueSource;
/**
* Tests OmKeyDelete request.
*/
public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
- @Test
- public void testPreExecute() throws Exception {
- doPreExecute(createDeleteKeyRequest());
+ @ParameterizedTest
+ @ValueSource(strings = {"keyName", "a/b/keyName", "a/.snapshot/keyName", "a.snapshot/b/keyName"})
+ public void testPreExecute(String testKeyName) throws Exception {
+ doPreExecute(createDeleteKeyRequest(testKeyName));
+ }
+
+ @ParameterizedTest
+ @CsvSource(value = {".snapshot,Cannot delete key with reserved name: .snapshot",
+ ".snapshot/snapName,Cannot delete key under path reserved for snapshot: .snapshot/",
+ ".snapshot/snapName/keyName,Cannot delete key under path reserved for snapshot: .snapshot/"})
+ public void testPreExecuteFailure(String testKeyName,
+ String expectedExceptionMessage) {
+ OMKeyDeleteRequest deleteKeyRequest =
+ getOmKeyDeleteRequest(createDeleteKeyRequest(testKeyName));
+ OMException omException = Assertions.assertThrows(OMException.class,
+ () -> deleteKeyRequest.preExecute(ozoneManager));
+ Assertions.assertEquals(expectedExceptionMessage, omException.getMessage());
+ Assertions.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, omException.getResult());
}
@Test
@@ -154,8 +170,12 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
* @return OMRequest
*/
private OMRequest createDeleteKeyRequest() {
+ return createDeleteKeyRequest(keyName);
+ }
+
+ private OMRequest createDeleteKeyRequest(String testKeyName) {
KeyArgs keyArgs = KeyArgs.newBuilder().setBucketName(bucketName)
- .setVolumeName(volumeName).setKeyName(keyName).build();
+ .setVolumeName(volumeName).setKeyName(testKeyName).build();
DeleteKeyRequest deleteKeyRequest =
DeleteKeyRequest.newBuilder().setKeyArgs(keyArgs).build();
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index e565c2bedf3..193e080f0e0 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -40,6 +40,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathPermissionException;
import org.apache.hadoop.fs.SafeModeAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -545,13 +546,15 @@ public boolean deleteObject(String path, boolean recursive)
bucket.deleteDirectory(keyName, recursive);
return true;
} catch (OMException ome) {
- LOG.error("delete key failed {}", ome.getMessage());
+ LOG.error("Delete key failed. {}", ome.getMessage());
if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) {
throw new PathIsNotEmptyDirectoryException(ome.getMessage());
+ } else if (OMException.ResultCodes.INVALID_KEY_NAME == ome.getResult()) {
+ throw new PathPermissionException(ome.getMessage());
}
return false;
} catch (IOException ioe) {
- LOG.error("delete key failed " + ioe.getMessage());
+ LOG.error("Delete key failed. {}", ioe.getMessage());
return false;
}
}
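
The deleteObject() change extends an existing translation pattern: OM result codes map onto the closest org.apache.hadoop.fs exception so FileSystem callers can tell a refused delete from a plain failure. A simplified sketch of the pattern (the enum is a stand-in; the two Hadoop exception types and their single-argument constructors are real):

import java.io.IOException;

import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathPermissionException;

final class ResultCodeTranslationSketch {
  enum ResultCode { DIRECTORY_NOT_EMPTY, INVALID_KEY_NAME, OTHER }

  // Mirrors the catch block above in simplified form.
  static boolean delete(ResultCode serverResult) throws IOException {
    switch (serverResult) {
    case DIRECTORY_NOT_EMPTY:
      throw new PathIsNotEmptyDirectoryException("/vol/bucket/dir");
    case INVALID_KEY_NAME:
      // New in this patch: reserved .snapshot paths surface as a
      // permission-style failure instead of a silent "return false".
      throw new PathPermissionException("/vol/bucket/.snapshot/key");
    default:
      return false; // other errors keep the old boolean contract
    }
  }
}
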
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java
index 5e56cda4780..d1a6a4e156f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java
@@ -19,11 +19,13 @@
package org.apache.hadoop.ozone.shell.keys;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientException;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.shell.OzoneAddress;
import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -59,6 +61,13 @@ protected void execute(OzoneClient client, OzoneAddress address)
OzoneBucket bucket = vol.getBucket(bucketName);
String keyName = address.getKeyName();
+ try {
+ OmUtils.verifyKeyNameWithSnapshotReservedWordForDeletion(keyName);
+ } catch (OMException omException) {
+ out().printf("Operation not permitted: %s %n", omException.getMessage());
+ return;
+ }
+
if (bucket.getBucketLayout().isFileSystemOptimized()) {
// Handle FSO delete key which supports trash also
deleteFSOKey(bucket, keyName);
From 6b86d93e1bb0e6333fae8b8a979c61251f8c5d15 Mon Sep 17 00:00:00 2001
From: Raju Balpande <146973984+raju-balpande@users.noreply.github.com>
Date: Tue, 19 Dec 2023 14:10:26 +0530
Subject: [PATCH 03/28] HDDS-9809. Migrate assertions in integration tests to
JUnit5 (#5815)
---
.../org/apache/hadoop/fs/ozone/TestHSync.java | 9 +-
.../scm/TestSCMDatanodeProtocolServer.java | 9 +-
.../hdds/scm/TestSCMInstallSnapshot.java | 30 +++--
.../TestContainerStateManagerIntegration.java | 89 +++++++------
.../scm/pipeline/TestLeaderChoosePolicy.java | 15 ++-
.../hdds/scm/pipeline/TestPipelineClose.java | 19 +--
.../TestRatisPipelineCreateAndDestroy.java | 24 ++--
.../scm/storage/TestContainerCommandsEC.java | 59 ++++-----
.../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 45 ++++---
.../hdds/upgrade/TestHddsUpgradeUtils.java | 56 ++++----
.../apache/hadoop/ozone/OzoneTestUtils.java | 10 +-
.../hadoop/ozone/TestDelegationToken.java | 23 ++--
.../TestContainerStateMachineFailures.java | 103 +++++++--------
.../rpc/TestFailureHandlingByClient.java | 87 ++++++-------
.../rpc/TestOzoneRpcClientWithRatis.java | 33 ++---
.../ozone/client/rpc/TestWatchForCommit.java | 82 ++++++------
.../hadoop/ozone/container/TestHelper.java | 28 ++--
.../commandhandler/TestBlockDeletion.java | 3 +-
.../TestCloseContainerByPipeline.java | 40 +++---
.../ozoneimpl/TestOzoneContainer.java | 121 +++++++++---------
.../container/server/TestContainerServer.java | 16 +--
.../ozone/freon/TestRandomKeyGenerator.java | 55 ++++----
.../ozone/om/TestAddRemoveOzoneManager.java | 60 +++++----
.../ozone/om/TestOMDbCheckpointServlet.java | 5 +-
...estReconInsightsForDeletedDirectories.java | 36 +++---
.../ozone/scm/TestFailoverWithSCMHA.java | 44 ++++---
...estSCMContainerPlacementPolicyMetrics.java | 10 +-
.../scm/TestSCMInstallSnapshotWithHA.java | 32 ++---
.../hadoop/ozone/shell/TestOzoneShellHA.java | 5 +-
29 files changed, 574 insertions(+), 574 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index b313aa80fb5..559b8da4982 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -68,7 +68,6 @@
import org.apache.hadoop.util.Time;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -84,7 +83,8 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY;
-import static org.junit.Assert.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -187,8 +187,7 @@ public void testKeyHSyncThenClose() throws Exception {
RepeatedOmKeyInfo val = kv.getValue();
LOG.error("Unexpected deletedTable entry: key = {}, val = {}",
key, val);
- Assertions.fail("deletedTable should not have such entry. key = " +
- key);
+ fail("deletedTable should not have such entry. key = " + key);
}
}
}
@@ -332,7 +331,7 @@ static void runTestHSync(FileSystem fs, Path file,
int offset = 0;
try (FSDataInputStream in = fs.open(file)) {
final long skipped = in.skip(length);
- Assertions.assertEquals(length, skipped);
+ assertEquals(length, skipped);
for (; ;) {
final int n = in.read(buffer, 0, buffer.length);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java
index 4e329ad305e..fee608c05b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java
@@ -20,13 +20,14 @@
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer;
import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
-import org.junit.Assert;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
/**
* Test for StorageContainerDatanodeProtocolProtos.
*/
@@ -44,9 +45,9 @@ public void ensureTermAndDeadlineOnCommands()
StorageContainerDatanodeProtocolProtos.SCMCommandProto proto =
SCMDatanodeProtocolServer.getCommandResponse(command, scm);
- Assert.assertEquals(StorageContainerDatanodeProtocolProtos.SCMCommandProto
+ assertEquals(StorageContainerDatanodeProtocolProtos.SCMCommandProto
.Type.replicateContainerCommand, proto.getCommandType());
- Assert.assertEquals(5L, proto.getTerm());
- Assert.assertEquals(1234L, proto.getDeadlineMsSinceEpoch());
+ assertEquals(5L, proto.getTerm());
+ assertEquals(1234L, proto.getDeadlineMsSinceEpoch());
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
index a37d3c47564..53f07abc91e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
@@ -39,7 +39,6 @@
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterAll;
-import org.junit.Assert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -52,6 +51,11 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
/**
* Class to test install snapshot feature for SCM HA.
@@ -115,9 +119,9 @@ private DBCheckpoint downloadSnapshot() throws Exception {
String snapshotDir =
conf.get(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_DIR);
final File[] files = FileUtil.listFiles(provider.getScmSnapshotDir());
- Assert.assertTrue(files[0].getName().startsWith(
+ assertTrue(files[0].getName().startsWith(
OzoneConsts.SCM_DB_NAME + "-" + scmNodeDetails.getNodeId()));
- Assert.assertTrue(files[0].getAbsolutePath().startsWith(snapshotDir));
+ assertTrue(files[0].getAbsolutePath().startsWith(snapshotDir));
return checkpoint;
}
@@ -133,7 +137,7 @@ public void testInstallCheckPoint() throws Exception {
// Hack the transaction index in the checkpoint so as to ensure the
// checkpointed transaction index is higher than when it was downloaded
// from.
- Assert.assertNotNull(db);
+ assertNotNull(db);
HAUtils.getTransactionInfoTable(db, new SCMDBDefinition())
.put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.builder()
.setCurrentTerm(10).setTransactionIndex(100).build());
@@ -144,9 +148,9 @@ public void testInstallCheckPoint() throws Exception {
scm.getPipelineManager().getPipelines().get(0).getId();
scm.getScmMetadataStore().getPipelineTable().delete(pipelineID);
scm.getContainerManager().deleteContainer(cid);
- Assert.assertNull(
+ assertNull(
scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
- Assert.assertFalse(scm.getContainerManager().containerExist(cid));
+ assertFalse(scm.getContainerManager().containerExist(cid));
SCMStateMachine sm =
scm.getScmHAManager().getRatisServer().getSCMStateMachine();
@@ -154,16 +158,14 @@ public void testInstallCheckPoint() throws Exception {
sm.setInstallingSnapshotData(checkpoint, null);
sm.reinitialize();
- Assert.assertNotNull(
- scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
- Assert.assertNotNull(
- scm.getScmMetadataStore().getContainerTable().get(cid));
- Assert.assertTrue(scm.getPipelineManager().containsPipeline(pipelineID));
- Assert.assertTrue(scm.getContainerManager().containerExist(cid));
- Assert.assertEquals(100, scm.getScmMetadataStore().
+ assertNotNull(scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
+ assertNotNull(scm.getScmMetadataStore().getContainerTable().get(cid));
+ assertTrue(scm.getPipelineManager().containsPipeline(pipelineID));
+ assertTrue(scm.getContainerManager().containerExist(cid));
+ assertEquals(100, scm.getScmMetadataStore().
getTransactionInfoTable().get(OzoneConsts.TRANSACTION_INFO_KEY)
.getTransactionIndex());
- Assert.assertEquals(100,
+ assertEquals(100,
scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo()
.getTermIndex().getIndex());
}
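
A possible follow-up the patch does not take: where several facts about one snapshot install are verified together, JUnit 5's assertAll evaluates every grouped assertion and reports all failures at once instead of stopping at the first. A sketch (names hypothetical):

import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertTrue;

class AssertAllSketch {
  void verifyRestoredState(boolean pipelineRestored, boolean containerRestored) {
    assertAll("state after snapshot install",
        () -> assertTrue(pipelineRestored, "pipeline should be restored"),
        () -> assertTrue(containerRestored, "container should be restored"));
  }
}
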
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index d7f11566909..72d1ebf4381 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.jupiter.api.AfterEach;
-import org.junit.Assert;
import org.junit.jupiter.api.BeforeEach;
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.Test;
@@ -51,6 +50,12 @@
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for ContainerStateManager.
@@ -100,21 +105,21 @@ public void testAllocateContainer() throws IOException {
ContainerInfo info = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
container1.getPipeline());
- Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
+ assertNotEquals(container1.getContainerInfo().getContainerID(),
info.getContainerID());
- Assert.assertEquals(OzoneConsts.OZONE, info.getOwner());
- Assert.assertEquals(SCMTestUtils.getReplicationType(conf),
+ assertEquals(OzoneConsts.OZONE, info.getOwner());
+ assertEquals(SCMTestUtils.getReplicationType(conf),
info.getReplicationType());
- Assert.assertEquals(SCMTestUtils.getReplicationFactor(conf),
+ assertEquals(SCMTestUtils.getReplicationFactor(conf),
ReplicationConfig.getLegacyFactor(info.getReplicationConfig()));
- Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, info.getState());
+ assertEquals(HddsProtos.LifeCycleState.OPEN, info.getState());
// Check there are two containers in ALLOCATED state after allocation
ContainerWithPipeline container2 = scm.getClientProtocolServer()
.allocateContainer(
SCMTestUtils.getReplicationType(conf),
SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
- Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
+ assertNotEquals(container1.getContainerInfo().getContainerID(),
container2.getContainerInfo().getContainerID());
}
@@ -128,7 +133,7 @@ public void testAllocateContainerWithDifferentOwner() throws IOException {
ContainerInfo info = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
container1.getPipeline());
- Assert.assertNotNull(info);
+ assertNotNull(info);
String newContainerOwner = "OZONE_NEW";
ContainerWithPipeline container2 = scm.getClientProtocolServer()
@@ -137,9 +142,9 @@ public void testAllocateContainerWithDifferentOwner() throws IOException {
ContainerInfo info2 = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, newContainerOwner,
container1.getPipeline());
- Assert.assertNotNull(info2);
+ assertNotNull(info2);
- Assert.assertNotEquals(info.containerID(), info2.containerID());
+ assertNotEquals(info.containerID(), info2.containerID());
}
@Test
@@ -179,7 +184,7 @@ public void testContainerStateManagerRestart() throws IOException,
.filter(info ->
info.getState() == HddsProtos.LifeCycleState.OPEN)
.count();
- Assert.assertEquals(5, matchCount);
+ assertEquals(5, matchCount);
matchCount = result.stream()
.filter(info ->
info.getOwner().equals(OzoneConsts.OZONE))
@@ -191,7 +196,7 @@ public void testContainerStateManagerRestart() throws IOException,
.filter(info ->
info.getState() == HddsProtos.LifeCycleState.CLOSING)
.count();
- Assert.assertEquals(5, matchCount);
+ assertEquals(5, matchCount);
}
@Test
@@ -209,7 +214,7 @@ public void testGetMatchingContainer() throws IOException {
ContainerInfo info = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
container1.getPipeline());
- Assert.assertTrue(info.getContainerID() > cid);
+ assertTrue(info.getContainerID() > cid);
cid = info.getContainerID();
}
@@ -218,7 +223,7 @@ public void testGetMatchingContainer() throws IOException {
ContainerInfo info = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
container1.getPipeline());
- Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+ assertEquals(container1.getContainerInfo().getContainerID(),
info.getContainerID());
}
@@ -248,7 +253,7 @@ public void testGetMatchingContainerMultipleThreads()
// make sure pipeline has numContainerPerOwnerInPipeline number of
// containers.
- Assert.assertEquals(scm.getPipelineManager()
+ assertEquals(scm.getPipelineManager()
.getNumberOfContainers(container1.getPipeline().getId()),
numContainerPerOwnerInPipeline);
Thread.sleep(5000);
@@ -259,7 +264,7 @@ public void testGetMatchingContainerMultipleThreads()
// TODO: #CLUTIL Look at the division of block allocations in different
// containers.
LOG.error("Total allocated block = " + matchedCount);
- Assert.assertTrue(matchedCount <=
+ assertTrue(matchedCount <=
numBlockAllocates / container2MatchedCount.size() + threshold
&& matchedCount >=
numBlockAllocates / container2MatchedCount.size() - threshold);
@@ -272,7 +277,7 @@ public void testUpdateContainerState() throws IOException,
Set<ContainerID> containerList = containerStateManager
.getContainerIDs(HddsProtos.LifeCycleState.OPEN);
int containers = containerList == null ? 0 : containerList.size();
- Assert.assertEquals(0, containers);
+ assertEquals(0, containers);
// Allocate container1 and update its state from
// OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
@@ -282,35 +287,35 @@ public void testUpdateContainerState() throws IOException,
SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
containerList = containerStateManager
.getContainerIDs(HddsProtos.LifeCycleState.OPEN);
- Assert.assertEquals(1, containerList.size());
+ assertEquals(1, containerList.size());
containerManager
.updateContainerState(container1.getContainerInfo().containerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
containerList = containerStateManager
.getContainerIDs(HddsProtos.LifeCycleState.CLOSING);
- Assert.assertEquals(1, containerList.size());
+ assertEquals(1, containerList.size());
containerManager
.updateContainerState(container1.getContainerInfo().containerID(),
HddsProtos.LifeCycleEvent.CLOSE);
containerList = containerStateManager
.getContainerIDs(HddsProtos.LifeCycleState.CLOSED);
- Assert.assertEquals(1, containerList.size());
+ assertEquals(1, containerList.size());
containerManager
.updateContainerState(container1.getContainerInfo().containerID(),
HddsProtos.LifeCycleEvent.DELETE);
containerList = containerStateManager
.getContainerIDs(HddsProtos.LifeCycleState.DELETING);
- Assert.assertEquals(1, containerList.size());
+ assertEquals(1, containerList.size());
containerManager
.updateContainerState(container1.getContainerInfo().containerID(),
HddsProtos.LifeCycleEvent.CLEANUP);
containerList = containerStateManager
.getContainerIDs(HddsProtos.LifeCycleState.DELETED);
- Assert.assertEquals(1, containerList.size());
+ assertEquals(1, containerList.size());
// Allocate container1 and update its state from
// OPEN -> CLOSING -> CLOSED
@@ -329,7 +334,7 @@ public void testUpdateContainerState() throws IOException,
HddsProtos.LifeCycleEvent.CLOSE);
containerList = containerStateManager
.getContainerIDs(HddsProtos.LifeCycleState.CLOSED);
- Assert.assertEquals(1, containerList.size());
+ assertEquals(1, containerList.size());
}
@@ -346,7 +351,7 @@ public void testReplicaMap() throws Exception {
ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong());
Set<ContainerReplica> replicaSet =
containerStateManager.getContainerReplicas(containerID);
- Assert.assertNull(replicaSet);
+ assertNull(replicaSet);
ContainerWithPipeline container = scm.getClientProtocolServer()
.allocateContainer(
@@ -369,44 +374,44 @@ public void testReplicaMap() throws Exception {
containerStateManager.updateContainerReplica(id, replicaOne);
containerStateManager.updateContainerReplica(id, replicaTwo);
replicaSet = containerStateManager.getContainerReplicas(id);
- Assert.assertEquals(2, replicaSet.size());
- Assert.assertTrue(replicaSet.contains(replicaOne));
- Assert.assertTrue(replicaSet.contains(replicaTwo));
+ assertEquals(2, replicaSet.size());
+ assertTrue(replicaSet.contains(replicaOne));
+ assertTrue(replicaSet.contains(replicaTwo));
// Test 3: Remove one replica node and then test
containerStateManager.removeContainerReplica(id, replicaOne);
replicaSet = containerStateManager.getContainerReplicas(id);
- Assert.assertEquals(1, replicaSet.size());
- Assert.assertFalse(replicaSet.contains(replicaOne));
- Assert.assertTrue(replicaSet.contains(replicaTwo));
+ assertEquals(1, replicaSet.size());
+ assertFalse(replicaSet.contains(replicaOne));
+ assertTrue(replicaSet.contains(replicaTwo));
// Test 3: Remove second replica node and then test
containerStateManager.removeContainerReplica(id, replicaTwo);
replicaSet = containerStateManager.getContainerReplicas(id);
- Assert.assertEquals(0, replicaSet.size());
- Assert.assertFalse(replicaSet.contains(replicaOne));
- Assert.assertFalse(replicaSet.contains(replicaTwo));
+ assertEquals(0, replicaSet.size());
+ assertFalse(replicaSet.contains(replicaOne));
+ assertFalse(replicaSet.contains(replicaTwo));
// Test 4: Re-insert dn1
containerStateManager.updateContainerReplica(id, replicaOne);
replicaSet = containerStateManager.getContainerReplicas(id);
- Assert.assertEquals(1, replicaSet.size());
- Assert.assertTrue(replicaSet.contains(replicaOne));
- Assert.assertFalse(replicaSet.contains(replicaTwo));
+ assertEquals(1, replicaSet.size());
+ assertTrue(replicaSet.contains(replicaOne));
+ assertFalse(replicaSet.contains(replicaTwo));
// Re-insert dn2
containerStateManager.updateContainerReplica(id, replicaTwo);
replicaSet = containerStateManager.getContainerReplicas(id);
- Assert.assertEquals(2, replicaSet.size());
- Assert.assertTrue(replicaSet.contains(replicaOne));
- Assert.assertTrue(replicaSet.contains(replicaTwo));
+ assertEquals(2, replicaSet.size());
+ assertTrue(replicaSet.contains(replicaOne));
+ assertTrue(replicaSet.contains(replicaTwo));
// Re-insert dn1
containerStateManager.updateContainerReplica(id, replicaOne);
replicaSet = containerStateManager.getContainerReplicas(id);
- Assert.assertEquals(2, replicaSet.size());
- Assert.assertTrue(replicaSet.contains(replicaOne));
- Assert.assertTrue(replicaSet.contains(replicaTwo));
+ assertEquals(2, replicaSet.size());
+ assertTrue(replicaSet.contains(replicaOne));
+ assertTrue(replicaSet.contains(replicaTwo));
}
}
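
Another optional tightening these hunks stop short of: comparing whole sets collapses each size/contains triple into a single assertion whose failure message shows both sets. A sketch, assuming Java 9+ for Set.of (values hypothetical):

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.Set;

class SetEqualitySketch {
  void replicasMatch(Set<String> replicaSet) {
    // Fails with "expected: <[replicaOne, replicaTwo]> but was: <...>".
    assertEquals(Set.of("replicaOne", "replicaTwo"), replicaSet);
  }
}
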
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index bcd00d07934..a695038d444 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -25,7 +25,6 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.LambdaTestUtils;
import org.apache.ozone.test.tag.Unhealthy;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -41,6 +40,8 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for LeaderChoosePolicy.
@@ -93,9 +94,9 @@ private void checkLeaderBalance(int dnNum, int leaderNumOfEachDn)
leaderCount.put(leader, leaderCount.get(leader) + 1);
}
- Assert.assertTrue(leaderCount.size() == dnNum);
+ assertTrue(leaderCount.size() == dnNum);
for (Map.Entry entry: leaderCount.entrySet()) {
- Assert.assertTrue(leaderCount.get(entry.getKey()) == leaderNumOfEachDn);
+ assertTrue(leaderCount.get(entry.getKey()) == leaderNumOfEachDn);
}
}
@@ -114,7 +115,7 @@ public void testRestoreSuggestedLeader() throws Exception {
// make sure two pipelines are created
waitForPipelines(pipelineNum);
// No Factor ONE pipeline is auto created.
- Assert.assertEquals(0,
+ assertEquals(0,
pipelineManager.getPipelines(RatisReplicationConfig.getInstance(
ReplicationFactor.ONE)).size());
@@ -132,7 +133,7 @@ public void testRestoreSuggestedLeader() throws Exception {
cluster.getStorageContainerManager().getPipelineManager()
.getPipelines();
- Assert.assertEquals(
+ assertEquals(
pipelinesBeforeRestart.size(), pipelinesAfterRestart.size());
for (Pipeline p : pipelinesBeforeRestart) {
@@ -144,7 +145,7 @@ public void testRestoreSuggestedLeader() throws Exception {
}
}
- Assert.assertTrue(equal);
+ assertTrue(equal);
}
}
@@ -163,7 +164,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception {
// make sure pipelines are created
waitForPipelines(pipelineNum);
// No Factor ONE pipeline is auto created.
- Assert.assertEquals(0, pipelineManager.getPipelines(
+ assertEquals(0, pipelineManager.getPipelines(
RatisReplicationConfig.getInstance(
ReplicationFactor.ONE)).size());
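
One detail the conversion preserves as-is: assertTrue(x == y) fails with a bare "expected: <true>", whereas assertEquals reports both values. An alternative sketch for the leader-balance checks (not what the patch does; names hypothetical):

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.Map;

class LeaderBalanceSketch {
  void checkLeaderBalance(Map<String, Integer> leaderCount, int dnNum,
      int leaderNumOfEachDn) {
    // "expected: <3> but was: <2>" beats a bare AssertionFailedError.
    assertEquals(dnNum, leaderCount.size());
    for (Integer count : leaderCount.values()) {
      assertEquals(leaderNumOfEachDn, count.intValue());
    }
  }
}
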
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index b823f15798f..6c66ecf3185 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -46,7 +46,6 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.protocol.RaftGroupId;
import org.junit.jupiter.api.AfterEach;
-import org.junit.Assert;
import org.junit.jupiter.api.BeforeEach;
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.Test;
@@ -62,6 +61,9 @@
import java.util.concurrent.TimeoutException;
import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for Pipeline Closing.
@@ -126,8 +128,8 @@ public void testPipelineCloseWithClosedContainer() throws IOException,
.getContainersInPipeline(ratisContainer.getPipeline().getId());
ContainerID cId = ratisContainer.getContainerInfo().containerID();
- Assert.assertEquals(1, set.size());
- set.forEach(containerID -> Assert.assertEquals(containerID, cId));
+ assertEquals(1, set.size());
+ set.forEach(containerID -> assertEquals(containerID, cId));
// Now close the container and it should not show up while fetching
// containers by pipeline
@@ -138,13 +140,13 @@ public void testPipelineCloseWithClosedContainer() throws IOException,
Set<ContainerID> setClosed = pipelineManager
.getContainersInPipeline(ratisContainer.getPipeline().getId());
- Assert.assertEquals(0, setClosed.size());
+ assertEquals(0, setClosed.size());
pipelineManager.closePipeline(ratisContainer.getPipeline().getId());
pipelineManager.deletePipeline(ratisContainer.getPipeline().getId());
for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) {
// Assert that the pipeline has been removed from Node2PipelineMap as well
- Assert.assertFalse(scm.getScmNodeManager().getPipelines(dn)
+ assertFalse(scm.getScmNodeManager().getPipelines(dn)
.contains(ratisContainer.getPipeline().getId()));
}
}
@@ -154,7 +156,7 @@ public void testPipelineCloseWithOpenContainer()
throws IOException, TimeoutException, InterruptedException {
Set<ContainerID> setOpen = pipelineManager.getContainersInPipeline(
ratisContainer.getPipeline().getId());
- Assert.assertEquals(1, setOpen.size());
+ assertEquals(1, setOpen.size());
pipelineManager
.closePipeline(ratisContainer.getPipeline(), false);
@@ -230,7 +232,7 @@ public void testPipelineCloseWithLogFailure()
try {
pipelineManager.getPipeline(openPipeline.getId());
} catch (PipelineNotFoundException e) {
- Assert.assertTrue("pipeline should exist", false);
+ assertTrue(false, "pipeline should exist");
}
DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0);
@@ -277,8 +279,7 @@ private boolean verifyCloseForPipeline(Pipeline pipeline,
}
}
- Assert.assertTrue("SCM did not receive a Close action for the Pipeline",
- found);
+ assertTrue(found, "SCM did not receive a Close action for the Pipeline");
return found;
}
}
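
Similarly, assertTrue(false, "pipeline should exist") is a literal translation of the JUnit 4 line; JUnit 5 can state the intent directly by asserting that the lookup does not throw. An alternative sketch (the lookup interface is hypothetical):

import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

class AssertDoesNotThrowSketch {
  interface PipelineLookup {
    Object getPipeline(String id) throws Exception;
  }

  static void requirePipelineExists(PipelineLookup manager, String id) {
    assertDoesNotThrow(() -> manager.getPipeline(id), "pipeline should exist");
  }
}
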
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
index 7e88f45025c..22ec99d2a6a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
@@ -30,11 +30,10 @@
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
@@ -81,7 +80,7 @@ public void testAutomaticPipelineCreationOnPipelineDestroy()
init(numOfDatanodes);
// make sure two pipelines are created
waitForPipelines(2);
- Assert.assertEquals(numOfDatanodes, pipelineManager.getPipelines(
+ Assertions.assertEquals(numOfDatanodes, pipelineManager.getPipelines(
RatisReplicationConfig.getInstance(
ReplicationFactor.ONE)).size());
@@ -103,7 +102,7 @@ public void testAutomaticPipelineCreationDisablingFactorONE()
// make sure two pipelines are created
waitForPipelines(2);
// No Factor ONE pipeline is auto created.
- Assert.assertEquals(0, pipelineManager.getPipelines(
+ Assertions.assertEquals(0, pipelineManager.getPipelines(
RatisReplicationConfig.getInstance(
ReplicationFactor.ONE)).size());
@@ -140,17 +139,12 @@ public void testPipelineCreationOnNodeRestart() throws Exception {
100, 10 * 1000);
// try creating another pipeline now
- try {
- pipelineManager.createPipeline(RatisReplicationConfig.getInstance(
- ReplicationFactor.THREE));
- Assert.fail("pipeline creation should fail after shutting down pipeline");
- } catch (IOException ioe) {
- // As now all datanodes are shutdown, they move to stale state, there
- // will be no sufficient datanodes to create the pipeline.
- Assert.assertTrue(ioe instanceof SCMException);
- Assert.assertEquals(SCMException.ResultCodes.FAILED_TO_FIND_HEALTHY_NODES,
- ((SCMException) ioe).getResult());
- }
+ SCMException ioe = Assertions.assertThrows(SCMException.class, () ->
+ pipelineManager.createPipeline(RatisReplicationConfig.getInstance(
+ ReplicationFactor.THREE)),
+ "pipeline creation should fail after shutting down pipeline");
+ Assertions.assertEquals(
+ SCMException.ResultCodes.FAILED_TO_FIND_HEALTHY_NODES, ioe.getResult());
// make sure pipelines is destroyed
waitForPipelines(0);
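
The hunk above shows the other recurring conversion in this patch series: a try/fail/catch block becomes assertThrows, which both fails when nothing is thrown and returns the typed exception, so the instanceof check and the cast disappear. A self-contained sketch (names hypothetical):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

class AssertThrowsSketch {
  static class ServiceException extends Exception {
    private final String resultCode;
    ServiceException(String resultCode) {
      this.resultCode = resultCode;
    }
    String getResult() {
      return resultCode;
    }
  }

  static void createPipeline() throws ServiceException {
    throw new ServiceException("FAILED_TO_FIND_HEALTHY_NODES");
  }

  @Test
  void conversion() {
    ServiceException e = assertThrows(ServiceException.class,
        AssertThrowsSketch::createPipeline,
        "pipeline creation should fail after shutting down datanodes");
    assertEquals("FAILED_TO_FIND_HEALTHY_NODES", e.getResult());
  }
}
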
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index f2fe3fa31a1..8ab74422516 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -77,10 +77,8 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -114,6 +112,10 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This class tests container commands on EC containers.
@@ -382,8 +384,8 @@ public void testOrphanBlock() throws Exception {
.filter(bd -> bd.getBlockID().getLocalID() == localID)
.count();
- Assert.assertEquals(0L, count);
- Assert.assertEquals(0, response.getBlockDataList().size());
+ assertEquals(0L, count);
+ assertEquals(0, response.getBlockDataList().size());
}
}
@@ -410,25 +412,24 @@ public void testListBlock() throws Exception {
.map(expectedChunksFunc::apply).sum();
if (minNumExpectedBlocks == 0) {
final int j = i;
- Throwable t = Assertions.assertThrows(StorageContainerException.class,
+ Throwable t = assertThrows(StorageContainerException.class,
() -> ContainerProtocolCalls
.listBlock(clients.get(j), containerID, null,
minNumExpectedBlocks + 1, containerToken));
- Assertions
- .assertEquals("ContainerID " + containerID + " does not exist",
+ assertEquals("ContainerID " + containerID + " does not exist",
t.getMessage());
continue;
}
ListBlockResponseProto response = ContainerProtocolCalls
.listBlock(clients.get(i), containerID, null, Integer.MAX_VALUE,
containerToken);
- Assertions.assertTrue(
+ assertTrue(
minNumExpectedBlocks <= response.getBlockDataList().stream().filter(
k -> k.getChunksCount() > 0 && k.getChunks(0).getLen() > 0)
.collect(Collectors.toList()).size(),
"blocks count should be same or more than min expected" +
" blocks count on DN " + i);
- Assertions.assertTrue(
+ assertTrue(
minNumExpectedChunks <= response.getBlockDataList().stream()
.mapToInt(BlockData::getChunksCount).sum(),
"chunks count should be same or more than min expected" +
@@ -492,10 +493,10 @@ public void testCreateRecoveryContainer() throws Exception {
ContainerProtos.ReadContainerResponseProto readContainerResponseProto =
ContainerProtocolCalls.readContainer(dnClient,
container.containerID().getProtobuf().getId(), encodedToken);
- Assert.assertEquals(ContainerProtos.ContainerDataProto.State.RECOVERING,
+ assertEquals(ContainerProtos.ContainerDataProto.State.RECOVERING,
readContainerResponseProto.getContainerData().getState());
// Container at SCM should be still in closed state.
- Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED,
+ assertEquals(HddsProtos.LifeCycleState.CLOSED,
scm.getContainerManager().getContainerStateManager()
.getContainer(container.containerID()).getState());
// close container call
@@ -505,7 +506,7 @@ public void testCreateRecoveryContainer() throws Exception {
readContainerResponseProto = ContainerProtocolCalls
.readContainer(dnClient,
container.containerID().getProtobuf().getId(), encodedToken);
- Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
+ assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
readContainerResponseProto.getContainerData().getState());
ContainerProtos.ReadChunkResponseProto readChunkResponseProto =
ContainerProtocolCalls.readChunk(dnClient,
@@ -514,10 +515,10 @@ public void testCreateRecoveryContainer() throws Exception {
ByteBuffer[] readOnlyByteBuffersArray = BufferUtils
.getReadOnlyByteBuffersArray(
readChunkResponseProto.getDataBuffers().getBuffersList());
- Assert.assertEquals(readOnlyByteBuffersArray[0].limit(), data.length);
+ assertEquals(readOnlyByteBuffersArray[0].limit(), data.length);
byte[] readBuff = new byte[readOnlyByteBuffersArray[0].limit()];
readOnlyByteBuffersArray[0].get(readBuff, 0, readBuff.length);
- Assert.assertArrayEquals(data, readBuff);
+ assertArrayEquals(data, readBuff);
} finally {
xceiverClientManager.releaseClient(dnClient, false);
}
@@ -563,7 +564,7 @@ public void testCreateRecoveryContainerAfterDNRestart() throws Exception {
cluster.restartHddsDatanode(targetDN, true);
// Recovering container state after DN restart should be UNHEALTHY.
- Assert.assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
+ assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
cluster.getHddsDatanode(targetDN)
.getDatanodeStateMachine()
.getContainer()
@@ -590,7 +591,7 @@ public void testCreateRecoveryContainerAfterDNRestart() throws Exception {
try {
dnClient.sendCommand(writeChunkRequest);
} catch (StorageContainerException e) {
- Assert.assertEquals(CONTAINER_UNHEALTHY, e.getResult());
+ assertEquals(CONTAINER_UNHEALTHY, e.getResult());
}
} finally {
@@ -635,7 +636,7 @@ static Stream<List<Integer>> recoverableMissingIndexes() {
@Test
public void testECReconstructionCoordinatorWithMissingIndexes135() {
InsufficientLocationsException exception =
- Assert.assertThrows(InsufficientLocationsException.class, () -> {
+ assertThrows(InsufficientLocationsException.class, () -> {
testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3);
});
@@ -643,7 +644,7 @@ public void testECReconstructionCoordinatorWithMissingIndexes135() {
"There are insufficient datanodes to read the EC block";
String actualMessage = exception.getMessage();
- Assert.assertEquals(expectedMessage, actualMessage);
+ assertEquals(expectedMessage, actualMessage);
}
private void testECReconstructionCoordinator(List<Integer> missingIndexes,
@@ -707,7 +708,7 @@ private void testECReconstructionCoordinator(List<Integer> missingIndexes,
}
}
- Assert.assertEquals(missingIndexes.size(), targetNodes.size());
+ assertEquals(missingIndexes.size(), targetNodes.size());
List<BlockData[]>
blockDataArrList = new ArrayList<>();
@@ -766,7 +767,7 @@ private void testECReconstructionCoordinator(List<Integer> missingIndexes,
.listBlock(conID, newTargetPipeline.getFirstNode(),
(ECReplicationConfig) newTargetPipeline
.getReplicationConfig(), cToken);
- Assert.assertEquals(blockDataArrList.get(i).length,
+ assertEquals(blockDataArrList.get(i).length,
reconstructedBlockData.length);
checkBlockData(blockDataArrList.get(i), reconstructedBlockData);
XceiverClientSpi client = xceiverClientManager.acquireClient(
@@ -776,14 +777,14 @@ private void testECReconstructionCoordinator(List<Integer> missingIndexes,
ContainerProtocolCalls.readContainer(
client, conID,
cToken.encodeToUrlString());
- Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
+ assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
readContainerResponse.getContainerData().getState());
} finally {
xceiverClientManager.releaseClient(client, false);
}
i++;
}
- Assertions.assertEquals(metrics.getReconstructionTotal(), 1L);
+ assertEquals(1L, metrics.getReconstructionTotal());
}
}
}
@@ -796,7 +797,7 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket,
try (OzoneOutputStream out = bucket.createKey(keyString, 4096,
new ECReplicationConfig(3, 2, EcCodec.RS, EC_CHUNK_SIZE),
new HashMap<>())) {
- Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(out.getOutputStream() instanceof KeyOutputStream);
for (int i = 0; i < numChunks; i++) {
out.write(inputChunks[i]);
}
@@ -856,7 +857,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure()
MockDatanodeDetails.randomDatanodeDetails();
targetNodeMap.put(3, invalidTargetNode);
- Assert.assertThrows(IOException.class, () -> {
+ assertThrows(IOException.class, () -> {
try (ECReconstructionCoordinator coordinator =
new ECReconstructionCoordinator(config, certClient,
secretKeyClient,
@@ -868,14 +869,14 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure()
});
final DatanodeDetails targetDNToCheckContainerCLeaned = goodTargetNode;
StorageContainerException ex =
- Assert.assertThrows(StorageContainerException.class, () -> {
+ assertThrows(StorageContainerException.class, () -> {
try (ECContainerOperationClient client =
new ECContainerOperationClient(config, certClient)) {
client.listBlock(conID, targetDNToCheckContainerCLeaned,
new ECReplicationConfig(3, 2), cToken);
}
});
- Assert.assertEquals("ContainerID 1 does not exist", ex.getMessage());
+ assertEquals("ContainerID 1 does not exist", ex.getMessage());
}
private void closeContainer(long conID)
@@ -905,7 +906,7 @@ private void checkBlockData(
// let's ignore the empty chunks
continue;
}
- Assert.assertEquals(chunkInfo, newBlockDataChunks.get(j));
+ assertEquals(chunkInfo, newBlockDataChunks.get(j));
}
}
}
@@ -963,10 +964,10 @@ public static void prepareData(int[][] ranges) throws Exception {
.stream()
.map(ContainerInfo::containerID)
.collect(Collectors.toList());
- Assertions.assertEquals(1, containerIDs.size());
+ assertEquals(1, containerIDs.size());
containerID = containerIDs.get(0).getId();
List<Pipeline> pipelines = scm.getPipelineManager().getPipelines(repConfig);
- Assertions.assertEquals(1, pipelines.size());
+ assertEquals(1, pipelines.size());
pipeline = pipelines.get(0);
datanodeDetails = pipeline.getNodes();
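
Besides swapping Assert for statically imported Assertions methods, the file above fixes an inverted assertEquals on the reconstruction metric: JUnit treats the first argument as the expected value, so swapped arguments produce a misleading failure message. A small sketch of the convention, assuming nothing beyond the JUnit 5 API:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class ExpectedFirstSketch {

  @Test
  void expectedValueComesFirst() {
    // Stands in for metrics.getReconstructionTotal() in the patch.
    long reconstructionTotal = 1L;
    // JUnit renders failures as "expected: <x> but was: <y>" from the
    // argument order, so the literal goes first and the measured value
    // second; swapping them (as the old code did) inverts the message.
    assertEquals(1L, reconstructionTotal);
  }
}
```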
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index 928de5990ed..caf9cadb165 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@ -34,6 +34,10 @@
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_REQUIRED;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.STARTING_FINALIZATION;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.util.ArrayList;
@@ -82,7 +86,6 @@
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.AfterAll;
-import org.junit.Assert;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.BeforeAll;
import org.apache.ozone.test.tag.Slow;
@@ -229,12 +232,12 @@ private void testPostUpgradePipelineCreation()
throws IOException, TimeoutException {
Pipeline ratisPipeline1 = scmPipelineManager.createPipeline(RATIS_THREE);
scmPipelineManager.openPipeline(ratisPipeline1.getId());
- Assert.assertEquals(0,
+ assertEquals(0,
scmPipelineManager.getNumberOfContainers(ratisPipeline1.getId()));
PipelineID pid = scmContainerManager.allocateContainer(RATIS_THREE,
"Owner1").getPipelineID();
- Assert.assertEquals(1, scmPipelineManager.getNumberOfContainers(pid));
- Assert.assertEquals(pid, ratisPipeline1.getId());
+ assertEquals(1, scmPipelineManager.getNumberOfContainers(pid));
+ assertEquals(pid, ratisPipeline1.getId());
}
/*
@@ -290,7 +293,7 @@ public void testFinalizationFromInitialVersionToLatestVersion()
// Trigger Finalization on the SCM
StatusAndMessages status = scm.getFinalizationManager().finalizeUpgrade(
"xyz");
- Assert.assertEquals(STARTING_FINALIZATION, status.status());
+ assertEquals(STARTING_FINALIZATION, status.status());
// Wait for the Finalization to complete on the SCM.
TestHddsUpgradeUtils.waitForFinalizationFromClient(
@@ -308,7 +311,7 @@ public void testFinalizationFromInitialVersionToLatestVersion()
.stream()
.filter(postUpgradeOpenPipelines::contains)
.count();
- Assert.assertEquals(0, numPreUpgradeOpenPipelines);
+ assertEquals(0, numPreUpgradeOpenPipelines);
// Verify Post-Upgrade conditions on the SCM.
TestHddsUpgradeUtils.testPostUpgradeConditionsSCM(
@@ -447,7 +450,7 @@ public void run() {
});
} catch (Exception e) {
LOG.info("DataNode Restart Failed!");
- Assert.fail(e.getMessage());
+ fail(e.getMessage());
}
return t;
}
@@ -515,7 +518,7 @@ public void testScmFailuresBeforeScmPreFinalizeUpgrade()
BEFORE_PRE_FINALIZE_UPGRADE,
this::injectSCMFailureDuringSCMUpgrade);
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -534,7 +537,7 @@ public void testScmFailuresAfterScmPreFinalizeUpgrade()
AFTER_PRE_FINALIZE_UPGRADE,
this::injectSCMFailureDuringSCMUpgrade);
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -553,7 +556,7 @@ public void testScmFailuresAfterScmCompleteFinalization()
AFTER_COMPLETE_FINALIZATION,
() -> this.injectSCMFailureDuringSCMUpgrade());
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -572,7 +575,7 @@ public void testScmFailuresAfterScmPostFinalizeUpgrade()
AFTER_POST_FINALIZE_UPGRADE,
() -> this.injectSCMFailureDuringSCMUpgrade());
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -591,7 +594,7 @@ public void testAllDataNodeFailuresBeforeScmPreFinalizeUpgrade()
BEFORE_PRE_FINALIZE_UPGRADE,
this::injectDataNodeFailureDuringSCMUpgrade);
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -610,7 +613,7 @@ public void testAllDataNodeFailuresAfterScmPreFinalizeUpgrade()
AFTER_PRE_FINALIZE_UPGRADE,
this::injectDataNodeFailureDuringSCMUpgrade);
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -629,7 +632,7 @@ public void testAllDataNodeFailuresAfterScmCompleteFinalization()
AFTER_COMPLETE_FINALIZATION,
this::injectDataNodeFailureDuringSCMUpgrade);
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -648,7 +651,7 @@ public void testAllDataNodeFailuresAfterScmPostFinalizeUpgrade()
AFTER_POST_FINALIZE_UPGRADE,
this::injectDataNodeFailureDuringSCMUpgrade);
testFinalizationWithFailureInjectionHelper(null);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
}
/*
@@ -683,7 +686,7 @@ public void testDataNodeFailuresDuringDataNodeUpgrade()
.getUpgradeFinalizer())
.setFinalizationExecutor(dataNodeFinalizationExecutor);
testFinalizationWithFailureInjectionHelper(failureInjectionThread);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
synchronized (cluster) {
shutdown();
init();
@@ -736,7 +739,7 @@ public void testAllPossibleDataNodeFailuresAndSCMFailures()
.setFinalizationExecutor(dataNodeFinalizationExecutor);
testFinalizationWithFailureInjectionHelper(
dataNodefailureInjectionThread);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
synchronized (cluster) {
shutdown();
init();
@@ -777,7 +780,7 @@ public void testDataNodeAndSCMFailuresTogetherDuringSCMUpgrade()
scm.getFinalizationManager().getUpgradeFinalizer()
.setFinalizationExecutor(finalizationExecutor);
testFinalizationWithFailureInjectionHelper(helpingFailureInjectionThread);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
synchronized (cluster) {
shutdown();
init();
@@ -817,7 +820,7 @@ public void testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade()
.getUpgradeFinalizer())
.setFinalizationExecutor(dataNodeFinalizationExecutor);
testFinalizationWithFailureInjectionHelper(helpingFailureInjectionThread);
- Assert.assertTrue(testPassed.get());
+ assertTrue(testPassed.get());
synchronized (cluster) {
shutdown();
init();
@@ -843,7 +846,7 @@ public void testFinalizationWithFailureInjectionHelper(
// Trigger Finalization on the SCM
StatusAndMessages status =
scm.getFinalizationManager().finalizeUpgrade("xyz");
- Assert.assertEquals(STARTING_FINALIZATION, status.status());
+ assertEquals(STARTING_FINALIZATION, status.status());
// Make sure that any outstanding thread created by failure injection
// has completed its job.
@@ -907,7 +910,7 @@ public void testFinalizationWithFailureInjectionHelper(
DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
Set<PipelineID> pipelines =
scm.getScmNodeManager().getPipelines(dsm.getDatanodeDetails());
- Assert.assertTrue(pipelines != null);
+ assertNotNull(pipelines);
}
}
}
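
The final hunk turns assertTrue(pipelines != null) into assertNotNull, which states the intent directly and yields a clearer failure message than a bare "expected: <true>". A minimal sketch, with a hypothetical lookupPipelines() supplying the value under test:

```java
import static org.junit.jupiter.api.Assertions.assertNotNull;

import java.util.Collections;
import java.util.Set;
import org.junit.jupiter.api.Test;

class AssertNotNullSketch {

  // Hypothetical lookup that always returns a non-null (possibly empty) set.
  Set<String> lookupPipelines() {
    return Collections.emptySet();
  }

  @Test
  void preferAssertNotNullOverBooleanCheck() {
    Set<String> pipelines = lookupPipelines();
    // assertTrue(pipelines != null) only reports a failed boolean;
    // assertNotNull names the expectation in both code and output.
    assertNotNull(pipelines, "pipelines should never be null");
  }
}
```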
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
index 10617e8a1b4..6fc964fd0ab 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
@@ -34,7 +34,6 @@
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.LambdaTestUtils;
-import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -50,6 +49,10 @@
import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.ALREADY_FINALIZED;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Helper methods for testing HDDS upgrade finalization in integration tests.
@@ -73,7 +76,7 @@ public static void waitForFinalizationFromClient(
.queryUpgradeFinalizationProgress(clientID, true, true)
.status();
LOG.info("Waiting for upgrade finalization to complete from client." +
- " Current status is {}.", status);
+ " Current status is {}.", status);
return status == FINALIZATION_DONE || status == ALREADY_FINALIZED;
});
}
@@ -84,11 +87,11 @@ public static void waitForFinalizationFromClient(
public static void testPreUpgradeConditionsSCM(
List<StorageContainerManager> scms) {
for (StorageContainerManager scm : scms) {
- Assert.assertEquals(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(),
+ assertEquals(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(),
scm.getLayoutVersionManager().getMetadataLayoutVersion());
for (ContainerInfo ci : scm.getContainerManager()
.getContainers()) {
- Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, ci.getState());
+ assertEquals(HddsProtos.LifeCycleState.OPEN, ci.getState());
}
}
}
@@ -106,15 +109,15 @@ public static void testPostUpgradeConditionsSCM(
}
public static void testPostUpgradeConditionsSCM(StorageContainerManager scm,
- int numContainers, int numDatanodes) {
+ int numContainers, int numDatanodes) {
- Assert.assertTrue(scm.getScmContext().getFinalizationCheckpoint()
+ assertTrue(scm.getScmContext().getFinalizationCheckpoint()
.hasCrossed(FinalizationCheckpoint.FINALIZATION_COMPLETE));
HDDSLayoutVersionManager scmVersionManager = scm.getLayoutVersionManager();
- Assert.assertEquals(scmVersionManager.getSoftwareLayoutVersion(),
+ assertEquals(scmVersionManager.getSoftwareLayoutVersion(),
scmVersionManager.getMetadataLayoutVersion());
- Assert.assertTrue(scmVersionManager.getMetadataLayoutVersion() >= 1);
+ assertTrue(scmVersionManager.getMetadataLayoutVersion() >= 1);
// SCM should not return from finalization until there is at least one
// pipeline to use.
@@ -124,7 +127,7 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm,
() -> scmPipelineManager.getPipelines(RATIS_THREE, OPEN).size() >= 1,
500, 60000);
} catch (TimeoutException | InterruptedException e) {
- Assert.fail("Timeout waiting for Upgrade to complete on SCM.");
+ fail("Timeout waiting for Upgrade to complete on SCM.");
}
// SCM will not return from finalization until there is at least one
@@ -137,26 +140,26 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm,
HddsProtos.LifeCycleState ciState = ci.getState();
LOG.info("testPostUpgradeConditionsSCM: container state is {}",
ciState.name());
- Assert.assertTrue((ciState == HddsProtos.LifeCycleState.CLOSED) ||
+ assertTrue((ciState == HddsProtos.LifeCycleState.CLOSED) ||
(ciState == HddsProtos.LifeCycleState.CLOSING) ||
(ciState == HddsProtos.LifeCycleState.DELETING) ||
(ciState == HddsProtos.LifeCycleState.DELETED) ||
(ciState == HddsProtos.LifeCycleState.QUASI_CLOSED));
countContainers++;
}
- Assert.assertTrue(countContainers >= numContainers);
+ assertTrue(countContainers >= numContainers);
}
/*
* Helper function to test Pre-Upgrade conditions on all the DataNodes.
*/
public static void testPreUpgradeConditionsDataNodes(
- List<HddsDatanodeService> datanodes) {
+ List<HddsDatanodeService> datanodes) {
for (HddsDatanodeService dataNode : datanodes) {
DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
HDDSLayoutVersionManager dnVersionManager =
dsm.getLayoutVersionManager();
- Assert.assertEquals(0, dnVersionManager.getMetadataLayoutVersion());
+ assertEquals(0, dnVersionManager.getMetadataLayoutVersion());
}
int countContainers = 0;
@@ -165,12 +168,12 @@ public static void testPreUpgradeConditionsDataNodes(
// Also verify that all the existing containers are open.
for (Container<?> container :
dsm.getContainer().getController().getContainers()) {
- Assert.assertSame(container.getContainerState(),
+ assertSame(container.getContainerState(),
ContainerProtos.ContainerDataProto.State.OPEN);
countContainers++;
}
}
- Assert.assertTrue(countContainers >= 1);
+ assertTrue(countContainers >= 1);
}
/*
@@ -204,7 +207,7 @@ public static void testPostUpgradeConditionsDataNodes(
return true;
}, 500, 60000);
} catch (TimeoutException | InterruptedException e) {
- Assert.fail("Timeout waiting for Upgrade to complete on Data Nodes.");
+ fail("Timeout waiting for Upgrade to complete on Data Nodes.");
}
int countContainers = 0;
@@ -212,21 +215,20 @@ public static void testPostUpgradeConditionsDataNodes(
DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
HDDSLayoutVersionManager dnVersionManager =
dsm.getLayoutVersionManager();
- Assert.assertEquals(dnVersionManager.getSoftwareLayoutVersion(),
+ assertEquals(dnVersionManager.getSoftwareLayoutVersion(),
dnVersionManager.getMetadataLayoutVersion());
- Assert.assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1);
+ assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1);
// Also verify that all the existing containers are closed.
for (Container<?> container :
- dsm.getContainer().getController().getContainers()) {
- Assert.assertTrue("Container had unexpected state " +
- container.getContainerState(),
- closeStates.stream().anyMatch(
- state -> container.getContainerState().equals(state)));
+ dsm.getContainer().getController().getContainers()) {
+ assertTrue(closeStates.stream().anyMatch(
+ state -> container.getContainerState().equals(state)),
+ "Container had unexpected state " + container.getContainerState());
countContainers++;
}
}
- Assert.assertTrue(countContainers >= numContainers);
+ assertTrue(countContainers >= numContainers);
}
public static void testDataNodesStateOnSCM(List<StorageContainerManager> scms,
@@ -251,14 +253,14 @@ public static void testDataNodesStateOnSCM(StorageContainerManager scm,
try {
HddsProtos.NodeState dnState =
scm.getScmNodeManager().getNodeStatus(dn).getHealth();
- Assert.assertTrue((dnState == state) ||
+ assertTrue((dnState == state) ||
(alternateState != null && dnState == alternateState));
} catch (NodeNotFoundException e) {
e.printStackTrace();
- Assert.fail("Node not found");
+ fail("Node not found");
}
++countNodes;
}
- Assert.assertEquals(expectedDatanodeCount, countNodes);
+ assertEquals(expectedDatanodeCount, countNodes);
}
}
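
Note that the container-state checks above keep assertSame rather than assertEquals: enum constants are singletons, so reference identity is a valid and strict comparison for them. A compact sketch, with a hypothetical State enum standing in for ContainerProtos.ContainerDataProto.State:

```java
import static org.junit.jupiter.api.Assertions.assertSame;

import org.junit.jupiter.api.Test;

class AssertSameEnumSketch {

  // Hypothetical stand-in for the protobuf container state enum.
  enum State { OPEN, CLOSED }

  State currentState() {
    return State.OPEN;
  }

  @Test
  void enumConstantsCompareByIdentity() {
    // Each enum constant is a singleton, so assertSame and assertEquals
    // behave identically here; assertSame makes the identity check explicit.
    assertSame(State.OPEN, currentState());
  }
}
```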
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index 59e95e7c213..d89e6a6c360 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -34,9 +34,9 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.LambdaTestUtils.VoidCallable;
-
import org.apache.ratis.util.function.CheckedConsumer;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
+
/**
* Helper class for Tests.
@@ -92,7 +92,7 @@ public static void closeContainers(
.updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
HddsProtos.LifeCycleEvent.CLOSE);
}
- Assert.assertFalse(scm.getContainerManager()
+ Assertions.assertFalse(scm.getContainerManager()
.getContainer(ContainerID.valueOf(blockID.getContainerID()))
.isOpen());
}, omKeyLocationInfoGroups);
@@ -144,9 +144,9 @@ public static void expectOmException(
throws Exception {
try {
eval.call();
- Assert.fail("OMException is expected");
+ Assertions.fail("OMException is expected");
} catch (OMException ex) {
- Assert.assertEquals(code, ex.getResult());
+ Assertions.assertEquals(code, ex.getResult());
}
}
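
The expectOmException helper retains the call/fail/catch shape even after the Assertions rename. A possible further simplification, not part of this patch, is to build the helper on assertThrows; sketched below with a hypothetical CodedException in place of OMException and its result codes:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

final class ExpectExceptionSketch {

  // Hypothetical stand-in for OMException with a result code.
  static final class CodedException extends Exception {
    private final int code;
    CodedException(int code) {
      this.code = code;
    }
    int getCode() {
      return code;
    }
  }

  interface VoidCallable {
    void call() throws Exception;
  }

  // Equivalent of expectOmException: run the callable, demand the typed
  // exception, and verify its result code in a single step.
  static void expectCodedException(int expectedCode, VoidCallable eval) {
    CodedException ex = assertThrows(CodedException.class, eval::call,
        "CodedException is expected");
    assertEquals(expectedCode, ex.getCode());
  }
}
```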
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java
index cb6bbc9dd0d..da806ac2a3e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java
@@ -56,9 +56,10 @@
import org.apache.hadoop.security.token.Token;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
-
+import org.apache.ratis.util.ExitUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
+
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
@@ -85,13 +86,13 @@
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.slf4j.event.Level.INFO;
-import org.apache.ratis.util.ExitUtils;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThrows;
-import static org.junit.Assert.assertTrue;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
@@ -101,7 +102,6 @@
import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.slf4j.event.Level.INFO;
/**
* Test class to for security enabled Ozone cluster.
@@ -362,10 +362,9 @@ public void testDelegationToken(boolean useIp) throws Exception {
"Auth successful for " + username + " (auth:TOKEN)"));
OzoneTestUtils.expectOmException(VOLUME_NOT_FOUND,
() -> omClient.deleteVolume("vol1"));
- assertTrue(
- "Log file doesn't contain successful auth for user " + username,
- logs.getOutput().contains("Auth successful for "
- + username + " (auth:TOKEN)"));
+ assertTrue(logs.getOutput().contains("Auth successful for "
+ + username + " (auth:TOKEN)"),
+ "Log file doesn't contain successful auth for user " + username);
// Case 4: Test failure of token renewal.
// Call to renewDelegationToken will fail but it will confirm that
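
The assertTrue rewrite in this file illustrates the general signature change between the two APIs: the optional failure message moves from the first parameter in JUnit 4 to the last parameter in JUnit 5. A compact sketch:

```java
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class MessageLastSketch {

  @Test
  void failureMessageIsTheLastArgument() {
    String logOutput = "Auth successful for testuser (auth:TOKEN)";
    // JUnit 4: Assert.assertTrue(message, condition)
    // JUnit 5: Assertions.assertTrue(condition, message)
    assertTrue(logOutput.contains("Auth successful"),
        "Log output should contain the successful auth line");
  }
}
```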
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 717304a5d0a..55e16989a88 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -86,18 +86,22 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.hamcrest.core.Is.is;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.exceptions.StateMachineException;
import org.apache.ratis.server.storage.FileInfo;
import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import static org.hamcrest.core.Is.is;
import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.junit.Assert;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -212,7 +216,7 @@ public void testContainerStateMachineCloseOnMissingPipeline()
getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
@@ -268,7 +272,7 @@ public void testContainerStateMachineFailures() throws Exception {
(KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
cluster);
@@ -287,7 +291,7 @@ public void testContainerStateMachineFailures() throws Exception {
long containerID = omKeyLocationInfo.getContainerID();
// Make sure the container is marked unhealthy
- Assert.assertTrue(
+ assertTrue(
dn.getDatanodeStateMachine()
.getContainer().getContainerSet()
.getContainer(containerID)
@@ -305,7 +309,7 @@ public void testContainerStateMachineFailures() throws Exception {
cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
ozoneContainer = cluster.getHddsDatanodes().get(index)
.getDatanodeStateMachine().getContainer();
- Assert.assertNull(ozoneContainer.getContainerSet().
+ assertNull(ozoneContainer.getContainerSet().
getContainer(containerID));
}
@@ -323,7 +327,7 @@ public void testUnhealthyContainer() throws Exception {
.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
cluster);
@@ -332,7 +336,7 @@ public void testUnhealthyContainer() throws Exception {
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerData();
- Assert.assertTrue(containerData instanceof KeyValueContainerData);
+ assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData =
(KeyValueContainerData) containerData;
// delete the container db file
@@ -348,7 +352,7 @@ public void testUnhealthyContainer() throws Exception {
long containerID = omKeyLocationInfo.getContainerID();
// Make sure the container is marked unhealthy
- Assert.assertTrue(
+ assertTrue(
dn.getDatanodeStateMachine()
.getContainer().getContainerSet().getContainer(containerID)
.getContainerState()
@@ -388,7 +392,7 @@ public void testUnhealthyContainer() throws Exception {
request.setCloseContainer(
ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
request.setDatanodeUuid(dnService.getDatanodeDetails().getUuidString());
- Assert.assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY,
+ assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY,
dispatcher.dispatch(request.build(), null)
.getResult());
}
@@ -408,7 +412,7 @@ public void testApplyTransactionFailure() throws Exception {
getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
cluster);
@@ -417,7 +421,7 @@ public void testApplyTransactionFailure() throws Exception {
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerData();
- Assert.assertTrue(containerData instanceof KeyValueContainerData);
+ assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData =
(KeyValueContainerData) containerData;
key.close();
@@ -431,8 +435,8 @@ public void testApplyTransactionFailure() throws Exception {
final Path parentPath = snapshot.getPath();
// Since the snapshot threshold is set to 1, since there are
// applyTransactions, we should see snapshots
- Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
- Assert.assertNotNull(snapshot);
+ assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
+ assertNotNull(snapshot);
long containerID = omKeyLocationInfo.getContainerID();
// delete the container db file
FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
@@ -452,14 +456,14 @@ public void testApplyTransactionFailure() throws Exception {
try {
xceiverClient.sendCommand(request.build());
- Assert.fail("Expected exception not thrown");
+ fail("Expected exception not thrown");
} catch (IOException e) {
// Exception should be thrown
} finally {
xceiverClientManager.releaseClient(xceiverClient, false);
}
// Make sure the container is marked unhealthy
- Assert.assertTrue(dn.getDatanodeStateMachine()
+ assertTrue(dn.getDatanodeStateMachine()
.getContainer().getContainerSet().getContainer(containerID)
.getContainerState()
== ContainerProtos.ContainerDataProto.State.UNHEALTHY);
@@ -467,16 +471,16 @@ public void testApplyTransactionFailure() throws Exception {
// try to take a new snapshot, ideally it should just fail
stateMachine.takeSnapshot();
} catch (IOException ioe) {
- Assert.assertTrue(ioe instanceof StateMachineException);
+ assertTrue(ioe instanceof StateMachineException);
}
if (snapshot.getPath().toFile().exists()) {
// Make sure the latest snapshot is same as the previous one
try {
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
- Assert.assertTrue(snapshot.getPath().equals(latestSnapshot.getPath()));
+ assertTrue(snapshot.getPath().equals(latestSnapshot.getPath()));
} catch (Throwable e) {
- Assert.assertFalse(snapshot.getPath().toFile().exists());
+ assertFalse(snapshot.getPath().toFile().exists());
}
}
@@ -500,7 +504,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
cluster);
@@ -508,7 +512,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerData();
- Assert.assertTrue(containerData instanceof KeyValueContainerData);
+ assertTrue(containerData instanceof KeyValueContainerData);
key.close();
ContainerStateMachine stateMachine =
(ContainerStateMachine) TestHelper.getStateMachine(dn,
@@ -518,8 +522,8 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
final FileInfo snapshot = getSnapshotFileInfo(storage);
final Path parentPath = snapshot.getPath();
stateMachine.takeSnapshot();
- Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
- Assert.assertNotNull(snapshot);
+ assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
+ assertNotNull(snapshot);
long markIndex1 = StatemachineImplTestUtil.findLatestSnapshot(storage)
.getIndex();
long containerID = omKeyLocationInfo.getContainerID();
@@ -537,19 +541,19 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
try {
xceiverClient.sendCommand(request.build());
} catch (IOException e) {
- Assert.fail("Exception should not be thrown");
+ fail("Exception should not be thrown");
}
- Assert.assertTrue(
+ assertTrue(
TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
.getDatanodeStateMachine()
.getContainer().getContainerSet().getContainer(containerID)
.getContainerState()
== ContainerProtos.ContainerDataProto.State.CLOSED);
- Assert.assertTrue(stateMachine.isStateMachineHealthy());
+ assertTrue(stateMachine.isStateMachineHealthy());
try {
stateMachine.takeSnapshot();
} catch (IOException ioe) {
- Assert.fail("Exception should not be thrown");
+ fail("Exception should not be thrown");
} finally {
xceiverClientManager.releaseClient(xceiverClient, false);
}
@@ -566,7 +570,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
}
}), 1000, 30000);
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
- Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
+ assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
}
// The test injects multiple write chunk requests along with closed container
@@ -590,7 +594,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
cluster);
@@ -599,7 +603,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerData();
- Assert.assertTrue(containerData instanceof KeyValueContainerData);
+ assertTrue(containerData instanceof KeyValueContainerData);
key.close();
ContainerStateMachine stateMachine =
(ContainerStateMachine) TestHelper.getStateMachine(dn,
@@ -611,8 +615,8 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
stateMachine.takeSnapshot();
// Since the snapshot threshold is set to 1, since there are
// applyTransactions, we should see snapshots
- Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
- Assert.assertNotNull(snapshot);
+ assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
+ assertNotNull(snapshot);
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerLocationClient()
.getContainerWithPipeline(containerID).getPipeline();
@@ -653,10 +657,9 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
failCount.incrementAndGet();
}
String message = e.getMessage();
- Assert.assertFalse(message,
- message.contains("hello"));
- Assert.assertTrue(message,
- message.contains(HddsUtils.REDACTED.toStringUtf8()));
+ assertFalse(message.contains("hello"), message);
+ assertTrue(message.contains(HddsUtils.REDACTED.toStringUtf8()),
+ message);
}
};
@@ -681,21 +684,21 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
if (failCount.get() > 0) {
fail("testWriteStateMachineDataIdempotencyWithClosedContainer failed");
}
- Assert.assertTrue(
+ assertTrue(
TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
.getDatanodeStateMachine()
.getContainer().getContainerSet().getContainer(containerID)
.getContainerState()
== ContainerProtos.ContainerDataProto.State.CLOSED);
- Assert.assertTrue(stateMachine.isStateMachineHealthy());
+ assertTrue(stateMachine.isStateMachineHealthy());
try {
stateMachine.takeSnapshot();
} catch (IOException ioe) {
- Assert.fail("Exception should not be thrown");
+ fail("Exception should not be thrown");
}
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
- Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
+ assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
r2.run();
} finally {
@@ -720,7 +723,7 @@ public void testContainerStateMachineSingleFailureRetry()
getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
@@ -733,7 +736,7 @@ public void testContainerStateMachineSingleFailureRetry()
key.close();
} catch (Exception ioe) {
// Should not fail..
- Assert.fail("Exception " + ioe.getMessage());
+ fail("Exception " + ioe.getMessage());
}
validateData("ratis1", 2, "ratisratisratisratis");
}
@@ -755,7 +758,7 @@ public void testContainerStateMachineDualFailureRetry()
getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
@@ -768,7 +771,7 @@ public void testContainerStateMachineDualFailureRetry()
key.close();
} catch (Exception ioe) {
// Should not fail..
- Assert.fail("Exception " + ioe.getMessage());
+ fail("Exception " + ioe.getMessage());
}
validateData("ratis1", 2, "ratisratisratisratis");
}
@@ -794,7 +797,7 @@ private void induceFollowerFailure(OmKeyLocationInfo omKeyLocationInfo,
ContainerData containerData =
container
.getContainerData();
- Assert.assertTrue(containerData instanceof KeyValueContainerData);
+ assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData =
(KeyValueContainerData) containerData;
FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath()));
@@ -817,7 +820,7 @@ private void validateData(String key, int locationCount, String payload) {
try {
keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs);
- Assert.assertEquals(locationCount,
+ assertEquals(locationCount,
keyInfo.getLatestVersionLocations().getLocationListCount());
byte[] buffer = new byte[1024];
try (OzoneInputStream o = objectStore.getVolume(volumeName)
@@ -828,9 +831,9 @@ private void validateData(String key, int locationCount, String payload) {
String response = new String(buffer, 0,
end,
StandardCharsets.UTF_8);
- Assert.assertEquals(payload, response);
+ assertEquals(payload, response);
} catch (IOException e) {
- Assert.fail("Exception not expected " + e.getMessage());
+ fail("Exception not expected " + e.getMessage());
}
}
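
This file also migrates assertThat: JUnit 5 dropped the deprecated org.junit.Assert.assertThat, so matcher-based assertions now come straight from Hamcrest's org.hamcrest.MatcherAssert. A minimal sketch, assuming Hamcrest is on the test classpath:

```java
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;

import org.junit.jupiter.api.Test;

class HamcrestAssertThatSketch {

  @Test
  void assertThatComesFromHamcrestNow() {
    int containerCount = 1;
    // JUnit 5 no longer re-exports assertThat; the matcher-based
    // assertion lives in Hamcrest itself and works unchanged once
    // the import is updated.
    assertThat(containerCount, is(1));
  }
}
```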
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 911650390fa..deeb4214011 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.client.rpc;
import java.io.IOException;
-import static java.nio.charset.StandardCharsets.UTF_8;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
@@ -63,11 +62,14 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -173,16 +175,16 @@ public void testBlockWritesWithDnFailures() throws Exception {
key.write(data);
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
// assert that the exclude list's expire time equals to
// default value 600000 ms in OzoneClientConfig.java
- Assert.assertEquals(((KeyOutputStream) key.getOutputStream())
+ assertEquals(((KeyOutputStream) key.getOutputStream())
.getExcludeList().getExpiryTime(), 600000);
KeyOutputStream groupOutputStream =
(KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertTrue(locationInfoList.size() == 1);
+ assertEquals(1, locationInfoList.size());
long containerId = locationInfoList.get(0).getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
.getContainerManager()
@@ -204,7 +206,7 @@ public void testBlockWritesWithDnFailures() throws Exception {
.build();
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assert.assertEquals(data.length, keyInfo.getDataSize());
+ assertEquals(data.length, keyInfo.getDataSize());
validateData(keyName, data);
// Verify that the block information is updated correctly in the DB on
@@ -277,12 +279,11 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
.getLocalID()));
// The first Block could have 1 or 2 chunkSize of data
int block1NumChunks = blockData1.getChunks().size();
- Assert.assertTrue(block1NumChunks >= 1);
+ assertTrue(block1NumChunks >= 1);
- Assert.assertEquals(chunkSize * block1NumChunks, blockData1.getSize());
- Assert.assertEquals(1, containerData1.getBlockCount());
- Assert.assertEquals(chunkSize * block1NumChunks,
- containerData1.getBytesUsed());
+ assertEquals(chunkSize * block1NumChunks, blockData1.getSize());
+ assertEquals(1, containerData1.getBlockCount());
+ assertEquals(chunkSize * block1NumChunks, containerData1.getBytesUsed());
}
// Verify that the second block has the remaining 0.5*chunkSize of data
@@ -295,17 +296,17 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
containerData2.getBlockKey(locationList.get(1).getBlockID()
.getLocalID()));
// The second Block should have 0.5 chunkSize of data
- Assert.assertEquals(block2ExpectedChunkCount,
+ assertEquals(block2ExpectedChunkCount,
blockData2.getChunks().size());
- Assert.assertEquals(1, containerData2.getBlockCount());
+ assertEquals(1, containerData2.getBlockCount());
int expectedBlockSize;
if (block2ExpectedChunkCount == 1) {
expectedBlockSize = chunkSize / 2;
} else {
expectedBlockSize = chunkSize + chunkSize / 2;
}
- Assert.assertEquals(expectedBlockSize, blockData2.getSize());
- Assert.assertEquals(expectedBlockSize, containerData2.getBytesUsed());
+ assertEquals(expectedBlockSize, blockData2.getSize());
+ assertEquals(expectedBlockSize, containerData2.getBytesUsed());
}
}
@@ -319,7 +320,7 @@ public void testWriteSmallFile() throws Exception {
.getFixedLengthString(keyString, chunkSize / 2);
key.write(data.getBytes(UTF_8));
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
@@ -347,11 +348,10 @@ public void testWriteSmallFile() throws Exception {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
// Make sure a new block is written
- Assert.assertNotEquals(
+ assertNotEquals(
keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
.getBlockID(), blockId);
- Assert.assertEquals(data.getBytes(UTF_8).length,
- keyInfo.getDataSize());
+ assertEquals(data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.getBytes(UTF_8));
}
@@ -367,14 +367,14 @@ public void testContainerExclusionWithClosedContainerException()
.getFixedLengthString(keyString, chunkSize);
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> streamEntryList =
keyOutputStream.getStreamEntries();
// Assert that 1 block will be preallocated
- Assert.assertEquals(1, streamEntryList.size());
+ assertEquals(1, streamEntryList.size());
key.write(data.getBytes(UTF_8));
key.flush();
long containerId = streamEntryList.get(0).getBlockID().getContainerID();
@@ -391,12 +391,10 @@ public void testContainerExclusionWithClosedContainerException()
key.write(data.getBytes(UTF_8));
key.flush();
- Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds()
+ assertTrue(keyOutputStream.getExcludeList().getContainerIds()
.contains(ContainerID.valueOf(containerId)));
- Assert.assertTrue(
- keyOutputStream.getExcludeList().getDatanodes().isEmpty());
- Assert.assertTrue(
- keyOutputStream.getExcludeList().getPipelineIds().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getPipelineIds().isEmpty());
// The close will just write to the buffer
key.close();
@@ -408,11 +406,10 @@ public void testContainerExclusionWithClosedContainerException()
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
// Make sure a new block is written
- Assert.assertNotEquals(
+ assertNotEquals(
keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
.getBlockID(), blockId);
- Assert.assertEquals(2 * data.getBytes(UTF_8).length,
- keyInfo.getDataSize());
+ assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.concat(data).getBytes(UTF_8));
}
@@ -426,14 +423,14 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception {
.getFixedLengthString(keyString, chunkSize);
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> streamEntryList =
keyOutputStream.getStreamEntries();
// Assert that 1 block will be preallocated
- Assert.assertEquals(1, streamEntryList.size());
+ assertEquals(1, streamEntryList.size());
key.write(data.getBytes(UTF_8));
key.flush();
long containerId = streamEntryList.get(0).getBlockID().getContainerID();
@@ -454,12 +451,10 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception {
key.write(data.getBytes(UTF_8));
key.flush();
- Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes()
+ assertTrue(keyOutputStream.getExcludeList().getDatanodes()
.contains(datanodes.get(0)));
- Assert.assertTrue(
- keyOutputStream.getExcludeList().getContainerIds().isEmpty());
- Assert.assertTrue(
- keyOutputStream.getExcludeList().getPipelineIds().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getContainerIds().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getPipelineIds().isEmpty());
// The close will just write to the buffer
key.close();
@@ -471,10 +466,10 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
// Make sure a new block is written
- Assert.assertNotEquals(
+ assertNotEquals(
keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
.getBlockID(), blockId);
- Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
+ assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8));
}
@@ -489,14 +484,14 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception {
.getFixedLengthString(keyString, chunkSize);
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> streamEntryList =
keyOutputStream.getStreamEntries();
// Assert that 1 block will be preallocated
- Assert.assertEquals(1, streamEntryList.size());
+ assertEquals(1, streamEntryList.size());
key.write(data.getBytes(UTF_8));
key.flush();
long containerId = streamEntryList.get(0).getBlockID().getContainerID();
@@ -517,12 +512,10 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception {
key.write(data.getBytes(UTF_8));
key.write(data.getBytes(UTF_8));
key.flush();
- Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
+ assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
.contains(pipeline.getId()));
- Assert.assertTrue(
- keyOutputStream.getExcludeList().getContainerIds().isEmpty());
- Assert.assertTrue(
- keyOutputStream.getExcludeList().getDatanodes().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getContainerIds().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty());
// The close will just write to the buffer
key.close();
@@ -534,10 +527,10 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
// Make sure a new block is written
- Assert.assertNotEquals(
+ assertNotEquals(
keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
.getBlockID(), blockId);
- Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
+ assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8));
}
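
One hunk above also tightens assertTrue(locationInfoList.size() == 1) into assertEquals(1, ...): on failure the equality form reports both values instead of a bare boolean. A small sketch:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.Collections;
import java.util.List;
import org.junit.jupiter.api.Test;

class AssertEqualsOverBooleanSketch {

  @Test
  void equalityAssertionsBeatBooleanOnes() {
    List<String> locations = Collections.singletonList("block-1");
    // assertTrue(locations.size() == 1) fails with only "expected: <true>";
    // assertEquals reports "expected: <1> but was: <N>" instead.
    assertEquals(1, locations.size());
  }
}
```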
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
index c84f6f31419..54153744d7c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
@@ -52,15 +52,16 @@
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.Assert;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* This class is to test all the public facing APIs of Ozone Client with an
@@ -132,7 +133,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
} catch (OzoneChecksumException e) {
fail("Read key should succeed");
}
@@ -141,7 +142,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
} catch (OzoneChecksumException e) {
fail("Read file should succeed");
}
@@ -156,7 +157,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = newBucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
} catch (OzoneChecksumException e) {
fail("Read key should succeed");
}
@@ -165,7 +166,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = newBucket.readFile(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
} catch (OzoneChecksumException e) {
fail("Read file should succeed");
}
@@ -197,9 +198,9 @@ public void testMultiPartUploadWithStream() throws IOException {
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
- Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
- Assert.assertEquals(bucketName, multipartInfo.getBucketName());
- Assert.assertEquals(keyName, multipartInfo.getKeyName());
+ assertEquals(volumeName, multipartInfo.getVolumeName());
+ assertEquals(bucketName, multipartInfo.getBucketName());
+ assertEquals(keyName, multipartInfo.getKeyName());
assertNotNull(multipartInfo.getUploadID());
OzoneDataStreamOutput ozoneStreamOutput = bucket.createMultipartStreamKey(
@@ -211,11 +212,11 @@ public void testMultiPartUploadWithStream() throws IOException {
OzoneMultipartUploadPartListParts parts =
bucket.listParts(keyName, uploadID, 0, 1);
- Assert.assertEquals(parts.getPartInfoList().size(), 1);
+ assertEquals(1, parts.getPartInfoList().size());
OzoneMultipartUploadPartListParts.PartInfo partInfo =
parts.getPartInfoList().get(0);
- Assert.assertEquals(valueLength, partInfo.getSize());
+ assertEquals(valueLength, partInfo.getSize());
}
@@ -269,8 +270,8 @@ public void testUploadWithStreamAndMemoryMappedBuffer() throws IOException {
// verify the key details
final OzoneKeyDetails keyDetails = bucket.getKey(keyName);
- Assertions.assertEquals(keyName, keyDetails.getName());
- Assertions.assertEquals(data.length, keyDetails.getDataSize());
+ assertEquals(keyName, keyDetails.getName());
+ assertEquals(data.length, keyDetails.getDataSize());
// verify the key content
final byte[] buffer = new byte[data.length];
@@ -283,6 +284,6 @@ public void testUploadWithStreamAndMemoryMappedBuffer() throws IOException {
off += n;
}
}
- Assertions.assertArrayEquals(data, buffer);
+ assertArrayEquals(data, buffer);
}
}
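One follow-up this patch leaves for later: the readKey checks above still use assertTrue(Arrays.equals(...)), even though assertArrayEquals is now imported. A hypothetical sketch (not part of this patch) of the more informative form:

    import static org.junit.jupiter.api.Assertions.assertArrayEquals;

    import org.junit.jupiter.api.Test;

    class ByteArrayAssertionSketch {
      @Test
      void reportsTheFirstMismatchOnFailure() {
        byte[] expected = {1, 2, 3};
        byte[] actual = {1, 2, 3};
        // On failure this prints the differing index and both element
        // values; assertTrue(Arrays.equals(a, b)) only reports
        // "expected true but was false".
        assertArrayEquals(expected, actual);
      }
    }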
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index 5a46a0edf4e..9289d4fb6df 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -65,8 +65,12 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -183,43 +187,42 @@ public void testWatchForCommitWithKeyWrite() throws Exception {
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
.getBytes(UTF_8);
key.write(data1);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream();
OutputStream stream = keyOutputStream.getStreamEntries().get(0)
.getOutputStream();
- Assert.assertTrue(stream instanceof BlockOutputStream);
+ assertTrue(stream instanceof BlockOutputStream);
RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
// we have just written more data than the flush size (2 chunks); at this
// point the buffer pool will have 4 buffers of chunk size allocated
- Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
+ assertEquals(4, blockOutputStream.getBufferPool().getSize());
// writtenDataLength as well as flushedDataLength will be updated here
- Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
- Assert.assertEquals(maxFlushSize,
+ assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
+ assertEquals(maxFlushSize,
blockOutputStream.getTotalDataFlushedLength());
// since data equal to maxBufferSize is written, this will be a blocking
// call and hence will wait for at least flushSize worth of data to get
// acked by all servers right here
- Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
+ assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
// watchForCommit will clean up at least one entry from the map, where each
// entry corresponds to flushSize worth of data
- Assert.assertTrue(
+ assertTrue(
blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
// Now do a flush. This will flush the data and update the flush length and
// the map.
key.flush();
// Since the data in the buffer is already flushed, flush here will have
// no impact on the counters and data structures
- Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
- Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
- Assert.assertEquals(dataLength,
- blockOutputStream.getTotalDataFlushedLength());
+ assertEquals(4, blockOutputStream.getBufferPool().getSize());
+ assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
+ assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
// flush will make sure one more entry gets updated in the map
- Assert.assertTrue(
+ assertTrue(
blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
XceiverClientRatis raftClient =
(XceiverClientRatis) blockOutputStream.getXceiverClient();
- Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
+ assertEquals(3, raftClient.getCommitInfoMap().size());
Pipeline pipeline = raftClient.getPipeline();
cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
@@ -234,16 +237,13 @@ public void testWatchForCommitWithKeyWrite() throws Exception {
// and one flush for partial chunk
key.flush();
// Make sure the retryCount is reset after the exception is handled
- Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
+ assertEquals(0, keyOutputStream.getRetryCount());
// now close the stream. It will update the ack length after watchForCommit
key.close();
- Assert
- .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
+ assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
// make sure the bufferPool is empty
- Assert
- .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
- Assert.assertTrue(
- blockOutputStream.getCommitIndex2flushedDataMap().isEmpty());
+ assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
+ assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().isEmpty());
validateData(keyName, data1);
}
@@ -257,9 +257,8 @@ public void testWatchForCommitForRetryfailure() throws Exception {
HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi xceiverClient = clientManager
.acquireClient(container1.getPipeline());
- Assert.assertEquals(1, xceiverClient.getRefcount());
- Assert.assertEquals(container1.getPipeline(),
- xceiverClient.getPipeline());
+ assertEquals(1, xceiverClient.getRefcount());
+ assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
Pipeline pipeline = xceiverClient.getPipeline();
TestHelper.createPipelineOnDatanode(pipeline, cluster);
XceiverClientReply reply = xceiverClient.sendCommandAsync(
@@ -280,19 +279,18 @@ public void testWatchForCommitForRetryfailure() throws Exception {
// The basic idea here is just to test whether it throws an exception.
xceiverClient
.watchForCommit(index + new Random().nextInt(100) + 10);
- Assert.fail("expected exception not thrown");
+ fail("expected exception not thrown");
} catch (Exception e) {
- Assert.assertTrue(e instanceof ExecutionException);
+ assertTrue(e instanceof ExecutionException);
// since the timeout value is quite long, the watch request will either
// fail with NotReplicatedException, RetryFailureException or
// RuntimeException
- Assert.assertFalse(HddsClientUtils
+ assertFalse(HddsClientUtils
.checkForException(e) instanceof TimeoutException);
// client should not attempt to watch with MAJORITY_COMMITTED
// replication level, except when there is a gRPC IO issue
if (!logCapturer.getOutput().contains("Connection refused")) {
- Assert.assertFalse(
- e.getMessage().contains("Watch-MAJORITY_COMMITTED"));
+ assertFalse(e.getMessage().contains("Watch-MAJORITY_COMMITTED"));
}
}
clientManager.releaseClient(xceiverClient, false);
@@ -310,9 +308,8 @@ public void test2WayCommitForTimeoutException() throws Exception {
HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi xceiverClient = clientManager
.acquireClient(container1.getPipeline());
- Assert.assertEquals(1, xceiverClient.getRefcount());
- Assert.assertEquals(container1.getPipeline(),
- xceiverClient.getPipeline());
+ assertEquals(1, xceiverClient.getRefcount());
+ assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
Pipeline pipeline = xceiverClient.getPipeline();
TestHelper.createPipelineOnDatanode(pipeline, cluster);
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
@@ -321,7 +318,7 @@ public void test2WayCommitForTimeoutException() throws Exception {
container1.getContainerInfo().getContainerID(),
xceiverClient.getPipeline()));
reply.getResponse().get();
- Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
+ assertEquals(3, ratisClient.getCommitInfoMap().size());
List<DatanodeDetails> nodesInPipeline = pipeline.getNodes();
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
// shutdown the ratis follower
@@ -338,12 +335,12 @@ public void test2WayCommitForTimeoutException() throws Exception {
xceiverClient.watchForCommit(reply.getLogIndex());
// commitInfo Map will be reduced to 2 here
- Assert.assertEquals(2, ratisClient.getCommitInfoMap().size());
+ assertEquals(2, ratisClient.getCommitInfoMap().size());
clientManager.releaseClient(xceiverClient, false);
String output = logCapturer.getOutput();
- Assert.assertTrue(output.contains("3 way commit failed"));
- Assert.assertTrue(output.contains("TimeoutException"));
- Assert.assertTrue(output.contains("Committed by majority"));
+ assertTrue(output.contains("3 way commit failed"));
+ assertTrue(output.contains("TimeoutException"));
+ assertTrue(output.contains("Committed by majority"));
}
logCapturer.stopCapturing();
}
@@ -356,9 +353,8 @@ public void testWatchForCommitForGroupMismatchException() throws Exception {
HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi xceiverClient = clientManager
.acquireClient(container1.getPipeline());
- Assert.assertEquals(1, xceiverClient.getRefcount());
- Assert.assertEquals(container1.getPipeline(),
- xceiverClient.getPipeline());
+ assertEquals(1, xceiverClient.getRefcount());
+ assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
Pipeline pipeline = xceiverClient.getPipeline();
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
long containerId = container1.getContainerInfo().getContainerID();
@@ -366,7 +362,7 @@ public void testWatchForCommitForGroupMismatchException() throws Exception {
ContainerTestHelper.getCreateContainerRequest(containerId,
xceiverClient.getPipeline()));
reply.getResponse().get();
- Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
+ assertEquals(3, ratisClient.getCommitInfoMap().size());
List<Pipeline> pipelineList = new ArrayList<>();
pipelineList.add(pipeline);
TestHelper.waitForPipelineClose(pipelineList, cluster);
@@ -377,9 +373,9 @@ public void testWatchForCommitForGroupMismatchException() throws Exception {
xceiverClient
.watchForCommit(reply.getLogIndex() +
new Random().nextInt(100) + 10);
- Assert.fail("Expected exception not thrown");
+ fail("Expected exception not thrown");
} catch (Exception e) {
- Assert.assertTrue(HddsClientUtils
+ assertTrue(HddsClientUtils
.checkForException(e) instanceof GroupMismatchException);
}
clientManager.releaseClient(xceiverClient, false);
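The try { ... fail(...); } catch (Exception e) { assertTrue(e instanceof ...) } blocks above are kept as-is by this patch; JUnit 5 also offers assertThrows, which fails the test when nothing is thrown and hands back the typed exception. A hypothetical sketch under that assumption, not part of this patch:

    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.util.concurrent.ExecutionException;
    import org.junit.jupiter.api.Test;

    class AssertThrowsSketch {
      @Test
      void watchFailureSurfacesAsExecutionException() {
        ExecutionException e = assertThrows(ExecutionException.class,
            () -> {
              throw new ExecutionException(
                  new RuntimeException("watch failed"));
            });
        // e.getCause() can then be inspected, as the catch blocks do.
      }
    }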
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index 2a7423b15d3..1a1e9bb4c56 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -62,11 +62,13 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.server.RaftServer;
import org.apache.ratis.statemachine.StateMachine;
-import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.stream.Collectors.toList;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Helpers for container tests.
@@ -186,7 +188,7 @@ public static void validateData(String keyName, byte[] data,
sha1.update(data);
MessageDigest sha2 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
sha2.update(readData);
- Assert.assertTrue(Arrays.equals(sha1.digest(), sha2.digest()));
+ assertTrue(Arrays.equals(sha1.digest(), sha2.digest()));
}
}
@@ -203,7 +205,7 @@ public static void waitForContainerClose(OzoneOutputStream outputStream,
containerIdList.add(id);
}
}
- Assert.assertTrue(!containerIdList.isEmpty());
+ assertFalse(containerIdList.isEmpty());
waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
}
@@ -221,7 +223,7 @@ public static void waitForContainerClose(OzoneDataStreamOutput outputStream,
containerIdList.add(id);
}
}
- Assert.assertTrue(!containerIdList.isEmpty());
+ assertFalse(containerIdList.isEmpty());
waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
}
@@ -239,7 +241,7 @@ public static void waitForPipelineClose(OzoneOutputStream outputStream,
containerIdList.add(id);
}
}
- Assert.assertTrue(!containerIdList.isEmpty());
+ assertFalse(containerIdList.isEmpty());
waitForPipelineClose(cluster, waitForContainerCreation,
containerIdList.toArray(new Long[0]));
}
@@ -268,10 +270,10 @@ public static void waitForPipelineClose(MiniOzoneCluster cluster,
GenericTestUtils
.waitFor(() -> isContainerPresent(cluster, containerID, details),
500, 100 * 1000);
- Assert.assertTrue(isContainerPresent(cluster, containerID, details));
+ assertTrue(isContainerPresent(cluster, containerID, details));
// make sure the container gets created first
- Assert.assertFalse(isContainerClosed(cluster, containerID, details));
+ assertFalse(isContainerClosed(cluster, containerID, details));
}
}
}
@@ -294,7 +296,7 @@ public static void waitForPipelineClose(List pipelineList,
XceiverServerSpi server =
cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn))
.getDatanodeStateMachine().getContainer().getWriteChannel();
- Assert.assertTrue(server instanceof XceiverServerRatis);
+ assertTrue(server instanceof XceiverServerRatis);
GenericTestUtils.waitFor(() -> !server.isExist(pipelineId),
100, 30_000);
}
@@ -311,7 +313,7 @@ public static void createPipelineOnDatanode(Pipeline pipeline,
cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn))
.getDatanodeStateMachine().getContainer()
.getWriteChannel();
- Assert.assertTrue(server instanceof XceiverServerRatis);
+ assertTrue(server instanceof XceiverServerRatis);
try {
server.addGroup(pipeline.getId().getProtobuf(), Collections.
unmodifiableList(pipeline.getNodes()));
@@ -343,10 +345,10 @@ public static void waitForContainerClose(MiniOzoneCluster cluster,
GenericTestUtils
.waitFor(() -> isContainerPresent(cluster, containerID, details),
500, 100 * 1000);
- Assert.assertTrue(isContainerPresent(cluster, containerID, details));
+ assertTrue(isContainerPresent(cluster, containerID, details));
// make sure the container gets created first
- Assert.assertFalse(isContainerClosed(cluster, containerID, details));
+ assertFalse(isContainerClosed(cluster, containerID, details));
// send the order to close the container
cluster.getStorageContainerManager().getEventQueue()
.fireEvent(SCMEvents.CLOSE_CONTAINER,
@@ -366,7 +368,7 @@ public static void waitForContainerClose(MiniOzoneCluster cluster,
15 * 1000);
//double check if it's really closed
// (waitFor also throws an exception)
- Assert.assertTrue(
+ assertTrue(
isContainerClosed(cluster, containerID, datanodeDetails));
}
index++;
@@ -410,7 +412,7 @@ public static Set getDatanodeServices(
services.add(service);
}
}
- Assert.assertEquals(pipelineNodes.size(), services.size());
+ assertEquals(pipelineNodes.size(), services.size());
return services;
}
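Note the small semantic cleanup TestHelper picks up along the way: assertTrue(!x.isEmpty()) becomes assertFalse(x.isEmpty()). A hypothetical sketch of why the latter reads and fails better (illustrative names, not from the patch):

    import static org.junit.jupiter.api.Assertions.assertFalse;

    import java.util.List;
    import org.junit.jupiter.api.Test;

    class EmptinessSketch {
      @Test
      void preferAssertFalseOverNegation() {
        List<Long> containerIdList = List.of(1L, 2L);
        // States the intent directly instead of hiding a negation inside
        // the asserted expression.
        assertFalse(containerIdList.isEmpty());
      }
    }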
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index a0841980723..b2c0f47997e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -70,7 +70,6 @@
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
@@ -631,7 +630,7 @@ public void testContainerDeleteWithInvalidKeyCount()
= scm.getContainerManager().getContainerReplicas(containerId);
// Ensure that isEmpty is false for all replicas in SCM
- Assert.assertTrue(scm.getContainerManager().getContainerReplicas(
+ Assertions.assertTrue(scm.getContainerManager().getContainerReplicas(
containerId).stream().
allMatch(replica -> !replica.isEmpty()));
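The allMatch(replica -> !replica.isEmpty()) predicate above works, though Stream.noneMatch expresses the same condition without the inline negation. A hypothetical sketch, not part of this patch:

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.util.List;
    import org.junit.jupiter.api.Test;

    class NoneMatchSketch {
      @Test
      void noneMatchAvoidsTheNegation() {
        List<String> replicas = List.of("replica-1", "replica-2");
        // Equivalent to replicas.stream().allMatch(r -> !r.isEmpty()).
        assertTrue(replicas.stream().noneMatch(String::isEmpty));
      }
    }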
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 4948fd23a0a..c62f943ee87 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -45,7 +45,6 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.AfterAll;
-import org.junit.Assert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -59,6 +58,9 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test container closing.
@@ -131,12 +133,11 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
- Assert.assertEquals(datanodes.size(), 1);
+ assertEquals(1, datanodes.size());
DatanodeDetails datanodeDetails = datanodes.get(0);
HddsDatanodeService datanodeService = null;
- Assert
- .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
+ assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
datanodeService = datanodeServiceItr;
@@ -158,7 +159,7 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
.waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
500, 5 * 1000);
// Make sure the closeContainerCommandHandler is Invoked
- Assert.assertTrue(
+ assertTrue(
closeContainerHandler.getInvocationCount() > lastInvocationCount);
}
@@ -190,11 +191,10 @@ public void testCloseContainerViaStandAlone()
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
- Assert.assertEquals(datanodes.size(), 1);
+ assertEquals(1, datanodes.size());
DatanodeDetails datanodeDetails = datanodes.get(0);
- Assert
- .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
+ assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
// Send the order to close the container, giving a random pipeline id so that
// the container will not be closed via RATIS
@@ -211,13 +211,13 @@ public void testCloseContainerViaStandAlone()
GenericTestUtils
.waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
500, 5 * 1000);
- Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
+ assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
cluster.getStorageContainerManager().getPipelineManager()
.closePipeline(pipeline, false);
Thread.sleep(5000);
// Pipeline close should not affect a container in CLOSED state
- Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
+ assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
}
@Test
@@ -247,11 +247,11 @@ public void testCloseContainerViaRatis() throws IOException,
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
- Assert.assertEquals(3, datanodes.size());
+ assertEquals(3, datanodes.size());
List metadataStores = new ArrayList<>(datanodes.size());
for (DatanodeDetails details : datanodes) {
- Assert.assertFalse(isContainerClosed(cluster, containerID, details));
+ assertFalse(isContainerClosed(cluster, containerID, details));
//send the order to close the container
SCMCommand<?> command = new CloseContainerCommand(
containerID, pipeline.getId());
@@ -270,8 +270,7 @@ public void testCloseContainerViaRatis() throws IOException,
}
// There should be as many RocksDB instances as datanodes in the pipeline.
- Assert.assertEquals(datanodes.size(),
- metadataStores.stream().distinct().count());
+ assertEquals(datanodes.size(), metadataStores.stream().distinct().count());
// Make sure that it is CLOSED
for (DatanodeDetails datanodeDetails : datanodes) {
@@ -279,8 +278,7 @@ public void testCloseContainerViaRatis() throws IOException,
() -> isContainerClosed(cluster, containerID, datanodeDetails), 500,
15 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assert.assertTrue(isContainerClosed(cluster,
- containerID, datanodeDetails));
+ assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
}
}
@@ -313,11 +311,10 @@ public void testQuasiCloseTransitionViaRatis()
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
- Assert.assertEquals(datanodes.size(), 1);
+ assertEquals(1, datanodes.size());
DatanodeDetails datanodeDetails = datanodes.get(0);
- Assert
- .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
+ assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
// close the pipeline
cluster.getStorageContainerManager()
@@ -328,7 +325,7 @@ public void testQuasiCloseTransitionViaRatis()
GenericTestUtils.waitFor(
() -> isContainerQuasiClosed(cluster, containerID, datanodeDetails),
500, 5 * 1000);
- Assert.assertTrue(
+ assertTrue(
isContainerQuasiClosed(cluster, containerID, datanodeDetails));
// Send close container command from SCM to datanode with forced flag as
@@ -342,8 +339,7 @@ public void testQuasiCloseTransitionViaRatis()
GenericTestUtils
.waitFor(() -> isContainerClosed(
cluster, containerID, datanodeDetails), 500, 5 * 1000);
- Assert.assertTrue(
- isContainerClosed(cluster, containerID, datanodeDetails));
+ assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
}
private Boolean isContainerClosed(MiniOzoneCluster ozoneCluster,
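Several hunks in this file also flip assertEquals(datanodes.size(), 1) to assertEquals(1, datanodes.size()). The order matters for diagnostics, since JUnit prints the first argument as "expected". A hypothetical sketch, not part of this patch:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import java.util.List;
    import org.junit.jupiter.api.Test;

    class ArgumentOrderSketch {
      @Test
      void expectedValueComesFirst() {
        List<String> datanodes = List.of("dn0");
        // With the literal first, a failure reads
        // "expected: <1> but was: <3>" rather than the reverse.
        assertEquals(1, datanodes.size());
      }
    }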
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 4eb57003df0..c055aaf1060 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -30,13 +30,12 @@
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
-import org.junit.Assert;
-import org.junit.Rule;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.migrationsupport.rules.EnableRuleMigrationSupport;
-import org.junit.rules.TemporaryFolder;
+import java.io.File;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
@@ -47,6 +46,11 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests ozone containers.
@@ -55,11 +59,9 @@
@Timeout(300)
public class TestOzoneContainer {
- @Rule
- public TemporaryFolder tempFolder = new TemporaryFolder();
-
@Test
- public void testCreateOzoneContainer() throws Exception {
+ public void testCreateOzoneContainer(
+ @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
long containerID = ContainerTestHelper.getTestContainerID();
OzoneConfiguration conf = newOzoneConfiguration();
OzoneContainer container = null;
@@ -67,8 +69,8 @@ public void testCreateOzoneContainer() throws Exception {
// We don't start the Ozone Container via the datanode; we will do it
// independently in our test path.
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
- conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
- conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
+ conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
+ conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
@@ -91,14 +93,15 @@ public void testCreateOzoneContainer() throws Exception {
}
@Test
- public void testOzoneContainerStart() throws Exception {
+ public void testOzoneContainerStart(
+ @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
OzoneConfiguration conf = newOzoneConfiguration();
OzoneContainer container = null;
try {
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
- conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
- conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
+ conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
+ conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
@@ -112,14 +115,14 @@ public void testOzoneContainerStart() throws Exception {
try {
container.start(clusterId);
} catch (Exception e) {
- Assert.fail();
+ fail();
}
container.stop();
try {
container.stop();
} catch (Exception e) {
- Assert.fail();
+ fail();
}
} finally {
@@ -180,8 +183,8 @@ public static void runTestOzoneContainerViaDataNode(
pipeline, writeChunkRequest.getWriteChunk());
response = client.sendCommand(request);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
// Put Block
putBlockRequest = ContainerTestHelper.getPutBlockRequest(
@@ -189,8 +192,8 @@ public static void runTestOzoneContainerViaDataNode(
response = client.sendCommand(putBlockRequest);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
// Get Block
request = ContainerTestHelper.
@@ -210,8 +213,8 @@ public static void runTestOzoneContainerViaDataNode(
updateRequest1 = ContainerTestHelper.getUpdateContainerRequest(
testContainerID, containerUpdate);
updateResponse1 = client.sendCommand(updateRequest1);
- Assert.assertNotNull(updateResponse1);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertNotNull(updateResponse1);
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
// Update a non-existing container
@@ -220,7 +223,7 @@ public static void runTestOzoneContainerViaDataNode(
updateRequest2 = ContainerTestHelper.getUpdateContainerRequest(
nonExistingContinerID, containerUpdate);
updateResponse2 = client.sendCommand(updateRequest2);
- Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
+ assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
updateResponse2.getResult());
} finally {
if (client != null) {
@@ -230,13 +233,14 @@ public static void runTestOzoneContainerViaDataNode(
}
@Test
- public void testBothGetandPutSmallFile() throws Exception {
+ public void testBothGetandPutSmallFile(
+ @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
MiniOzoneCluster cluster = null;
XceiverClientGrpc client = null;
try {
OzoneConfiguration conf = newOzoneConfiguration();
- conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
- conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
+ conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
+ conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.build();
@@ -267,22 +271,22 @@ static void runTestBothGetandPutSmallFile(
.toByteArray();
ContainerProtos.ContainerCommandResponseProto response
= client.sendCommand(smallFileRequest);
- Assert.assertNotNull(response);
+ assertNotNull(response);
final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest
= ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(),
smallFileRequest.getPutSmallFile().getBlock());
response = client.sendCommand(getSmallFileRequest);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
ContainerProtos.ReadChunkResponseProto chunkResponse =
response.getGetSmallFile().getData();
if (chunkResponse.hasDataBuffers()) {
- Assert.assertArrayEquals(requestBytes,
+ assertArrayEquals(requestBytes,
chunkResponse.getDataBuffers().toByteArray());
} else {
- Assert.assertArrayEquals(requestBytes,
+ assertArrayEquals(requestBytes,
chunkResponse.getData().toByteArray());
}
} finally {
@@ -295,7 +299,8 @@ static void runTestBothGetandPutSmallFile(
@Test
- public void testCloseContainer() throws Exception {
+ public void testCloseContainer(
+ @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
MiniOzoneCluster cluster = null;
XceiverClientGrpc client = null;
ContainerProtos.ContainerCommandResponseProto response;
@@ -304,8 +309,8 @@ public void testCloseContainer() throws Exception {
try {
OzoneConfiguration conf = newOzoneConfiguration();
- conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
- conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
+ conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
+ conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.build();
@@ -324,16 +329,16 @@ public void testCloseContainer() throws Exception {
client.getPipeline(), writeChunkRequest.getWriteChunk());
// Put block before closing.
response = client.sendCommand(putBlockRequest);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
// Close the container.
request = ContainerTestHelper.getCloseContainer(
client.getPipeline(), containerID);
response = client.sendCommand(request);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
// Assert that none of the write operations are working after close.
@@ -341,21 +346,21 @@ public void testCloseContainer() throws Exception {
// Write chunks should fail now.
response = client.sendCommand(writeChunkRequest);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
response.getResult());
// Read chunk must work on a closed container.
request = ContainerTestHelper.getReadChunkRequest(client.getPipeline(),
writeChunkRequest.getWriteChunk());
response = client.sendCommand(request);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
// Put block will fail on a closed container.
response = client.sendCommand(putBlockRequest);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
response.getResult());
// Get block must work on the closed container.
@@ -376,7 +381,8 @@ public void testCloseContainer() throws Exception {
}
@Test
- public void testDeleteContainer() throws Exception {
+ public void testDeleteContainer(
+ @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
MiniOzoneCluster cluster = null;
XceiverClientGrpc client = null;
ContainerProtos.ContainerCommandResponseProto response;
@@ -384,8 +390,8 @@ public void testDeleteContainer() throws Exception {
writeChunkRequest, putBlockRequest;
try {
OzoneConfiguration conf = newOzoneConfiguration();
- conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
- conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
+ conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
+ conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.build();
@@ -403,8 +409,8 @@ public void testDeleteContainer() throws Exception {
client.getPipeline(), writeChunkRequest.getWriteChunk());
// Put key before deleting.
response = client.sendCommand(putBlockRequest);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
// Container cannot be deleted because force flag is set to false and
@@ -413,8 +419,8 @@ public void testDeleteContainer() throws Exception {
client.getPipeline(), containerID, false);
response = client.sendCommand(request);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER,
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER,
response.getResult());
// Container can be deleted by setting the force flag, even without closing
@@ -422,8 +428,8 @@ public void testDeleteContainer() throws Exception {
client.getPipeline(), containerID, true);
response = client.sendCommand(request);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
} finally {
@@ -466,7 +472,7 @@ static void runAsyncTests(
combinedFuture.get();
// Assert that all futures are indeed done.
for (CompletableFuture future : computeResults) {
- Assert.assertTrue(future.isDone());
+ assertTrue(future.isDone());
}
} finally {
if (client != null) {
@@ -476,13 +482,14 @@ static void runAsyncTests(
}
@Test
- public void testXcieverClientAsync() throws Exception {
+ public void testXcieverClientAsync(
+ @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
MiniOzoneCluster cluster = null;
XceiverClientGrpc client = null;
try {
OzoneConfiguration conf = newOzoneConfiguration();
- conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath());
- conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath());
+ conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
+ conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.build();
@@ -513,7 +520,7 @@ public static void createContainerForTesting(XceiverClientSpi client,
containerID, client.getPipeline());
ContainerProtos.ContainerCommandResponseProto response =
client.sendCommand(request);
- Assert.assertNotNull(response);
+ assertNotNull(response);
}
public static ContainerProtos.ContainerCommandRequestProto
@@ -526,8 +533,8 @@ public static void createContainerForTesting(XceiverClientSpi client,
blockID, dataLen);
ContainerProtos.ContainerCommandResponseProto response =
client.sendCommand(writeChunkRequest);
- Assert.assertNotNull(response);
- Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertNotNull(response);
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
return writeChunkRequest;
}
}
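The TemporaryFolder rule removed above is replaced by parameter injection. Assuming JUnit Jupiter 5.8+, where each @TempDir declaration resolves to its own directory, a hypothetical sketch (not part of this patch):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.File;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class TempDirSketch {
      @Test
      void eachParameterGetsAFreshDirectory(
          @TempDir File metaDir, @TempDir File dataDir) {
        // Jupiter creates these directories before the test and deletes
        // them afterwards, so no @Rule field or newFolder() calls are
        // needed.
        assertTrue(metaDir.isDirectory());
        assertTrue(dataDir.isDirectory());
      }
    }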
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 7e5db1f8e57..0451ba5c98e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -62,23 +62,19 @@
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
import org.apache.ozone.test.GenericTestUtils;
-
import com.google.common.collect.Maps;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
-
import org.apache.ratis.rpc.RpcType;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
import org.apache.ratis.util.function.CheckedBiConsumer;
import org.apache.ratis.util.function.CheckedBiFunction;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
+import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
+import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
/**
* Test Containers.
@@ -174,7 +170,7 @@ static void runTestClientServer(
ContainerTestHelper
.getCreateContainerRequest(
ContainerTestHelper.getTestContainerID(), pipeline);
- Assert.assertNotNull(request.getTraceID());
+ Assertions.assertNotNull(request.getTraceID());
client.sendCommand(request);
} finally {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
index 20c17279389..cd42a153925 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
@@ -25,7 +25,6 @@
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -96,9 +95,9 @@ void testDefault() {
"--num-of-buckets", "5",
"--num-of-keys", "10");
- Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
+ assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
+ assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
randomKeyGenerator.printStats(System.out);
}
@@ -114,9 +113,9 @@ void testECKey() {
"--type", "EC"
);
- Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
+ assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
+ assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
}
@Test
@@ -133,9 +132,9 @@ void testMultiThread() {
"--type", "RATIS"
);
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
+ assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
+ assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
}
@Test
@@ -152,9 +151,9 @@ void testRatisKey() {
"--type", "RATIS"
);
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
+ assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
+ assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
}
@Test
@@ -172,10 +171,10 @@ void testKeyLargerThan2GB() {
"--validate-writes"
);
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
- Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount());
+ assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
+ assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
+ assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount());
}
@Test
@@ -193,10 +192,10 @@ void testZeroSizeKey() {
"--validate-writes"
);
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
- Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount());
+ assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
+ assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
+ assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount());
}
@Test
@@ -212,8 +211,8 @@ void testThreadPoolSize() {
"--type", "RATIS"
);
- Assert.assertEquals(10, randomKeyGenerator.getThreadPoolSize());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(10, randomKeyGenerator.getThreadPoolSize());
+ assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
}
@Test
@@ -230,10 +229,10 @@ void cleanObjectsTest() {
"--clean-objects"
);
- Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
- Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCleaned());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCleaned());
+ assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
+ assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
+ assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
+ assertEquals(2, randomKeyGenerator.getNumberOfVolumesCleaned());
+ assertEquals(10, randomKeyGenerator.getNumberOfBucketsCleaned());
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
index f2d6a0d80d2..d438ad09fc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
@@ -49,7 +49,6 @@
import org.apache.ozone.test.tag.Flaky;
import org.apache.ratis.grpc.server.GrpcLogAppender;
import org.apache.ratis.server.leader.FollowerInfo;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -58,6 +57,11 @@
import static org.apache.hadoop.ozone.OzoneConsts.SCM_DUMMY_SERVICE_ID;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT;
import static org.apache.hadoop.ozone.om.TestOzoneManagerHA.createKey;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for OM bootstrap process.
@@ -123,14 +127,14 @@ private void assertNewOMExistsInPeerList(String nodeId) throws Exception {
// Check that the new peer exists in all OMs' peer lists and also in their
// Ratis servers' peer lists
for (OzoneManager om : cluster.getOzoneManagersList()) {
- Assert.assertTrue("New OM node " + nodeId + " not present in Peer list " +
- "of OM " + om.getOMNodeId(), om.doesPeerExist(nodeId));
- Assert.assertTrue("New OM node " + nodeId + " not present in Peer list " +
- "of OM " + om.getOMNodeId() + " RatisServer",
- om.getOmRatisServer().doesPeerExist(nodeId));
- Assert.assertTrue("New OM node " + nodeId + " not present in " +
- "OM " + om.getOMNodeId() + "RatisServer's RaftConf",
- om.getOmRatisServer().getCurrentPeersFromRaftConf().contains(nodeId));
+ assertTrue(om.doesPeerExist(nodeId), "New OM node " + nodeId
+ + " not present in Peer list of OM " + om.getOMNodeId());
+ assertTrue(om.getOmRatisServer().doesPeerExist(nodeId), "New OM node " + nodeId
+ + " not present in Peer list of OM " + om.getOMNodeId() + " RatisServer");
+ assertTrue(
+ om.getOmRatisServer().getCurrentPeersFromRaftConf().contains(nodeId),
+ "New OM node " + nodeId + " not present in " + "OM "
+ + om.getOMNodeId() + "RatisServer's RaftConf");
}
OzoneManager newOM = cluster.getOzoneManager(nodeId);
@@ -140,8 +144,7 @@ private void assertNewOMExistsInPeerList(String nodeId) throws Exception {
// Check Ratis Dir for log files
File[] logFiles = getRatisLogFiles(newOM);
- Assert.assertTrue("There are no ratis logs in new OM ",
- logFiles.length > 0);
+ assertTrue(logFiles.length > 0, "There are no ratis logs in new OM");
}
private File[] getRatisLogFiles(OzoneManager om) {
@@ -194,8 +197,9 @@ public void testBootstrap() throws Exception {
GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, 30000);
OzoneManager omLeader = cluster.getOMLeader();
- Assert.assertTrue("New Bootstrapped OM not elected Leader even though " +
- "other OMs are down", newOMNodeIds.contains(omLeader.getOMNodeId()));
+ assertTrue(newOMNodeIds.contains(omLeader.getOMNodeId()),
+ "New Bootstrapped OM not elected Leader even though" +
+ " other OMs are down");
// Perform some read and write operations with new OM leader
IOUtils.closeQuietly(client);
@@ -206,7 +210,7 @@ public void testBootstrap() throws Exception {
OzoneBucket bucket = volume.getBucket(BUCKET_NAME);
String key = createKey(bucket);
- Assert.assertNotNull(bucket.getKey(key));
+ assertNotNull(bucket.getKey(key));
}
/**
@@ -236,16 +240,16 @@ public void testBootstrapWithoutConfigUpdate() throws Exception {
String newNodeId = "omNode-bootstrap-1";
try {
cluster.bootstrapOzoneManager(newNodeId, false, false);
- Assert.fail("Bootstrap should have failed as configs are not updated on" +
+ fail("Bootstrap should have failed as configs are not updated on" +
" all OMs.");
} catch (Exception e) {
- Assert.assertEquals(OmUtils.getOMAddressListPrintString(
+ assertEquals(OmUtils.getOMAddressListPrintString(
Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or" +
" have incorrect information of the bootstrapping OM. Update their " +
"ozone-site.xml before proceeding.", e.getMessage());
- Assert.assertTrue(omLog.getOutput().contains("Remote OM config check " +
+ assertTrue(omLog.getOutput().contains("Remote OM config check " +
"failed on OM " + existingOMNodeId));
- Assert.assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId +
+ assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId +
" - System Exit"));
}
@@ -264,14 +268,14 @@ public void testBootstrapWithoutConfigUpdate() throws Exception {
try {
cluster.bootstrapOzoneManager(newNodeId, false, true);
} catch (IOException e) {
- Assert.assertTrue(omLog.getOutput().contains("Couldn't add OM " +
+ assertTrue(omLog.getOutput().contains("Couldn't add OM " +
newNodeId + " to peer list."));
- Assert.assertTrue(miniOzoneClusterLog.getOutput().contains(
+ assertTrue(miniOzoneClusterLog.getOutput().contains(
existingOMNodeId + " - System Exit: There is no OM configuration " +
"for node ID " + newNodeId + " in ozone-site.xml."));
// Verify that the existing OM has stopped.
- Assert.assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning());
+ assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning());
}
}
@@ -310,18 +314,18 @@ public void testForceBootstrap() throws Exception {
String newNodeId = "omNode-bootstrap-1";
try {
cluster.bootstrapOzoneManager(newNodeId, true, false);
- Assert.fail("Bootstrap should have failed as configs are not updated on" +
+ fail("Bootstrap should have failed as configs are not updated on" +
" all OMs.");
} catch (IOException e) {
- Assert.assertEquals(OmUtils.getOMAddressListPrintString(
+ assertEquals(OmUtils.getOMAddressListPrintString(
Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " +
"have incorrect information of the bootstrapping OM. Update their " +
"ozone-site.xml before proceeding.", e.getMessage());
- Assert.assertTrue(omLog.getOutput().contains("Remote OM " + downOMNodeId +
+ assertTrue(omLog.getOutput().contains("Remote OM " + downOMNodeId +
" configuration returned null"));
- Assert.assertTrue(omLog.getOutput().contains("Remote OM config check " +
+ assertTrue(omLog.getOutput().contains("Remote OM config check " +
"failed on OM " + downOMNodeId));
- Assert.assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId +
+ assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId +
" - System Exit"));
}
@@ -338,7 +342,7 @@ public void testForceBootstrap() throws Exception {
OzoneManager newOM = cluster.getOzoneManager(newNodeId);
// Verify that the newly bootstrapped OM is running
- Assert.assertTrue(newOM.isRunning());
+ assertTrue(newOM.isRunning());
}
/**
@@ -375,7 +379,7 @@ public void testDecommission() throws Exception {
OzoneBucket bucket = volume.getBucket(BUCKET_NAME);
String key = createKey(bucket);
- Assert.assertNotNull(bucket.getKey(key));
+ assertNotNull(bucket.getKey(key));
}
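This file shows the other mechanical change of the migration: the failure message moves from the first argument (JUnit 4) to the last (JUnit 5). A hypothetical sketch, including the Supplier overload that defers building expensive messages until a failure actually happens (not part of this patch):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class MessageOrderSketch {
      @Test
      void messageIsTheLastArgument() {
        boolean peerExists = true;
        // JUnit 4: Assert.assertTrue("msg", condition)
        // JUnit 5: assertTrue(condition, "msg") or a lazy supplier:
        assertTrue(peerExists,
            () -> "New OM node not present in peer list");
      }
    }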
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index d140e0aeaf6..d4f1f777877 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -87,7 +87,6 @@
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
@@ -931,7 +930,7 @@ private void confirmServletLocksOutOtherHandler(BootstrapStateHandler handler,
ExecutorService executorService) {
Future test = checkLock(handler, executorService);
// Handler should fail to take the lock because the servlet has taken it.
- Assert.assertThrows(TimeoutException.class,
+ Assertions.assertThrows(TimeoutException.class,
() -> test.get(500, TimeUnit.MILLISECONDS));
}
@@ -943,7 +942,7 @@ private void confirmOtherHandlerLocksOutServlet(BootstrapStateHandler handler,
handler.getBootstrapStateLock().lock()) {
Future test = checkLock(servlet, executorService);
// Servlet should fail to lock when other handler has taken it.
- Assert.assertThrows(TimeoutException.class,
+ Assertions.assertThrows(TimeoutException.class,
() -> test.get(500, TimeUnit.MILLISECONDS));
}
}
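The two helpers above prove lock exclusion by expecting Future.get to time out while the other party holds the lock. A self-contained, hypothetical sketch of that pattern (not part of this patch):

    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import org.junit.jupiter.api.Test;

    class LockTimeoutSketch {
      @Test
      void heldLockTimesOutTheWaiter() throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CountDownLatch held = new CountDownLatch(1);
        // The task blocks until the latch opens, so a short get() must
        // throw TimeoutException, i.e. the "lock" is still held.
        Future<?> waiter = pool.submit(() -> {
          held.await();
          return null;
        });
        assertThrows(TimeoutException.class,
            () -> waiter.get(100, TimeUnit.MILLISECONDS));
        held.countDown();
        pool.shutdown();
      }
    }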
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
index ce89f8ffe41..74751dde6de 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
@@ -43,7 +43,6 @@
import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
import org.apache.ozone.test.GenericTestUtils;
import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
-import org.junit.Assert;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.AfterAll;
@@ -60,12 +59,9 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
-
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY;
-
-import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
/**
@@ -136,7 +132,7 @@ public void cleanup() {
fs.delete(fileStatus.getPath(), true);
}
} catch (IOException ex) {
- fail("Failed to cleanup files.");
+ Assertions.fail("Failed to cleanup files.");
}
}
@@ -205,19 +201,19 @@ public void testGetDeletedDirectoryInfo()
}
if (directoryObjectId == null) {
- fail("directoryObjectId is null. Test case cannot proceed.");
+ Assertions.fail("directoryObjectId is null. Test case cannot proceed.");
+ } else {
+ // Retrieve Namespace Summary for dir1 from Recon.
+ ReconNamespaceSummaryManagerImpl namespaceSummaryManager =
+ (ReconNamespaceSummaryManagerImpl) cluster.getReconServer()
+ .getReconNamespaceSummaryManager();
+ NSSummary summary =
+ namespaceSummaryManager.getNSSummary(directoryObjectId);
+ // Assert that the directory dir1 has 10 sub-files with a total size of 10 bytes.
+ Assertions.assertEquals(10, summary.getNumOfFiles());
+ Assertions.assertEquals(10, summary.getSizeOfFiles());
}
- // Retrieve Namespace Summary for dir1 from Recon.
- ReconNamespaceSummaryManagerImpl namespaceSummaryManager =
- (ReconNamespaceSummaryManagerImpl) cluster.getReconServer()
- .getReconNamespaceSummaryManager();
- NSSummary summary =
- namespaceSummaryManager.getNSSummary(directoryObjectId);
- // Assert that the directory dir1 has 10 sub-files and size of 1000 bytes.
- Assert.assertEquals(10, summary.getNumOfFiles());
- Assert.assertEquals(10, summary.getSizeOfFiles());
-
// Delete the entire directory dir1.
fs.delete(dir1, true);
syncDataFromOM();
@@ -242,7 +238,7 @@ public void testGetDeletedDirectoryInfo()
KeyInsightInfoResponse entity =
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 10.
- Assert.assertEquals(10, entity.getUnreplicatedDataSize());
+ Assertions.assertEquals(10, entity.getUnreplicatedDataSize());
// Cleanup the tables.
cleanupTables();
@@ -331,7 +327,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
KeyInsightInfoResponse entity =
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 3.
- Assert.assertEquals(3, entity.getUnreplicatedDataSize());
+ Assertions.assertEquals(3, entity.getUnreplicatedDataSize());
// Cleanup the tables.
cleanupTables();
@@ -393,7 +389,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
KeyInsightInfoResponse entity =
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 100.
- Assert.assertEquals(100, entity.getUnreplicatedDataSize());
+ Assertions.assertEquals(100, entity.getUnreplicatedDataSize());
// Cleanup the tables.
cleanupTables();
@@ -475,7 +471,7 @@ private boolean assertTableRowCount(int expectedCount,
LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
count, expectedCount);
} catch (IOException ex) {
- fail("Test failed with: " + ex);
+ Assertions.fail("Test failed with: " + ex);
}
return count == expectedCount;
}
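Note: the restructuring above moves the follow-up logic into an else branch. Assertions.fail does throw, but the compiler and static analyzers do not treat it as terminating, so without the else the nullable directoryObjectId could still be flagged as a possible null dereference in the summary lookup. A minimal sketch of the shape (names reused for clarity, logic simplified):

import org.junit.jupiter.api.Assertions;

class FailElseExample {
  void check(Long directoryObjectId) {
    if (directoryObjectId == null) {
      Assertions.fail("directoryObjectId is null. Test cannot proceed.");
    } else {
      // Safe on this branch: directoryObjectId is known to be non-null.
      Assertions.assertTrue(directoryObjectId >= 0);
    }
  }
}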
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java
index 906b2aaf702..e1d1ba31d74 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java
@@ -42,8 +42,6 @@
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
@@ -57,6 +55,10 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBalancerConfigurationProto;
import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests failover with SCM HA setup.
@@ -113,10 +115,10 @@ public void testFailover() throws Exception {
scmClientConfig.setRetryCount(1);
scmClientConfig.setRetryInterval(100);
scmClientConfig.setMaxRetryTimeout(1500);
- Assert.assertEquals(scmClientConfig.getRetryCount(), 15);
+ assertEquals(scmClientConfig.getRetryCount(), 15);
conf.setFromObject(scmClientConfig);
StorageContainerManager scm = getLeader(cluster);
- Assert.assertNotNull(scm);
+ assertNotNull(scm);
SCMBlockLocationFailoverProxyProvider failoverProxyProvider =
new SCMBlockLocationFailoverProxyProvider(conf);
failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId());
@@ -131,7 +133,7 @@ public void testFailover() throws Exception {
.createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
conf);
scmBlockLocationProtocol.getScmInfo();
- Assert.assertTrue(logCapture.getOutput()
+ assertTrue(logCapture.getOutput()
.contains("Performing failover to suggested leader"));
scm = getLeader(cluster);
SCMContainerLocationFailoverProxyProvider proxyProvider =
@@ -148,7 +150,7 @@ public void testFailover() throws Exception {
scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.ONE, "ozone");
- Assert.assertTrue(logCapture.getOutput()
+ assertTrue(logCapture.getOutput()
.contains("Performing failover to suggested leader"));
}
@@ -159,10 +161,10 @@ public void testMoveFailover() throws Exception {
scmClientConfig.setRetryCount(1);
scmClientConfig.setRetryInterval(100);
scmClientConfig.setMaxRetryTimeout(1500);
- Assert.assertEquals(scmClientConfig.getRetryCount(), 15);
+ assertEquals(scmClientConfig.getRetryCount(), 15);
conf.setFromObject(scmClientConfig);
StorageContainerManager scm = getLeader(cluster);
- Assert.assertNotNull(scm);
+ assertNotNull(scm);
final ContainerID id =
getContainer(HddsProtos.LifeCycleState.CLOSED).containerID();
@@ -190,19 +192,19 @@ public void testMoveFailover() throws Exception {
.createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
conf);
scmBlockLocationProtocol.getScmInfo();
- Assert.assertTrue(logCapture.getOutput()
+ assertTrue(logCapture.getOutput()
.contains("Performing failover to suggested leader"));
scm = getLeader(cluster);
- Assert.assertNotNull(scm);
+ assertNotNull(scm);
//switch to the new leader successfully, new leader should
//get the same inflightMove
Map<ContainerID, MoveDataNodePair> inflightMove =
scm.getReplicationManager().getMoveScheduler().getInflightMove();
- Assert.assertTrue(inflightMove.containsKey(id));
+ assertTrue(inflightMove.containsKey(id));
MoveDataNodePair mp = inflightMove.get(id);
- Assert.assertTrue(dn2.equals(mp.getTgt()));
- Assert.assertTrue(dn1.equals(mp.getSrc()));
+ assertTrue(dn2.equals(mp.getTgt()));
+ assertTrue(dn1.equals(mp.getSrc()));
//complete move in the new leader
scm.getReplicationManager().getMoveScheduler()
@@ -223,17 +225,17 @@ public void testMoveFailover() throws Exception {
scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.ONE, "ozone");
- Assert.assertTrue(logCapture.getOutput()
+ assertTrue(logCapture.getOutput()
.contains("Performing failover to suggested leader"));
//switch to the new leader successfully, new leader should
//get the same inflightMove, which should not contain
//that container.
scm = getLeader(cluster);
- Assert.assertNotNull(scm);
+ assertNotNull(scm);
inflightMove = scm.getReplicationManager()
.getMoveScheduler().getInflightMove();
- Assert.assertFalse(inflightMove.containsKey(id));
+ assertFalse(inflightMove.containsKey(id));
}
/**
@@ -257,14 +259,14 @@ public void testContainerBalancerPersistsConfigurationInAllSCMs()
conf.getObject(SCMClientConfig.class);
scmClientConfig.setRetryInterval(100);
scmClientConfig.setMaxRetryTimeout(1500);
- Assertions.assertEquals(15, scmClientConfig.getRetryCount());
+ assertEquals(15, scmClientConfig.getRetryCount());
conf.setFromObject(scmClientConfig);
StorageContainerManager leader = getLeader(cluster);
- Assertions.assertNotNull(leader);
+ assertNotNull(leader);
ScmClient scmClient = new ContainerOperationClient(conf);
// assert that container balancer is not running right now
- Assertions.assertFalse(scmClient.getContainerBalancerStatus());
+ assertFalse(scmClient.getContainerBalancerStatus());
ContainerBalancerConfiguration balancerConf =
conf.getObject(ContainerBalancerConfiguration.class);
ContainerBalancer containerBalancer = leader.getContainerBalancer();
@@ -278,7 +280,7 @@ public void testContainerBalancerPersistsConfigurationInAllSCMs()
// assert that balancer has stopped since the cluster is already balanced
GenericTestUtils.waitFor(() -> !containerBalancer.isBalancerRunning(),
10, 500);
- Assertions.assertFalse(containerBalancer.isBalancerRunning());
+ assertFalse(containerBalancer.isBalancerRunning());
ByteString byteString =
leader.getScmMetadataStore().getStatefulServiceConfigTable().get(
@@ -315,7 +317,7 @@ public void testContainerBalancerPersistsConfigurationInAllSCMs()
containerBalancer.getServiceName());
ContainerBalancerConfigurationProto protobuf =
ContainerBalancerConfigurationProto.parseFrom(byteString);
- Assertions.assertFalse(protobuf.getShouldRun());
+ assertFalse(protobuf.getShouldRun());
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
index cf8730a9651..c00840c835d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
@@ -43,7 +43,6 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.junit.jupiter.api.AfterEach;
-import org.junit.Assert;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -63,6 +62,7 @@
import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Test cases to verify the metrics exposed by SCMPipelineManager.
@@ -151,10 +151,10 @@ public void test() throws IOException, TimeoutException {
getLongCounter("DatanodeChooseFallbackCount", metrics);
// Seems no under-replicated closed containers get replicated
- Assert.assertTrue(totalRequest == 0);
- Assert.assertTrue(tryCount == 0);
- Assert.assertTrue(sucessCount == 0);
- Assert.assertTrue(compromiseCount == 0);
+ assertEquals(0, totalRequest);
+ assertEquals(0, tryCount);
+ assertEquals(0, sucessCount);
+ assertEquals(0, compromiseCount);
}
@AfterEach
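Note: here assertTrue(x == 0) becomes assertEquals(0, x), which preserves the check but improves the failure output. A tiny sketch with a hypothetical counter value:

import static org.junit.jupiter.api.Assertions.assertEquals;

class CounterAssertExample {
  void countersAreZero() {
    long totalRequest = 3;  // hypothetical counter value
    // assertTrue(totalRequest == 0) would fail with only "expected: <true>";
    // assertEquals reports both sides: "expected: <0> but was: <3>".
    assertEquals(0, totalRequest);
  }
}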
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
index 74868bee2af..ab9b687dcec 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
@@ -48,11 +48,13 @@
import org.apache.ozone.test.tag.Flaky;
import org.apache.ratis.server.protocol.TermIndex;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.ratis.util.LifeCycle;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -123,7 +125,7 @@ public void shutdown() {
public void testInstallSnapshot() throws Exception {
// Get the leader SCM
StorageContainerManager leaderSCM = getLeader(cluster);
- Assert.assertNotNull(leaderSCM);
+ assertNotNull(leaderSCM);
// Find the inactive SCM
String followerId = getInactiveSCM(cluster).getSCMNodeId();
@@ -155,7 +157,7 @@ public void testInstallSnapshot() throws Exception {
// made while it was inactive.
SCMMetadataStore followerMetaStore = followerSCM.getScmMetadataStore();
for (ContainerInfo containerInfo : containers) {
- Assert.assertNotNull(followerMetaStore.getContainerTable()
+ assertNotNull(followerMetaStore.getContainerTable()
.get(containerInfo.containerID()));
}
}
@@ -206,12 +208,11 @@ public void testInstallOldCheckpointFailure() throws Exception {
}
String errorMsg = "Reloading old state of SCM";
- Assert.assertTrue(logCapture.getOutput().contains(errorMsg));
- Assert.assertNull(" installed checkpoint even though checkpoint " +
- "logIndex is less than it's lastAppliedIndex", newTermIndex);
- Assert.assertEquals(followerTermIndex,
- followerSM.getLastAppliedTermIndex());
- Assert.assertFalse(followerSM.getLifeCycleState().isPausingOrPaused());
+ assertTrue(logCapture.getOutput().contains(errorMsg));
+ assertNull(newTermIndex, "Installed checkpoint even though checkpoint " +
+ "logIndex is less than its lastAppliedIndex");
+ assertEquals(followerTermIndex, followerSM.getLastAppliedTermIndex());
+ assertFalse(followerSM.getLifeCycleState().isPausingOrPaused());
}
@Test
@@ -235,7 +236,7 @@ public void testInstallCorruptedCheckpointFailure() throws Exception {
.getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation,
new SCMDBDefinition());
- Assert.assertNotNull(leaderCheckpointLocation);
+ assertNotNull(leaderCheckpointLocation);
// Take a backup of the current DB
String dbBackupName =
"SCM_CHECKPOINT_BACKUP" + termIndex.getIndex() + "_" + System
@@ -272,17 +273,16 @@ public void testInstallCorruptedCheckpointFailure() throws Exception {
scmhaManager.installCheckpoint(leaderCheckpointLocation,
leaderCheckpointTrxnInfo);
- Assert.assertTrue(logCapture.getOutput()
+ assertTrue(logCapture.getOutput()
.contains("Failed to reload SCM state and instantiate services."));
final LifeCycle.State s = followerSM.getLifeCycleState();
- Assert.assertTrue("Unexpected lifeCycle state: " + s,
- s == LifeCycle.State.NEW || s.isPausingOrPaused());
+ assertTrue(s == LifeCycle.State.NEW || s.isPausingOrPaused(),
+ "Unexpected lifeCycle state: " + s);
// Verify correct reloading
followerSM.setInstallingSnapshotData(
new RocksDBCheckpoint(checkpointBackup.toPath()), null);
followerSM.reinitialize();
- Assert.assertEquals(followerSM.getLastAppliedTermIndex(),
+ assertEquals(followerSM.getLastAppliedTermIndex(),
leaderCheckpointTrxnInfo.getTermIndex());
}
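Note: the assertNull/assertTrue rewrites above also flip the argument order: JUnit 4 takes the failure message first, JUnit 5 takes it last. Sketch (the value is a stand-in for the test's TermIndex):

import static org.junit.jupiter.api.Assertions.assertNull;

class MessageOrderExample {
  void messageGoesLast() {
    Object newTermIndex = null;  // stand-in for the snapshot TermIndex
    // JUnit 4: Assert.assertNull("install should not happen", newTermIndex);
    // JUnit 5: the value comes first, the failure message last.
    assertNull(newTermIndex, "install should not happen");
  }
}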
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index d28ef3b2703..92381829f0b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -85,7 +85,6 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
-import org.junit.Assert;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
@@ -1879,7 +1878,7 @@ public void testLinkedAndNonLinkedBucketMetaData()
final ArrayList<Map<String, Object>> bucketListOut =
parseOutputIntoArrayList();
- Assert.assertTrue(bucketListOut.size() == 1);
+ assertEquals(1, bucketListOut.size());
boolean link =
String.valueOf(bucketListOut.get(0).get("link")).equals("false");
assertTrue(link);
@@ -1898,7 +1897,7 @@ public void testLinkedAndNonLinkedBucketMetaData()
final ArrayList<Map<String, Object>> bucketListLinked =
parseOutputIntoArrayList();
- Assert.assertTrue(bucketListLinked.size() == 2);
+ assertEquals(2, bucketListLinked.size());
link = String.valueOf(bucketListLinked.get(1).get("link")).equals("true");
assertTrue(link);
From 9bdd9e223e9fd80746ac36dbdb1ec82c62e29536 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Tue, 19 Dec 2023 11:22:58 +0100
Subject: [PATCH 04/28] HDDS-9916. Useless execution of version-info in
rocksdb-checkpoint-differ (#5784)
---
hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 38 -------------------
1 file changed, 38 deletions(-)
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
index a54f7bb0700..6da69338308 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
@@ -99,45 +99,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <resources>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <excludes>
-          <exclude>ozone-version-info.properties</exclude>
-        </excludes>
-        <filtering>false</filtering>
-      </resource>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <includes>
-          <include>ozone-version-info.properties</include>
-        </includes>
-        <filtering>true</filtering>
-      </resource>
-    </resources>
-      <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <executions>
-          <execution>
-            <id>version-info</id>
-            <phase>generate-resources</phase>
-            <goals>
-              <goal>version-info</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
com.github.spotbugs
spotbugs-maven-plugin
From aa36940a80c1ed6276d597cede019ae9cf98e1a1 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Tue, 19 Dec 2023 11:56:56 +0100
Subject: [PATCH 05/28] HDDS-9962. Mark TestBlockDeletion#testBlockDeletion as
flaky
---
.../common/statemachine/commandhandler/TestBlockDeletion.java | 2 ++
1 file changed, 2 insertions(+)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index b2c0f47997e..744f8286e6b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -70,6 +70,7 @@
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
+import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
@@ -204,6 +205,7 @@ private static Stream replicationConfigs() {
@ParameterizedTest
@MethodSource("replicationConfigs")
+ @Flaky("HDDS-9962")
public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
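Note: the patch above tags a JUnit 5 parameterized test as flaky. A sketch of the ParameterizedTest/MethodSource shape; the values are made up, and @Flaky is Ozone's own marker (org.apache.ozone.test.tag.Flaky), shown here only in a comment:

import java.util.stream.Stream;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

class ParameterizedShapeExample {
  private static Stream<String> replicationConfigs() {
    return Stream.of("RATIS/THREE", "EC/rs-3-2-1024k");  // made-up values
  }

  @ParameterizedTest
  @MethodSource("replicationConfigs")
  // @Flaky("HDDS-9962") would sit here; it excludes the test in flaky runs.
  void testShape(String repConfig) {
    Assertions.assertNotNull(repConfig);
  }
}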
From 3066c495987d72b4a204cefd2904b2ac73924cc8 Mon Sep 17 00:00:00 2001
From: VarshaRavi <30603028+VarshaRaviCV@users.noreply.github.com>
Date: Tue, 19 Dec 2023 17:29:18 +0530
Subject: [PATCH 06/28] HDDS-9776. Migrate simple client integration tests to
JUnit5 (#5819)
---
.../ozone/client/TestOzoneClientFactory.java | 10 +-
.../rpc/AbstractTestECKeyOutputStream.java | 76 ++---
.../client/rpc/Test2WayCommitInRatis.java | 37 +--
.../hadoop/ozone/client/rpc/TestBCSID.java | 40 +--
.../client/rpc/TestBlockDataStreamOutput.java | 42 +--
.../TestCloseContainerHandlingByClient.java | 70 ++---
.../rpc/TestContainerReplicationEndToEnd.java | 28 +-
.../client/rpc/TestContainerStateMachine.java | 50 ++-
.../TestContainerStateMachineFailures.java | 263 ++++++++--------
.../TestContainerStateMachineFlushDelay.java | 41 +--
.../rpc/TestContainerStateMachineStream.java | 33 +-
.../rpc/TestDeleteWithInAdequateDN.java | 68 ++--
.../rpc/TestDiscardPreallocatedBlocks.java | 56 ++--
.../client/rpc/TestECKeyOutputStream.java | 4 +-
.../TestECKeyOutputStreamWithZeroCopy.java | 4 +-
...TestFailureHandlingByClientFlushDelay.java | 38 +--
.../rpc/TestHybridPipelineOnDatanode.java | 53 ++--
.../TestMultiBlockWritesWithDnFailures.java | 36 +--
...TestOzoneClientMultipartUploadWithFSO.java | 297 +++++++++---------
...oneClientRetriesOnExceptionFlushDelay.java | 39 +--
.../TestOzoneClientRetriesOnExceptions.java | 106 +++----
.../rpc/TestOzoneRpcClientForAclAuditLog.java | 28 +-
.../rpc/TestOzoneRpcClientWithRatis.java | 14 +-
.../rpc/TestValidateBCSIDOnRestart.java | 105 ++++---
24 files changed, 721 insertions(+), 817 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
index 5dc7e5f5e98..70ccf289453 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
@@ -20,8 +20,8 @@
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
@@ -59,14 +59,14 @@ public void testRemoteException() {
public Void run() throws IOException {
conf.set("ozone.security.enabled", "true");
try (OzoneClient ozoneClient =
- OzoneClientFactory.getRpcClient("localhost",
- Integer.parseInt(omPort), conf)) {
+ OzoneClientFactory.getRpcClient("localhost",
+ Integer.parseInt(omPort), conf)) {
ozoneClient.getObjectStore().listVolumes("/");
}
return null;
}
});
- Assert.fail("Should throw exception here");
+ Assertions.fail("Should throw exception here");
} catch (IOException | InterruptedException e) {
assert e instanceof AccessControlException;
}
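Note: the try/fail/catch shape kept above is the pre-JUnit 5 idiom for expected exceptions. An alternative sketch using assertThrows — not what this patch does, just the equivalent intent (the lambda body stands in for the doAs RPC call):

import static org.junit.jupiter.api.Assertions.assertThrows;

import org.apache.hadoop.security.AccessControlException;

class ExpectedExceptionExample {
  void accessIsDenied() {
    assertThrows(AccessControlException.class, () -> {
      throw new AccessControlException("Permission denied");  // stand-in
    });
  }
}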
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
index 518893aa0a0..9691a31efb1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
@@ -47,10 +47,10 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.Arrays;
@@ -132,7 +132,7 @@ protected static void init(boolean zeroCopyEnabled) throws Exception {
initInputChunks();
}
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
init(false);
}
@@ -140,7 +140,7 @@ public static void init() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -152,9 +152,9 @@ public static void shutdown() {
public void testCreateKeyWithECReplicationConfig() throws Exception {
try (OzoneOutputStream key = TestHelper
.createKey(keyString, new ECReplicationConfig(3, 2,
- ECReplicationConfig.EcCodec.RS, chunkSize), inputSize,
+ ECReplicationConfig.EcCodec.RS, chunkSize), inputSize,
objectStore, volumeName, bucketName)) {
- Assert.assertTrue(key.getOutputStream() instanceof ECKeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof ECKeyOutputStream);
}
}
@@ -163,9 +163,9 @@ public void testCreateKeyWithOutBucketDefaults() throws Exception {
OzoneVolume volume = objectStore.getVolume(volumeName);
OzoneBucket bucket = volume.getBucket(bucketName);
try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) {
- Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
- for (int i = 0; i < inputChunks.length; i++) {
- out.write(inputChunks[i]);
+ Assertions.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
+ for (byte[] inputChunk : inputChunks) {
+ out.write(inputChunk);
}
}
}
@@ -184,17 +184,17 @@ public void testCreateKeyWithBucketDefaults() throws Exception {
OzoneBucket bucket = volume.getBucket(myBucket);
try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) {
- Assert.assertTrue(out.getOutputStream() instanceof ECKeyOutputStream);
- for (int i = 0; i < inputChunks.length; i++) {
- out.write(inputChunks[i]);
+ Assertions.assertTrue(out.getOutputStream() instanceof ECKeyOutputStream);
+ for (byte[] inputChunk : inputChunks) {
+ out.write(inputChunk);
}
}
byte[] buf = new byte[chunkSize];
try (OzoneInputStream in = bucket.readKey(keyString)) {
- for (int i = 0; i < inputChunks.length; i++) {
+ for (byte[] inputChunk : inputChunks) {
int read = in.read(buf, 0, chunkSize);
- Assert.assertEquals(chunkSize, read);
- Assert.assertTrue(Arrays.equals(buf, inputChunks[i]));
+ Assertions.assertEquals(chunkSize, read);
+ Assertions.assertArrayEquals(buf, inputChunk);
}
}
}
@@ -236,16 +236,16 @@ public void testOverwriteRatisKeyWithECKey() throws Exception {
}
private void createKeyAndCheckReplicationConfig(String keyName,
- OzoneBucket bucket, ReplicationConfig replicationConfig)
+ OzoneBucket bucket, ReplicationConfig replicationConfig)
throws IOException {
try (OzoneOutputStream out = bucket
.createKey(keyName, inputSize, replicationConfig, new HashMap<>())) {
- for (int i = 0; i < inputChunks.length; i++) {
- out.write(inputChunks[i]);
+ for (byte[] inputChunk : inputChunks) {
+ out.write(inputChunk);
}
}
OzoneKeyDetails key = bucket.getKey(keyName);
- Assert.assertEquals(replicationConfig, key.getReplicationConfig());
+ Assertions.assertEquals(replicationConfig, key.getReplicationConfig());
}
@Test
@@ -255,9 +255,9 @@ public void testCreateRatisKeyAndWithECBucketDefaults() throws Exception {
"testCreateRatisKeyAndWithECBucketDefaults", 2000,
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
new HashMap<>())) {
- Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
- for (int i = 0; i < inputChunks.length; i++) {
- out.write(inputChunks[i]);
+ Assertions.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
+ for (byte[] inputChunk : inputChunks) {
+ out.write(inputChunk);
}
}
}
@@ -288,14 +288,14 @@ public void test21ChunksInSingleWriteOp() throws IOException {
}
private void testMultipleChunksInSingleWriteOp(int offset,
- int bufferChunks, int numChunks)
- throws IOException {
+ int bufferChunks, int numChunks)
+ throws IOException {
byte[] inputData = getInputBytes(offset, bufferChunks, numChunks);
final OzoneBucket bucket = getOzoneBucket();
String keyName =
- String.format("testMultipleChunksInSingleWriteOpOffset" +
- "%dBufferChunks%dNumChunks", offset, bufferChunks,
- numChunks);
+ String.format("testMultipleChunksInSingleWriteOpOffset" +
+ "%dBufferChunks%dNumChunks", offset, bufferChunks,
+ numChunks);
try (OzoneOutputStream out = bucket.createKey(keyName, 4096,
new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS,
chunkSize), new HashMap<>())) {
@@ -303,7 +303,7 @@ private void testMultipleChunksInSingleWriteOp(int offset,
}
validateContent(offset, numChunks * chunkSize, inputData, bucket,
- bucket.getKey(keyName));
+ bucket.getKey(keyName));
}
private void testMultipleChunksInSingleWriteOp(int numChunks)
@@ -344,7 +344,7 @@ public void testECContainerKeysCountAndNumContainerReplicas()
.getNumberOfKeys() == 1) && (containerOperationClient
.getContainerReplicas(currentKeyContainerID).size() == 5);
} catch (IOException exception) {
- Assert.fail("Unexpected exception " + exception);
+ Assertions.fail("Unexpected exception " + exception);
return false;
}
}, 100, 10000);
@@ -358,12 +358,12 @@ private void validateContent(byte[] inputData, OzoneBucket bucket,
private void validateContent(int offset, int length, byte[] inputData,
OzoneBucket bucket,
- OzoneKey key) throws IOException {
+ OzoneKey key) throws IOException {
try (OzoneInputStream is = bucket.readKey(key.getName())) {
byte[] fileContent = new byte[length];
- Assert.assertEquals(length, is.read(fileContent));
- Assert.assertEquals(new String(Arrays.copyOfRange(inputData, offset,
- offset + length), UTF_8),
+ Assertions.assertEquals(length, is.read(fileContent));
+ Assertions.assertEquals(new String(Arrays.copyOfRange(inputData, offset,
+ offset + length), UTF_8),
new String(fileContent, UTF_8));
}
}
@@ -423,7 +423,7 @@ public void testWriteShouldSucceedWhenDNKilled() throws Exception {
// Check the second blockGroup pipeline to make sure that the failed
// node is not selected.
- Assert.assertFalse(ecOut.getStreamEntries()
+ Assertions.assertFalse(ecOut.getStreamEntries()
.get(1).getPipeline().getNodes().contains(nodeToKill));
}
@@ -432,8 +432,8 @@ public void testWriteShouldSucceedWhenDNKilled() throws Exception {
// data comes back.
for (int i = 0; i < 2; i++) {
byte[] fileContent = new byte[inputData.length];
- Assert.assertEquals(inputData.length, is.read(fileContent));
- Assert.assertEquals(new String(inputData, UTF_8),
+ Assertions.assertEquals(inputData.length, is.read(fileContent));
+ Assertions.assertEquals(new String(inputData, UTF_8),
new String(fileContent, UTF_8));
}
}
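Note: two mechanical cleanups recur in this file: index loops over inputChunks become enhanced-for loops, and assertTrue(Arrays.equals(a, b)) becomes assertArrayEquals(a, b), which reports the first differing index on failure. A compact sketch with made-up data:

import static org.junit.jupiter.api.Assertions.assertArrayEquals;

class ArrayAssertExample {
  void chunksRoundTrip() {
    byte[][] inputChunks = {{1, 2, 3}, {4, 5, 6}};  // made-up data
    for (byte[] inputChunk : inputChunks) {         // enhanced-for loop
      byte[] read = inputChunk.clone();             // stand-in for readKey
      assertArrayEquals(inputChunk, read);
    }
  }
}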
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
index 3f5ede5478f..8e87f6207f4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.
- StorageContainerLocationProtocolClientSideTranslatorPB;
+ StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -41,31 +41,22 @@
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.time.Duration;
import java.util.concurrent.TimeUnit;
-import org.junit.Rule;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
- OZONE_SCM_STALENODE_INTERVAL;
+ OZONE_SCM_STALENODE_INTERVAL;
/**
* This class tests the 2 way commit in Ratis.
*/
+@Timeout(300)
public class Test2WayCommitInRatis {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneClient client;
private ObjectStore objectStore;
@@ -93,9 +84,9 @@ private void startCluster(OzoneConfiguration conf) throws Exception {
// Make sure the pipeline does not get destroyed quickly
conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
- 60, TimeUnit.SECONDS);
+ 60, TimeUnit.SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000,
- TimeUnit.SECONDS);
+ TimeUnit.SECONDS);
DatanodeRatisServerConfig ratisServerConfig =
conf.getObject(DatanodeRatisServerConfig.class);
ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
@@ -154,8 +145,8 @@ public void test2WayCommitForRetryfailure() throws Exception {
HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi xceiverClient = clientManager
.acquireClient(container1.getPipeline());
- Assert.assertEquals(1, xceiverClient.getRefcount());
- Assert.assertEquals(container1.getPipeline(),
+ Assertions.assertEquals(1, xceiverClient.getRefcount());
+ Assertions.assertEquals(container1.getPipeline(),
xceiverClient.getPipeline());
Pipeline pipeline = xceiverClient.getPipeline();
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
@@ -164,7 +155,7 @@ public void test2WayCommitForRetryfailure() throws Exception {
container1.getContainerInfo().getContainerID(),
xceiverClient.getPipeline()));
reply.getResponse().get();
- Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
+ Assertions.assertEquals(3, ratisClient.getCommitInfoMap().size());
// wait for the container to be created on all the nodes
xceiverClient.watchForCommit(reply.getLogIndex());
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
@@ -181,10 +172,10 @@ public void test2WayCommitForRetryfailure() throws Exception {
xceiverClient.watchForCommit(reply.getLogIndex());
// commitInfo Map will be reduced to 2 here
- Assert.assertEquals(2, ratisClient.getCommitInfoMap().size());
+ Assertions.assertEquals(2, ratisClient.getCommitInfoMap().size());
clientManager.releaseClient(xceiverClient, false);
- Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed"));
- Assert
+ Assertions.assertTrue(logCapturer.getOutput().contains("3 way commit failed"));
+ Assertions
.assertTrue(logCapturer.getOutput().contains("Committed by majority"));
logCapturer.stopCapturing();
shutdown();
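Note: the @Rule-based timeout (via Ozone's JUnit5AwareTimeout shim) becomes a class-level JUnit 5 @Timeout, as above. A sketch of how the annotation behaves:

import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

@Timeout(300)  // seconds by default; applies to every test in the class
class TimeoutShapeExample {
  @Test
  @Timeout(value = 5, unit = TimeUnit.SECONDS)  // per-method override
  void quickCheck() {
    // test body
  }
}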
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
index bbdc9d27d78..1917cf68fd5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.client.rpc;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -32,10 +33,11 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.File;
import java.io.IOException;
@@ -52,22 +54,11 @@
.HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import org.junit.Rule;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
-
/**
* Tests the validity BCSID of a container.
*/
+@Timeout(300)
public class TestBCSID {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private static OzoneConfiguration conf = new OzoneConfiguration();
private static MiniOzoneCluster cluster;
private static OzoneClient client;
@@ -80,7 +71,7 @@ public class TestBCSID {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
String path = GenericTestUtils
.getTempPath(TestBCSID.class.getSimpleName());
@@ -110,7 +101,7 @@ public static void init() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -122,8 +113,9 @@ public static void shutdown() {
public void testBCSID() throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
key.write("ratis".getBytes(UTF_8));
key.close();
@@ -138,7 +130,7 @@ public void testBCSID() throws Exception {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
List<OmKeyLocationInfo> keyLocationInfos =
keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
- Assert.assertEquals(1, keyLocationInfos.size());
+ Assertions.assertEquals(1, keyLocationInfos.size());
OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(0);
long blockCommitSequenceId =
@@ -146,16 +138,16 @@ public void testBCSID() throws Exception {
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerReport().getBlockCommitSequenceId();
- Assert.assertTrue(blockCommitSequenceId > 0);
+ Assertions.assertTrue(blockCommitSequenceId > 0);
// make sure the persisted block Id in OM is same as that seen in the
// container report to be reported to SCM.
- Assert.assertEquals(blockCommitSequenceId,
+ Assertions.assertEquals(blockCommitSequenceId,
omKeyLocationInfo.getBlockCommitSequenceId());
// verify that on restarting the datanode, it reloads the BCSID correctly.
cluster.restartHddsDatanode(0, true);
- Assert.assertEquals(blockCommitSequenceId,
+ Assertions.assertEquals(blockCommitSequenceId,
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
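Note: besides the assertion changes, createKey calls in this file move from the deprecated (type, factor) overload to one taking a ReplicationConfig. A sketch of the mapping used above:

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;

class ReplicationConfigExample {
  ReplicationConfig ratisOne() {
    // Builds the config once from the legacy enum pair; the result is then
    // passed to bucket.createKey(name, size, config, metadata).
    return ReplicationConfig.fromTypeAndFactor(
        ReplicationType.RATIS, ReplicationFactor.ONE);
  }
}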
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 4d3d1c2c32c..a77218d8915 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -37,14 +37,11 @@
import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.TestHelper;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -57,13 +54,8 @@
/**
* Tests BlockDataStreamOutput class.
*/
+@Timeout(300)
public class TestBlockDataStreamOutput {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf = new OzoneConfiguration();
private static OzoneClient client;
@@ -83,7 +75,7 @@ public class TestBlockDataStreamOutput {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
chunkSize = 100;
flushSize = 2 * chunkSize;
@@ -128,7 +120,7 @@ static String getKeyName() {
/**
* Shutdown MiniDFSCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -184,7 +176,7 @@ private void testWriteWithFailure(int dataLength) throws Exception {
(KeyDataStreamOutput) key.getByteBufStreamOutput();
ByteBufferStreamOutput stream =
keyDataStreamOutput.getStreamEntries().get(0).getByteBufStreamOutput();
- Assert.assertTrue(stream instanceof BlockDataStreamOutput);
+ Assertions.assertTrue(stream instanceof BlockDataStreamOutput);
TestHelper.waitForContainerClose(key, cluster);
key.write(b);
key.close();
@@ -208,21 +200,21 @@ public void testPutBlockAtBoundary() throws Exception {
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
.getBytes(UTF_8);
key.write(ByteBuffer.wrap(data));
- Assert.assertTrue(
+ Assertions.assertTrue(
metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
<= pendingPutBlockCount + 1);
key.close();
// Since data length is 500, first putBlock will be at 400 (flush boundary)
// and the other at 500
- Assert.assertTrue(
- metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
- == putBlockCount + 2);
+ Assertions.assertEquals(
+ metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock),
+ putBlockCount + 2);
validateData(keyName, data);
}
static OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
- long size) throws Exception {
+ long size) throws Exception {
return TestHelper.createStreamKey(
keyName, type, size, objectStore, volumeName, bucketName);
}
@@ -245,10 +237,10 @@ public void testMinPacketSize() throws Exception {
.getBytes(UTF_8);
key.write(ByteBuffer.wrap(data));
// minPacketSize = 100, so the first write of 50 won't trigger a writeChunk
- Assert.assertEquals(writeChunkCount,
+ Assertions.assertEquals(writeChunkCount,
metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
key.write(ByteBuffer.wrap(data));
- Assert.assertEquals(writeChunkCount + 1,
+ Assertions.assertEquals(writeChunkCount + 1,
metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
// now close the stream, It will update the key length.
key.close();
@@ -271,7 +263,7 @@ public void testTotalAckDataLength() throws Exception {
keyDataStreamOutput.getStreamEntries().get(0);
key.write(ByteBuffer.wrap(data));
key.close();
- Assert.assertEquals(dataLength, stream.getTotalAckDataLength());
+ Assertions.assertEquals(dataLength, stream.getTotalAckDataLength());
}
}
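Note: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll; under JUnit 5's default per-method lifecycle these must remain static, which is why the shared cluster fields in these tests stay static. Minimal sketch (the String field stands in for the MiniOzoneCluster):

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

class LifecycleShapeExample {
  private static String cluster;  // stand-in for the static MiniOzoneCluster

  @BeforeAll
  static void init() {            // must be static under the default lifecycle
    cluster = "running";
  }

  @AfterAll
  static void shutdown() {
    cluster = null;
  }

  @Test
  void usesSharedCluster() {
    Assertions.assertEquals("running", cluster);
  }
}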
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index c35abee17cf..63c9b275b38 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -53,26 +53,18 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
/**
* Tests Close Container Exception handling by Ozone Client.
*/
+@Timeout(300)
public class TestCloseContainerHandlingByClient {
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf = new OzoneConfiguration();
private static OzoneClient client;
@@ -90,7 +82,7 @@ public class TestCloseContainerHandlingByClient {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
chunkSize = (int) OzoneConsts.MB;
blockSize = 4 * chunkSize;
@@ -123,7 +115,7 @@ private String getKeyName() {
/**
* Shutdown MiniDFSCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -141,7 +133,7 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
.getBytes(UTF_8);
key.write(data);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -156,7 +148,7 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
// read the key from OM again and match the length. The length will still
// be equal to the original data size.
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
+ Assertions.assertEquals(2 * data.length, keyInfo.getDataSize());
// Written the same data twice
String dataString = new String(data, UTF_8);
@@ -174,7 +166,7 @@ public void testBlockWritesCloseConsistency() throws Exception {
.getBytes(UTF_8);
key.write(data);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -187,7 +179,7 @@ public void testBlockWritesCloseConsistency() throws Exception {
// read the key from OM again and match the length. The length will still
// be equal to the original data size.
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assert.assertEquals(data.length, keyInfo.getDataSize());
+ Assertions.assertEquals(data.length, keyInfo.getDataSize());
validateData(keyName, data);
}
@@ -200,15 +192,15 @@ public void testMultiBlockWrites() throws Exception {
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
// With the initial size provided, it should have preallocated 4 blocks
- Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
+ Assertions.assertEquals(3, keyOutputStream.getStreamEntries().size());
// write data more than 1 block
byte[] data =
ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
.getBytes(UTF_8);
- Assert.assertEquals(data.length, 3 * blockSize);
+ Assertions.assertEquals(data.length, 3 * blockSize);
key.write(data);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -232,10 +224,10 @@ public void testMultiBlockWrites() throws Exception {
// closeContainerException and remaining data in the chunkOutputStream
// buffer will be copied into a different allocated block and will be
// committed.
- Assert.assertEquals(4, keyLocationInfos.size());
- Assert.assertEquals(4 * blockSize, keyInfo.getDataSize());
+ Assertions.assertEquals(4, keyLocationInfos.size());
+ Assertions.assertEquals(4 * blockSize, keyInfo.getDataSize());
for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
- Assert.assertEquals(blockSize, locationInfo.getLength());
+ Assertions.assertEquals(blockSize, locationInfo.getLength());
}
}
@@ -247,9 +239,9 @@ public void testMultiBlockWrites2() throws Exception {
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
// With the initial size provided, it should have preallocated 2 blocks
- Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+ Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
String dataString =
ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
byte[] data = dataString.getBytes(UTF_8);
@@ -289,7 +281,7 @@ public void testMultiBlockWrites2() throws Exception {
String dataCommitted =
dataString.concat(dataString2).concat(dataString3).concat(dataString4);
- Assert.assertEquals(dataCommitted.getBytes(UTF_8).length,
+ Assertions.assertEquals(dataCommitted.getBytes(UTF_8).length,
keyInfo.getDataSize());
validateData(keyName, dataCommitted.getBytes(UTF_8));
}
@@ -303,16 +295,16 @@ public void testMultiBlockWrites3() throws Exception {
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
// With the initial size provided, it should have preallocated 4 blocks
- Assert.assertEquals(4, keyOutputStream.getStreamEntries().size());
+ Assertions.assertEquals(4, keyOutputStream.getStreamEntries().size());
// write data 4 blocks and one more chunk
byte[] writtenData =
ContainerTestHelper.getFixedLengthString(keyString, keyLen)
.getBytes(UTF_8);
byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
- Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
+ Assertions.assertEquals(data.length, 3 * blockSize + chunkSize);
key.write(data);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -337,7 +329,7 @@ public void testMultiBlockWrites3() throws Exception {
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
inputStream.read(readData);
}
- Assert.assertArrayEquals(writtenData, readData);
+ Assertions.assertArrayEquals(writtenData, readData);
// Though we have written only block initially, the close will hit
// closeContainerException and remaining data in the chunkOutputStream
@@ -347,7 +339,7 @@ public void testMultiBlockWrites3() throws Exception {
for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
length += locationInfo.getLength();
}
- Assert.assertEquals(4 * blockSize, length);
+ Assertions.assertEquals(4 * blockSize, length);
}
private void waitForContainerClose(OzoneOutputStream outputStream)
@@ -357,7 +349,7 @@ private void waitForContainerClose(OzoneOutputStream outputStream)
}
private OzoneOutputStream createKey(String keyName, ReplicationType type,
- long size) throws Exception {
+ long size) throws Exception {
return TestHelper
.createKey(keyName, type, size, objectStore, volumeName, bucketName);
}
@@ -383,7 +375,7 @@ public void testBlockWriteViaRatis() throws Exception {
.setKeyName(keyName)
.build();
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
waitForContainerClose(key);
// Again Write the Data. This will throw an exception which will be handled
// and new blocks will be allocated
@@ -395,7 +387,7 @@ public void testBlockWriteViaRatis() throws Exception {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
String dataString = new String(data, UTF_8);
dataString = dataString.concat(dataString);
- Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
+ Assertions.assertEquals(2 * data.length, keyInfo.getDataSize());
validateData(keyName, dataString.getBytes(UTF_8));
}
@@ -409,7 +401,7 @@ public void testBlockWrites() throws Exception {
.getBytes(UTF_8);
key.write(data1);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -427,7 +419,7 @@ public void testBlockWrites() throws Exception {
// read the key from OM again and match the length. The length will still
// be equal to the original data size.
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assert.assertEquals((long) 5 * chunkSize, keyInfo.getDataSize());
+ Assertions.assertEquals((long) 5 * chunkSize, keyInfo.getDataSize());
// Written the same data twice
String dataString = new String(data1, UTF_8);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index 96f5ac586ca..9a351e77e9c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.client.rpc;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -43,10 +44,10 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.slf4j.LoggerFactory;
import java.io.File;
@@ -84,7 +85,7 @@ public class TestContainerReplicationEndToEnd {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
path = GenericTestUtils
@@ -132,7 +133,7 @@ public static void init() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (xceiverClientManager != null) {
@@ -151,8 +152,9 @@ public void testContainerReplication() throws Exception {
String keyName = "testContainerReplication";
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey(keyName, 0, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ .createKey(keyName, 0,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>());
byte[] testData = "ratis".getBytes(UTF_8);
// First write and flush creates a container in the datanode
key.write(testData);
@@ -161,7 +163,7 @@ public void testContainerReplication() throws Exception {
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ Assertions.assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
long containerID = omKeyLocationInfo.getContainerID();
PipelineID pipelineID =
@@ -203,9 +205,9 @@ public void testContainerReplication() throws Exception {
}
// wait for container to move to closed state in SCM
Thread.sleep(2 * containerReportInterval);
- Assert.assertTrue(
+ Assertions.assertSame(
cluster.getStorageContainerManager().getContainerInfo(containerID)
- .getState() == HddsProtos.LifeCycleState.CLOSED);
+ .getState(), HddsProtos.LifeCycleState.CLOSED);
// shutdown the replica node
cluster.shutdownHddsDatanode(oldReplicaNode);
// now the container is under replicated and will be moved to a different dn
@@ -219,14 +221,14 @@ public void testContainerReplication() throws Exception {
}
}
- Assert.assertNotNull(dnService);
+ Assertions.assertNotNull(dnService);
final HddsDatanodeService newReplicaNode = dnService;
// wait for the container to get replicated
GenericTestUtils.waitFor(() -> {
return newReplicaNode.getDatanodeStateMachine().getContainer()
.getContainerSet().getContainer(containerID) != null;
}, 500, 100000);
- Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer()
+ Assertions.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer()
.getContainerSet().getContainer(containerID).getContainerData()
.getBlockCommitSequenceId() > 0);
// wait for SCM to update the replica Map
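Note: the CLOSED-state check earlier in this file becomes Assertions.assertSame, which is sound for enum constants since they are singletons. Sketch with a stand-in enum:

import static org.junit.jupiter.api.Assertions.assertSame;

class AssertSameExample {
  enum LifeCycleState { OPEN, CLOSED }  // stand-in for the HddsProtos enum

  void containerIsClosed() {
    LifeCycleState state = LifeCycleState.CLOSED;
    // Identity comparison replaces assertTrue(state == CLOSED) and yields
    // a clearer failure message.
    assertSame(LifeCycleState.CLOSED, state);
  }
}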
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
index 7c0fcd43722..1050fdd7f2b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
@@ -25,6 +25,7 @@
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -56,26 +57,18 @@
import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
/**
* Tests the containerStateMachine failure handling.
*/
+@Timeout(300)
public class TestContainerStateMachine {
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
@@ -89,7 +82,7 @@ public class TestContainerStateMachine {
*
* @throws IOException
*/
- @Before
+ @BeforeEach
public void setup() throws Exception {
path = GenericTestUtils
.getTempPath(TestContainerStateMachine.class.getSimpleName());
@@ -137,7 +130,7 @@ public void setup() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @After
+ @AfterEach
public void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -149,8 +142,9 @@ public void shutdown() {
public void testContainerStateMachineFailures() throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
@@ -162,7 +156,7 @@ public void testContainerStateMachineFailures() throws Exception {
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ Assertions.assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
// delete the container dir
@@ -174,7 +168,7 @@ public void testContainerStateMachineFailures() throws Exception {
key.close();
// Make sure the container is marked unhealthy
- Assert.assertEquals(
+ Assertions.assertEquals(
ContainerProtos.ContainerDataProto.State.UNHEALTHY,
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet()
@@ -189,14 +183,15 @@ public void testRatisSnapshotRetention() throws Exception {
(ContainerStateMachine) TestHelper.getStateMachine(cluster);
SimpleStateMachineStorage storage =
(SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
- Assert.assertNull(StatemachineImplTestUtil.findLatestSnapshot(storage));
+ Assertions.assertNull(StatemachineImplTestUtil.findLatestSnapshot(storage));
// Write 10 keys. Num snapshots should be equal to config value.
for (int i = 1; i <= 10; i++) {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey(("ratis" + i), 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ .createKey(("ratis" + i), 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write(("ratis" + i).getBytes(UTF_8));
key.flush();
@@ -212,15 +207,16 @@ public void testRatisSnapshotRetention() throws Exception {
storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
Path parentPath = getSnapshotPath(storage);
int numSnapshots = parentPath.getParent().toFile().listFiles().length;
- Assert.assertTrue(Math.abs(ratisServerConfiguration
+ Assertions.assertTrue(Math.abs(ratisServerConfiguration
.getNumSnapshotsRetained() - numSnapshots) <= 1);
// Write 10 more keys. Num Snapshots should remain the same.
for (int i = 11; i <= 20; i++) {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey(("ratis" + i), 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ .createKey(("ratis" + i), 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write(("ratis" + i).getBytes(UTF_8));
key.flush();
@@ -232,7 +228,7 @@ public void testRatisSnapshotRetention() throws Exception {
storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
parentPath = getSnapshotPath(storage);
numSnapshots = parentPath.getParent().toFile().listFiles().length;
- Assert.assertTrue(Math.abs(ratisServerConfiguration
+ Assertions.assertTrue(Math.abs(ratisServerConfiguration
.getNumSnapshotsRetained() - numSnapshots) <= 1);
}
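
The timeout migration above is mechanical: the JUnit 4 `@Rule` field (wrapped in `JUnit5AwareTimeout`) is deleted and a class-level `@Timeout` takes its place. A sketch, with an illustrative class name:

```java
import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// A class-level @Timeout applies to every test method; the value is in
// seconds unless a unit is given.
@Timeout(300)
class TimeoutSketch {

  @Test
  void coveredByClassTimeout() {
    // runs under the shared 300-second limit
  }

  @Test
  @Timeout(value = 5, unit = TimeUnit.SECONDS)
  void methodLevelOverride() {
    // a method-level @Timeout overrides the class-level one
  }
}
```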
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 55e16989a88..eb84e67398f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -35,6 +35,7 @@
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
@@ -86,14 +87,14 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.hamcrest.core.Is.is;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
-import static org.hamcrest.MatcherAssert.assertThat;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.exceptions.StateMachineException;
@@ -207,8 +208,9 @@ public void testContainerStateMachineCloseOnMissingPipeline()
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("testQuasiClosed1", 1024, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ .createKey("testQuasiClosed1", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>());
key.write("ratis".getBytes(UTF_8));
key.flush();
@@ -250,9 +252,9 @@ public void testContainerStateMachineCloseOnMissingPipeline()
for (HddsDatanodeService dn : datanodeSet) {
LambdaTestUtils.await(20000, 1000,
() -> (dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(containerID)
- .getContainerState().equals(QUASI_CLOSED)));
+ .getContainer().getContainerSet()
+ .getContainer(containerID)
+ .getContainerState().equals(QUASI_CLOSED)));
}
key.close();
}
@@ -260,27 +262,29 @@ public void testContainerStateMachineCloseOnMissingPipeline()
@Test
public void testContainerStateMachineFailures() throws Exception {
OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(
+ ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
byte[] testData = "ratis".getBytes(UTF_8);
// First write and flush creates a container in the datanode
key.write(testData);
key.flush();
key.write(testData);
KeyOutputStream groupOutputStream =
- (KeyOutputStream) key.getOutputStream();
+ (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
+ groupOutputStream.getLocationInfoList();
assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
- cluster);
+ cluster);
// delete the container dir
FileUtil.fullyDelete(new File(dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID()).
- getContainerData().getContainerPath()));
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID()).
+ getContainerData().getContainerPath()));
try {
// there is only 1 datanode in the pipeline, the pipeline will be closed
// and allocation to new pipeline will fail as there is no other dn in
@@ -291,24 +295,22 @@ public void testContainerStateMachineFailures() throws Exception {
long containerID = omKeyLocationInfo.getContainerID();
// Make sure the container is marked unhealthy
- assertTrue(
- dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(containerID)
- .getContainerState()
- == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
+      assertSame(UNHEALTHY, dn.getDatanodeStateMachine()
+          .getContainer().getContainerSet()
+          .getContainer(containerID)
+          .getContainerState());
OzoneContainer ozoneContainer;
// restart the hdds datanode, container should not in the regular set
OzoneConfiguration config = dn.getConf();
final String dir = config.get(OzoneConfigKeys.
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
- + UUID.randomUUID();
+ DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
+ + UUID.randomUUID();
config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
ozoneContainer = cluster.getHddsDatanodes().get(index)
- .getDatanodeStateMachine().getContainer();
+ .getDatanodeStateMachine().getContainer();
assertNull(ozoneContainer.getContainerSet().
getContainer(containerID));
}
@@ -316,29 +318,31 @@ public void testContainerStateMachineFailures() throws Exception {
@Test
public void testUnhealthyContainer() throws Exception {
OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(
+ ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
key.write("ratis".getBytes(UTF_8));
KeyOutputStream groupOutputStream = (KeyOutputStream) key
- .getOutputStream();
+ .getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
+ groupOutputStream.getLocationInfoList();
assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
- cluster);
+ cluster);
ContainerData containerData =
- dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID())
- .getContainerData();
+ dn.getDatanodeStateMachine()
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerData();
assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData =
- (KeyValueContainerData) containerData;
+ (KeyValueContainerData) containerData;
// delete the container db file
FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath()));
try {
@@ -352,23 +356,21 @@ public void testUnhealthyContainer() throws Exception {
long containerID = omKeyLocationInfo.getContainerID();
// Make sure the container is marked unhealthy
- assertTrue(
- dn.getDatanodeStateMachine()
- .getContainer().getContainerSet().getContainer(containerID)
- .getContainerState()
- == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
+    assertSame(UNHEALTHY, dn.getDatanodeStateMachine()
+        .getContainer().getContainerSet().getContainer(containerID)
+        .getContainerState());
// Check metadata in the .container file
File containerFile = new File(keyValueContainerData.getMetadataPath(),
- containerID + OzoneConsts.CONTAINER_EXTENSION);
+ containerID + OzoneConsts.CONTAINER_EXTENSION);
keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
- .readContainerFile(containerFile);
- assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
+ .readContainerFile(containerFile);
+    assertEquals(UNHEALTHY, keyValueContainerData.getState());
OzoneConfiguration config = dn.getConf();
final String dir = config.get(OzoneConfigKeys.
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
- + UUID.randomUUID();
+ DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
+ + UUID.randomUUID();
config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
// restart the hdds datanode and see if the container is listed in the
@@ -376,21 +378,21 @@ public void testUnhealthyContainer() throws Exception {
cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
// make sure the container state is still marked unhealthy after restart
keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
- .readContainerFile(containerFile);
- assertThat(keyValueContainerData.getState(), is(UNHEALTHY));
+ .readContainerFile(containerFile);
+    assertEquals(UNHEALTHY, keyValueContainerData.getState());
OzoneContainer ozoneContainer;
HddsDatanodeService dnService = cluster.getHddsDatanodes().get(index);
ozoneContainer = dnService
- .getDatanodeStateMachine().getContainer();
+ .getDatanodeStateMachine().getContainer();
HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer
- .getDispatcher();
+ .getDispatcher();
ContainerProtos.ContainerCommandRequestProto.Builder request =
- ContainerProtos.ContainerCommandRequestProto.newBuilder();
+ ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setCmdType(ContainerProtos.Type.CloseContainer);
request.setContainerID(containerID);
request.setCloseContainer(
- ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
+ ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
request.setDatanodeUuid(dnService.getDatanodeDetails().getUuidString());
assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY,
dispatcher.dispatch(request.build(), null)
@@ -401,35 +403,37 @@ public void testUnhealthyContainer() throws Exception {
@Flaky("HDDS-6935")
public void testApplyTransactionFailure() throws Exception {
OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(
+ ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
key.write("ratis".getBytes(UTF_8));
KeyOutputStream groupOutputStream = (KeyOutputStream) key.
- getOutputStream();
+ getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
+ groupOutputStream.getLocationInfoList();
assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
- cluster);
+ cluster);
int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
ContainerData containerData = dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID())
- .getContainerData();
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerData();
assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData =
- (KeyValueContainerData) containerData;
+ (KeyValueContainerData) containerData;
key.close();
ContainerStateMachine stateMachine =
(ContainerStateMachine) TestHelper.getStateMachine(cluster.
getHddsDatanodes().get(index), omKeyLocationInfo.getPipeline());
SimpleStateMachineStorage storage =
- (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
+ (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
stateMachine.takeSnapshot();
final FileInfo snapshot = getSnapshotFileInfo(storage);
final Path parentPath = snapshot.getPath();
@@ -441,16 +445,16 @@ public void testApplyTransactionFailure() throws Exception {
// delete the container db file
FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
Pipeline pipeline = cluster.getStorageContainerLocationClient()
- .getContainerWithPipeline(containerID).getPipeline();
+ .getContainerWithPipeline(containerID).getPipeline();
XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(pipeline);
+ xceiverClientManager.acquireClient(pipeline);
ContainerProtos.ContainerCommandRequestProto.Builder request =
- ContainerProtos.ContainerCommandRequestProto.newBuilder();
+ ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
request.setCmdType(ContainerProtos.Type.CloseContainer);
request.setContainerID(containerID);
request.setCloseContainer(
- ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
+ ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
// close container transaction will fail over Ratis and will initiate
// a pipeline close action
@@ -463,10 +467,9 @@ public void testApplyTransactionFailure() throws Exception {
xceiverClientManager.releaseClient(xceiverClient, false);
}
// Make sure the container is marked unhealthy
- assertTrue(dn.getDatanodeStateMachine()
- .getContainer().getContainerSet().getContainer(containerID)
- .getContainerState()
- == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
+    assertSame(UNHEALTHY, dn.getDatanodeStateMachine()
+        .getContainer().getContainerSet().getContainer(containerID)
+        .getContainerState());
try {
// try to take a new snapshot, ideally it should just fail
stateMachine.takeSnapshot();
@@ -478,12 +481,12 @@ public void testApplyTransactionFailure() throws Exception {
// Make sure the latest snapshot is same as the previous one
try {
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
- assertTrue(snapshot.getPath().equals(latestSnapshot.getPath()));
+ assertEquals(snapshot.getPath(), latestSnapshot.getPath());
} catch (Throwable e) {
assertFalse(snapshot.getPath().toFile().exists());
}
}
-
+
// when remove pipeline, group dir including snapshot will be deleted
LambdaTestUtils.await(10000, 500,
() -> (!snapshot.getPath().toFile().exists()));
@@ -492,33 +495,35 @@ public void testApplyTransactionFailure() throws Exception {
@Test
@Flaky("HDDS-6115")
public void testApplyTransactionIdempotencyWithClosedContainer()
- throws Exception {
+ throws Exception {
OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(
+ ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
key.write("ratis".getBytes(UTF_8));
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
+ groupOutputStream.getLocationInfoList();
assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
- cluster);
+ cluster);
ContainerData containerData = dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID())
- .getContainerData();
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerData();
assertTrue(containerData instanceof KeyValueContainerData);
key.close();
ContainerStateMachine stateMachine =
- (ContainerStateMachine) TestHelper.getStateMachine(dn,
- omKeyLocationInfo.getPipeline());
+ (ContainerStateMachine) TestHelper.getStateMachine(dn,
+ omKeyLocationInfo.getPipeline());
SimpleStateMachineStorage storage =
- (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
+ (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
final FileInfo snapshot = getSnapshotFileInfo(storage);
final Path parentPath = snapshot.getPath();
stateMachine.takeSnapshot();
@@ -528,27 +533,27 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
.getIndex();
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerLocationClient()
- .getContainerWithPipeline(containerID).getPipeline();
+ .getContainerWithPipeline(containerID).getPipeline();
XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(pipeline);
+ xceiverClientManager.acquireClient(pipeline);
ContainerProtos.ContainerCommandRequestProto.Builder request =
- ContainerProtos.ContainerCommandRequestProto.newBuilder();
+ ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
request.setCmdType(ContainerProtos.Type.CloseContainer);
request.setContainerID(containerID);
request.setCloseContainer(
- ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
+ ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
try {
xceiverClient.sendCommand(request.build());
} catch (IOException e) {
fail("Exception should not be thrown");
}
- assertTrue(
- TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
- .getDatanodeStateMachine()
- .getContainer().getContainerSet().getContainer(containerID)
- .getContainerState()
- == ContainerProtos.ContainerDataProto.State.CLOSED);
+    assertSame(
+        ContainerProtos.ContainerDataProto.State.CLOSED,
+        TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
+            .getDatanodeStateMachine()
+            .getContainer().getContainerSet().getContainer(containerID)
+            .getContainerState());
assertTrue(stateMachine.isStateMachineHealthy());
try {
stateMachine.takeSnapshot();
@@ -570,7 +575,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
}
}), 1000, 30000);
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
- assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
+ assertNotEquals(snapshot.getPath(), latestSnapshot.getPath());
}
// The test injects multiple write chunk requests along with closed container
@@ -581,35 +586,37 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
// closed here.
@Test
public void testWriteStateMachineDataIdempotencyWithClosedContainer()
- throws Exception {
+ throws Exception {
OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis-1", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis-1", 1024,
+ ReplicationConfig.fromTypeAndFactor(
+ ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
key.write("ratis".getBytes(UTF_8));
KeyOutputStream groupOutputStream = (KeyOutputStream) key
- .getOutputStream();
+ .getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
+ groupOutputStream.getLocationInfoList();
assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
- cluster);
+ cluster);
ContainerData containerData =
- dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID())
- .getContainerData();
+ dn.getDatanodeStateMachine()
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerData();
assertTrue(containerData instanceof KeyValueContainerData);
key.close();
ContainerStateMachine stateMachine =
- (ContainerStateMachine) TestHelper.getStateMachine(dn,
- omKeyLocationInfo.getPipeline());
+ (ContainerStateMachine) TestHelper.getStateMachine(dn,
+ omKeyLocationInfo.getPipeline());
SimpleStateMachineStorage storage =
- (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
+ (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
final FileInfo snapshot = getSnapshotFileInfo(storage);
final Path parentPath = snapshot.getPath();
stateMachine.takeSnapshot();
@@ -619,22 +626,22 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
assertNotNull(snapshot);
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerLocationClient()
- .getContainerWithPipeline(containerID).getPipeline();
+ .getContainerWithPipeline(containerID).getPipeline();
XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(pipeline);
+ xceiverClientManager.acquireClient(pipeline);
CountDownLatch latch = new CountDownLatch(100);
int count = 0;
AtomicInteger failCount = new AtomicInteger(0);
Runnable r1 = () -> {
try {
ContainerProtos.ContainerCommandRequestProto.Builder request =
- ContainerProtos.ContainerCommandRequestProto.newBuilder();
+ ContainerProtos.ContainerCommandRequestProto.newBuilder();
request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
request.setCmdType(ContainerProtos.Type.CloseContainer);
request.setContainerID(containerID);
request.setCloseContainer(
- ContainerProtos.CloseContainerRequestProto.
- getDefaultInstance());
+ ContainerProtos.CloseContainerRequestProto.
+ getDefaultInstance());
xceiverClient.sendCommand(request.build());
} catch (IOException e) {
failCount.incrementAndGet();
@@ -647,13 +654,13 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
ContainerTestHelper.newWriteChunkRequestBuilder(pipeline,
omKeyLocationInfo.getBlockID(), data.size());
writeChunkRequest.setWriteChunk(writeChunkRequest.getWriteChunkBuilder()
- .setData(data));
+ .setData(data));
xceiverClient.sendCommand(writeChunkRequest.build());
latch.countDown();
} catch (IOException e) {
latch.countDown();
if (!(HddsClientUtils
- .checkForException(e) instanceof ContainerNotOpenException)) {
+ .checkForException(e) instanceof ContainerNotOpenException)) {
failCount.incrementAndGet();
}
String message = e.getMessage();
@@ -682,14 +689,16 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
}
if (failCount.get() > 0) {
- fail("testWriteStateMachineDataIdempotencyWithClosedContainer failed");
+ fail(
+ "testWriteStateMachineDataIdempotencyWithClosedContainer " +
+ "failed");
}
- assertTrue(
+    assertSame(
+        ContainerProtos.ContainerDataProto.State.CLOSED,
TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
.getDatanodeStateMachine()
.getContainer().getContainerSet().getContainer(containerID)
- .getContainerState()
- == ContainerProtos.ContainerDataProto.State.CLOSED);
+            .getContainerState());
assertTrue(stateMachine.isStateMachineHealthy());
try {
stateMachine.takeSnapshot();
@@ -698,7 +707,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
}
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
- assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
+ assertNotEquals(snapshot.getPath(), latestSnapshot.getPath());
r2.run();
} finally {
@@ -711,8 +720,9 @@ public void testContainerStateMachineSingleFailureRetry()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis1", 1024, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ .createKey("ratis1", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>());
key.write("ratis".getBytes(UTF_8));
key.flush();
@@ -746,8 +756,9 @@ public void testContainerStateMachineDualFailureRetry()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis2", 1024, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ .createKey("ratis2", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>());
key.write("ratis".getBytes(UTF_8));
key.flush();
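
This file also drops its Hamcrest imports. The mapping is mechanical: `assertThat(actual, is(expected))` becomes `assertEquals(expected, actual)`, and `assertTrue(a == b)` on enum constants becomes `assertSame`. A minimal sketch, with an invented `State` enum standing in for `ContainerProtos.ContainerDataProto.State`:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertSame;

// Illustrative only: State stands in for the container state enum.
final class AssertionMappingSketch {
  private AssertionMappingSketch() {
  }

  enum State { OPEN, CLOSED, UNHEALTHY }

  static void demo() {
    State actual = State.UNHEALTHY;
    // Hamcrest: assertThat(actual, is(State.UNHEALTHY));
    assertEquals(State.UNHEALTHY, actual);  // expected argument first
    // assertTrue(actual == State.UNHEALTHY) as reference identity:
    assertSame(State.UNHEALTHY, actual);    // enum constants are singletons
    // assertFalse(a.equals(b)) gains a useful failure message as:
    assertNotEquals(State.CLOSED, actual);
  }
}
```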
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index c24f209cdeb..fafba729e0d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -19,6 +19,7 @@
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -38,14 +39,11 @@
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.File;
import java.io.IOException;
@@ -63,14 +61,8 @@
/**
* Tests the containerStateMachine failure handling by set flush delay.
*/
+@Timeout(300)
public class TestContainerStateMachineFlushDelay {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
@@ -89,7 +81,7 @@ public class TestContainerStateMachineFlushDelay {
*
* @throws IOException
*/
- @Before
+ @BeforeEach
public void setup() throws Exception {
chunkSize = 100;
flushSize = 2 * chunkSize;
@@ -140,7 +132,7 @@ public void setup() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @After
+ @AfterEach
public void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -152,14 +144,15 @@ public void shutdown() {
public void testContainerStateMachineFailures() throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// Now ozone.client.stream.buffer.flush.delay is currently enabled
// by default. Here we written data(length 110) greater than chunk
// Size(length 100), make sure flush will sync data.
byte[] data =
ContainerTestHelper.getFixedLengthString(keyString, 110)
- .getBytes(UTF_8);
+ .getBytes(UTF_8);
// First write and flush creates a container in the datanode
key.write(data);
key.flush();
@@ -171,7 +164,7 @@ public void testContainerStateMachineFailures() throws Exception {
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ Assertions.assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
// delete the container dir
@@ -183,12 +176,12 @@ public void testContainerStateMachineFailures() throws Exception {
key.close();
// Make sure the container is marked unhealthy
- Assert.assertTrue(
+    Assertions.assertSame(
+        ContainerProtos.ContainerDataProto.State.UNHEALTHY,
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
- .getContainerState()
- == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
+            .getContainerState());
}
}
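
For per-test setup and teardown the pattern mirrors the class-level one: `@Before`/`@After` become `@BeforeEach`/`@AfterEach` with no behavioral change. A minimal sketch; the `cluster` field here is only a stand-in for the `MiniOzoneCluster`:

```java
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

@Timeout(300)
class PerTestLifecycleSketch {
  private StringBuilder cluster;   // stand-in for MiniOzoneCluster

  @BeforeEach                      // was @Before
  void setup() {
    cluster = new StringBuilder("started");
  }

  @AfterEach                       // was @After
  void shutdown() {
    cluster = null;                // released after every test
  }

  @Test
  void eachTestGetsFreshState() {
    cluster.append("-in-use");
  }
}
```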
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
index aa755bf6939..ccb3fc992cd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -36,14 +36,11 @@
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -60,14 +57,8 @@
/**
* Tests the containerStateMachine stream handling.
*/
+@Timeout(300)
public class TestContainerStateMachineStream {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
@@ -85,7 +76,7 @@ public class TestContainerStateMachineStream {
*
* @throws IOException
*/
- @Before
+ @BeforeEach
public void setup() throws Exception {
conf = new OzoneConfiguration();
@@ -150,7 +141,7 @@ public void setup() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @After
+ @AfterEach
public void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -184,9 +175,9 @@ public void testContainerStateMachineForStreaming() throws Exception {
long bytesUsed = dn.getDatanodeStateMachine()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID()).
- getContainerData().getBytesUsed();
+ getContainerData().getBytesUsed();
- Assert.assertTrue(bytesUsed == size);
+    Assertions.assertEquals(size, bytesUsed);
}
@@ -215,9 +206,9 @@ public void testContainerStateMachineForStreamingSmallFile()
long bytesUsed = dn.getDatanodeStateMachine()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID()).
- getContainerData().getBytesUsed();
+ getContainerData().getBytesUsed();
- Assert.assertTrue(bytesUsed == size);
+    Assertions.assertEquals(size, bytesUsed);
}
}
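
The `bytesUsed` checks above are rewritten from `assertTrue(a == b)` to `assertEquals` so a failure prints both values. Note the Jupiter convention `assertEquals(expected, actual)`: the known write size is the expected value, the measured `bytesUsed` the actual one. A one-method sketch with an illustrative class name:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

final class ExpectedActualSketch {
  private ExpectedActualSketch() {
  }

  static void checkBytesUsed(long bytesUsed, long size) {
    // assertEquals(expected, actual): swapping the arguments still
    // passes or fails identically, but garbles the failure message
    // ("expected: <0> but was: <1024>" with the roles reversed).
    assertEquals(size, bytesUsed);
  }
}
```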
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index 131ce705539..c9b1f7c1705 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
@@ -68,11 +69,11 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Tests delete key operation with inadequate datanodes.
@@ -94,7 +95,7 @@ public class TestDeleteWithInAdequateDN {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
path = GenericTestUtils
@@ -111,7 +112,7 @@ public static void init() throws Exception {
conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1000,
- TimeUnit.SECONDS);
+ TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 2000,
TimeUnit.SECONDS);
conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
@@ -133,7 +134,7 @@ public static void init() throws Exception {
conf.setFromObject(raftClientConfig);
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
- 1, TimeUnit.SECONDS);
+ 1, TimeUnit.SECONDS);
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1));
@@ -153,11 +154,11 @@ public static void init() throws Exception {
conf.setQuietMode(false);
int numOfDatanodes = 3;
cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(numOfDatanodes)
- .setTotalPipelineNumLimit(
- numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT)
- .setHbInterval(100)
- .build();
+ .setNumDatanodes(numOfDatanodes)
+ .setTotalPipelineNumLimit(
+ numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT)
+ .setHbInterval(100)
+ .build();
cluster.waitForClusterToBeReady();
cluster.waitForPipelineTobeReady(THREE, 60000);
//the easiest way to create an open container is creating a key
@@ -173,7 +174,7 @@ public static void init() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (xceiverClientManager != null) {
@@ -199,8 +200,9 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
String keyName = "ratis";
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey(keyName, 0, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ .createKey(keyName, 0,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>());
byte[] testData = "ratis".getBytes(UTF_8);
// First write and flush creates a container in the datanode
key.write(testData);
@@ -209,8 +211,9 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assume.assumeTrue("Expected exactly a single location, but got: " +
- locationInfoList.size(), 1 == locationInfoList.size());
+    Assumptions.assumeTrue(locationInfoList.size() == 1,
+ "Expected exactly a single location, but got: " +
+ locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
long containerID = omKeyLocationInfo.getContainerID();
// A container is created on the datanode. Now figure out a follower node to
@@ -221,7 +224,7 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
List<Pipeline> pipelineList =
cluster.getStorageContainerManager().getPipelineManager()
.getPipelines(RatisReplicationConfig.getInstance(THREE));
- Assume.assumeTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT);
+ Assumptions.assumeTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT);
Pipeline pipeline = pipelineList.get(0);
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
if (RatisTestHelper.isRatisFollower(dn, pipeline)) {
@@ -230,9 +233,10 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
leader = dn;
}
}
- Assume.assumeNotNull(follower, leader);
+ Assertions.assertNotNull(follower);
+ Assertions.assertNotNull(leader);
//ensure that the chosen follower is still a follower
- Assume.assumeTrue(RatisTestHelper.isRatisFollower(follower, pipeline));
+ Assumptions.assumeTrue(RatisTestHelper.isRatisFollower(follower, pipeline));
// shutdown the follower node
cluster.shutdownHddsDatanode(follower.getDatanodeDetails());
key.write(testData);
@@ -277,7 +281,7 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
keyValueHandler.getBlockManager().getBlock(container, blockID);
//cluster.getOzoneManager().deleteKey(keyArgs);
client.getObjectStore().getVolume(volumeName).getBucket(bucketName).
- deleteKey("ratis");
+ deleteKey("ratis");
// make sure the chunk was never deleted on the leader even though
// deleteBlock handler is invoked
try {
@@ -287,12 +291,12 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
null);
}
} catch (IOException ioe) {
- Assert.fail("Exception should not be thrown.");
+ Assertions.fail("Exception should not be thrown.");
}
long numReadStateMachineOps =
stateMachine.getMetrics().getNumReadStateMachineOps();
- Assert.assertTrue(
- stateMachine.getMetrics().getNumReadStateMachineFails() == 0);
+ Assertions.assertEquals(0,
+ stateMachine.getMetrics().getNumReadStateMachineFails());
stateMachine.evictStateMachineCache();
cluster.restartHddsDatanode(follower.getDatanodeDetails(), false);
// wait for the raft server to come up and join the ratis ring
@@ -300,10 +304,10 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
// Make sure the readStateMachine call got triggered after the follower
// caught up
- Assert.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps()
+ Assertions.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps()
> numReadStateMachineOps);
- Assert.assertTrue(
- stateMachine.getMetrics().getNumReadStateMachineFails() == 0);
+ Assertions.assertEquals(0,
+ stateMachine.getMetrics().getNumReadStateMachineFails());
// wait for the chunk to get deleted now
Thread.sleep(10000);
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
@@ -317,11 +321,11 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
keyValueHandler.getChunkManager().readChunk(container, blockID,
ChunkInfo.getFromProtoBuf(chunkInfo), null);
}
- Assert.fail("Expected exception is not thrown");
+ Assertions.fail("Expected exception is not thrown");
} catch (IOException ioe) {
- Assert.assertTrue(ioe instanceof StorageContainerException);
- Assert.assertTrue(((StorageContainerException) ioe).getResult()
- == ContainerProtos.Result.UNABLE_TO_FIND_CHUNK);
+ Assertions.assertTrue(ioe instanceof StorageContainerException);
+      Assertions.assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK,
+          ((StorageContainerException) ioe).getResult());
}
}
}
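
Two details of the `Assume` migration above are easy to miss: Jupiter's `Assumptions.assumeTrue` takes the condition first and the message last (the reverse of JUnit 4), and there is no `Assumptions.assumeNotNull`, which is why the patch substitutes `Assertions.assertNotNull` (failing rather than skipping). A sketch of both; the helper and its parameters are invented stand-ins for the follower/leader datanodes resolved in the test:

```java
import static org.junit.jupiter.api.Assumptions.assumeTrue;

final class AssumptionSketch {
  private AssumptionSketch() {
  }

  static void requirePreconditions(Object follower, Object leader,
      int locationCount) {
    // JUnit 4: Assume.assumeTrue(message, condition) -- message first.
    // Jupiter: condition first, message (or Supplier<String>) last.
    assumeTrue(locationCount == 1,
        () -> "Expected exactly a single location, but got: "
            + locationCount);
    // No assumeNotNull in Jupiter; a null check via assumeTrue keeps
    // the old skip-instead-of-fail semantics if that is preferred.
    assumeTrue(follower != null && leader != null,
        "follower/leader not resolved, skipping test");
  }
}
```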
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index 75089b3f55c..550c1841b3f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -49,26 +49,17 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
/**
* Tests Close Container Exception handling by Ozone Client.
*/
+@Timeout(300)
public class TestDiscardPreallocatedBlocks {
-
- /**
- * Set a timeout for each test.
- */
-
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf = new OzoneConfiguration();
private static OzoneClient client;
@@ -87,7 +78,7 @@ public class TestDiscardPreallocatedBlocks {
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
chunkSize = (int) OzoneConsts.MB;
blockSize = 4 * chunkSize;
@@ -119,10 +110,10 @@ private String getKeyName() {
}
/**
- * Shutdown MiniDFSCluster.
- */
+ * Shutdown MiniDFSCluster.
+ */
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -137,14 +128,14 @@ public void testDiscardPreallocatedBlocks() throws Exception {
createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
// With the initial size provided, it should have pre allocated 2 blocks
- Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+ Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
long containerID1 = keyOutputStream.getStreamEntries().get(0)
- .getBlockID().getContainerID();
+ .getBlockID().getContainerID();
long containerID2 = keyOutputStream.getStreamEntries().get(1)
- .getBlockID().getContainerID();
- Assert.assertEquals(containerID1, containerID2);
+ .getBlockID().getContainerID();
+ Assertions.assertEquals(containerID1, containerID2);
String dataString =
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
byte[] data = dataString.getBytes(UTF_8);
@@ -161,28 +152,27 @@ public void testDiscardPreallocatedBlocks() throws Exception {
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
- Assert.assertEquals(3, datanodes.size());
+ Assertions.assertEquals(3, datanodes.size());
waitForContainerClose(key);
dataString =
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
data = dataString.getBytes(UTF_8);
key.write(data);
- Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
+ Assertions.assertEquals(3, keyOutputStream.getStreamEntries().size());
// the 1st block got written. Now all the containers are closed, so the 2nd
// pre allocated block will be removed from the list and new block should
// have been allocated
- Assert.assertTrue(
- keyOutputStream.getLocationInfoList().get(0).getBlockID()
- .equals(locationInfos.get(0).getBlockID()));
- Assert.assertFalse(
- locationStreamInfos.get(1).getBlockID()
- .equals(keyOutputStream.getLocationInfoList().get(1).getBlockID()));
+    Assertions.assertEquals(
+        locationInfos.get(0).getBlockID(),
+        keyOutputStream.getLocationInfoList().get(0).getBlockID());
+    Assertions.assertNotEquals(locationStreamInfos.get(1).getBlockID(),
+        keyOutputStream.getLocationInfoList().get(1).getBlockID());
key.close();
}
private OzoneOutputStream createKey(String keyName, ReplicationType type,
- long size) throws Exception {
+ long size) throws Exception {
return TestHelper
.createKey(keyName, type, size, objectStore, volumeName, bucketName);
}
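
The BlockID comparisons above move from `assertTrue(a.equals(b))` and `assertFalse(...)` to `assertEquals`/`assertNotEquals`, which report both operands on failure instead of a bare "expected true". A sketch with strings standing in for the BlockID instances:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;

final class EqualityAssertionSketch {
  private EqualityAssertionSketch() {
  }

  static void demo() {
    // Distinct references with equal content, like two BlockID lookups.
    String original = new String("conID: 1 locID: 100");
    String reloaded = new String("conID: 1 locID: 100");
    assertEquals(original, reloaded);        // equals()-based comparison
    assertNotEquals("conID: 2 locID: 101", reloaded);
    // assertSame(original, reloaded) would fail: different references.
  }
}
```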
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
index dc5622e1e8b..c5147ecfb01 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
@@ -17,14 +17,14 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
/**
* Tests key output stream without zero-copy enabled.
*/
public class TestECKeyOutputStream extends
AbstractTestECKeyOutputStream {
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
init(false);
}
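
One constraint carries over from `@BeforeClass`: Jupiter's `@BeforeAll`/`@AfterAll` methods must be static unless the class opts into `@TestInstance(Lifecycle.PER_CLASS)`. A sketch of the default, static form (the field is a stand-in for the shared cluster):

```java
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

class StaticLifecycleSketch {
  private static String sharedCluster;   // stand-in for MiniOzoneCluster

  @BeforeAll
  static void init() {                   // must be static by default
    sharedCluster = "started once per class";
  }

  @AfterAll
  static void shutdown() {
    sharedCluster = null;
  }

  @Test
  void usesSharedCluster() {
    // all tests in the class share the one instance
  }
}
```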
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java
index b9baeb2437f..47c94e03cb2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java
@@ -17,14 +17,14 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
/**
* Tests key output stream with zero-copy enabled.
*/
public class TestECKeyOutputStreamWithZeroCopy extends
AbstractTestECKeyOutputStream {
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
init(true);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index c9183400032..3d10661f69c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -47,13 +47,10 @@
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.time.Duration;
@@ -69,14 +66,9 @@
/**
* Tests Exception handling by Ozone Client by set flush delay.
*/
+@Timeout(300)
public class TestFailureHandlingByClientFlushDelay {
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneConfiguration conf;
private OzoneClient client;
@@ -132,7 +124,7 @@ private void init() throws Exception {
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);
StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
- Collections.singleton(HddsUtils.getHostName(conf))).get(0),
+ Collections.singleton(HddsUtils.getHostName(conf))).get(0),
"/rack1");
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(10)
@@ -160,7 +152,7 @@ private void startCluster() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @After
+ @AfterEach
public void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -178,14 +170,14 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception {
.getFixedLengthString(keyString, chunkSize);
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> streamEntryList =
keyOutputStream.getStreamEntries();
// Assert that 1 block will be preallocated
- Assert.assertEquals(1, streamEntryList.size());
+ Assertions.assertEquals(1, streamEntryList.size());
key.write(data.getBytes(UTF_8));
key.flush();
long containerId = streamEntryList.get(0).getBlockID().getContainerID();
@@ -205,11 +197,11 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception {
key.write(data.getBytes(UTF_8));
key.flush();
- Assert.assertTrue(
+ Assertions.assertTrue(
keyOutputStream.getExcludeList().getContainerIds().isEmpty());
- Assert.assertTrue(
+ Assertions.assertTrue(
keyOutputStream.getExcludeList().getDatanodes().isEmpty());
- Assert.assertTrue(
+ Assertions.assertTrue(
keyOutputStream.getExcludeList().getDatanodes().isEmpty());
key.write(data.getBytes(UTF_8));
// The close will just write to the buffer
@@ -225,15 +217,15 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
// Make sure a new block is written
- Assert.assertNotEquals(
+ Assertions.assertNotEquals(
keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
.getBlockID(), blockId);
- Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
+ Assertions.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8));
}
private OzoneOutputStream createKey(String keyName, ReplicationType type,
- long size) throws Exception {
+ long size) throws Exception {
return TestHelper
.createKey(keyName, type, size, objectStore, volumeName, bucketName);
}
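
The patch keeps the existing try/fail/catch blocks when an exception is expected, but Jupiter also ships a native idiom, `assertThrows`, which returns the thrown exception for further checks. A hedged sketch, not used in this patch; the message string is illustrative:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;

final class AssertThrowsSketch {
  private AssertThrowsSketch() {
  }

  static void demo() {
    // Replaces: call, then fail("Expected exception is not thrown"),
    // then a catch block asserting on the exception.
    IOException thrown = assertThrows(IOException.class, () -> {
      throw new IOException("UNABLE_TO_FIND_CHUNK");
    });
    assertEquals("UNABLE_TO_FIND_CHUNK", thrown.getMessage());
  }
}
```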
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
index e0e95239b0e..8b39e994b05 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ozone.client.rpc;
-
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -37,33 +37,23 @@
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import static java.nio.charset.StandardCharsets.UTF_8;
-import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.HashMap;
-import org.junit.Rule;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
/**
* Tests Hybrid Pipeline Creation and IO on same set of Datanodes.
*/
+@Timeout(300)
public class TestHybridPipelineOnDatanode {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf;
private static OzoneClient client;
@@ -76,7 +66,7 @@ public class TestHybridPipelineOnDatanode {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3)
@@ -90,7 +80,7 @@ public static void init() throws Exception {
/**
* Shutdown MiniOzoneCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -117,8 +107,9 @@ public void testHybridPipelineOnDatanode() throws IOException {
// Write data into a key
OzoneOutputStream out = bucket
- .createKey(keyName1, data.length, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ .createKey(keyName1, data.length,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
@@ -126,8 +117,9 @@ public void testHybridPipelineOnDatanode() throws IOException {
// Write data into a key
out = bucket
- .createKey(keyName2, data.length, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
+ .createKey(keyName2, data.length,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
@@ -151,17 +143,18 @@ public void testHybridPipelineOnDatanode() throws IOException {
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(pipelineID1);
List<DatanodeDetails> dns = pipeline1.getNodes();
- Assert.assertTrue(dns.size() == 1);
+ Assertions.assertEquals(1, dns.size());
Pipeline pipeline2 =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(pipelineID2);
- Assert.assertNotEquals(pipeline1, pipeline2);
- Assert.assertTrue(pipeline1.getType() == HddsProtos.ReplicationType.RATIS);
- Assert.assertTrue(pipeline1.getType() == pipeline2.getType());
+ Assertions.assertNotEquals(pipeline1, pipeline2);
+ Assertions.assertSame(pipeline1.getType(),
+ HddsProtos.ReplicationType.RATIS);
+ Assertions.assertSame(pipeline1.getType(), pipeline2.getType());
// assert that pipelineId1 and pipelineId2 are on the same node
// but have different replication factors
- Assert.assertTrue(pipeline2.getNodes().contains(dns.get(0)));
+ Assertions.assertTrue(pipeline2.getNodes().contains(dns.get(0)));
byte[] b1 = new byte[data.length];
byte[] b2 = new byte[data.length];
// now try to read both the keys
@@ -173,8 +166,8 @@ public void testHybridPipelineOnDatanode() throws IOException {
is = bucket.readKey(keyName2);
is.read(b2);
is.close();
- Assert.assertTrue(Arrays.equals(b1, data));
- Assert.assertTrue(Arrays.equals(b1, b2));
+ Assertions.assertArrayEquals(data, b1);
+ Assertions.assertArrayEquals(b1, b2);
}
}
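The createKey changes in this file replace the deprecated (type, factor) overload with an explicit ReplicationConfig. A hedged sketch of the new call shape; the bucket, key name, and payload are placeholders:

    import java.util.HashMap;

    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    final class CreateKeySketch {
      private CreateKeySketch() { }

      // Writes one key using the ReplicationConfig-based overload.
      static void writeKey(OzoneBucket bucket, byte[] data) throws Exception {
        ReplicationConfig replication = ReplicationConfig.fromTypeAndFactor(
            ReplicationType.RATIS, ReplicationFactor.THREE);
        try (OzoneOutputStream out = bucket.createKey(
            "example-key", data.length, replication, new HashMap<>())) {
          out.write(data);
        }
      }
    }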
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index d7600fa2a85..8c8b0a269a8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -42,9 +42,10 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.time.Duration;
@@ -52,11 +53,6 @@
import java.util.UUID;
import java.util.concurrent.TimeUnit;
-import org.junit.Rule;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
-
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
@@ -64,14 +60,8 @@
/**
* Tests MultiBlock Writes with Dn failures by Ozone Client.
*/
+@Timeout(300)
public class TestMultiBlockWritesWithDnFailures {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneConfiguration conf;
private OzoneClient client;
@@ -137,7 +127,7 @@ private void startCluster(int datanodes) throws Exception {
/**
* Shutdown MiniOzoneCluster.
*/
- @After
+ @AfterEach
public void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -156,12 +146,12 @@ public void testMultiBlockWritesWithDnFailures() throws Exception {
key.write(data.getBytes(UTF_8));
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream groupOutputStream =
(KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assert.assertTrue(locationInfoList.size() == 2);
+ Assertions.assertEquals(2, locationInfoList.size());
long containerId = locationInfoList.get(1).getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
.getContainerManager()
@@ -185,7 +175,7 @@ public void testMultiBlockWritesWithDnFailures() throws Exception {
.setKeyName(keyName)
.build();
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assert.assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
+ Assertions.assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.concat(data).getBytes(UTF_8));
}
@@ -201,14 +191,14 @@ public void testMultiBlockWritesWithIntermittentDnFailures()
key.write(data.getBytes(UTF_8));
// get the name of a valid container
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> streamEntryList =
keyOutputStream.getStreamEntries();
// Assert that 6 blocks will be preallocated
- Assert.assertEquals(6, streamEntryList.size());
+ Assertions.assertEquals(6, streamEntryList.size());
key.write(data.getBytes(UTF_8));
key.flush();
long containerId = streamEntryList.get(0).getBlockID().getContainerID();
@@ -237,13 +227,13 @@ public void testMultiBlockWritesWithIntermittentDnFailures()
.setKeyName(keyName)
.build();
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assert.assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
+ Assertions.assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName,
data.concat(data).concat(data).concat(data).getBytes(UTF_8));
}
private OzoneOutputStream createKey(String keyName, ReplicationType type,
- long size) throws Exception {
+ long size) throws Exception {
return TestHelper
.createKey(keyName, type, size, objectStore, volumeName, bucketName);
}
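The assertion rewrites in this file follow the convention used across the whole patch: Jupiter keeps the (expected, actual) argument order but moves the optional failure message from the first position to the last. A minimal illustration with made-up names:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class MessageOrderSketch {
      void check(int expectedCount, int actualCount) {
        // JUnit 4: assertEquals("Wrong count", expectedCount, actualCount);
        // JUnit 5: the message, when present, comes last.
        assertEquals(expectedCount, actualCount, "Wrong count");
      }
    }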
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index cc7864a3b53..6eaf051ba45 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -61,15 +61,12 @@
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.nio.file.Path;
@@ -86,13 +83,14 @@
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* This test verifies all the S3 multipart client apis - prefix layout.
*/
+@Timeout(300)
public class TestOzoneClientMultipartUploadWithFSO {
private static ObjectStore store = null;
@@ -100,12 +98,6 @@ public class TestOzoneClientMultipartUploadWithFSO {
private static OzoneClient ozClient = null;
private static String scmId = UUID.randomUUID().toString();
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(new Timeout(300000));
private String volumeName;
private String bucketName;
private String keyName;
@@ -119,7 +111,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
OMRequestTestUtils.configureFSOptimizedPaths(conf, true);
@@ -129,7 +121,7 @@ public static void init() throws Exception {
/**
* Close OzoneClient and shutdown MiniOzoneCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() throws IOException {
shutdownCluster();
}
@@ -142,10 +134,10 @@ public static void shutdown() throws IOException {
*/
static void startCluster(OzoneConfiguration conf) throws Exception {
cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(10)
- .setScmId(scmId)
- .build();
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(10)
+ .setScmId(scmId)
+ .build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
store = ozClient.getObjectStore();
@@ -163,8 +155,8 @@ static void shutdownCluster() throws IOException {
cluster.shutdown();
}
}
-
- @Before
+
+ @BeforeEach
public void preTest() throws Exception {
volumeName = UUID.randomUUID().toString();
bucketName = UUID.randomUUID().toString();
@@ -178,7 +170,7 @@ public void preTest() throws Exception {
@Test
public void testInitiateMultipartUploadWithReplicationInformationSet() throws
- IOException {
+ IOException {
String uploadID = initiateMultipartUpload(bucket, keyName,
ReplicationType.RATIS, ONE);
@@ -186,31 +178,31 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws
// generate a new uploadID.
String uploadIDNew = initiateMultipartUpload(bucket, keyName,
ReplicationType.RATIS, ONE);
- Assert.assertNotEquals(uploadIDNew, uploadID);
+ Assertions.assertNotEquals(uploadIDNew, uploadID);
}
@Test
public void testInitiateMultipartUploadWithDefaultReplication() throws
- IOException {
+ IOException {
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
- Assert.assertNotNull(multipartInfo);
+ Assertions.assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
- Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
- Assert.assertEquals(bucketName, multipartInfo.getBucketName());
- Assert.assertEquals(keyName, multipartInfo.getKeyName());
- Assert.assertNotNull(multipartInfo.getUploadID());
+ Assertions.assertEquals(volumeName, multipartInfo.getVolumeName());
+ Assertions.assertEquals(bucketName, multipartInfo.getBucketName());
+ Assertions.assertEquals(keyName, multipartInfo.getKeyName());
+ Assertions.assertNotNull(multipartInfo.getUploadID());
// Call initiate multipart upload for the same key again, this should
// generate a new uploadID.
multipartInfo = bucket.initiateMultipartUpload(keyName);
- Assert.assertNotNull(multipartInfo);
- Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
- Assert.assertEquals(bucketName, multipartInfo.getBucketName());
- Assert.assertEquals(keyName, multipartInfo.getKeyName());
- Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID);
- Assert.assertNotNull(multipartInfo.getUploadID());
+ Assertions.assertNotNull(multipartInfo);
+ Assertions.assertEquals(volumeName, multipartInfo.getVolumeName());
+ Assertions.assertEquals(bucketName, multipartInfo.getBucketName());
+ Assertions.assertEquals(keyName, multipartInfo.getKeyName());
+ Assertions.assertNotEquals(multipartInfo.getUploadID(), uploadID);
+ Assertions.assertNotNull(multipartInfo.getUploadID());
}
@Test
@@ -220,15 +212,15 @@ public void testUploadPartWithNoOverride() throws IOException {
ReplicationType.RATIS, ONE);
OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
- sampleData.length(), 1, uploadID);
+ sampleData.length(), 1, uploadID);
ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
ozoneOutputStream.close();
OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
- .getCommitUploadPartInfo();
+ .getCommitUploadPartInfo();
- Assert.assertNotNull(commitUploadPartInfo);
- Assert.assertNotNull(commitUploadPartInfo.getPartName());
+ Assertions.assertNotNull(commitUploadPartInfo);
+ Assertions.assertNotNull(commitUploadPartInfo.getPartName());
}
@Test
@@ -253,13 +245,12 @@ public void testUploadPartOverrideWithRatis() throws Exception {
// So, when a part is overridden, the part names will still be the same
// irrespective of content in Ozone S3. This lets S3 MPU completeMPU pass
// when comparing part names, and large file uploads work using aws cp.
- Assert.assertEquals("Part names should be same", partName,
- partNameNew);
+ Assertions.assertEquals(partName, partNameNew, "Part names should be same");
// bytes written for the old part need to be discarded so that only the
// new part bytes count against this bucket's quota
long byteWritten = "name".length() * 3; // data written with replication
- Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
- byteWritten);
+ Assertions.assertEquals(byteWritten,
+ volume.getBucket(bucketName).getUsedBytes());
}
@@ -277,14 +268,14 @@ public void testUploadTwiceWithEC() throws IOException {
String partName = uploadPart(bucket, keyName, uploadID, partNumber,
data);
-
+
Map<Integer, String> partsMap = new HashMap<>();
partsMap.put(partNumber, partName);
bucket.completeMultipartUpload(keyName, uploadID, partsMap);
long replicatedSize = QuotaUtil.getReplicatedSize(data.length,
bucket.getReplicationConfig());
- Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
- replicatedSize);
+ Assertions.assertEquals(replicatedSize,
+ volume.getBucket(bucketName).getUsedBytes());
//upload same key again
@@ -299,7 +290,7 @@ public void testUploadTwiceWithEC() throws IOException {
bucket.completeMultipartUpload(keyName, uploadID, partsMap);
// used size should remain the same, as this overwrites the previous upload
- Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
- replicatedSize);
+ Assertions.assertEquals(replicatedSize,
+ volume.getBucket(bucketName).getUsedBytes());
}
@@ -316,16 +307,16 @@ public void testUploadAbortWithEC() throws IOException {
String uploadID = multipartInfo.getUploadID();
int partNumber = 1;
uploadPart(bucket, keyName, uploadID, partNumber, data);
-
+
long replicatedSize = QuotaUtil.getReplicatedSize(data.length,
bucket.getReplicationConfig());
- Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
- replicatedSize);
+ Assertions.assertEquals(replicatedSize,
+ volume.getBucket(bucketName).getUsedBytes());
bucket.abortMultipartUpload(keyName, uploadID);
// used size should become zero after the upload is aborted
- Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), 0);
+ Assertions.assertEquals(0, volume.getBucket(bucketName).getUsedBytes());
}
private OzoneBucket getOzoneECBucket(String myBucket)
@@ -339,22 +330,22 @@ private OzoneBucket getOzoneECBucket(String myBucket)
volume.createBucket(myBucket, bucketArgs.build());
return volume.getBucket(myBucket);
}
-
+
@Test
public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
// Initiate multipart upload
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ ONE);
// Upload Parts
Map<Integer, String> partsMap = new TreeMap<>();
// Uploading part 1 with less than min size
String partName = uploadPart(bucket, keyName, uploadID, 1,
- "data".getBytes(UTF_8));
+ "data".getBytes(UTF_8));
partsMap.put(1, partName);
partName = uploadPart(bucket, keyName, uploadID, 2,
- "data".getBytes(UTF_8));
+ "data".getBytes(UTF_8));
partsMap.put(2, partName);
// Complete multipart upload
@@ -389,14 +380,14 @@ public void testMultipartUploadWithDiscardedUnusedPartSize()
// the unused part size should be discarded from the bucket size,
// 30000000 - 10000000 = 20000000
long bucketSize = volume.getBucket(bucketName).getUsedBytes();
- Assert.assertEquals(bucketSize, data.length * 2);
+ Assertions.assertEquals(data.length * 2, bucketSize);
}
@Test
public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
- throws Exception {
+ throws Exception {
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ ONE);
// We have not uploaded any parts, so passing a non-empty part list
// should throw an error.
@@ -409,9 +400,9 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
@Test
public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
- throws Exception {
+ throws Exception {
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ ONE);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -426,7 +417,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
@Test
public void testMultipartUploadWithMissingParts() throws Exception {
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ ONE);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -461,35 +452,35 @@ public void testMultipartPartNumberExceedingAllowedRange() throws Exception {
@Test
public void testCommitPartAfterCompleteUpload() throws Exception {
String parentDir = "a/b/c/d/";
- keyName = parentDir + UUID.randomUUID().toString();
+ keyName = parentDir + UUID.randomUUID();
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE);
- Assert.assertEquals(volume.getBucket(bucketName).getUsedNamespace(), 4);
+ Assertions.assertEquals(4, volume.getBucket(bucketName).getUsedNamespace());
// upload part 1.
byte[] data = generateData(5 * 1024 * 1024,
- (byte) RandomUtils.nextLong());
+ (byte) RandomUtils.nextLong());
OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
- data.length, 1, uploadID);
+ data.length, 1, uploadID);
ozoneOutputStream.write(data, 0, data.length);
ozoneOutputStream.close();
OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
- ozoneOutputStream.getCommitUploadPartInfo();
+ ozoneOutputStream.getCommitUploadPartInfo();
// Do not close output stream for part 2.
ozoneOutputStream = bucket.createMultipartKey(keyName,
- data.length, 2, uploadID);
+ data.length, 2, uploadID);
ozoneOutputStream.write(data, 0, data.length);
Map<Integer, String> partsMap = new LinkedHashMap<>();
partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
- bucket.completeMultipartUpload(keyName,
- uploadID, partsMap);
- Assert.assertNotNull(omMultipartUploadCompleteInfo);
+ bucket.completeMultipartUpload(keyName,
+ uploadID, partsMap);
+ Assertions.assertNotNull(omMultipartUploadCompleteInfo);
- Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+ Assertions.assertNotNull(omMultipartCommitUploadPartInfo);
byte[] fileContent = new byte[data.length];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
@@ -500,15 +491,15 @@ public void testCommitPartAfterCompleteUpload() throws Exception {
// Combine the data of all parts and check that it matches the data
// read back for the key.
String part1 = new String(data, UTF_8);
sb.append(part1);
- Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
+ Assertions.assertEquals(sb.toString(), new String(fileContent, UTF_8));
try {
ozoneOutputStream.close();
- Assert.fail("testCommitPartAfterCompleteUpload failed");
+ Assertions.fail("testCommitPartAfterCompleteUpload failed");
} catch (IOException ex) {
- Assert.assertTrue(ex instanceof OMException);
- Assert.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
- ((OMException) ex).getResult());
+ Assertions.assertTrue(ex instanceof OMException);
+ Assertions.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+ ((OMException) ex).getResult());
}
}
@@ -521,7 +512,7 @@ public void testAbortUploadFail() throws Exception {
@Test
public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
String parentDir = "a/b/c/d/";
- keyName = parentDir + UUID.randomUUID().toString();
+ keyName = parentDir + UUID.randomUUID();
String uploadID = initiateMultipartUpload(bucket, keyName,
RATIS, ONE);
@@ -548,7 +539,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
@Test
public void testAbortUploadSuccessWithOutAnyParts() throws Exception {
String parentDir = "a/b/c/d/";
- keyName = parentDir + UUID.randomUUID().toString();
+ keyName = parentDir + UUID.randomUUID();
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
@@ -558,7 +549,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception {
@Test
public void testAbortUploadSuccessWithParts() throws Exception {
String parentDir = "a/b/c/d/";
- keyName = parentDir + UUID.randomUUID().toString();
+ keyName = parentDir + UUID.randomUUID();
OzoneManager ozoneManager = cluster.getOzoneManager();
String buckKey = ozoneManager.getMetadataManager()
@@ -585,8 +576,8 @@ public void testAbortUploadSuccessWithParts() throws Exception {
metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey);
OmMultipartKeyInfo omMultipartKeyInfo =
metadataMgr.getMultipartInfoTable().get(multipartKey);
- Assert.assertNull(omKeyInfo);
- Assert.assertNull(omMultipartKeyInfo);
+ Assertions.assertNull(omKeyInfo);
+ Assertions.assertNull(omMultipartKeyInfo);
// Since deleteTable operation is performed via
// batchOp - Table.putWithBatch(), which is an async operation and
@@ -616,22 +607,22 @@ public void testListMultipartUploadParts() throws Exception {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 3);
- Assert.assertEquals(
+ Assertions.assertEquals(
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
ozoneMultipartUploadPartListParts.getReplicationConfig());
- Assert.assertEquals(3,
+ Assertions.assertEquals(3,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
verifyPartNamesInDB(partsMap,
ozoneMultipartUploadPartListParts, uploadID);
- Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+ Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
}
private void verifyPartNamesInDB(Map<Integer, String> partsMap,
- OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts,
- String uploadID) throws IOException {
+ OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts,
+ String uploadID) throws IOException {
List<String> listPartNames = new ArrayList<>();
String keyPartName = verifyPartNames(partsMap, 0,
@@ -652,7 +643,7 @@ private void verifyPartNamesInDB(Map<Integer, String> partsMap,
keyName, uploadID);
OmMultipartKeyInfo omMultipartKeyInfo =
metadataMgr.getMultipartInfoTable().get(multipartKey);
- Assert.assertNotNull(omMultipartKeyInfo);
+ Assertions.assertNotNull(omMultipartKeyInfo);
for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo :
omMultipartKeyInfo.getPartKeyInfoMap()) {
@@ -663,21 +654,21 @@ private void verifyPartNamesInDB(Map partsMap,
metadataMgr.getOzoneKey(volumeName, bucketName, keyName);
// partKeyName format in DB - partKeyName + ClientID
- Assert.assertTrue("Invalid partKeyName format in DB: " + partKeyName
- + ", expected name:" + fullKeyPartName,
- partKeyName.startsWith(fullKeyPartName));
+ Assertions.assertTrue(partKeyName.startsWith(fullKeyPartName),
+ "Invalid partKeyName format in DB: " + partKeyName
+ + ", expected name:" + fullKeyPartName);
listPartNames.remove(partKeyName);
}
- Assert.assertTrue("Wrong partKeyName format in DB!",
- listPartNames.isEmpty());
+ Assertions.assertTrue(listPartNames.isEmpty(),
+ "Wrong partKeyName format in DB!");
}
private String verifyPartNames(Map<Integer, String> partsMap, int index,
- OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts) {
+ OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts) {
- Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(index).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(index)
.getPartName());
@@ -707,37 +698,37 @@ public void testListMultipartUploadPartsWithContinuation()
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 2);
- Assert.assertEquals(
+ Assertions.assertEquals(
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
ozoneMultipartUploadPartListParts.getReplicationConfig());
- Assert.assertEquals(2,
+ Assertions.assertEquals(2,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(0).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
.getPartName());
- Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(1).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(1)
.getPartName());
// Get remaining
- Assert.assertTrue(ozoneMultipartUploadPartListParts.isTruncated());
+ Assertions.assertTrue(ozoneMultipartUploadPartListParts.isTruncated());
ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID,
ozoneMultipartUploadPartListParts.getNextPartNumberMarker(), 2);
- Assert.assertEquals(1,
+ Assertions.assertEquals(1,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(0).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
.getPartName());
// As we don't have any parts for this, we should get false here
- Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+ Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
}
@@ -745,7 +736,7 @@ public void testListMultipartUploadPartsWithContinuation()
public void testListPartsInvalidPartMarker() throws Exception {
try {
bucket.listParts(keyName, "random", -1, 2);
- Assert.fail("Should throw exception as partNumber is an invalid number!");
+ Assertions.fail("Should throw exception as partNumber is an invalid number!");
} catch (IllegalArgumentException ex) {
GenericTestUtils.assertExceptionContains("Should be greater than or "
+ "equal to zero", ex);
@@ -756,7 +747,7 @@ public void testListPartsInvalidPartMarker() throws Exception {
public void testListPartsInvalidMaxParts() throws Exception {
try {
bucket.listParts(keyName, "random", 1, -1);
- Assert.fail("Should throw exception as max parts is an invalid number!");
+ Assertions.fail("Should throw exception as max parts is an invalid number!");
} catch (IllegalArgumentException ex) {
GenericTestUtils.assertExceptionContains("Max Parts Should be greater "
+ "than zero", ex);
@@ -777,15 +768,15 @@ public void testListPartsWithPartMarkerGreaterThanPartCount()
// Should return empty
- Assert.assertEquals(0,
+ Assertions.assertEquals(0,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assert.assertEquals(
+ Assertions.assertEquals(
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
ozoneMultipartUploadPartListParts.getReplicationConfig());
// As there are no parts greater than partNumberMarker and the list
// is not truncated, it should return false here.
- Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+ Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
}
@@ -824,53 +815,53 @@ public void testListMultipartUpload() throws Exception {
uploadPart(bucket, key3, uploadID3, 1, "data".getBytes(UTF_8));
OzoneMultipartUploadList listMPUs = bucket.listMultipartUploads("dir1");
- Assert.assertEquals(3, listMPUs.getUploads().size());
+ Assertions.assertEquals(3, listMPUs.getUploads().size());
List<String> expectedList = new ArrayList<>(keys);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assert.assertEquals(0, expectedList.size());
+ Assertions.assertEquals(0, expectedList.size());
listMPUs = bucket.listMultipartUploads("dir1/dir2");
- Assert.assertEquals(2, listMPUs.getUploads().size());
+ Assertions.assertEquals(2, listMPUs.getUploads().size());
expectedList = new ArrayList<>();
expectedList.add(key2);
expectedList.add(key3);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assert.assertEquals(0, expectedList.size());
+ Assertions.assertEquals(0, expectedList.size());
listMPUs = bucket.listMultipartUploads("dir1/dir2/dir3");
- Assert.assertEquals(1, listMPUs.getUploads().size());
+ Assertions.assertEquals(1, listMPUs.getUploads().size());
expectedList = new ArrayList<>();
expectedList.add(key3);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assert.assertEquals(0, expectedList.size());
+ Assertions.assertEquals(0, expectedList.size());
// partial key
listMPUs = bucket.listMultipartUploads("d");
- Assert.assertEquals(3, listMPUs.getUploads().size());
+ Assertions.assertEquals(3, listMPUs.getUploads().size());
expectedList = new ArrayList<>(keys);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assert.assertEquals(0, expectedList.size());
+ Assertions.assertEquals(0, expectedList.size());
// partial key
listMPUs = bucket.listMultipartUploads("");
- Assert.assertEquals(3, listMPUs.getUploads().size());
+ Assertions.assertEquals(3, listMPUs.getUploads().size());
expectedList = new ArrayList<>(keys);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assert.assertEquals(0, expectedList.size());
+ Assertions.assertEquals(0, expectedList.size());
}
private String verifyUploadedPart(String uploadID, String partName,
- OMMetadataManager metadataMgr) throws IOException {
+ OMMetadataManager metadataMgr) throws IOException {
OzoneManager ozoneManager = cluster.getOzoneManager();
String buckKey = ozoneManager.getMetadataManager()
.getBucketKey(volumeName, bucketName);
@@ -888,28 +879,28 @@ private String verifyUploadedPart(String uploadID, String partName,
OmMultipartKeyInfo omMultipartKeyInfo =
metadataMgr.getMultipartInfoTable().get(multipartKey);
- Assert.assertNotNull(omKeyInfo);
- Assert.assertNotNull(omMultipartKeyInfo);
- Assert.assertEquals(OzoneFSUtils.getFileName(keyName),
+ Assertions.assertNotNull(omKeyInfo);
+ Assertions.assertNotNull(omMultipartKeyInfo);
+ Assertions.assertEquals(OzoneFSUtils.getFileName(keyName),
omKeyInfo.getKeyName());
- Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
+ Assertions.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo :
omMultipartKeyInfo.getPartKeyInfoMap()) {
OmKeyInfo currentKeyPartInfo =
OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
- Assert.assertEquals(keyName, currentKeyPartInfo.getKeyName());
+ Assertions.assertEquals(keyName, currentKeyPartInfo.getKeyName());
// verify dbPartName
- Assert.assertEquals(partName, partKeyInfo.getPartName());
+ Assertions.assertEquals(partName, partKeyInfo.getPartName());
}
return multipartKey;
}
private String getMultipartOpenKey(String multipartUploadID,
- String volName, String buckName, String kName,
- OMMetadataManager omMetadataManager) throws IOException {
+ String volName, String buckName, String kName,
+ OMMetadataManager omMetadataManager) throws IOException {
String fileName = OzoneFSUtils.getFileName(kName);
final long volumeId = omMetadataManager.getVolumeId(volName);
@@ -919,13 +910,13 @@ private String getMultipartOpenKey(String multipartUploadID,
omMetadataManager);
String multipartKey = omMetadataManager.getMultipartKey(volumeId, bucketId,
- parentID, fileName, multipartUploadID);
+ parentID, fileName, multipartUploadID);
return multipartKey;
}
private long getParentID(String volName, String buckName,
- String kName, OMMetadataManager omMetadataManager) throws IOException {
+ String kName, OMMetadataManager omMetadataManager) throws IOException {
Iterator<Path> pathComponents = Paths.get(kName).iterator();
final long volumeId = omMetadataManager.getVolumeId(volName);
final long bucketId = omMetadataManager.getBucketId(volName,
@@ -935,17 +926,17 @@ private long getParentID(String volName, String buckName,
}
private String initiateMultipartUpload(OzoneBucket oBucket, String kName,
- ReplicationType replicationType, ReplicationFactor replicationFactor)
- throws IOException {
+ ReplicationType replicationType, ReplicationFactor replicationFactor)
+ throws IOException {
OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName,
- replicationType, replicationFactor);
+ replicationType, replicationFactor);
- Assert.assertNotNull(multipartInfo);
+ Assertions.assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
- Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
- Assert.assertEquals(bucketName, multipartInfo.getBucketName());
- Assert.assertEquals(kName, multipartInfo.getKeyName());
- Assert.assertNotNull(multipartInfo.getUploadID());
+ Assertions.assertEquals(volumeName, multipartInfo.getVolumeName());
+ Assertions.assertEquals(bucketName, multipartInfo.getBucketName());
+ Assertions.assertEquals(kName, multipartInfo.getKeyName());
+ Assertions.assertNotNull(multipartInfo.getUploadID());
return uploadID;
}
@@ -954,32 +945,32 @@ private String uploadPart(OzoneBucket oBucket, String kName, String
uploadID, int partNumber, byte[] data) throws IOException {
OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName,
- data.length, partNumber, uploadID);
+ data.length, partNumber, uploadID);
ozoneOutputStream.write(data, 0,
- data.length);
+ data.length);
ozoneOutputStream.close();
OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
- ozoneOutputStream.getCommitUploadPartInfo();
+ ozoneOutputStream.getCommitUploadPartInfo();
- Assert.assertNotNull(omMultipartCommitUploadPartInfo);
- Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
+ Assertions.assertNotNull(omMultipartCommitUploadPartInfo);
+ Assertions.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
return omMultipartCommitUploadPartInfo.getPartName();
}
private void completeMultipartUpload(OzoneBucket oBucket, String kName,
- String uploadID, Map<Integer, String> partsMap) throws Exception {
+ String uploadID, Map<Integer, String> partsMap) throws Exception {
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = oBucket
- .completeMultipartUpload(kName, uploadID, partsMap);
-
- Assert.assertNotNull(omMultipartUploadCompleteInfo);
- Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket
- .getName());
- Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket
- .getVolumeName());
- Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), kName);
- Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
+ .completeMultipartUpload(kName, uploadID, partsMap);
+
+ Assertions.assertNotNull(omMultipartUploadCompleteInfo);
+ Assertions.assertEquals(oBucket.getName(),
+ omMultipartUploadCompleteInfo.getBucket());
+ Assertions.assertEquals(oBucket.getVolumeName(),
+ omMultipartUploadCompleteInfo.getVolume());
+ Assertions.assertEquals(kName, omMultipartUploadCompleteInfo.getKey());
+ Assertions.assertNotNull(omMultipartUploadCompleteInfo.getHash());
}
private byte[] generateData(int size, byte val) {
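Several tests in this file keep the JUnit 4 style try { ...; fail(...); } catch blocks. Jupiter's assertThrows would be the idiomatic replacement; a sketch of that alternative, using a stand-in Runnable rather than the real listParts call:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class AssertThrowsSketch {
      // Equivalent of: try { call(); fail(...); } catch (IllegalArgumentException e) { ... }
      void rejectsNegativePartMarker(Runnable listPartsCall) {
        IllegalArgumentException ex = assertThrows(
            IllegalArgumentException.class, listPartsCall::run);
        assertTrue(ex.getMessage().contains("greater than or equal to zero"));
      }
    }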
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index aa048537663..f1d18f4629b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -47,27 +47,18 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
/**
* Tests failure detection and handling in BlockOutputStream Class by set
* flush delay.
*/
+@Timeout(300)
public class TestOzoneClientRetriesOnExceptionFlushDelay {
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
@@ -88,7 +79,7 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay {
*
* @throws IOException
*/
- @Before
+ @BeforeEach
public void init() throws Exception {
chunkSize = 100;
flushSize = 2 * chunkSize;
@@ -133,7 +124,7 @@ private String getKeyName() {
/**
* Shutdown MiniOzoneCluster.
*/
- @After
+ @AfterEach
public void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -152,12 +143,12 @@ public void testGroupMismatchExceptionHandling() throws Exception {
byte[] data1 =
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
.getBytes(UTF_8);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
long containerID =
keyOutputStream.getStreamEntries().get(0).
getBlockID().getContainerID();
- Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
+ Assertions.assertEquals(1, keyOutputStream.getStreamEntries().size());
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
.getContainer(ContainerID.valueOf(containerID));
@@ -172,17 +163,17 @@ public void testGroupMismatchExceptionHandling() throws Exception {
key.write(data1);
OutputStream stream = keyOutputStream.getStreamEntries().get(0)
.getOutputStream();
- Assert.assertTrue(stream instanceof BlockOutputStream);
+ Assertions.assertTrue(stream instanceof BlockOutputStream);
BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
TestHelper.waitForPipelineClose(key, cluster, false);
key.flush();
- Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
+ Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream
.getIoException()) instanceof GroupMismatchException);
- Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
+ Assertions.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
.contains(pipeline.getId()));
- Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2);
+ Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
key.close();
- Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0);
+ Assertions.assertEquals(0, keyOutputStream.getStreamEntries().size());
validateData(keyName, data1);
}
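The lifecycle annotations converted here translate one-to-one between the two JUnit generations. A compact reference sketch, with bodies elided:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleMappingSketch {
      @BeforeAll static void initOnce() { }       // JUnit 4: @BeforeClass
      @BeforeEach void init() { }                 // JUnit 4: @Before
      @Test void body() { }
      @AfterEach void shutdown() { }              // JUnit 4: @After
      @AfterAll static void teardownOnce() { }    // JUnit 4: @AfterClass
    }

Note that @BeforeAll and @AfterAll methods must be static under the default per-method test instance lifecycle.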
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 7bc8ca509fe..16f8ef1398f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -51,29 +51,21 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
/**
* Tests failure detection and handling in BlockOutputStream Class.
*/
+@Timeout(300)
public class TestOzoneClientRetriesOnExceptions {
private static final int MAX_RETRIES = 3;
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
@@ -94,7 +86,7 @@ public class TestOzoneClientRetriesOnExceptions {
*
* @throws IOException
*/
- @Before
+ @BeforeEach
public void init() throws Exception {
chunkSize = 100;
flushSize = 2 * chunkSize;
@@ -141,7 +133,7 @@ private String getKeyName() {
/**
* Shutdown MiniOzoneCluster.
*/
- @After
+ @AfterEach
public void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -154,42 +146,42 @@ public void testGroupMismatchExceptionHandling() throws Exception {
String keyName = getKeyName();
int dataLength = maxFlushSize + 50;
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS,
- dataLength);
+ dataLength);
// write data more than 1 chunk
byte[] data1 =
- ContainerTestHelper.getFixedLengthString(keyString, dataLength)
- .getBytes(UTF_8);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ ContainerTestHelper.getFixedLengthString(keyString, dataLength)
+ .getBytes(UTF_8);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
long containerID =
- keyOutputStream.getStreamEntries().get(0).
- getBlockID().getContainerID();
- Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
+ keyOutputStream.getStreamEntries().get(0).
+ getBlockID().getContainerID();
+ Assertions.assertEquals(1, keyOutputStream.getStreamEntries().size());
ContainerInfo container =
- cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueOf(containerID));
+ cluster.getStorageContainerManager().getContainerManager()
+ .getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline =
- cluster.getStorageContainerManager().getPipelineManager()
- .getPipeline(container.getPipelineID());
+ cluster.getStorageContainerManager().getPipelineManager()
+ .getPipeline(container.getPipelineID());
XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(pipeline);
+ xceiverClientManager.acquireClient(pipeline);
xceiverClient.sendCommand(ContainerTestHelper
- .getCreateContainerRequest(containerID, pipeline));
+ .getCreateContainerRequest(containerID, pipeline));
xceiverClientManager.releaseClient(xceiverClient, false);
key.write(data1);
OutputStream stream = keyOutputStream.getStreamEntries().get(0)
- .getOutputStream();
- Assert.assertTrue(stream instanceof BlockOutputStream);
+ .getOutputStream();
+ Assertions.assertTrue(stream instanceof BlockOutputStream);
BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
TestHelper.waitForPipelineClose(key, cluster, false);
key.flush();
- Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
- .getIoException()) instanceof GroupMismatchException);
- Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
- .contains(pipeline.getId()));
- Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2);
+ Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream
+ .getIoException()) instanceof GroupMismatchException);
+ Assertions.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
+ .contains(pipeline.getId()));
+ Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
key.close();
- Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0);
+ Assertions.assertEquals(0, keyOutputStream.getStreamEntries().size());
validateData(keyName, data1);
}
@@ -198,10 +190,10 @@ public void testMaxRetriesByOzoneClient() throws Exception {
String keyName = getKeyName();
OzoneOutputStream key = createKey(
keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
- Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
- Assert.assertEquals((MAX_RETRIES + 1),
+ Assertions.assertEquals((MAX_RETRIES + 1),
keyOutputStream.getStreamEntries().size());
int dataLength = maxFlushSize + 50;
// write data more than 1 chunk
@@ -220,7 +212,7 @@ public void testMaxRetriesByOzoneClient() throws Exception {
.getPipeline(container.getPipelineID());
XceiverClientSpi xceiverClient =
xceiverClientManager.acquireClient(pipeline);
- Assume.assumeFalse(containerList.contains(containerID));
+ Assumptions.assumeFalse(containerList.contains(containerID));
containerList.add(containerID);
xceiverClient.sendCommand(ContainerTestHelper
.getCreateContainerRequest(containerID, pipeline));
@@ -228,47 +220,47 @@ public void testMaxRetriesByOzoneClient() throws Exception {
}
key.write(data1);
OutputStream stream = entries.get(0).getOutputStream();
- Assert.assertTrue(stream instanceof BlockOutputStream);
+ Assertions.assertTrue(stream instanceof BlockOutputStream);
BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
TestHelper.waitForContainerClose(key, cluster);
// Ensure that blocks for the key have been allocated to at least N+1
// containers so that write request will be tried on N+1 different blocks
// of N+1 different containers and it will finally fail as it will hit
// the max retry count of N.
- Assume.assumeTrue(containerList.size() + " <= " + MAX_RETRIES,
- containerList.size() > MAX_RETRIES);
+ Assumptions.assumeTrue(containerList.size() > MAX_RETRIES,
+ containerList.size() + " <= " + MAX_RETRIES);
try {
key.write(data1);
// ensure that write is flushed to dn
key.flush();
- Assert.fail("Expected exception not thrown");
+ Assertions.fail("Expected exception not thrown");
} catch (IOException ioe) {
- Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream
- .getIoException()) instanceof ContainerNotOpenException);
- Assert.assertTrue(ioe.
- getMessage().contains(
+ Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream
+ .getIoException()) instanceof ContainerNotOpenException);
+ Assertions.assertTrue(ioe.
+ getMessage().contains(
"Retry request failed. " +
- "retries get failed due to exceeded maximum " +
- "allowed retries number: " + MAX_RETRIES));
+ "retries get failed due to exceeded maximum " +
+ "allowed retries number: " + MAX_RETRIES));
}
try {
key.flush();
- Assert.fail("Expected exception not thrown");
+ Assertions.fail("Expected exception not thrown");
} catch (IOException ioe) {
- Assert.assertTrue(ioe.getMessage().contains("Stream is closed"));
+ Assertions.assertTrue(ioe.getMessage().contains("Stream is closed"));
}
try {
key.close();
} catch (IOException ioe) {
- Assert.fail("Expected should not be thrown");
+ Assertions.fail("Expected should not be thrown");
}
}
private OzoneOutputStream createKey(String keyName, ReplicationType type,
- long size) throws Exception {
+ long size) throws Exception {
return TestHelper
- .createKey(keyName, type, ReplicationFactor.ONE,
- size, objectStore, volumeName, bucketName);
+ .createKey(keyName, type, ReplicationFactor.ONE,
+ size, objectStore, volumeName, bucketName);
}
private void validateData(String keyName, byte[] data) throws Exception {
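Beyond assertions, this file also swaps Assume for Assumptions, and the argument order flips: JUnit 4 takes the message first, Jupiter takes the condition first. A minimal sketch mirroring the containerList check above:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    class AssumptionSketch {
      void requiresEnoughContainers(int containerCount, int maxRetries) {
        // JUnit 4: Assume.assumeTrue(message, condition)
        // JUnit 5: condition first, message last; the test is skipped,
        // not failed, when the condition is false.
        assumeTrue(containerCount > maxRetries,
            containerCount + " <= " + maxRetries);
      }
    }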
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
index ace1af6d3ca..3f7c590bf6e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
@@ -39,13 +39,13 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.UnhealthyTest;
import org.apache.ozone.test.tag.Unhealthy;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.FixMethodOrder;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.Test;
import org.junit.experimental.categories.Category;
-import org.junit.runners.MethodSorters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -64,7 +64,7 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This class is to test audit logs for xxxACL APIs of Ozone Client.
@@ -75,7 +75,7 @@
* all assertion based test in this class.
*/
@NotThreadSafe
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+@TestMethodOrder(MethodOrderer.MethodName.class)
@Category(UnhealthyTest.class)
@Unhealthy("Fix this after adding audit support for HA Acl code. This will " +
"be fixed by HDDS-2038")
@@ -86,10 +86,10 @@ public class TestOzoneRpcClientForAclAuditLog {
private static UserGroupInformation ugi;
private static final OzoneAcl USER_ACL =
new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER,
- "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS);
+ "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS);
private static final OzoneAcl USER_ACL_2 =
new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER,
- "jane", IAccessAuthorizer.ACLType.ALL, ACCESS);
+ "jane", IAccessAuthorizer.ACLType.ALL, ACCESS);
private static List<OzoneAcl> aclListToAdd = new ArrayList<>();
private static MiniOzoneCluster cluster = null;
private static OzoneClient ozClient = null;
@@ -106,7 +106,7 @@ public class TestOzoneRpcClientForAclAuditLog {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
System.setProperty("log4j.configurationFile", "auditlog.properties");
ugi = UserGroupInformation.getCurrentUser();
@@ -141,7 +141,7 @@ private static void startCluster(OzoneConfiguration conf) throws Exception {
/**
* Close OzoneClient and shutdown MiniOzoneCluster.
*/
- @AfterClass
+ @AfterAll
public static void teardown() throws IOException {
shutdownCluster();
deleteAuditLog();
@@ -195,7 +195,7 @@ public void testXXXAclSuccessAudits() throws Exception {
OzoneVolume retVolumeinfo = store.getVolume(volumeName);
verifyLog(OMAction.READ_VOLUME.name(), volumeName,
AuditEventStatus.SUCCESS.name());
- Assert.assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName));
+ Assertions.assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName));
OzoneObj volObj = new OzoneObjInfo.Builder()
.setVolumeName(volumeName)
@@ -207,7 +207,7 @@ public void testXXXAclSuccessAudits() throws Exception {
List<OzoneAcl> acls = store.getAcl(volObj);
verifyLog(OMAction.GET_ACL.name(), volumeName,
AuditEventStatus.SUCCESS.name());
- Assert.assertTrue(acls.size() > 0);
+ Assertions.assertFalse(acls.isEmpty());
//Testing addAcl
store.addAcl(volObj, USER_ACL);
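For these name-ordered ACL audit tests, JUnit 4's @FixMethodOrder(MethodSorters.NAME_ASCENDING) becomes @TestMethodOrder with MethodOrderer.MethodName, as in this sketch (assuming JUnit 5.7 or later; test names are illustrative):

    import org.junit.jupiter.api.MethodOrderer;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.TestMethodOrder;

    // Runs test methods in alphabetical name order, matching
    // JUnit 4's MethodSorters.NAME_ASCENDING.
    @TestMethodOrder(MethodOrderer.MethodName.class)
    class OrderedTestsSketch {
      @Test void test1AddAcl() { }
      @Test void test2GetAcl() { }
      @Test void test3RemoveAcl() { }
    }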
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
index 54153744d7c..a8343c7512f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
@@ -24,7 +24,6 @@
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
-import java.util.Arrays;
import java.util.HashMap;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
@@ -60,7 +59,6 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -118,8 +116,8 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
// Write data into a key
try (OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, ReplicationType.RATIS,
- THREE, new HashMap<>())) {
+ value.getBytes(UTF_8).length, ReplicationConfig.fromTypeAndFactor(
+ ReplicationType.RATIS, THREE), new HashMap<>())) {
out.write(value.getBytes(UTF_8));
}
@@ -133,7 +131,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertArrayEquals(value.getBytes(UTF_8), b);
} catch (OzoneChecksumException e) {
fail("Read key should succeed");
}
@@ -142,7 +140,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertArrayEquals(value.getBytes(UTF_8), b);
} catch (OzoneChecksumException e) {
fail("Read file should succeed");
}
@@ -157,7 +155,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = newBucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertArrayEquals(value.getBytes(UTF_8), b);
} catch (OzoneChecksumException e) {
fail("Read key should succeed");
}
@@ -166,7 +164,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException {
try (OzoneInputStream is = newBucket.readFile(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
- assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
+ assertArrayEquals(b, value.getBytes(UTF_8));
} catch (OzoneChecksumException e) {
fail("Read file should succeed");
}
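The createKey calls above replace the deprecated (ReplicationType, ReplicationFactor) overload with an explicit ReplicationConfig. A minimal sketch of the new call shape, assembled from the hunks above (the wrapper class and method are illustrative):

import java.nio.charset.StandardCharsets;
import java.util.HashMap;

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;

class CreateKeySketch {
  void writeKey(OzoneBucket bucket, String keyName, String value) throws Exception {
    byte[] data = value.getBytes(StandardCharsets.UTF_8);
    // fromTypeAndFactor builds the same RATIS/THREE configuration that the
    // removed two-argument overload implied.
    try (OzoneOutputStream out = bucket.createKey(keyName, data.length,
        ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, THREE),
        new HashMap<>())) {
      out.write(data);
    }
  }
}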
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
index 31175a06b4d..1dcc8adadda 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
@@ -26,6 +26,7 @@
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
@@ -61,10 +62,10 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Tests the containerStateMachine failure handling.
@@ -84,7 +85,7 @@ public class TestValidateBCSIDOnRestart {
*
* @throws IOException
*/
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
@@ -93,14 +94,14 @@ public static void init() throws Exception {
conf.setFromObject(clientConfig);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
- TimeUnit.MILLISECONDS);
+ TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
- TimeUnit.MILLISECONDS);
+ TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200,
- TimeUnit.MILLISECONDS);
+ TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, TimeUnit.SECONDS);
conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1,
- TimeUnit.SECONDS);
+ TimeUnit.SECONDS);
RatisClientConfig ratisClientConfig =
conf.getObject(RatisClientConfig.class);
@@ -121,9 +122,9 @@ public static void init() throws Exception {
conf.setFromObject(raftClientConfig);
cluster =
- MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2).
- setHbInterval(200)
- .build();
+ MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2).
+ setHbInterval(200)
+ .build();
cluster.waitForClusterToBeReady();
cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000);
//the easiest way to create an open container is creating a key
@@ -139,7 +140,7 @@ public static void init() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @AfterClass
+ @AfterAll
public static void shutdown() {
IOUtils.closeQuietly(client);
if (cluster != null) {
@@ -150,29 +151,31 @@ public static void shutdown() {
@Test
public void testValidateBCSIDOnDnRestart() throws Exception {
OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(
+ ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis".getBytes(UTF_8));
key.flush();
key.write("ratis".getBytes(UTF_8));
KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ groupOutputStream.getLocationInfoList();
+ Assertions.assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
- cluster);
+ cluster);
ContainerData containerData =
- TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
- .getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID())
- .getContainerData();
- Assert.assertTrue(containerData instanceof KeyValueContainerData);
+ TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
+ .getDatanodeStateMachine()
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerData();
+ Assertions.assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData =
- (KeyValueContainerData) containerData;
+ (KeyValueContainerData) containerData;
key.close();
long containerID = omKeyLocationInfo.getContainerID();
@@ -182,49 +185,50 @@ public void testValidateBCSIDOnDnRestart() throws Exception {
HddsDatanodeService dnService = cluster.getHddsDatanodes().get(index);
OzoneContainer ozoneContainer =
- dnService.getDatanodeStateMachine()
- .getContainer();
+ dnService.getDatanodeStateMachine()
+ .getContainer();
ozoneContainer.getContainerSet().removeContainer(containerID);
ContainerStateMachine stateMachine =
- (ContainerStateMachine) TestHelper.getStateMachine(cluster.
- getHddsDatanodes().get(index),
- omKeyLocationInfo.getPipeline());
+ (ContainerStateMachine) TestHelper.getStateMachine(cluster.
+ getHddsDatanodes().get(index),
+ omKeyLocationInfo.getPipeline());
SimpleStateMachineStorage storage =
- (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
+ (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
stateMachine.takeSnapshot();
final Path parentPath = StatemachineImplTestUtil.findLatestSnapshot(storage)
.getFile().getPath();
stateMachine.buildMissingContainerSet(parentPath.toFile());
// Since the snapshot threshold is set to 1, since there are
// applyTransactions, we should see snapshots
- Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
+ Assertions.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
// make sure the missing containerSet is not empty
HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher();
- Assert.assertTrue(!dispatcher.getMissingContainerSet().isEmpty());
- Assert
- .assertTrue(dispatcher.getMissingContainerSet()
- .contains(containerID));
+ Assertions.assertFalse(dispatcher.getMissingContainerSet().isEmpty());
+ Assertions
+ .assertTrue(dispatcher.getMissingContainerSet()
+ .contains(containerID));
// write a new key
key = objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis", 1024, ReplicationType.RATIS,
- ReplicationFactor.ONE, new HashMap<>());
+ .createKey("ratis", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.ONE), new HashMap<>());
// First write and flush creates a container in the datanode
key.write("ratis1".getBytes(UTF_8));
key.flush();
groupOutputStream = (KeyOutputStream) key.getOutputStream();
locationInfoList = groupOutputStream.getLocationInfoList();
- Assert.assertEquals(1, locationInfoList.size());
+ Assertions.assertEquals(1, locationInfoList.size());
omKeyLocationInfo = locationInfoList.get(0);
key.close();
containerID = omKeyLocationInfo.getContainerID();
dn = TestHelper.getDatanodeService(omKeyLocationInfo,
- cluster);
+ cluster);
containerData = dn.getDatanodeStateMachine()
- .getContainer().getContainerSet()
- .getContainer(omKeyLocationInfo.getContainerID())
- .getContainerData();
- Assert.assertTrue(containerData instanceof KeyValueContainerData);
+ .getContainer().getContainerSet()
+ .getContainer(omKeyLocationInfo.getContainerID())
+ .getContainerData();
+ Assertions.assertTrue(containerData instanceof KeyValueContainerData);
keyValueContainerData = (KeyValueContainerData) containerData;
try (DBHandle db = BlockUtils.getDB(keyValueContainerData, conf)) {
@@ -239,11 +243,10 @@ public void testValidateBCSIDOnDnRestart() throws Exception {
index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
cluster.restartHddsDatanode(dn.getDatanodeDetails(), true);
// Make sure the container is marked unhealthy
- Assert.assertTrue(
- cluster.getHddsDatanodes().get(index)
- .getDatanodeStateMachine()
- .getContainer().getContainerSet().getContainer(containerID)
- .getContainerState()
- == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
+ Assertions.assertSame(cluster.getHddsDatanodes().get(index)
+ .getDatanodeStateMachine()
+ .getContainer().getContainerSet().getContainer(containerID)
+ .getContainerState(),
+ ContainerProtos.ContainerDataProto.State.UNHEALTHY);
}
}
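Two conversions above are worth noting: assertTrue(x == y) becomes assertSame, which preserves the reference-identity check while reporting expected and actual values on failure, and assertTrue(!c) becomes assertFalse(c). A self-contained sketch (the enum is illustrative, not the real proto type):

import org.junit.jupiter.api.Assertions;

class AssertSameSketch {
  enum State { OPEN, CLOSED, UNHEALTHY }

  void check(State actual) {
    // assertSame compares with ==; for enum constants that is equivalent to
    // equals, but unlike assertTrue(actual == State.UNHEALTHY) a failure
    // prints both values instead of a bare "expected true".
    Assertions.assertSame(State.UNHEALTHY, actual);
  }
}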
From 594b900af537e5893773381ee27922a916cd89f7 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Tue, 19 Dec 2023 13:08:21 +0100
Subject: [PATCH 07/28] HDDS-9953. Simplify assertions in hadoop-hdds (#5817)
---
...erverNotLeaderExceptionMessageParsing.java | 6 +--
.../common/helpers/TestExcludeList.java | 10 ++---
.../hadoop/hdds/utils/TestResourceCache.java | 4 +-
.../TestChecksumImplsComputeSameValues.java | 6 +--
.../ozone/TestHddsSecureDatanodeInit.java | 20 ++++------
.../common/TestDatanodeStoreCache.java | 2 +-
...leRecoveringContainerScrubbingService.java | 2 +-
.../common/impl/TestContainerPersistence.java | 7 ++--
.../endpoint/TestHeartbeatEndpointTask.java | 8 ++--
.../keyvalue/impl/TestBlockManagerImpl.java | 5 +--
.../hdds/utils/db/TestRDBTableStore.java | 14 ++-----
.../balancer/TestContainerBalancer.java | 40 +++++++------------
.../TestSCMContainerPlacementRackAware.java | 5 +--
.../replication/TestReplicationManager.java | 5 ++-
.../hdds/scm/node/TestNodeReportHandler.java | 12 +++---
.../scm/pipeline/TestPipelineManagerImpl.java | 3 +-
.../server/TestSCMBlockProtocolServer.java | 10 ++---
17 files changed, 67 insertions(+), 92 deletions(-)
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java
index 05ad9700578..00c290bc8d6 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java
@@ -56,8 +56,7 @@ public void testServerNotLeaderException() {
"at org.apache.hadoop.hdds.ratis.ServerNotLeaderException" +
".convertToNotLeaderException(ServerNotLeaderException.java:96)";
snle = new ServerNotLeaderException(message);
- Assertions.assertEquals(null,
- snle.getSuggestedLeader());
+ Assertions.assertNull(snle.getSuggestedLeader());
message = "Server:7fdd7170-75cc-4e11-b343-c2657c2f2f39 is not the " +
"leader.Suggested leader is Server:localhost:98634:8988 \n" +
@@ -72,8 +71,7 @@ public void testServerNotLeaderException() {
"at org.apache.hadoop.hdds.ratis.ServerNotLeaderException" +
".convertToNotLeaderException(ServerNotLeaderException.java)";
snle = new ServerNotLeaderException(message);
- Assertions.assertEquals(null,
- snle.getSuggestedLeader());
+ Assertions.assertNull(snle.getSuggestedLeader());
}
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java
index d5330749aec..c878124cd19 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java
@@ -39,9 +39,9 @@ public void excludeNodesShouldBeCleanedBasedOnGivenTime() {
.setIpAddress("127.0.0.1").setHostName("localhost").addPort(
DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001))
.build());
- Assertions.assertTrue(list.getDatanodes().size() == 1);
+ Assertions.assertEquals(1, list.getDatanodes().size());
clock.fastForward(11);
- Assertions.assertTrue(list.getDatanodes().size() == 0);
+ Assertions.assertEquals(0, list.getDatanodes().size());
list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID())
.setIpAddress("127.0.0.2").setHostName("localhost").addPort(
DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001))
@@ -50,7 +50,7 @@ public void excludeNodesShouldBeCleanedBasedOnGivenTime() {
.setIpAddress("127.0.0.3").setHostName("localhost").addPort(
DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001))
.build());
- Assertions.assertTrue(list.getDatanodes().size() == 2);
+ Assertions.assertEquals(2, list.getDatanodes().size());
}
@Test
@@ -60,8 +60,8 @@ public void excludeNodeShouldNotBeCleanedIfExpiryTimeIsZero() {
.setIpAddress("127.0.0.1").setHostName("localhost").addPort(
DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001))
.build());
- Assertions.assertTrue(list.getDatanodes().size() == 1);
+ Assertions.assertEquals(1, list.getDatanodes().size());
clock.fastForward(1);
- Assertions.assertTrue(list.getDatanodes().size() == 1);
+ Assertions.assertEquals(1, list.getDatanodes().size());
}
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java
index 54d59af03d1..3acaee85fa4 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java
@@ -49,7 +49,7 @@ public void testResourceCache() throws InterruptedException {
// put to cache with removing old element "6" as eviction FIFO
resourceCache.put(1, "a");
Assertions.assertNull(resourceCache.get(6));
- Assertions.assertTrue(count.get() == 1);
+ Assertions.assertEquals(1, count.get());
// add 5 should be success with no removal
resourceCache.put(5, "a");
@@ -58,7 +58,7 @@ public void testResourceCache() throws InterruptedException {
// remove and check queue
resourceCache.remove(4);
Assertions.assertNull(resourceCache.get(4));
- Assertions.assertTrue(count.get() == 1);
+ Assertions.assertEquals(1, count.get());
}
@Test
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java
index 5e02ceaf083..eeba2a8e422 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java
@@ -28,7 +28,7 @@
import java.util.List;
import java.util.zip.CRC32;
-import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests to verify that different checksum implementations compute the same
@@ -52,7 +52,7 @@ public void testCRC32ImplsMatch() {
if (NativeCRC32Wrapper.isAvailable()) {
impls.add(new ChecksumByteBufferImpl(new NativeCheckSumCRC32(1, bpc)));
}
- assertEquals(true, validateImpls(data, impls, bpc));
+ assertTrue(validateImpls(data, impls, bpc));
}
}
@@ -74,7 +74,7 @@ public void testCRC32CImplsMatch() {
if (NativeCRC32Wrapper.isAvailable()) {
impls.add(new ChecksumByteBufferImpl(new NativeCheckSumCRC32(2, bpc)));
}
- assertEquals(true, validateImpls(data, impls, bpc));
+ assertTrue(validateImpls(data, impls, bpc));
}
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index 8c3558879ae..beca5b2ee6e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -53,6 +53,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOKEN_CHECKS_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.mockito.ArgumentMatchers.anyObject;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
@@ -342,8 +343,7 @@ public void testCertificateRotation() throws Exception {
when(scmClient.getAllRootCaCertificates()).thenReturn(rootCaList);
// check that new cert ID should not equal to current cert ID
String certId = newCertHolder.getSerialNumber().toString();
- Assertions.assertFalse(certId.equals(
- client.getCertificate().getSerialNumber().toString()));
+ assertNotEquals(certId, client.getCertificate().getSerialNumber().toString());
// start monitor task to renew key and cert
client.startCertificateRenewerService();
@@ -382,12 +382,10 @@ public void testCertificateRotation() throws Exception {
String newCertId = client.getCertificate().getSerialNumber().toString();
return newCertId.equals(certId2);
}, 1000, CERT_LIFETIME * 1000);
- Assertions.assertFalse(client.getPrivateKey().equals(privateKey1));
- Assertions.assertFalse(client.getPublicKey().equals(publicKey1));
- Assertions.assertFalse(client.getCACertificate().getSerialNumber()
- .toString().equals(caCertId1));
- Assertions.assertFalse(client.getRootCACertificate().getSerialNumber()
- .toString().equals(rootCaCertId1));
+ assertNotEquals(privateKey1, client.getPrivateKey());
+ assertNotEquals(publicKey1, client.getPublicKey());
+ assertNotEquals(caCertId1, client.getCACertificate().getSerialNumber().toString());
+ assertNotEquals(rootCaCertId1, client.getRootCACertificate().getSerialNumber().toString());
}
/**
@@ -417,16 +415,14 @@ public void testCertificateRotationRecoverableFailure() throws Exception {
// check that new cert ID should not equal to current cert ID
String certId = newCertHolder.getSerialNumber().toString();
- Assertions.assertFalse(certId.equals(
- client.getCertificate().getSerialNumber().toString()));
+ assertNotEquals(certId, client.getCertificate().getSerialNumber().toString());
// start monitor task to renew key and cert
client.startCertificateRenewerService();
// certificate failed to renew, client still hold the old expired cert.
Thread.sleep(CERT_LIFETIME * 1000);
- Assertions.assertFalse(certId.equals(
- client.getCertificate().getSerialNumber().toString()));
+ assertNotEquals(certId, client.getCertificate().getSerialNumber().toString());
try {
client.getCertificate().checkValidity();
} catch (Exception e) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
index 546fcf5155d..6cf3b2cee5a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
@@ -61,7 +61,7 @@ public void testBasicOperations() throws IOException {
Assertions.assertEquals(2, cache.size());
// test get, test reference the same object using ==
- Assertions.assertTrue(store1 == cache.getDB(dbPath1, conf).getStore());
+ Assertions.assertSame(store1, cache.getDB(dbPath1, conf).getStore());
// test remove
cache.removeDB(dbPath1);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
index 5076fed0b69..0979b2fe6f2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
@@ -162,7 +162,7 @@ public void testScrubbingStaleRecoveringContainers(
testClock.fastForward(1000L);
srcss.runPeriodicalTaskNow();
//closed container should not be scrubbed
- Assertions.assertTrue(containerSet.containerCount() == 5);
+ Assertions.assertEquals(5, containerSet.containerCount());
containerStateMap.putAll(createTestContainers(containerSet, 5,
RECOVERING).stream()
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index de5d8f64561..602e9d82873 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -92,6 +92,7 @@
import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
@@ -614,7 +615,7 @@ private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException {
.getVolume().getCommittedBytes();
commitDecrement = commitBytesBefore - commitBytesAfter;
// did we decrement commit bytes by the amount of data we wrote?
- Assertions.assertTrue(commitDecrement == info.getLen());
+ assertEquals(commitDecrement, info.getLen());
return info;
}
@@ -810,7 +811,7 @@ public void testPutBlockWithInvalidBCSId(ContainerTestVersionInfo versionInfo)
getBlock(container, blockID1);
Assertions.fail("Expected exception not thrown");
} catch (StorageContainerException sce) {
- Assertions.assertTrue(sce.getResult() == UNKNOWN_BCSID);
+ assertSame(UNKNOWN_BCSID, sce.getResult());
}
try {
@@ -821,7 +822,7 @@ public void testPutBlockWithInvalidBCSId(ContainerTestVersionInfo versionInfo)
getBlock(container, blockID1);
Assertions.fail("Expected exception not thrown");
} catch (StorageContainerException sce) {
- Assertions.assertTrue(sce.getResult() == BCSID_MISMATCH);
+ assertSame(BCSID_MISMATCH, sce.getResult());
}
readBlockData = blockManager.
getBlock(container, blockData.getBlockID());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index d0b9e80f3cc..96643789f4e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -134,7 +134,7 @@ public void testheartbeatWithoutReports() throws Exception {
Assertions.assertTrue(heartbeat.hasDatanodeDetails());
Assertions.assertFalse(heartbeat.hasNodeReport());
Assertions.assertFalse(heartbeat.hasContainerReport());
- Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0);
+ Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount());
Assertions.assertFalse(heartbeat.hasContainerActions());
OptionalLong termInDatanode = context.getTermOfLeaderSCM();
Assertions.assertTrue(termInDatanode.isPresent());
@@ -169,7 +169,7 @@ public void testheartbeatWithNodeReports() throws Exception {
Assertions.assertTrue(heartbeat.hasDatanodeDetails());
Assertions.assertTrue(heartbeat.hasNodeReport());
Assertions.assertFalse(heartbeat.hasContainerReport());
- Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0);
+ Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount());
Assertions.assertFalse(heartbeat.hasContainerActions());
}
@@ -201,7 +201,7 @@ public void testheartbeatWithContainerReports() throws Exception {
Assertions.assertTrue(heartbeat.hasDatanodeDetails());
Assertions.assertFalse(heartbeat.hasNodeReport());
Assertions.assertTrue(heartbeat.hasContainerReport());
- Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0);
+ Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount());
Assertions.assertFalse(heartbeat.hasContainerActions());
}
@@ -266,7 +266,7 @@ public void testheartbeatWithContainerActions() throws Exception {
Assertions.assertTrue(heartbeat.hasDatanodeDetails());
Assertions.assertFalse(heartbeat.hasNodeReport());
Assertions.assertFalse(heartbeat.hasContainerReport());
- Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0);
+ Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount());
Assertions.assertTrue(heartbeat.hasContainerActions());
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
index a8e4cb81410..a7d6364a967 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
@@ -46,7 +46,6 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.mock;
@@ -202,7 +201,7 @@ public void testListBlock(ContainerTestVersionInfo versionInfo)
List<BlockData> listBlockData = blockManager.listBlock(
keyValueContainer, 1, 10);
assertNotNull(listBlockData);
- assertTrue(listBlockData.size() == 1);
+ assertEquals(1, listBlockData.size());
for (long i = 2; i <= 10; i++) {
blockID = new BlockID(1L, i);
@@ -221,6 +220,6 @@ public void testListBlock(ContainerTestVersionInfo versionInfo)
listBlockData = blockManager.listBlock(
keyValueContainer, 1, 10);
assertNotNull(listBlockData);
- assertTrue(listBlockData.size() == 10);
+ assertEquals(10, listBlockData.size());
}
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index df6fb5795fd..3ff9ece6b8c 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -562,8 +562,7 @@ public void testPrefixedIterator() throws Exception {
int keyCount = 0;
while (iter.hasNext()) {
// iterator should only meet keys with samplePrefix
- assertArrayEquals(
- Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), samplePrefix);
+ assertArrayEquals(samplePrefix, Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH));
keyCount++;
}
@@ -573,8 +572,7 @@ public void testPrefixedIterator() throws Exception {
// iterator should be able to seekToFirst
iter.seekToFirst();
assertTrue(iter.hasNext());
- assertArrayEquals(Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH),
- samplePrefix);
+ assertArrayEquals(samplePrefix, Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH));
}
}
}
@@ -708,9 +706,7 @@ public void testDumpAndLoadBasic() throws Exception {
int keyCount = 0;
while (iter.hasNext()) {
// check prefix
- assertTrue(Arrays.equals(
- Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH),
- samplePrefix));
+ assertArrayEquals(Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), samplePrefix);
keyCount++;
}
@@ -751,9 +747,7 @@ public void testDumpAndLoadEmpty() throws Exception {
int keyCount = 0;
while (iter.hasNext()) {
// check prefix
- assertTrue(Arrays.equals(
- Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH),
- samplePrefix));
+ assertArrayEquals(Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), samplePrefix);
keyCount++;
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java
index d5ae7457db4..1de5d6b1a0b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java
@@ -45,6 +45,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT;
+import static org.junit.jupiter.api.Assertions.assertSame;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -127,31 +128,26 @@ public void testStartBalancerStop() throws Exception {
startBalancer(balancerConfiguration);
try {
containerBalancer.startBalancer(balancerConfiguration);
- Assertions.assertTrue(false,
- "Exception should be thrown when startBalancer again");
+ Assertions.fail("Exception should be thrown when startBalancer again");
} catch (IllegalContainerBalancerStateException e) {
// start failed again, valid case
}
try {
containerBalancer.start();
- Assertions.assertTrue(false,
- "Exception should be thrown when start again");
+ Assertions.fail("Exception should be thrown when start again");
} catch (IllegalContainerBalancerStateException e) {
// start failed again, valid case
}
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.RUNNING);
+ assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus());
stopBalancer();
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.STOPPED);
+ assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus());
try {
containerBalancer.stopBalancer();
- Assertions.assertTrue(false,
- "Exception should be thrown when stop again");
+ Assertions.fail("Exception should be thrown when stop again");
} catch (Exception e) {
// stop failed as already stopped, valid case
}
@@ -161,23 +157,19 @@ public void testStartBalancerStop() throws Exception {
public void testStartStopSCMCalls() throws Exception {
containerBalancer.saveConfiguration(balancerConfiguration, true, 0);
containerBalancer.start();
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.RUNNING);
+ assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus());
containerBalancer.notifyStatusChanged();
try {
containerBalancer.start();
- Assertions.assertTrue(false,
- "Exception should be thrown when start again");
+ Assertions.fail("Exception should be thrown when start again");
} catch (IllegalContainerBalancerStateException e) {
// start failed when triggered again, valid case
}
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.RUNNING);
+ assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus());
containerBalancer.stop();
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.STOPPED);
+ assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus());
containerBalancer.saveConfiguration(balancerConfiguration, false, 0);
}
@@ -186,20 +178,16 @@ public void testNotifyStateChangeStopStart() throws Exception {
containerBalancer.startBalancer(balancerConfiguration);
scm.getScmContext().updateLeaderAndTerm(false, 1);
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.RUNNING);
+ assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus());
containerBalancer.notifyStatusChanged();
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.STOPPED);
+ assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus());
scm.getScmContext().updateLeaderAndTerm(true, 2);
scm.getScmContext().setLeaderReady();
containerBalancer.notifyStatusChanged();
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.RUNNING);
+ assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus());
containerBalancer.stop();
- Assertions.assertTrue(containerBalancer.getBalancerStatus()
- == ContainerBalancerTask.Status.STOPPED);
+ assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus());
}
/**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 6016b2f14d3..92f05d772fe 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -423,7 +423,7 @@ public void testNoInfiniteLoop(int datanodeCount) {
policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15);
fail("Storage requested exceeds capacity, this call should fail");
} catch (Exception e) {
- assertTrue(e.getClass().getSimpleName().equals("SCMException"));
+ assertEquals("SCMException", e.getClass().getSimpleName());
}
// get metrics
@@ -833,8 +833,7 @@ public void chooseNodeWithUsedAndFavouredNodesMultipleRack()
// Favoured node should be returned,
// as favoured node is in the different rack as used nodes.
- Assertions.assertTrue(favouredNodes.get(0).getUuid() ==
- datanodeDetails.get(0).getUuid());
+ Assertions.assertSame(favouredNodes.get(0).getUuid(), datanodeDetails.get(0).getUuid());
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index d2b2d18d358..a9093778793 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -84,6 +84,7 @@
import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createReplicasWithSameOrigin;
import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.getNoNodesTestPlacementPolicy;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -518,7 +519,7 @@ public void testHealthyContainerStatus() throws ContainerNotFoundException {
boolean result = replicationManager.checkContainerStatus(
container, repReport);
- assertEquals(false, result);
+ assertFalse(result);
}
@Test
@@ -546,7 +547,7 @@ public void testUnderReplicatedContainerStatus()
container, repReport);
assertEquals(1, repReport.getStat(
ReplicationManagerReport.HealthState.UNDER_REPLICATED));
- assertEquals(true, result);
+ assertTrue(result);
}
/**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 6f800a4d15d..dd919548cb1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -96,9 +96,9 @@ public void testNodeReport() throws IOException {
Arrays.asList(metaStorageOne)).getReport(), null);
nodeMetric = nodeManager.getNodeStat(dn);
- Assertions.assertTrue(nodeMetric.get().getCapacity().get() == 100);
- Assertions.assertTrue(nodeMetric.get().getRemaining().get() == 90);
- Assertions.assertTrue(nodeMetric.get().getScmUsed().get() == 10);
+ Assertions.assertEquals(100, (long) nodeMetric.get().getCapacity().get());
+ Assertions.assertEquals(90, (long) nodeMetric.get().getRemaining().get());
+ Assertions.assertEquals(10, (long) nodeMetric.get().getScmUsed().get());
StorageReportProto storageTwo = HddsTestUtils
.createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
@@ -107,9 +107,9 @@ public void testNodeReport() throws IOException {
Arrays.asList(metaStorageOne)), this);
nodeMetric = nodeManager.getNodeStat(dn);
- Assertions.assertTrue(nodeMetric.get().getCapacity().get() == 200);
- Assertions.assertTrue(nodeMetric.get().getRemaining().get() == 180);
- Assertions.assertTrue(nodeMetric.get().getScmUsed().get() == 20);
+ Assertions.assertEquals(200, (long) nodeMetric.get().getCapacity().get());
+ Assertions.assertEquals(180, (long) nodeMetric.get().getRemaining().get());
+ Assertions.assertEquals(20, (long) nodeMetric.get().getScmUsed().get());
}
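The (long) casts added above are not cosmetic: without them assertEquals would box both arguments to Object, and Integer.valueOf(100) never equals Long.valueOf(100). A self-contained illustration of the overload resolution:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;

class BoxedAssertSketch {
  void demo() {
    Long capacity = 100L;
    // Boxed comparison: an Integer is never equal to a Long, even for the
    // same numeric value, so assertEquals(Object, Object) would fail here.
    assertNotEquals((Object) 100, (Object) capacity);
    // The cast unboxes the actual value, selecting assertEquals(long, long).
    assertEquals(100, (long) capacity);
  }
}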
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 48f82b5cc95..ce6c78f5b90 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -930,8 +930,7 @@ public void testWaitForAllocatedPipeline()
ContainerInfo c = provider.getContainer(1, repConfig,
owner, new ExcludeList());
- Assertions.assertTrue(c.equals(container),
- "Expected container was returned");
+ Assertions.assertEquals(c, container, "Expected container was returned");
// Confirm that waitOnePipelineReady was called on allocated pipelines
ArgumentCaptor<Collection<PipelineID>> captor =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 38f5dee109f..0bed0337d6f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -250,7 +250,7 @@ public void testSortDatanodes() throws Exception {
System.out.println("client = " + client);
datanodeDetails.stream().forEach(
node -> System.out.println(node.toString()));
- Assertions.assertTrue(datanodeDetails.size() == NODE_COUNT);
+ Assertions.assertEquals(NODE_COUNT, datanodeDetails.size());
// illegal client 1
client += "X";
@@ -258,14 +258,14 @@ public void testSortDatanodes() throws Exception {
System.out.println("client = " + client);
datanodeDetails.stream().forEach(
node -> System.out.println(node.toString()));
- Assertions.assertTrue(datanodeDetails.size() == NODE_COUNT);
+ Assertions.assertEquals(NODE_COUNT, datanodeDetails.size());
// illegal client 2
client = "/default-rack";
datanodeDetails = server.sortDatanodes(nodes, client);
System.out.println("client = " + client);
datanodeDetails.stream().forEach(
node -> System.out.println(node.toString()));
- Assertions.assertTrue(datanodeDetails.size() == NODE_COUNT);
+ Assertions.assertEquals(NODE_COUNT, datanodeDetails.size());
// unknown node to sort
nodes.add(UUID.randomUUID().toString());
@@ -278,7 +278,7 @@ public void testSortDatanodes() throws Exception {
.build();
ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp =
service.sortDatanodes(request, ClientVersion.CURRENT_VERSION);
- Assertions.assertTrue(resp.getNodeList().size() == NODE_COUNT);
+ Assertions.assertEquals(NODE_COUNT, resp.getNodeList().size());
System.out.println("client = " + client);
resp.getNodeList().stream().forEach(
node -> System.out.println(node.getNetworkName()));
@@ -295,7 +295,7 @@ public void testSortDatanodes() throws Exception {
.build();
resp = service.sortDatanodes(request, ClientVersion.CURRENT_VERSION);
System.out.println("client = " + client);
- Assertions.assertTrue(resp.getNodeList().size() == 0);
+ Assertions.assertEquals(0, resp.getNodeList().size());
resp.getNodeList().stream().forEach(
node -> System.out.println(node.getNetworkName()));
}
From 71019a866b17c34fe30d3a90d019d47ddebcd5b7 Mon Sep 17 00:00:00 2001
From: Christos Bisias
Date: Tue, 19 Dec 2023 19:41:59 +0200
Subject: [PATCH 08/28] HDDS-9933. Recon datanode 'Last Heartbeat' should print
relative values (#5801)
---
.../src/views/datanodes/datanodes.tsx | 24 +++++++++++++++++--
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
index e418bf2fefd..6a6118494fa 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
@@ -42,7 +42,7 @@ interface IDatanodeResponse {
hostname: string;
state: DatanodeState;
opState: DatanodeOpState;
- lastHeartbeat: number;
+ lastHeartbeat: string;
storageReport: IStorageReport;
pipelines: IPipeline[];
containers: number;
@@ -182,7 +182,7 @@ const COLUMNS = [
isVisible: true,
sorter: (a: IDatanode, b: IDatanode) => a.lastHeartbeat - b.lastHeartbeat,
render: (heartbeat: number) => {
- return heartbeat > 0 ? moment(heartbeat).format('ll LTS') : 'NA';
+ return heartbeat > 0 ? getTimeDiffFromTimestamp(heartbeat) : 'NA';
}
},
{
@@ -303,6 +303,26 @@ const defaultColumns: IOption[] = COLUMNS.map(column => ({
value: column.key
}));
+const getTimeDiffFromTimestamp = (timestamp: number): string => {
+ const timestampDate = new Date(timestamp);
+ const currentDate = new Date();
+
+ let elapsedTime = "";
+ let duration: moment.Duration = moment.duration(
+ moment(currentDate).diff(moment(timestampDate))
+ )
+
+ const durationKeys = ["seconds", "minutes", "hours", "days", "months", "years"]
+ durationKeys.forEach((k) => {
+ let time = duration["_data"][k]
+ if (time !== 0){
+ elapsedTime = time + `${k.substring(0, 1)} ` + elapsedTime
+ }
+ })
+
+ return elapsedTime.trim().length === 0 ? "Just now" : elapsedTime.trim() + " ago";
+}
+
let cancelSignal: AbortController;
export class Datanodes extends React.Component, IDatanodesState> {
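The helper above reaches into moment's internal _data field to assemble the unit list. The same rendering can be sketched in plain Java with java.time (the method name, format, and days-as-largest-unit cap are all illustrative, not part of the patch):

import java.time.Duration;
import java.time.Instant;

public final class RelativeTimeSketch {
  // Renders the time elapsed since timestampMillis as e.g. "2h 13m 5s ago",
  // or "Just now" when no whole second has passed yet.
  public static String since(long timestampMillis) {
    Duration d = Duration.between(Instant.ofEpochMilli(timestampMillis), Instant.now());
    long days = d.toDays();
    long hours = d.toHours() % 24;
    long minutes = d.toMinutes() % 60;
    long seconds = d.getSeconds() % 60;
    StringBuilder sb = new StringBuilder();
    if (days > 0) {
      sb.append(days).append("d ");
    }
    if (hours > 0) {
      sb.append(hours).append("h ");
    }
    if (minutes > 0) {
      sb.append(minutes).append("m ");
    }
    if (seconds > 0) {
      sb.append(seconds).append("s ");
    }
    return sb.length() == 0 ? "Just now" : sb.toString().trim() + " ago";
  }
}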
From 42ded03e41c49648e51c1fd9001de85db44b9eab Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 19 Dec 2023 20:03:29 +0100
Subject: [PATCH 09/28] HDDS-9966. Bump maven-shade-plugin to 3.5.1 (#5823)
---
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pom.xml b/pom.xml
index f922e7480c0..95a0be71875 100644
--- a/pom.xml
+++ b/pom.xml
@@ -265,7 +265,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
3.1
3.1.1
3.1.0
-    <maven-shade-plugin.version>3.4.1</maven-shade-plugin.version>
+    <maven-shade-plugin.version>3.5.1</maven-shade-plugin.version>
2.5
3.4.0
3.3.0
From dabdedd5addc3d2c2c94cd210e202c3cb89a8acc Mon Sep 17 00:00:00 2001
From: Devesh Kumar Singh
Date: Wed, 20 Dec 2023 02:15:44 +0530
Subject: [PATCH 10/28] HDDS-5604. Intermittent failure in TestPipelineClose
(#5825)
---
.../apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index 6c66ecf3185..99dd1d1768d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -47,7 +47,6 @@
import org.apache.ratis.protocol.RaftGroupId;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
-import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.mockito.ArgumentCaptor;
@@ -209,10 +208,8 @@ public void testPipelineCloseWithPipelineAction() throws Exception {
}
@Test
- @Flaky("HDDS-5604")
public void testPipelineCloseWithLogFailure()
throws IOException, TimeoutException {
-
EventQueue eventQ = (EventQueue) scm.getEventQueue();
PipelineActionHandler pipelineActionTest =
Mockito.mock(PipelineActionHandler.class);
@@ -247,9 +244,7 @@ public void testPipelineCloseWithLogFailure()
* This is expected to trigger an immediate pipeline actions report to SCM
*/
xceiverRatis.handleNodeLogFailure(groupId, null);
-
- // verify SCM receives a pipeline action report "immediately"
- Mockito.verify(pipelineActionTest, Mockito.timeout(100))
+ Mockito.verify(pipelineActionTest, Mockito.timeout(1500).atLeastOnce())
.onMessage(
actionCaptor.capture(),
Mockito.any(EventPublisher.class));
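The flaky 100 ms window is replaced by a polling verification. Mockito.timeout(1500) keeps retrying the check until it passes or 1.5 s elapse, and atLeastOnce() tolerates the action being reported more than once. A minimal sketch (the Handler interface is illustrative):

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;

class VerifyWithTimeoutSketch {
  interface Handler {
    void onMessage(Object event, Object publisher);
  }

  void awaitAsyncCallback(Handler mockHandler) {
    // Polls instead of failing immediately; unlike times(1), atLeastOnce()
    // does not fail if the callback fires repeatedly during the window.
    verify(mockHandler, timeout(1500).atLeastOnce()).onMessage(any(), any());
  }
}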
From b55437f06b4ad524ebf4af42b31a92541160bede Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Wed, 20 Dec 2023 00:07:32 +0100
Subject: [PATCH 11/28] HDDS-9829. Bump `jaxb-api` to 2.3.3, `jaxb-runtime` to
2.3.9 (#5777)
---
hadoop-hdds/container-service/pom.xml | 4 ++--
hadoop-ozone/datanode/pom.xml | 8 +++----
.../dist/src/main/license/bin/LICENSE.txt | 7 +-----
.../dist/src/main/license/jar-report.txt | 8 ++-----
hadoop-ozone/httpfsgateway/pom.xml | 4 ++--
hadoop-ozone/insight/pom.xml | 8 +++----
hadoop-ozone/recon/pom.xml | 10 ++++++--
hadoop-ozone/s3gateway/pom.xml | 8 +++----
hadoop-ozone/tools/pom.xml | 8 +++----
pom.xml | 24 +++++++++----------
10 files changed, 43 insertions(+), 46 deletions(-)
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 0c271508e3b..079847f7c49 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -109,8 +109,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
       <groupId>org.glassfish.jaxb</groupId>
diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml
index 8c5843d382d..2f219334889 100644
--- a/hadoop-ozone/datanode/pom.xml
+++ b/hadoop-ozone/datanode/pom.xml
@@ -48,16 +48,16 @@
       <artifactId>hdds-container-service</artifactId>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
       <groupId>org.glassfish.jaxb</groupId>
       <artifactId>jaxb-runtime</artifactId>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
+      <groupId>jakarta.activation</groupId>
+      <artifactId>jakarta.activation-api</artifactId>
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index a361067ae7f..465f663bc55 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -210,6 +210,7 @@ See licenses/ for text of these licenses.
EDL 1.0
=====================
+ com.sun.activation:jakarta.activation
jakarta.activation:jakarta.activation-api
jakarta.xml.bind:jakarta.xml.bind-api
@@ -232,14 +233,12 @@ CDDL 1.1 + GPLv2 with classpath exception
com.sun.jersey:jersey-json
com.sun.jersey:jersey-server
com.sun.jersey:jersey-servlet
- javax.activation:activation
javax.annotation:javax.annotation-api
javax.el:javax.el-api
javax.interceptor:javax.interceptor-api
javax.servlet:javax.servlet-api
javax.servlet.jsp:jsp-api
javax.ws.rs:jsr311-api
- javax.xml.bind:jaxb-api
org.glassfish.hk2.external:aopalliance-repackaged
org.glassfish.hk2.external:jakarta.inject
org.glassfish.hk2.external:javax.inject
@@ -249,8 +248,6 @@ CDDL 1.1 + GPLv2 with classpath exception
org.glassfish.hk2:hk2-utils
org.glassfish.hk2:osgi-resource-locator
org.glassfish.jaxb:jaxb-runtime
- org.glassfish.jaxb:jaxb-core
- org.glassfish.jaxb:jaxb-runtime
org.glassfish.jaxb:txw2
org.glassfish.jersey.containers:jersey-container-servlet
org.glassfish.jersey.containers:jersey-container-servlet-core
@@ -262,7 +259,6 @@ CDDL 1.1 + GPLv2 with classpath exception
org.glassfish.jersey.inject:jersey-hk2
org.glassfish.jersey.media:jersey-media-jaxb
org.glassfish.jersey.media:jersey-media-json-jackson
- org.jvnet.staxex:stax-ex
Apache License 2.0
@@ -301,7 +297,6 @@ Apache License 2.0
com.nimbusds:nimbus-jose-jwt
com.squareup.okhttp3:okhttp
com.squareup.okio:okio
- com.sun.xml.fastinfoset:FastInfoset
commons-beanutils:commons-beanutils
commons-cli:commons-cli
commons-codec:commons-codec
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index e9a781862d3..8792390b2c2 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -1,4 +1,3 @@
-share/ozone/lib/activation.jar
share/ozone/lib/animal-sniffer-annotations.jar
share/ozone/lib/annotations.jar
share/ozone/lib/annotations.jar
@@ -40,7 +39,6 @@ share/ozone/lib/disruptor.jar
share/ozone/lib/dnsjava.jar
share/ozone/lib/error_prone_annotations.jar
share/ozone/lib/failureaccess.jar
-share/ozone/lib/FastInfoset.jar
share/ozone/lib/gethostname4j.jar
share/ozone/lib/grpc-api.jar
share/ozone/lib/grpc-context.jar
@@ -107,6 +105,7 @@ share/ozone/lib/jaeger-client.jar
share/ozone/lib/jaeger-core.jar
share/ozone/lib/jaeger-thrift.jar
share/ozone/lib/jaeger-tracerresolver.jar
+share/ozone/lib/jakarta.activation.jar
share/ozone/lib/jakarta.activation-api.jar
share/ozone/lib/jakarta.annotation-api.jar
share/ozone/lib/jakarta.inject.jar
@@ -119,8 +118,6 @@ share/ozone/lib/javax.el-api.jar
share/ozone/lib/javax.inject.jar
share/ozone/lib/javax.interceptor-api.jar
share/ozone/lib/javax.servlet-api.jar
-share/ozone/lib/jaxb-api.jar
-share/ozone/lib/jaxb-core.jar
share/ozone/lib/jaxb-runtime.jar
share/ozone/lib/jcip-annotations.jar
share/ozone/lib/jersey-cdi1x.jar
@@ -266,11 +263,10 @@ share/ozone/lib/spring-jdbc.jar
share/ozone/lib/spring-tx.jar
share/ozone/lib/sqlite-jdbc.jar
share/ozone/lib/stax2-api.jar
-share/ozone/lib/stax-ex.jar
share/ozone/lib/txw2.jar
share/ozone/lib/vault-java-driver.jar
share/ozone/lib/weld-servlet-shaded.Final.jar
share/ozone/lib/woodstox-core.jar
share/ozone/lib/zookeeper.jar
share/ozone/lib/zookeeper-jute.jar
-share/ozone/lib/zstd-jni.jar
\ No newline at end of file
+share/ozone/lib/zstd-jni.jar
diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml
index 1ce25c2beac..f56b4006d85 100644
--- a/hadoop-ozone/httpfsgateway/pom.xml
+++ b/hadoop-ozone/httpfsgateway/pom.xml
@@ -150,8 +150,8 @@
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
       <groupId>org.glassfish.jaxb</groupId>
diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml
index c8106b10829..4be02577e09 100644
--- a/hadoop-ozone/insight/pom.xml
+++ b/hadoop-ozone/insight/pom.xml
@@ -66,16 +66,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hdds-tools</artifactId>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
       <groupId>org.glassfish.jaxb</groupId>
       <artifactId>jaxb-runtime</artifactId>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
+      <groupId>jakarta.activation</groupId>
+      <artifactId>jakarta.activation-api</artifactId>
       <groupId>io.dropwizard.metrics</groupId>
diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml
index 11f3dee917a..4985bb99374 100644
--- a/hadoop-ozone/recon/pom.xml
+++ b/hadoop-ozone/recon/pom.xml
@@ -338,6 +338,12 @@
       <groupId>org.jooq</groupId>
       <artifactId>jooq</artifactId>
       <version>${jooq.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+      </exclusions>
org.jooq
@@ -373,8 +379,8 @@
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
+      <groupId>jakarta.activation</groupId>
+      <artifactId>jakarta.activation-api</artifactId>
       <groupId>org.javassist</groupId>
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
index e22f0dc9ac1..a8e72a362fa 100644
--- a/hadoop-ozone/s3gateway/pom.xml
+++ b/hadoop-ozone/s3gateway/pom.xml
@@ -87,16 +87,16 @@
       <artifactId>cdi-api</artifactId>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
       <groupId>org.glassfish.jaxb</groupId>
       <artifactId>jaxb-runtime</artifactId>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
+      <groupId>jakarta.activation</groupId>
+      <artifactId>jakarta.activation-api</artifactId>
       <groupId>io.grpc</groupId>
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index b09b92d0e2e..c413d691181 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -74,16 +74,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
       <groupId>org.glassfish.jaxb</groupId>
       <artifactId>jaxb-runtime</artifactId>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
+      <groupId>jakarta.activation</groupId>
+      <artifactId>jakarta.activation-api</artifactId>
       <groupId>io.dropwizard.metrics</groupId>
diff --git a/pom.xml b/pom.xml
index 95a0be71875..638da75d439 100644
--- a/pom.xml
+++ b/pom.xml
@@ -148,9 +148,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
3.2.4
0.8.5
3.21.0-GA
-    <activation-api.version>1.1.1</activation-api.version>
-    <jaxb-api.version>2.3.0</jaxb-api.version>
-    <jaxb-runtime.version>2.3.0.1</jaxb-runtime.version>
+    <activation-api.version>1.2.2</activation-api.version>
+    <jaxb-api.version>2.3.3</jaxb-api.version>
+    <jaxb-runtime.version>2.3.9</jaxb-runtime.version>
0.1.54
2.0
3.1.0
@@ -202,7 +202,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
3.0.0
3.1.12
2.1.7
-    <jakarta.activation.version>1.2.2</jakarta.activation.version>
4.12.0
4.2.2
2.6.1
@@ -806,11 +805,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
zstd-jni
${zstd-jni.version}
-    <dependency>
-      <groupId>javax.activation</groupId>
-      <artifactId>activation</artifactId>
-      <version>${javax-activation.version}</version>
-    </dependency>
javax.annotation
javax.annotation-api
@@ -1393,14 +1387,20 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
${findbugs.version}
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
       <version>${jaxb-api.version}</version>
       <groupId>org.glassfish.jaxb</groupId>
       <artifactId>jaxb-runtime</artifactId>
       <version>${jaxb-runtime.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+      </exclusions>
com.sun.jersey
@@ -1478,7 +1478,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
       <groupId>jakarta.activation</groupId>
       <artifactId>jakarta.activation-api</artifactId>
-      <version>${jakarta.activation.version}</version>
+      <version>${activation-api.version}</version>
com.squareup.okhttp3
From fdf8b6a93b7d7d0e93349c7632e07848dcd69f22 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Wed, 20 Dec 2023 02:56:34 +0100
Subject: [PATCH 12/28] HDDS-9885. Checkstyle check passing despite config
error (#5755)
---
hadoop-ozone/dev-support/checks/checkstyle.sh | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index 97ddfa698df..cb8b6f8f915 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -30,13 +30,14 @@ declare -i rc
mvn ${MAVEN_OPTIONS} checkstyle:check > "${REPORT_DIR}/output.log"
rc=$?
if [[ ${rc} -ne 0 ]]; then
- mvn ${MAVEN_OPTIONS} clean test-compile checkstyle:check
+ mvn ${MAVEN_OPTIONS} clean test-compile checkstyle:check > output.log
rc=$?
mkdir -p "$REPORT_DIR" # removed by mvn clean
-else
- cat "${REPORT_DIR}/output.log"
+ mv output.log "${REPORT_DIR}"/
fi
+cat "${REPORT_DIR}/output.log"
+
#Print out the exact violations with parsing XML results with sed
find "." -name checkstyle-errors.xml -print0 \
  | xargs -0 sed '$!N; /<file.*\n<\/file/d;P;D' \
  | sed 's/<error.*line="\([[:digit:]]*\)".*message="\([^"]*\)".*/ \1: \2/' \
| tee "$REPORT_FILE"
+# check if Maven failed due to some error other than checkstyle violation
+if [[ ${rc} -ne 0 ]] && [[ ! -s "${REPORT_FILE}" ]]; then
+ grep -m1 -F '[ERROR]' "${REPORT_DIR}/output.log" > "${REPORT_FILE}"
+fi
+
## generate counter
grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures"
From faa19906f664f3a68ccd3b1b9d6347dced279605 Mon Sep 17 00:00:00 2001
From: Siddhant Sangwan
Date: Wed, 20 Dec 2023 11:54:06 +0530
Subject: [PATCH 13/28] HDDS-9592. Replication Manager: Save UNHEALTHY replicas
with highest BCSID for a QUASI_CLOSED container (#5794)
---
.../replication/ContainerHealthResult.java | 9 +
.../ECUnderReplicationHandler.java | 2 +-
.../LegacyRatisContainerReplicaCount.java | 9 +-
.../replication/LegacyReplicationManager.java | 10 +-
.../replication/MisReplicationHandler.java | 2 +-
.../RatisContainerReplicaCount.java | 64 ++++--
.../RatisUnderReplicationHandler.java | 109 ++++++++-
.../replication/ReplicationManager.java | 4 +-
.../replication/ReplicationManagerUtil.java | 78 +++++--
.../VulnerableUnhealthyReplicasHandler.java | 102 ++++++++
.../scm/node/DatanodeAdminMonitorImpl.java | 16 +-
.../TestRatisUnderReplicationHandler.java | 70 ++++++
.../replication/TestReplicationManager.java | 59 +++++
.../TestReplicationManagerUtil.java | 94 +++++++-
...estVulnerableUnhealthyReplicasHandler.java | 217 ++++++++++++++++++
.../node/DatanodeAdminMonitorTestUtil.java | 2 +-
.../scm/node/TestDatanodeAdminMonitor.java | 75 ++++++
17 files changed, 864 insertions(+), 58 deletions(-)
create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java
create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
index a2262cdafdd..0abe8f6ea34 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
@@ -113,6 +113,7 @@ public static class UnderReplicatedHealthResult
private boolean hasUnReplicatedOfflineIndexes = false;
private boolean offlineIndexesOkAfterPending = false;
private int requeueCount = 0;
+ private boolean hasVulnerableUnhealthy = false;
public UnderReplicatedHealthResult(ContainerInfo containerInfo,
int remainingRedundancy, boolean dueToOutOfService,
@@ -269,6 +270,14 @@ public boolean isMissing() {
return isMissing;
}
+ public void setHasVulnerableUnhealthy(boolean hasVulnerableUnhealthy) {
+ this.hasVulnerableUnhealthy = hasVulnerableUnhealthy;
+ }
+
+ public boolean hasVulnerableUnhealthy() {
+ return hasVulnerableUnhealthy;
+ }
+
@Override
public String toString() {
StringBuilder sb = new StringBuilder("UnderReplicatedHealthResult{")
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
index daae24f7f2e..07d38c05dab 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
@@ -128,7 +128,7 @@ public int processAndSendCommands(
container.containerID(), replicas);
ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes =
- ReplicationManagerUtil.getExcludedAndUsedNodes(
+ ReplicationManagerUtil.getExcludedAndUsedNodes(container,
new ArrayList<>(replicas), Collections.emptySet(), pendingOps,
replicationManager);
List<DatanodeDetails> excludedNodes
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java
index f708ae1ead9..f491e2bd6f5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import java.util.List;
import java.util.Set;
@@ -130,6 +131,12 @@ && getReplicas().stream()
public boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode,
NodeManager nodeManager) {
return super.isSufficientlyReplicated() &&
- super.getVulnerableUnhealthyReplicas(nodeManager).isEmpty();
+ super.getVulnerableUnhealthyReplicas(dn -> {
+ try {
+ return nodeManager.getNodeStatus(dn);
+ } catch (NodeNotFoundException e) {
+ return null;
+ }
+ }).isEmpty();
}
}
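Aside: the refactor above swaps a hard NodeManager dependency for a Function<DatanodeDetails, NodeStatus> lookup, with NodeNotFoundException mapped to null. A minimal standalone sketch of that adapter shape, using illustrative names (NodeManagerLike, asLookup) rather than the real SCM types:

    import java.util.function.Function;

    final class NodeStatusAdapterSketch {
      // Hypothetical stand-in for the SCM NodeManager lookup.
      interface NodeManagerLike {
        String getNodeStatus(String datanode) throws Exception;
      }

      // Wrap the checked lookup in a Function; "not found" becomes null,
      // mirroring the lambdas introduced in this patch.
      static Function<String, String> asLookup(NodeManagerLike nodeManager) {
        return dn -> {
          try {
            return nodeManager.getNodeStatus(dn);
          } catch (Exception e) {
            return null;
          }
        };
      }
    }

Depending on a Function keeps the replica-count logic testable without mocking a full NodeManager.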
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
index 07a8f730ec0..04862e0d317 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
@@ -558,7 +558,15 @@ protected void processContainer(ContainerInfo container,
* match the container's Sequence ID.
*/
List<ContainerReplica> vulnerableUnhealthy =
- replicaSet.getVulnerableUnhealthyReplicas(nodeManager);
+ replicaSet.getVulnerableUnhealthyReplicas(dn -> {
+ try {
+ return nodeManager.getNodeStatus(dn);
+ } catch (NodeNotFoundException e) {
+ LOG.warn("Exception for datanode {} while getting vulnerable replicas for container {}, with all " +
+ "replicas {}.", dn, container, replicas, e);
+ return null;
+ }
+ });
if (!vulnerableUnhealthy.isEmpty()) {
report.incrementAndSample(HealthState.UNDER_REPLICATED,
container.containerID());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java
index 70b2a444276..636b0e9589a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java
@@ -148,7 +148,7 @@ public int processAndSendCommands(
.collect(Collectors.toMap(Function.identity(), sources::contains)));
ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes
- = ReplicationManagerUtil.getExcludedAndUsedNodes(
+ = ReplicationManagerUtil.getExcludedAndUsedNodes(container,
new ArrayList<>(replicas), replicasToBeReplicated,
Collections.emptyList(), replicationManager);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
index bec3b1090e4..d23934184eb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult.OverReplicatedHealthResult;
import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult.UnderReplicatedHealthResult;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
import java.util.ArrayList;
import java.util.Collections;
@@ -34,6 +34,7 @@
import java.util.List;
import java.util.Set;
import java.util.UUID;
+import java.util.function.Function;
import java.util.stream.Collectors;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
@@ -423,9 +424,48 @@ public boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode,
return isSufficientlyReplicated();
}
+ /**
+ * Checks if all replicas (except UNHEALTHY) on in-service nodes are in the
+ * same health state as the container. This is similar to what
+ * {@link ContainerReplicaCount#isHealthy()} does. The difference is in how
+ * both methods treat UNHEALTHY replicas.
+ *
+ * This method is the interface between the decommissioning flow and
+ * Replication Manager. Callers can use it to check whether replicas of a
+ * container are in the same state as the container before a datanode is
+ * taken offline.
+ *
+ *
+ * Note that this method's purpose is to only compare the replica state with
+ * the container state. It does not check if the container has sufficient
+ * number of replicas - that is the job of {@link ContainerReplicaCount
+ * #isSufficientlyReplicatedForOffline(DatanodeDetails, NodeManager)}.
+ * @return true if the container is healthy enough, which is determined by
+ * various checks
+ *
+ */
@Override
public boolean isHealthyEnoughForOffline() {
- return isHealthy();
+ long countInService = getReplicas().stream()
+ .filter(r -> r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE)
+ .count();
+ if (countInService == 0) {
+ /*
+ Having no in-service nodes is unexpected and SCM shouldn't allow this
+ to happen in the first place. Return false here just to be safe.
+ */
+ return false;
+ }
+
+ HddsProtos.LifeCycleState containerState = getContainer().getState();
+ return (containerState == HddsProtos.LifeCycleState.CLOSED
+ || containerState == HddsProtos.LifeCycleState.QUASI_CLOSED)
+ && getReplicas().stream()
+ .filter(r -> r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE)
+ .filter(r -> r.getState() !=
+ ContainerReplicaProto.State.UNHEALTHY)
+ .allMatch(r -> ReplicationManager.compareState(
+ containerState, r.getState()));
}
/**
@@ -435,14 +475,14 @@ public boolean isHealthyEnoughForOffline() {
* to save at least one copy of each such UNHEALTHY replica. This method
* finds such UNHEALTHY replicas.
*
- * @param nodeManager an instance of NodeManager
+ * @param nodeStatusFn a function used to check the {@link NodeStatus} of a node,
+ * accepting a {@link DatanodeDetails} and returning {@link NodeStatus}
* @return List of UNHEALTHY replicas with the greatest Sequence ID that
* need to be replicated to other nodes. Empty list if this container is not
* QUASI_CLOSED, doesn't have a mix of healthy and UNHEALTHY replicas, or
* if there are no replicas that need to be saved.
*/
- List<ContainerReplica> getVulnerableUnhealthyReplicas(
- NodeManager nodeManager) {
+ public List<ContainerReplica> getVulnerableUnhealthyReplicas(Function<DatanodeDetails, NodeStatus> nodeStatusFn) {
if (container.getState() != HddsProtos.LifeCycleState.QUASI_CLOSED) {
// this method is only relevant for QUASI_CLOSED containers
return Collections.emptyList();
@@ -456,7 +496,7 @@ List<ContainerReplica> getVulnerableUnhealthyReplicas(
}
if (replica.getSequenceId() == container.getSequenceId()) {
- if (replica.getState() == ContainerReplicaProto.State.UNHEALTHY) {
+ if (replica.getState() == ContainerReplicaProto.State.UNHEALTHY && !replica.isEmpty()) {
unhealthyReplicas.add(replica);
} else if (replica.getState() ==
ContainerReplicaProto.State.QUASI_CLOSED) {
@@ -474,20 +514,16 @@ List getVulnerableUnhealthyReplicas(
unhealthyReplicas.removeIf(
replica -> {
- try {
- return !nodeManager.getNodeStatus(replica.getDatanodeDetails())
- .isHealthy();
- } catch (NodeNotFoundException e) {
- return true;
- }
+ NodeStatus status = nodeStatusFn.apply(replica.getDatanodeDetails());
+ return status == null || !status.isHealthy();
});
/*
- At this point, the list of unhealthyReplicas contains all UNHEALTHY
+ At this point, the list of unhealthyReplicas contains all UNHEALTHY non-empty
replicas with the greatest Sequence ID that are on healthy Datanodes.
Note that this also includes multiple copies of the same UNHEALTHY
replica, that is, replicas with the same Origin ID. We need to consider
the fact that replicas can be uniquely unhealthy. That is, 2 UNHEALTHY
- replicas will difference Origin ID need not be exact copies of each other.
+ replicas with different Origin ID need not be exact copies of each other.
Replicas that don't have at least one instance (multiple instances of a
replica will have the same Origin ID) on an IN_SERVICE node are
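Aside: a condensed sketch of the selection rule these hunks implement, under simplified types. It keeps non-empty UNHEALTHY replicas whose sequence ID matches the container and which sit on healthy nodes; it omits the patch's further checks (QUASI_CLOSED container state, absence of a matching QUASI_CLOSED replica, origin-ID de-duplication). The Replica class here is illustrative, not the real ContainerReplica:

    import java.util.ArrayList;
    import java.util.List;

    final class VulnerableSelectionSketch {
      static final class Replica {
        boolean unhealthy;
        boolean empty;
        long sequenceId;
        boolean onHealthyNode;
      }

      static List<Replica> vulnerable(List<Replica> replicas, long containerSeqId) {
        List<Replica> result = new ArrayList<>();
        for (Replica r : replicas) {
          // HDDS-9592 adds the !empty condition and the null-safe node check.
          if (r.unhealthy && !r.empty
              && r.sequenceId == containerSeqId
              && r.onHealthyNode) {
            result.add(r);
          }
        }
        return result;
      }
    }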
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java
index 98c19d16ffc..4a823fb8eea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.container.replication;
+import com.google.common.collect.ImmutableList;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -98,6 +99,14 @@ public int processAndSendCommands(
new RatisContainerReplicaCount(containerInfo, replicas, pendingOps,
minHealthyForMaintenance, false);
+ if (result instanceof ContainerHealthResult.UnderReplicatedHealthResult) {
+ ContainerHealthResult.UnderReplicatedHealthResult
+ underReplicatedResult = (ContainerHealthResult.UnderReplicatedHealthResult) result;
+ if (underReplicatedResult.hasVulnerableUnhealthy()) {
+ return handleVulnerableUnhealthyReplicas(withUnhealthy, pendingOps);
+ }
+ }
+
// verify that this container is still under replicated and we don't have
// sufficient replication after considering pending adds
RatisContainerReplicaCount replicaCount =
@@ -151,6 +160,104 @@ public int processAndSendCommands(
return commandsSent;
}
+ /**
+ * Sends a replicate command for each replica specified in
+ * vulnerableUnhealthy.
+ * @param replicaCount RatisContainerReplicaCount for this container
+ * @param pendingOps List of pending ops
+ * @return number of replicate commands sent
+ */
+ private int handleVulnerableUnhealthyReplicas(RatisContainerReplicaCount replicaCount,
+ List<ContainerReplicaOp> pendingOps) throws NotLeaderException, CommandTargetOverloadedException, SCMException {
+ ContainerInfo container = replicaCount.getContainer();
+ List<ContainerReplica> vulnerableUnhealthy = replicaCount.getVulnerableUnhealthyReplicas(dn -> {
+ try {
+ return replicationManager.getNodeStatus(dn);
+ } catch (NodeNotFoundException e) {
+ LOG.warn("Exception for datanode {} while handling vulnerable replicas for container {}, with all replicas" +
+ " {}.", dn, container, replicaCount.getReplicas(), e);
+ return null;
+ }
+ });
+ LOG.info("Handling vulnerable UNHEALTHY replicas {} for container {}.", vulnerableUnhealthy, container);
+
+ int pendingAdds = 0;
+ for (ContainerReplicaOp op : pendingOps) {
+ if (op.getOpType() == ContainerReplicaOp.PendingOpType.ADD) {
+ pendingAdds++;
+ }
+ }
+ if (pendingAdds >= vulnerableUnhealthy.size()) {
+ LOG.debug("There are {} pending adds for container {}, while the number of UNHEALTHY replicas is {}.",
+ pendingAdds, container.containerID(), vulnerableUnhealthy.size());
+ return 0;
+ }
+
+ /*
+ Since we're replicating UNHEALTHY replicas, it's possible that replication keeps on failing. Shuffling gives
+ other replicas a chance to be replicated since there's a limit on in-flight adds.
+ */
+ Collections.shuffle(vulnerableUnhealthy);
+ return replicateEachSource(replicaCount, vulnerableUnhealthy, pendingOps);
+ }
+
+ /**
+ * Replicates each of the ContainerReplica specified in sources to new
+ * Datanodes. Will not consider Datanodes hosting existing replicas and
+ * Datanodes pending adds as targets. Note that this method simply skips
+ * a replica if its datanode is overloaded with commands, throwing an
+ * exception once all sources have been looked at.
+ * @param replicaCount RatisContainerReplicaCount for this container
+ * @param sources List containing replicas, each will be replicated
+ */
+ private int replicateEachSource(RatisContainerReplicaCount replicaCount, List<ContainerReplica> sources,
+ List<ContainerReplicaOp> pendingOps) throws NotLeaderException, SCMException, CommandTargetOverloadedException {
+ List<ContainerReplica> allReplicas = replicaCount.getReplicas();
+ ContainerInfo container = replicaCount.getContainer();
+
+ /*
+ We use the placement policy to get a target Datanode to which a vulnerable replica will be replicated. In
+ placement policy terms, a 'used node' is a Datanode which has a legit replica of this container. An 'excluded
+ node' is a Datanode that should not be considered to host a replica of this container, but other Datanodes in this
+ Datanode's rack are available. So, Datanodes of any vulnerable replicas should be excluded nodes while Datanodes
+ of other replicas, including UNHEALTHY replicas that are not pending delete (because they have unique origin),
+ should be used nodes.
+ */
+ ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes =
+ ReplicationManagerUtil.getExcludedAndUsedNodes(container, allReplicas, Collections.emptySet(), pendingOps,
+ replicationManager);
+
+ CommandTargetOverloadedException firstException = null;
+ int numCommandsSent = 0;
+ for (ContainerReplica replica : sources) {
+ // find a target for each source and send replicate command
+ final List<DatanodeDetails> target =
+ ReplicationManagerUtil.getTargetDatanodes(placementPolicy, 1, excludedAndUsedNodes.getUsedNodes(),
+ excludedAndUsedNodes.getExcludedNodes(), currentContainerSize, container);
+ int count = 0;
+ try {
+ count = sendReplicationCommands(container, ImmutableList.of(replica.getDatanodeDetails()), target);
+ } catch (CommandTargetOverloadedException e) {
+ LOG.info("Exception while replicating {} to target {} for container {}.", replica, target, container, e);
+ if (firstException == null) {
+ firstException = e;
+ }
+ }
+
+ if (count == 1) {
+ // a command was sent to target, so it needs to be in the used nodes list because it's pending an add
+ excludedAndUsedNodes.getUsedNodes().add(target.get(0));
+ }
+ numCommandsSent += count;
+ }
+
+ if (firstException != null) {
+ throw firstException;
+ }
+
+ return numCommandsSent;
+ }
+
private void removeUnhealthyReplicaIfPossible(ContainerInfo containerInfo,
Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps)
throws NotLeaderException {
@@ -337,7 +444,7 @@ private List<DatanodeDetails> getTargets(
replicaCount.getContainer().containerID(), replicaCount.getReplicas());
ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes =
- ReplicationManagerUtil.getExcludedAndUsedNodes(
+ ReplicationManagerUtil.getExcludedAndUsedNodes(replicaCount.getContainer(),
replicaCount.getReplicas(), Collections.emptySet(), pendingOps,
replicationManager);
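Aside: replicateEachSource above uses a deferred-rethrow pattern: the first CommandTargetOverloadedException is remembered, the remaining sources are still processed, and the exception is rethrown at the end so the container can be re-queued. A generic sketch of that pattern with illustrative types, not the SCM classes:

    import java.util.List;
    import java.util.concurrent.Callable;

    final class DeferredThrowSketch {
      static final class OverloadedException extends Exception { }

      static int runAll(List<Callable<Integer>> tasks) throws Exception {
        OverloadedException first = null;
        int sent = 0;
        for (Callable<Integer> task : tasks) {
          try {
            sent += task.call();   // e.g. send one replicate command
          } catch (OverloadedException e) {
            if (first == null) {
              first = e;           // remember only the first failure
            }
          }
        }
        if (first != null) {
          throw first;             // rethrow so the caller can requeue
        }
        return sent;
      }
    }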
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index 3b9f66595f4..979cff799fa 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -54,6 +54,7 @@
import org.apache.hadoop.hdds.scm.container.replication.health.QuasiClosedContainerHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.RatisReplicationCheckHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.RatisUnhealthyReplicationCheckHandler;
+import org.apache.hadoop.hdds.scm.container.replication.health.VulnerableUnhealthyReplicasHandler;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.ha.SCMService;
@@ -279,7 +280,8 @@ public ReplicationManager(final ConfigurationSource conf,
.addNext(ratisReplicationCheckHandler)
.addNext(new ClosedWithUnhealthyReplicasHandler(this))
.addNext(ecMisReplicationCheckHandler)
- .addNext(new RatisUnhealthyReplicationCheckHandler());
+ .addNext(new RatisUnhealthyReplicationCheckHandler())
+ .addNext(new VulnerableUnhealthyReplicasHandler(this));
start();
}
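Aside: the new handler is appended to Replication Manager's existing chain of health checks, where each handler either claims a container by returning true or passes it to the next. A minimal sketch of that chain-of-responsibility shape, with illustrative generic types rather than the real AbstractCheck hierarchy:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Predicate;

    final class CheckChainSketch<R> {
      private final List<Predicate<R>> checks = new ArrayList<>();

      CheckChainSketch<R> addNext(Predicate<R> check) {
        checks.add(check);   // fluent, like ReplicationManager's addNext(...)
        return this;
      }

      boolean handle(R request) {
        for (Predicate<R> check : checks) {
          if (check.test(request)) {
            return true;     // first handler that claims the request wins
          }
        }
        return false;
      }
    }

Appending VulnerableUnhealthyReplicasHandler last means it only sees containers that every earlier check declined.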
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
index 076a81e69b5..3dcd6aa23ba 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
@@ -32,6 +32,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
+import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
@@ -116,6 +117,7 @@ public static List<DatanodeDetails> getTargetDatanodes(PlacementPolicy policy,
* @return ExcludedAndUsedNodes object containing the excluded and used lists
*/
public static ExcludedAndUsedNodes getExcludedAndUsedNodes(
+ ContainerInfo container,
List<ContainerReplica> replicas,
Set<ContainerReplica> toBeRemoved,
List<ContainerReplicaOp> pendingReplicaOps,
@@ -123,12 +125,37 @@ public static ExcludedAndUsedNodes getExcludedAndUsedNodes(
List<DatanodeDetails> excludedNodes = new ArrayList<>();
List<DatanodeDetails> usedNodes = new ArrayList<>();
+ List<ContainerReplica> nonUniqueUnhealthy = null;
+ if (container.getState() == HddsProtos.LifeCycleState.QUASI_CLOSED) {
+ /*
+ An UNHEALTHY replica with unique origin node id of a QUASI_CLOSED container should be a used node (not excluded
+ node) because we preserve it. The following code will find non-unique UNHEALTHY replicas. Later in the method
+ this list will be used to determine whether an UNHEALTHY replica's DN should be a used node or excluded node.
+ */
+ nonUniqueUnhealthy =
+ selectUnhealthyReplicasForDelete(container, new HashSet<>(replicas), 0, dn -> {
+ try {
+ return replicationManager.getNodeStatus(dn);
+ } catch (NodeNotFoundException e) {
+ LOG.warn("Exception for {} while selecting used and excluded nodes for container {}.", dn, container);
+ return null;
+ }
+ });
+ }
for (ContainerReplica r : replicas) {
if (r.getState() == ContainerReplicaProto.State.UNHEALTHY) {
- // Hosts with an Unhealthy replica cannot receive a new replica, but
- // they are not considered used as they will be removed later.
- excludedNodes.add(r.getDatanodeDetails());
- continue;
+ if (container.getState() == HddsProtos.LifeCycleState.QUASI_CLOSED) {
+ // any unique UNHEALTHY will get added as used nodes in the catch-all at the end of the loop
+ if (nonUniqueUnhealthy != null && nonUniqueUnhealthy.contains(r)) {
+ excludedNodes.add(r.getDatanodeDetails());
+ continue;
+ }
+ } else {
+ // Hosts with an UNHEALTHY replica (of a non QUASI_CLOSED container) cannot receive a new replica, but
+ // they are not considered used as they will be removed later.
+ excludedNodes.add(r.getDatanodeDetails());
+ continue;
+ }
}
if (toBeRemoved.contains(r)) {
// This node is currently present, but we plan to remove it so it is not
@@ -195,22 +222,8 @@ public List getUsedNodes() {
}
}
- /**
- * This is intended to be call when a container is under replicated, but there
- * are no spare nodes to create new replicas on, due to having too many
- * unhealthy replicas or quasi-closed replicas which cannot be closed due to
- * having a lagging sequence ID. The logic here will select a replica to
- * delete, or return null if there are none which can be safely deleted.
- *
- * @param containerInfo The container to select a replica to delete from
- * @param replicas The list of replicas for the container
- * @param pendingDeletes number pending deletes for this container
- * @return A replica to delete, or null if there are none which can be safely
- * deleted.
- */
- public static ContainerReplica selectUnhealthyReplicaForDelete(
- ContainerInfo containerInfo, Set<ContainerReplica> replicas,
- int pendingDeletes, Function<DatanodeDetails, NodeStatus> nodeStatusFn) {
+ public static List<ContainerReplica> selectUnhealthyReplicasForDelete(ContainerInfo containerInfo,
+ Set<ContainerReplica> replicas, int pendingDeletes, Function<DatanodeDetails, NodeStatus> nodeStatusFn) {
if (pendingDeletes > 0) {
LOG.debug("Container {} has {} pending deletes which will free nodes.",
containerInfo, pendingDeletes);
@@ -261,18 +274,39 @@ public static ContainerReplica selectUnhealthyReplicaForDelete(
deleteCandidates.sort(
Comparator.comparingLong(ContainerReplica::getSequenceId));
if (containerInfo.getState() == HddsProtos.LifeCycleState.CLOSED) {
- return deleteCandidates.size() > 0 ? deleteCandidates.get(0) : null;
+ return deleteCandidates.size() > 0 ? deleteCandidates : null;
}
if (containerInfo.getState() == HddsProtos.LifeCycleState.QUASI_CLOSED) {
List<ContainerReplica> nonUniqueOrigins =
findNonUniqueDeleteCandidates(replicas, deleteCandidates,
nodeStatusFn);
- return nonUniqueOrigins.size() > 0 ? nonUniqueOrigins.get(0) : null;
+ return nonUniqueOrigins.size() > 0 ? nonUniqueOrigins : null;
}
return null;
}
+ /**
+ * This is intended to be called when a container is under replicated, but there
+ * are no spare nodes to create new replicas on, due to having too many
+ * unhealthy replicas or quasi-closed replicas which cannot be closed due to
+ * having a lagging sequence ID. The logic here will select a replica to
+ * delete, or return null if there are none which can be safely deleted.
+ *
+ * @param containerInfo The container to select a replica to delete from
+ * @param replicas The list of replicas for the container
+ * @param pendingDeletes number of pending deletes for this container
+ * @return A replica to delete, or null if there are none which can be safely
+ * deleted.
+ */
+ public static ContainerReplica selectUnhealthyReplicaForDelete(
+ ContainerInfo containerInfo, Set<ContainerReplica> replicas,
+ int pendingDeletes, Function<DatanodeDetails, NodeStatus> nodeStatusFn) {
+ List<ContainerReplica> containerReplicas =
+ selectUnhealthyReplicasForDelete(containerInfo, replicas, pendingDeletes, nodeStatusFn);
+ return containerReplicas != null ? containerReplicas.get(0) : null;
+ }
+
/**
* Given a list of all replicas (including deleteCandidates), finds and
* returns replicas which don't have unique origin node IDs. This method
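Aside: a compact sketch of the used-versus-excluded rule that getExcludedAndUsedNodes now applies, simplified to the UNHEALTHY branch. uniqueOrigin stands in for the non-unique-origin computation in the patch, healthy replicas are collapsed into the final catch-all, and the real method also handles pending adds, pending deletes, and out-of-service nodes:

    import java.util.List;

    final class UsedExcludedSketch {
      static final class Replica {
        String node;
        boolean unhealthy;
        boolean uniqueOrigin;
      }

      static void classify(List<Replica> replicas, boolean quasiClosed,
          List<String> used, List<String> excluded) {
        for (Replica r : replicas) {
          if (r.unhealthy) {
            if (quasiClosed && r.uniqueOrigin) {
              used.add(r.node);      // preserved replica occupies its node
            } else {
              excluded.add(r.node);  // will be deleted, only excluded as target
            }
            continue;
          }
          used.add(r.node);          // healthy replicas always count as used
        }
      }
    }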
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java
new file mode 100644
index 00000000000..21b2d8151d2
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.replication.health;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerCheckRequest;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult;
+import org.apache.hadoop.hdds.scm.container.replication.RatisContainerReplicaCount;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+
+/**
+ * A QUASI_CLOSED container may have some UNHEALTHY replicas with the
+ * same Sequence ID as the container. RM should try to maintain one
+ * copy of such replicas when there are no healthy replicas that
+ * match the container's Sequence ID.
+ */
+public class VulnerableUnhealthyReplicasHandler extends AbstractCheck {
+ public static final Logger LOG = LoggerFactory.getLogger(VulnerableUnhealthyReplicasHandler.class);
+ private final ReplicationManager replicationManager;
+
+ public VulnerableUnhealthyReplicasHandler(ReplicationManager replicationManager) {
+ this.replicationManager = replicationManager;
+ }
+
+ /**
+ * Checks if a QUASI_CLOSED container has some vulnerable UNHEALTHY replicas that need to be replicated to
+ * other Datanodes. These replicas have the same sequence ID as the container while other healthy replicas don't.
+ * If the node hosting such a replica is being taken offline, then the replica may have to be replicated to another
+ * node.
+ * @param request ContainerCheckRequest object representing the container
+ * @return true if some vulnerable UNHEALTHY replicas were found, else false
+ */
+ @Override
+ public boolean handle(ContainerCheckRequest request) {
+ ContainerInfo container = request.getContainerInfo();
+ if (container.getReplicationType() != RATIS) {
+ // This handler is only for Ratis containers.
+ return false;
+ }
+ if (container.getState() != HddsProtos.LifeCycleState.QUASI_CLOSED) {
+ return false;
+ }
+ Set<ContainerReplica> replicas = request.getContainerReplicas();
+ LOG.debug("Checking whether container {} with replicas {} has vulnerable UNHEALTHY replicas.", container, replicas);
+ RatisContainerReplicaCount replicaCount =
+ new RatisContainerReplicaCount(container, replicas, request.getPendingOps(), request.getMaintenanceRedundancy(),
+ true);
+
+ List<ContainerReplica> vulnerableUnhealthy = replicaCount.getVulnerableUnhealthyReplicas(dn -> {
+ try {
+ return replicationManager.getNodeStatus(dn);
+ } catch (NodeNotFoundException e) {
+ LOG.warn("Exception for datanode {} while handling vulnerable replicas for container {}, with all replicas" +
+ " {}.", dn, container, replicaCount.getReplicas(), e);
+ return null;
+ }
+ });
+
+ if (!vulnerableUnhealthy.isEmpty()) {
+ LOG.info("Found vulnerable UNHEALTHY replicas {} for container {}.", vulnerableUnhealthy, container);
+ ReplicationManagerReport report = request.getReport();
+ report.incrementAndSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED, container.containerID());
+ if (!request.isReadOnly()) {
+ ContainerHealthResult.UnderReplicatedHealthResult underRepResult =
+ replicaCount.toUnderHealthResult();
+ underRepResult.setHasVulnerableUnhealthy(true);
+ request.getReplicationQueue().enqueue(underRepResult);
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index 455307c6be3..a7423a79dcc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -362,19 +362,7 @@ private boolean checkContainersReplicatedOnNode(DatanodeDetails dn)
continue;
}
- boolean isHealthy;
- /*
- If LegacyReplicationManager is enabled, then use the
- isHealthyEnoughForOffline API. ReplicationManager doesn't support this
- API yet.
- */
- boolean legacyEnabled = conf.getBoolean("hdds.scm.replication.enable" +
- ".legacy", false);
- if (legacyEnabled) {
- isHealthy = replicaSet.isHealthyEnoughForOffline();
- } else {
- isHealthy = replicaSet.isHealthy();
- }
+ boolean isHealthy = replicaSet.isHealthyEnoughForOffline();
if (!isHealthy) {
if (LOG.isDebugEnabled()) {
unClosedIDs.add(cid);
@@ -391,6 +379,8 @@ private boolean checkContainersReplicatedOnNode(DatanodeDetails dn)
// state, except for any which are unhealthy. As the container is closed, we can check
// if it is sufficiently replicated using replicationManager, but this only works if the
// legacy RM is not enabled.
+ boolean legacyEnabled = conf.getBoolean("hdds.scm.replication.enable" +
+ ".legacy", false);
boolean replicatedOK;
if (legacyEnabled) {
replicatedOK = replicaSet.isSufficientlyReplicatedForOffline(dn, nodeManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
index 17548bc5fef..dd7747e1271 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
@@ -551,6 +551,76 @@ public void testUnderReplicationDueToQuasiClosedReplicaWithWrongSequenceID()
command.getKey()));
}
+ /**
+ * A QUASI_CLOSED container may end up having UNHEALTHY replicas with the correct sequence ID, while none of the
+ * healthy replicas have the correct sequence ID. If any of these UNHEALTHY replicas is unique and is being taken
+ * offline, then it needs to be replicated to another DN for decommission to progress. This test asserts that a
+ * replicate command is sent for one such replica.
+ */
+ @Test
+ public void testUnderReplicationWithVulnerableReplicas() throws IOException {
+ final long sequenceID = 20;
+ container = ReplicationTestUtil.createContainerInfo(RATIS_REPLICATION_CONFIG, 1,
+ HddsProtos.LifeCycleState.QUASI_CLOSED, sequenceID);
+
+ final Set<ContainerReplica> replicas = new HashSet<>(4);
+ for (int i = 0; i < 3; i++) {
+ replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED,
+ sequenceID - 1));
+ }
+ final ContainerReplica unhealthyReplica = createContainerReplica(container.containerID(), 0,
+ DECOMMISSIONING, State.UNHEALTHY, sequenceID);
+ replicas.add(unhealthyReplica);
+ UnderReplicatedHealthResult result = getUnderReplicatedHealthResult();
+ Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true);
+
+ final Set<Pair<DatanodeDetails, SCMCommand<?>>> commands = testProcessing(replicas, Collections.emptyList(),
+ result, 2, 1);
+ assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey());
+ }
+
+ /**
+ * In the push replication model, a replicate command is sent to the DN hosting the replica, and that DN is
+ * expected to "push" the replica to another DN. If the DN hosting the replica has too many commands already, an
+ * exception is thrown. This test asserts that other vulnerable UNHEALTHY replicas are still handled when an
+ * exception is caught for one of the replicas. Also asserts that the first thrown exception isn't lost and is
+ * actually rethrown once other replicas are processed, so that the container can be re-queued.
+ */
+ @Test
+ public void testUnderReplicationWithVulnerableReplicasAndTargetOverloadedException()
+ throws NotLeaderException, CommandTargetOverloadedException {
+ final long sequenceID = 20;
+ container = ReplicationTestUtil.createContainerInfo(RATIS_REPLICATION_CONFIG, 1,
+ HddsProtos.LifeCycleState.QUASI_CLOSED, sequenceID);
+
+ final Set<ContainerReplica> replicas = new HashSet<>(5);
+ for (int i = 0; i < 3; i++) {
+ replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED,
+ sequenceID - 1));
+ }
+
+ /*
+ Create 2 unhealthy vulnerable replicas. An exception is thrown for one of the replicas, but the other replica
+ should still be processed and 1 command should be sent.
+ */
+ final ContainerReplica unhealthyReplica = createContainerReplica(container.containerID(), 0,
+ DECOMMISSIONING, State.UNHEALTHY, sequenceID);
+ final ContainerReplica unhealthyReplica2 = createContainerReplica(container.containerID(), 0,
+ ENTERING_MAINTENANCE, State.UNHEALTHY, sequenceID);
+ replicas.add(unhealthyReplica);
+ replicas.add(unhealthyReplica2);
+ UnderReplicatedHealthResult result = getUnderReplicatedHealthResult();
+ Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true);
+ ReplicationTestUtil.mockRMSendThrottleReplicateCommand(replicationManager, commandsSent, new AtomicBoolean(true));
+
+ RatisUnderReplicationHandler handler = new RatisUnderReplicationHandler(policy, conf, replicationManager);
+ assertThrows(CommandTargetOverloadedException.class, () -> handler.processAndSendCommands(replicas,
+ Collections.emptyList(), result, 2));
+ assertEquals(1, commandsSent.size());
+ DatanodeDetails dn = commandsSent.iterator().next().getKey();
+ assertTrue(unhealthyReplica.getDatanodeDetails().equals(dn) || unhealthyReplica2.getDatanodeDetails().equals(dn));
+ }
+
@Test
public void testOnlyQuasiClosedReplicaWithWrongSequenceIdIsAvailable()
throws IOException {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index a9093778793..32463a5a6eb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.container.replication;
+import com.google.common.collect.ImmutableList;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -91,6 +92,7 @@
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
/**
@@ -444,6 +446,63 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnUniqueOrigin()
assertEquals(0, repQueue.overReplicatedQueueSize());
}
+ @Test
+ public void testQuasiClosedContainerWithVulnerableUnhealthyReplica()
+ throws IOException, NodeNotFoundException {
+ RatisReplicationConfig ratisRepConfig =
+ RatisReplicationConfig.getInstance(THREE);
+ long sequenceID = 10;
+ ContainerInfo container = createContainerInfo(ratisRepConfig, 1,
+ HddsProtos.LifeCycleState.QUASI_CLOSED, sequenceID);
+
+ // this method creates replicas with same origin id and zero sequence id
+ Set<ContainerReplica> replicas =
+ createReplicasWithSameOrigin(container.containerID(),
+ ContainerReplicaProto.State.QUASI_CLOSED, 0, 0, 0);
+ replicas.add(createContainerReplica(container.containerID(), 0,
+ IN_SERVICE, ContainerReplicaProto.State.UNHEALTHY, sequenceID));
+ ContainerReplica decommissioning =
+ createContainerReplica(container.containerID(), 0, DECOMMISSIONING,
+ ContainerReplicaProto.State.UNHEALTHY, sequenceID);
+ replicas.add(decommissioning);
+ storeContainerAndReplicas(container, replicas);
+ Mockito.when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
+ .thenAnswer(invocation -> {
+ DatanodeDetails dn = invocation.getArgument(0);
+ if (dn.equals(decommissioning.getDatanodeDetails())) {
+ return new NodeStatus(DECOMMISSIONING, HddsProtos.NodeState.HEALTHY);
+ }
+
+ return NodeStatus.inServiceHealthy();
+ });
+
+ replicationManager.processContainer(container, repQueue, repReport);
+ assertEquals(1, repReport.getStat(
+ ReplicationManagerReport.HealthState.UNDER_REPLICATED));
+ assertEquals(0, repReport.getStat(
+ ReplicationManagerReport.HealthState.OVER_REPLICATED));
+ assertEquals(1, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+
+ Mockito.when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(),
+ anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails()));
+ Mockito.when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any()))
+ .thenAnswer(invocation -> {
+ Map<SCMCommandProto.Type, Integer> map = new HashMap<>();
+ map.put(SCMCommandProto.Type.replicateContainerCommand, 0);
+ map.put(SCMCommandProto.Type.reconstructECContainersCommand, 0);
+ return map;
+ });
+ RatisUnderReplicationHandler handler =
+ new RatisUnderReplicationHandler(ratisPlacementPolicy, configuration, replicationManager);
+
+ handler.processAndSendCommands(replicas, Collections.emptyList(), repQueue.dequeueUnderReplicatedContainer(), 2);
+ assertEquals(1, commandsSent.size());
+ Pair<UUID, SCMCommand<?>> command = commandsSent.iterator().next();
+ assertEquals(SCMCommandProto.Type.replicateContainerCommand, command.getValue().getType());
+ assertEquals(decommissioning.getDatanodeDetails().getUuid(), command.getKey());
+ }
+
/**
* When there is Quasi Closed Replica with incorrect sequence id
* for a Closed container, it's treated as unhealthy and deleted.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java
index 3b81db7767c..c68130e79ee 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java
@@ -17,11 +17,13 @@
*/
package org.apache.hadoop.hdds.scm.container.replication;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -37,6 +39,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
+import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainer;
import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainerReplica;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -55,7 +58,9 @@ public void setup() {
@Test
public void testGetExcludedAndUsedNodes() throws NodeNotFoundException {
- ContainerID cid = ContainerID.valueOf(1L);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED,
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
+ ContainerID cid = container.containerID();
Set<ContainerReplica> replicas = new HashSet<>();
ContainerReplica good = createContainerReplica(cid, 0,
IN_SERVICE, ContainerReplicaProto.State.CLOSED, 1);
@@ -108,7 +113,7 @@ public void testGetExcludedAndUsedNodes() throws NodeNotFoundException {
});
ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes =
- ReplicationManagerUtil.getExcludedAndUsedNodes(
+ ReplicationManagerUtil.getExcludedAndUsedNodes(container,
new ArrayList<>(replicas), toBeRemoved, pending,
replicationManager);
@@ -131,4 +136,89 @@ public void testGetExcludedAndUsedNodes() throws NodeNotFoundException {
.contains(pendingDelete));
}
+ @Test
+ public void testGetUsedAndExcludedNodesForQuasiClosedContainer() throws NodeNotFoundException {
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.QUASI_CLOSED,
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
+ ContainerID cid = container.containerID();
+ Set<ContainerReplica> replicas = new HashSet<>();
+ ContainerReplica good = createContainerReplica(cid, 0, IN_SERVICE,
+ ContainerReplicaProto.State.QUASI_CLOSED, 1);
+ replicas.add(good);
+
+ ContainerReplica remove = createContainerReplica(cid, 0,
+ IN_SERVICE, ContainerReplicaProto.State.QUASI_CLOSED, 1);
+ replicas.add(remove);
+ Set<ContainerReplica> toBeRemoved = new HashSet<>();
+ toBeRemoved.add(remove);
+
+ // this replica should be on the used nodes list
+ ContainerReplica unhealthyWithUniqueOrigin = createContainerReplica(
+ cid, 0, IN_SERVICE, ContainerReplicaProto.State.UNHEALTHY, 1);
+ replicas.add(unhealthyWithUniqueOrigin);
+
+ // this one should be on the excluded nodes list
+ ContainerReplica unhealthyWithNonUniqueOrigin = createContainerReplica(cid, 0, IN_SERVICE,
+ ContainerReplicaProto.State.UNHEALTHY, container.getNumberOfKeys(), container.getUsedBytes(),
+ MockDatanodeDetails.randomDatanodeDetails(), good.getOriginDatanodeId());
+ replicas.add(unhealthyWithNonUniqueOrigin);
+
+ ContainerReplica decommissioning =
+ createContainerReplica(cid, 0,
+ DECOMMISSIONING, ContainerReplicaProto.State.QUASI_CLOSED, 1);
+ replicas.add(decommissioning);
+
+ ContainerReplica maintenance =
+ createContainerReplica(cid, 0,
+ IN_MAINTENANCE, ContainerReplicaProto.State.QUASI_CLOSED, 1);
+ replicas.add(maintenance);
+
+ // Finally, add a pending add and delete. The add should go onto the used
+ // list and the delete added to the excluded nodes.
+ DatanodeDetails pendingAdd = MockDatanodeDetails.randomDatanodeDetails();
+ DatanodeDetails pendingDelete = MockDatanodeDetails.randomDatanodeDetails();
+ List<ContainerReplicaOp> pending = new ArrayList<>();
+ pending.add(ContainerReplicaOp.create(
+ ContainerReplicaOp.PendingOpType.ADD, pendingAdd, 0));
+ pending.add(ContainerReplicaOp.create(
+ ContainerReplicaOp.PendingOpType.DELETE, pendingDelete, 0));
+
+ Mockito.when(replicationManager.getNodeStatus(Mockito.any())).thenAnswer(
+ invocation -> {
+ final DatanodeDetails dn = invocation.getArgument(0);
+ for (ContainerReplica r : replicas) {
+ if (r.getDatanodeDetails().equals(dn)) {
+ return new NodeStatus(
+ r.getDatanodeDetails().getPersistedOpState(),
+ HddsProtos.NodeState.HEALTHY);
+ }
+ }
+ throw new NodeNotFoundException(dn.getUuidString());
+ });
+
+ ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes =
+ ReplicationManagerUtil.getExcludedAndUsedNodes(container,
+ new ArrayList<>(replicas), toBeRemoved, pending,
+ replicationManager);
+
+ assertEquals(4, excludedAndUsedNodes.getUsedNodes().size());
+ assertTrue(excludedAndUsedNodes.getUsedNodes()
+ .contains(good.getDatanodeDetails()));
+ assertTrue(excludedAndUsedNodes.getUsedNodes()
+ .contains(maintenance.getDatanodeDetails()));
+ assertTrue(excludedAndUsedNodes.getUsedNodes()
+ .contains(pendingAdd));
+ assertTrue(excludedAndUsedNodes.getUsedNodes().contains(unhealthyWithUniqueOrigin.getDatanodeDetails()));
+
+ assertEquals(4, excludedAndUsedNodes.getExcludedNodes().size());
+ assertTrue(excludedAndUsedNodes.getExcludedNodes()
+ .contains(unhealthyWithNonUniqueOrigin.getDatanodeDetails()));
+ assertTrue(excludedAndUsedNodes.getExcludedNodes()
+ .contains(decommissioning.getDatanodeDetails()));
+ assertTrue(excludedAndUsedNodes.getExcludedNodes()
+ .contains(remove.getDatanodeDetails()));
+ assertTrue(excludedAndUsedNodes.getExcludedNodes()
+ .contains(pendingDelete));
+ }
+
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
new file mode 100644
index 00000000000..72a89f02862
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.replication.health;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerCheckRequest;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationQueue;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainerInfo;
+import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainerReplica;
+import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createReplicas;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Tests for {@link VulnerableUnhealthyReplicasHandler}.
+ */
+public class TestVulnerableUnhealthyReplicasHandler {
+ private ReplicationManager replicationManager;
+ private ReplicationConfig repConfig;
+ private ReplicationQueue repQueue;
+ private ContainerCheckRequest.Builder requestBuilder;
+ private ReplicationManagerReport report;
+ private VulnerableUnhealthyReplicasHandler handler;
+
+ @BeforeEach
+ public void setup() throws NodeNotFoundException {
+ replicationManager = Mockito.mock(ReplicationManager.class);
+ handler = new VulnerableUnhealthyReplicasHandler(replicationManager);
+ repConfig = RatisReplicationConfig.getInstance(THREE);
+ repQueue = new ReplicationQueue();
+ report = new ReplicationManagerReport();
+ requestBuilder = new ContainerCheckRequest.Builder()
+ .setReplicationQueue(repQueue)
+ .setMaintenanceRedundancy(2)
+ .setPendingOps(Collections.emptyList())
+ .setReport(report);
+
+ Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class)))
+ .thenReturn(NodeStatus.inServiceHealthy());
+ }
+
+ @Test
+ public void testReturnsFalseForECContainer() {
+ ContainerInfo container = createContainerInfo(new ECReplicationConfig(3, 2));
+ Set<ContainerReplica> replicas = createReplicas(container.containerID(), 1, 2, 3, 4);
+ requestBuilder.setContainerReplicas(replicas).setContainerInfo(container);
+
+ assertFalse(handler.handle(requestBuilder.build()));
+ assertEquals(0, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+ }
+
+ @Test
+ public void testReturnsFalseForClosedContainer() {
+ ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.CLOSED);
+ Set<ContainerReplica> replicas = createReplicas(container.containerID(), 0, 0, 0);
+ requestBuilder.setContainerReplicas(replicas).setContainerInfo(container);
+
+ assertFalse(handler.handle(requestBuilder.build()));
+ assertEquals(0, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+ }
+
+ @Test
+ public void testReturnsFalseForQuasiClosedContainerWithNoUnhealthyReplicas() {
+ ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED);
+ Set<ContainerReplica> replicas = createReplicas(container.containerID(), State.QUASI_CLOSED, 0, 0, 0);
+ requestBuilder.setContainerReplicas(replicas).setContainerInfo(container);
+
+ assertFalse(handler.handle(requestBuilder.build()));
+ assertEquals(0, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+ }
+
+ @Test
+ public void testReturnsFalseForQuasiClosedContainerWithNoVulnerableReplicas() {
+ ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED);
+ Set<ContainerReplica> replicas = createReplicas(container.containerID(), 0, 0, 0);
+ // create UNHEALTHY replica with unique origin id on an IN_SERVICE node
+ replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.UNHEALTHY));
+ requestBuilder.setContainerReplicas(replicas).setContainerInfo(container);
+
+ assertFalse(handler.handle(requestBuilder.build()));
+ assertEquals(0, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+ }
+
+ @Test
+ public void testReturnsTrueForQuasiClosedContainerWithVulnerableReplica() throws NodeNotFoundException {
+ long sequenceId = 10;
+ ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED, sequenceId);
+ Set<ContainerReplica> replicas = new HashSet<>(4);
+ for (int i = 0; i < 3; i++) {
+ replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED,
+ container.getSequenceId() - 1));
+ }
+ // create UNHEALTHY replica with unique origin id on a DECOMMISSIONING node
+ ContainerReplica unhealthy =
+ createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId);
+ replicas.add(unhealthy);
+ Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class)))
+ .thenAnswer(invocation -> {
+ DatanodeDetails dn = invocation.getArgument(0);
+ if (dn.equals(unhealthy.getDatanodeDetails())) {
+ return new NodeStatus(DECOMMISSIONING, HEALTHY);
+ }
+ return NodeStatus.inServiceHealthy();
+ });
+ requestBuilder.setContainerReplicas(replicas).setContainerInfo(container);
+
+ assertTrue(handler.handle(requestBuilder.build()));
+ assertEquals(1, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+ }
+
+ @Test
+ public void testReturnsFalseForVulnerableReplicaWithAnotherCopy() throws NodeNotFoundException {
+ long sequenceId = 10;
+ ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED, sequenceId);
+ Set<ContainerReplica> replicas = new HashSet<>(4);
+ for (int i = 0; i < 3; i++) {
+ replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED,
+ container.getSequenceId() - 1));
+ }
+ // create UNHEALTHY replica with a non-unique origin id on a DECOMMISSIONING node
+ ContainerReplica unhealthy =
+ createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId);
+ replicas.add(unhealthy);
+ Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class)))
+ .thenAnswer(invocation -> {
+ DatanodeDetails dn = invocation.getArgument(0);
+ if (dn.equals(unhealthy.getDatanodeDetails())) {
+ return new NodeStatus(DECOMMISSIONING, HEALTHY);
+ }
+ return NodeStatus.inServiceHealthy();
+ });
+ replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.UNHEALTHY,
+ container.getNumberOfKeys(), container.getUsedBytes(), MockDatanodeDetails.randomDatanodeDetails(),
+ unhealthy.getOriginDatanodeId(), container.getSequenceId()));
+ requestBuilder.setContainerReplicas(replicas).setContainerInfo(container);
+
+ assertFalse(handler.handle(requestBuilder.build()));
+ assertEquals(0, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+ }
+
+ @Test
+ public void testDoesNotEnqueueForReadOnlyRequest() throws NodeNotFoundException {
+ long sequenceId = 10;
+ ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED, sequenceId);
+ Set<ContainerReplica> replicas = new HashSet<>(4);
+ for (int i = 0; i < 3; i++) {
+ replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED,
+ container.getSequenceId() - 1));
+ }
+ // create UNHEALTHY replica with unique origin id on a DECOMMISSIONING node
+ ContainerReplica unhealthy =
+ createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId);
+ replicas.add(unhealthy);
+ Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class)))
+ .thenAnswer(invocation -> {
+ DatanodeDetails dn = invocation.getArgument(0);
+ if (dn.equals(unhealthy.getDatanodeDetails())) {
+ return new NodeStatus(DECOMMISSIONING, HEALTHY);
+ }
+ return NodeStatus.inServiceHealthy();
+ });
+ requestBuilder.setContainerReplicas(replicas)
+ .setContainerInfo(container)
+ .setReadOnly(true);
+
+ assertTrue(handler.handle(requestBuilder.build()));
+ assertEquals(0, repQueue.underReplicatedQueueSize());
+ assertEquals(0, repQueue.overReplicatedQueueSize());
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java
index 4433c0cb6f2..4ff937f98c5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java
@@ -193,7 +193,7 @@ public static void mockGetContainerReplicaCountForEC(
mockCheckContainerState(repManager, underReplicated);
}
- private static void mockCheckContainerState(ReplicationManager repManager, boolean underReplicated)
+ static void mockCheckContainerState(ReplicationManager repManager, boolean underReplicated)
throws ContainerNotFoundException {
Mockito.when(repManager.checkContainerStatus(Mockito.any(ContainerInfo.class),
Mockito.any(ReplicationManagerReport.class)))
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index 4b389fbcf2f..17107cfa958 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.replication.LegacyRatisContainerReplicaCount;
+import org.apache.hadoop.hdds.scm.container.replication.RatisContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil;
@@ -298,6 +299,80 @@ public void testDecommissionWaitsForUnhealthyReplicaToReplicate()
nodeManager.getNodeStatus(dn1).getOperationalState());
}
+ /**
+ * Situation: A QUASI_CLOSED container has an UNHEALTHY replica with the
+ * greatest BCSID, and three QUASI_CLOSED replicas with a smaller BCSID. The
+ * UNHEALTHY replica is on a decommissioning node, and there are no other
+ * copies of this replica, that is, no replicas with the same origin ID as
+ * this replica.
+ *
+ * Expectation: Decommissioning should not complete until the UNHEALTHY
+ * replica has been replicated to another node.
+ */
+ @Test
+ public void testDecommissionWaitsForUnhealthyReplicaToReplicateNewRM()
+ throws NodeNotFoundException, ContainerNotFoundException {
+ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+
+ // create 3 QUASI_CLOSED replicas with containerID 1 and same origin ID
+ ContainerID containerID = ContainerID.valueOf(1);
+ Set<ContainerReplica> replicas =
+ ReplicationTestUtil.createReplicasWithSameOrigin(containerID,
+ State.QUASI_CLOSED, 0, 0, 0);
+
+ // the container's sequence id is greater than the healthy replicas'
+ ContainerInfo container = ReplicationTestUtil.createContainerInfo(
+ RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.THREE), containerID.getId(),
+ HddsProtos.LifeCycleState.QUASI_CLOSED,
+ replicas.iterator().next().getSequenceId() + 1);
+ // UNHEALTHY replica is on a unique origin and has same sequence id as
+ // the container
+ ContainerReplica unhealthy =
+ ReplicationTestUtil.createContainerReplica(containerID, 0,
+ dn1.getPersistedOpState(), State.UNHEALTHY,
+ container.getNumberOfKeys(), container.getUsedBytes(), dn1,
+ dn1.getUuid(), container.getSequenceId());
+ replicas.add(unhealthy);
+ nodeManager.setContainers(dn1, ImmutableSet.of(containerID));
+
+ Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID)))
+ .thenReturn(new RatisContainerReplicaCount(container, replicas,
+ Collections.emptyList(), 2, false));
+ DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, true);
+
+ // start monitoring dn1
+ monitor.startMonitoring(dn1);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+
+ // Running the monitor again causes it to remain DECOMMISSIONING
+ // as nothing has changed.
+ monitor.run();
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+
+ // add a copy of the UNHEALTHY replica on a new node; dn1 should now
+ // be able to complete decommissioning
+ ContainerReplica copyOfUnhealthyOnNewNode = unhealthy.toBuilder()
+ .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
+ .build();
+ replicas.add(copyOfUnhealthyOnNewNode);
+ Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID)))
+ .thenReturn(new RatisContainerReplicaCount(container, replicas,
+ Collections.emptyList(), 2, false));
+ DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, false);
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
/**
* Consider a QUASI_CLOSED container with only UNHEALTHY replicas. If one
* of its nodes is decommissioned, the decommissioning should succeed.
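Taken together, the handler and monitor tests above pin down when a
QUASI_CLOSED container must block decommissioning: an UNHEALTHY replica
that carries the container's latest sequence id, sits on a node leaving
service, and has no surviving copy from the same origin datanode. The
sketch below restates that predicate using the types imported by the test;
the helper name and placement are illustrative, not part of the actual
ReplicationManager API.

    // Sketch only: mirrors the conditions exercised by the tests above.
    static boolean isVulnerable(ContainerInfo container, ContainerReplica replica,
        Set<ContainerReplica> allReplicas, NodeStatus nodeStatus) {
      // UNHEALTHY, but holds the container's greatest BCSID (unique data)
      boolean hasLatestData = replica.getState() == State.UNHEALTHY
          && replica.getSequenceId() == container.getSequenceId();
      // the node hosting it is being taken out of service
      boolean nodeLeaving = nodeStatus.getOperationalState()
          != HddsProtos.NodeOperationalState.IN_SERVICE;
      // another replica with the same origin would preserve the data
      boolean hasOtherCopy = allReplicas.stream()
          .filter(r -> !r.equals(replica))
          .anyMatch(r -> r.getOriginDatanodeId().equals(replica.getOriginDatanodeId()));
      return hasLatestData && nodeLeaving && !hasOtherCopy;
    }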
From cd0c55e4ffa879b3e7cffe182237f0e1c824a7c0 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Wed, 20 Dec 2023 08:47:59 +0100
Subject: [PATCH 14/28] HDDS-9828. Do not use Files.createTempFile in tests
(#5824)
---
.../keyvalue/helpers/TestChunkUtils.java | 188 ++++++++----------
.../security/symmetric/TestLocalKeyStore.java | 18 +-
.../hdds/utils/TestNativeLibraryLoader.java | 12 +-
.../managed/TestManagedSSTDumpIterator.java | 11 +-
.../util/TestManagedSstFileReader.java | 15 +-
.../ha/TestInterSCMGrpcProtocolService.java | 8 +-
.../hadoop/fs/ozone/TestOzoneFsSnapshot.java | 29 +--
.../hdds/scm/TestSCMDbCheckpointServlet.java | 120 +++++------
.../ozone/om/TestOMDbCheckpointServlet.java | 35 ++--
.../hadoop/ozone/om/TestOMRatisSnapshots.java | 8 +-
.../ratis/TestOzoneManagerRatisRequest.java | 5 +-
.../om/service/TestRangerBGSyncService.java | 3 +-
.../om/snapshot/TestSnapshotDiffManager.java | 11 +-
13 files changed, 234 insertions(+), 229 deletions(-)
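The recurring change in this patch swaps self-managed temporary files
(Files.createTempFile plus manual cleanup in a finally block) for JUnit 5's
@TempDir, which provisions a fresh directory before each test and deletes it
afterwards. A condensed sketch of the pattern, with a hypothetical test
class name:

    import java.io.IOException;
    import java.nio.file.Path;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class TempDirExampleTest {        // hypothetical class, for illustration
      @TempDir
      private Path tempDir;           // managed by JUnit per test

      @Test
      void writesIntoManagedDir() throws IOException {
        // resolve() only names a path inside the managed directory;
        // no finally block or deleteIfExists() is needed afterwards
        Path tempFile = tempDir.resolve("data.bin");
        // ... write to and read from tempFile ...
      }
    }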
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
index 037de863c00..bda8b7d5a9a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
@@ -28,7 +28,6 @@
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
-import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ExecutorService;
@@ -55,22 +54,25 @@
import static org.junit.jupiter.api.Assertions.assertThrows;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests for {@link ChunkUtils}.
*/
-public class TestChunkUtils {
+class TestChunkUtils {
private static final Logger LOG =
LoggerFactory.getLogger(TestChunkUtils.class);
- private static final String PREFIX = TestChunkUtils.class.getSimpleName();
private static final int BUFFER_CAPACITY = 1 << 20;
private static final int MAPPED_BUFFER_THRESHOLD = 32 << 10;
private static final Random RANDOM = new Random();
+ @TempDir
+ private Path tempDir;
+
static ChunkBuffer readData(File file, long off, long len)
throws StorageContainerException {
LOG.info("off={}, len={}", off, len);
@@ -79,130 +81,112 @@ static ChunkBuffer readData(File file, long off, long len)
}
@Test
- public void concurrentReadOfSameFile() throws Exception {
+ void concurrentReadOfSameFile() throws Exception {
String s = "Hello World";
byte[] array = s.getBytes(UTF_8);
ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
- Path tempFile = Files.createTempFile(PREFIX, "concurrent");
- try {
- int len = data.limit();
- int offset = 0;
- File file = tempFile.toFile();
- ChunkUtils.writeData(file, data, offset, len, null, true);
- int threads = 10;
- ExecutorService executor = new ThreadPoolExecutor(threads, threads,
- 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
- AtomicInteger processed = new AtomicInteger();
- AtomicBoolean failed = new AtomicBoolean();
- for (int i = 0; i < threads; i++) {
- final int threadNumber = i;
- executor.execute(() -> {
- try {
- final ChunkBuffer chunk = readData(file, offset, len);
- // There should be only one element in readBuffers
- final List<ByteBuffer> buffers = chunk.asByteBufferList();
- Assertions.assertEquals(1, buffers.size());
- final ByteBuffer readBuffer = buffers.get(0);
-
- LOG.info("Read data ({}): {}", threadNumber,
- new String(readBuffer.array(), UTF_8));
- if (!Arrays.equals(array, readBuffer.array())) {
- failed.set(true);
- }
- assertEquals(len, readBuffer.remaining());
- } catch (Exception e) {
- LOG.error("Failed to read data ({})", threadNumber, e);
+ Path tempFile = tempDir.resolve("concurrent");
+ int len = data.limit();
+ int offset = 0;
+ File file = tempFile.toFile();
+ ChunkUtils.writeData(file, data, offset, len, null, true);
+ int threads = 10;
+ ExecutorService executor = new ThreadPoolExecutor(threads, threads,
+ 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
+ AtomicInteger processed = new AtomicInteger();
+ AtomicBoolean failed = new AtomicBoolean();
+ for (int i = 0; i < threads; i++) {
+ final int threadNumber = i;
+ executor.execute(() -> {
+ try {
+ final ChunkBuffer chunk = readData(file, offset, len);
+ // There should be only one element in readBuffers
+ final List<ByteBuffer> buffers = chunk.asByteBufferList();
+ Assertions.assertEquals(1, buffers.size());
+ final ByteBuffer readBuffer = buffers.get(0);
+
+ LOG.info("Read data ({}): {}", threadNumber,
+ new String(readBuffer.array(), UTF_8));
+ if (!Arrays.equals(array, readBuffer.array())) {
failed.set(true);
}
- processed.incrementAndGet();
- });
- }
- try {
- GenericTestUtils.waitFor(() -> processed.get() == threads,
- 100, (int) TimeUnit.SECONDS.toMillis(5));
- } finally {
- executor.shutdownNow();
- }
- assertFalse(failed.get());
+ assertEquals(len, readBuffer.remaining());
+ } catch (Exception e) {
+ LOG.error("Failed to read data ({})", threadNumber, e);
+ failed.set(true);
+ }
+ processed.incrementAndGet();
+ });
+ }
+ try {
+ GenericTestUtils.waitFor(() -> processed.get() == threads,
+ 100, (int) TimeUnit.SECONDS.toMillis(5));
} finally {
- Files.deleteIfExists(tempFile);
+ executor.shutdownNow();
}
+ assertFalse(failed.get());
}
@Test
- public void concurrentProcessing() throws Exception {
+ void concurrentProcessing() throws Exception {
final int perThreadWait = 1000;
final int maxTotalWait = 5000;
int threads = 20;
- List<Path> paths = new LinkedList<>();
+ ExecutorService executor = new ThreadPoolExecutor(threads, threads,
+ 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
+ AtomicInteger processed = new AtomicInteger();
+ for (int i = 0; i < threads; i++) {
+ Path path = tempDir.resolve(String.valueOf(i));
+ executor.execute(() -> {
+ try {
+ ChunkUtils.processFileExclusively(path, () -> {
+ try {
+ Thread.sleep(perThreadWait);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ processed.incrementAndGet();
+ return null;
+ });
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ });
+ }
try {
- ExecutorService executor = new ThreadPoolExecutor(threads, threads,
- 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
- AtomicInteger processed = new AtomicInteger();
- for (int i = 0; i < threads; i++) {
- Path path = Files.createTempFile(PREFIX, String.valueOf(i));
- paths.add(path);
- executor.execute(() -> {
- try {
- ChunkUtils.processFileExclusively(path, () -> {
- try {
- Thread.sleep(perThreadWait);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- processed.incrementAndGet();
- return null;
- });
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- });
- }
- try {
- GenericTestUtils.waitFor(() -> processed.get() == threads,
- 100, maxTotalWait);
- } finally {
- executor.shutdownNow();
- }
+ GenericTestUtils.waitFor(() -> processed.get() == threads,
+ 100, maxTotalWait);
} finally {
- for (Path path : paths) {
- FileUtils.deleteQuietly(path.toFile());
- }
+ executor.shutdownNow();
}
}
@Test
- public void serialRead() throws Exception {
+ void serialRead() throws IOException {
String s = "Hello World";
byte[] array = s.getBytes(UTF_8);
ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
- Path tempFile = Files.createTempFile(PREFIX, "serial");
- try {
- File file = tempFile.toFile();
- int len = data.limit();
- int offset = 0;
- ChunkUtils.writeData(file, data, offset, len, null, true);
-
- final ChunkBuffer chunk = readData(file, offset, len);
- // There should be only one element in readBuffers
- final List<ByteBuffer> buffers = chunk.asByteBufferList();
- Assertions.assertEquals(1, buffers.size());
- final ByteBuffer readBuffer = buffers.get(0);
-
- assertArrayEquals(array, readBuffer.array());
- assertEquals(len, readBuffer.remaining());
- } catch (Exception e) {
- LOG.error("Failed to read data", e);
- } finally {
- Files.deleteIfExists(tempFile);
- }
+ Path tempFile = tempDir.resolve("serial");
+ File file = tempFile.toFile();
+ int len = data.limit();
+ int offset = 0;
+ ChunkUtils.writeData(file, data, offset, len, null, true);
+
+ final ChunkBuffer chunk = readData(file, offset, len);
+ // There should be only one element in readBuffers
+ final List<ByteBuffer> buffers = chunk.asByteBufferList();
+ Assertions.assertEquals(1, buffers.size());
+ final ByteBuffer readBuffer = buffers.get(0);
+
+ assertArrayEquals(array, readBuffer.array());
+ assertEquals(len, readBuffer.remaining());
}
@Test
- public void validateChunkForOverwrite() throws IOException {
+ void validateChunkForOverwrite() throws IOException {
- Path tempFile = Files.createTempFile(PREFIX, "overwrite");
+ Path tempFile = tempDir.resolve("overwrite");
FileUtils.write(tempFile.toFile(), "test", UTF_8);
Assertions.assertTrue(
@@ -226,7 +210,7 @@ public void validateChunkForOverwrite() throws IOException {
}
@Test
- public void readMissingFile() {
+ void readMissingFile() {
// given
int len = 123;
int offset = 0;
@@ -242,7 +226,7 @@ public void readMissingFile() {
}
@Test
- public void testReadData() throws Exception {
+ void testReadData() throws Exception {
final File dir = GenericTestUtils.getTestDir("testReadData");
try {
Assertions.assertTrue(dir.mkdirs());
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java
index b5c717399d0..393a0c5f011 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java
@@ -21,6 +21,7 @@
import com.google.common.collect.ImmutableList;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -53,17 +54,20 @@
/**
* Test cases for {@link LocalSecretKeyStore}.
*/
-public class TestLocalKeyStore {
+class TestLocalKeyStore {
private SecretKeyStore secretKeyStore;
private Path testSecretFile;
+ @TempDir
+ private Path tempDir;
+
@BeforeEach
- private void setup() throws Exception {
- testSecretFile = Files.createTempFile("key-strore-test", ".json");
+ void setup() throws IOException {
+ testSecretFile = Files.createFile(tempDir.resolve("key-store-test.json"));
secretKeyStore = new LocalSecretKeyStore(testSecretFile);
}
- public static Stream<Arguments> saveAndLoadTestCases() throws Exception {
+ static Stream<Arguments> saveAndLoadTestCases() throws Exception {
return Stream.of(
// empty
Arguments.of(ImmutableList.of()),
@@ -81,7 +85,7 @@ public static Stream<Arguments> saveAndLoadTestCases() throws Exception {
@ParameterizedTest
@MethodSource("saveAndLoadTestCases")
- public void testSaveAndLoad(List<ManagedSecretKey> keys) throws IOException {
+ void testSaveAndLoad(List<ManagedSecretKey> keys) throws IOException {
secretKeyStore.save(keys);
// Ensure the intended file exists and is readable and writeable to
@@ -100,7 +104,7 @@ public void testSaveAndLoad(List<ManagedSecretKey> keys) throws IOException {
* Verifies that secret keys are overwritten by subsequent writes.
*/
@Test
- public void testOverwrite() throws Exception {
+ void testOverwrite() throws Exception {
List<ManagedSecretKey> initialKeys =
newArrayList(generateKey("HmacSHA256"));
secretKeyStore.save(initialKeys);
@@ -123,7 +127,7 @@ public void testOverwrite() throws Exception {
* test fails, instead, analyse the backward-compatibility of the change.
*/
@Test
- public void testLoadExistingFile() throws Exception {
+ void testLoadExistingFile() throws Exception {
// copy test file content to the backing file.
String testJson = "[\n" +
" {\n" +
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
index 472954f2bd5..24218c5687e 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
@@ -20,6 +20,7 @@
import org.apache.ozone.test.tag.Native;
import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.MockedStatic;
@@ -27,7 +28,7 @@
import java.io.ByteArrayInputStream;
import java.io.File;
-import java.io.IOException;
+import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
@@ -42,10 +43,11 @@
*/
public class TestNativeLibraryLoader {
- private static Stream<String> nativeLibraryDirectoryLocations()
- throws IOException {
- return Stream.of("", File.createTempFile("prefix", "suffix")
- .getParentFile().getAbsolutePath(), null);
+ @TempDir
+ private static Path tempDir;
+
+ private static Stream<String> nativeLibraryDirectoryLocations() {
+ return Stream.of("", tempDir.toAbsolutePath().toString(), null);
}
@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
index 99d2a6ced59..505d68d9413 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
@@ -28,6 +28,7 @@
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Named;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -40,6 +41,8 @@
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -61,10 +64,12 @@
*/
class TestManagedSSTDumpIterator {
+ @TempDir
+ private Path tempDir;
+
private File createSSTFileWithKeys(
TreeMap<Pair<String, Integer>, String> keys) throws Exception {
- File file = File.createTempFile("tmp_sst_file", ".sst");
- file.deleteOnExit();
+ File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile();
try (ManagedEnvOptions envOptions = new ManagedEnvOptions();
ManagedOptions managedOptions = new ManagedOptions();
ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(
@@ -252,7 +257,7 @@ public void testInvalidSSTDumpIteratorWithKeyFormat(byte[] inputBytes)
ByteArrayInputStream byteArrayInputStream =
new ByteArrayInputStream(inputBytes);
ManagedSSTDumpTool tool = Mockito.mock(ManagedSSTDumpTool.class);
- File file = File.createTempFile("tmp", ".sst");
+ File file = Files.createFile(tempDir.resolve("tmp_file.sst")).toFile();
Future future = Mockito.mock(Future.class);
Mockito.when(future.isDone()).thenReturn(false);
Mockito.when(future.get()).thenReturn(0);
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java
index 8c897b01d2e..588e54ad8b3 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java
@@ -31,6 +31,7 @@
import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.rocksdb.RocksDBException;
@@ -47,6 +48,7 @@
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
@@ -58,6 +60,11 @@
*/
class TestManagedSstFileReader {
+ @TempDir
+ private File tempDir;
+
+ private final AtomicInteger fileCounter = new AtomicInteger();
+
// Key prefix containing all characters, to check if all characters can be
// written & read from rocksdb through SSTDumptool
private static final String KEY_PREFIX = IntStream.range(0, 256).boxed()
@@ -65,9 +72,8 @@ class TestManagedSstFileReader {
.collect(Collectors.joining(""));
private String createRandomSSTFile(TreeMap<String, Integer> keys)
- throws IOException, RocksDBException {
- File file = File.createTempFile("tmp_sst_file", ".sst");
- file.deleteOnExit();
+ throws RocksDBException {
+ File file = new File(tempDir, "tmp_sst_file" + fileCounter.incrementAndGet() + ".sst");
try (ManagedOptions managedOptions = new ManagedOptions();
ManagedEnvOptions managedEnvOptions = new ManagedEnvOptions();
@@ -84,6 +90,7 @@ private String createRandomSSTFile(TreeMap<String, Integer> keys)
}
sstFileWriter.finish();
}
+ Assertions.assertTrue(file.exists());
return file.getAbsolutePath();
}
@@ -142,7 +149,7 @@ public void testGetKeyStream(int numberOfFiles)
new ManagedSstFileReader(files).getKeyStream(
lowerBound.orElse(null), upperBound.orElse(null))) {
keyStream.forEach(key -> {
- Assertions.assertEquals(keysInBoundary.get(key), 1);
+ Assertions.assertEquals(1, keysInBoundary.get(key));
Assertions.assertNotNull(keysInBoundary.remove(key));
});
keysInBoundary.values()
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
index f966f1b65ba..95b6abc04ac 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
@@ -72,7 +72,7 @@
*
* @see HDDS-8901
*/
-public class TestInterSCMGrpcProtocolService {
+class TestInterSCMGrpcProtocolService {
private static final String CP_FILE_NAME = "cpFile";
private static final String CP_CONTENTS = "Hello world!";
@@ -89,7 +89,7 @@ public class TestInterSCMGrpcProtocolService {
private Path temp;
@Test
- public void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception {
+ void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception {
int port = new Random().nextInt(1000) + 45000;
OzoneConfiguration conf = setupConfiguration(port);
SCMCertificateClient
@@ -100,7 +100,7 @@ public void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception {
InterSCMGrpcClient client =
new InterSCMGrpcClient("localhost", port, conf, scmCertClient);
- Path tempFile = Files.createTempFile(temp, CP_FILE_NAME, "");
+ Path tempFile = temp.resolve(CP_FILE_NAME);
CompletableFuture<Path> res = client.download(tempFile);
Path downloaded = res.get();
@@ -182,7 +182,7 @@ private DBStore dbStore() throws IOException {
}
private DBCheckpoint checkPoint() throws IOException {
- Path checkPointLocation = Files.createTempDirectory(temp, "cpDir");
+ Path checkPointLocation = Files.createDirectory(temp.resolve("cpDir"));
Path cpFile = Paths.get(checkPointLocation.toString(), CP_FILE_NAME);
Files.write(cpFile, CP_CONTENTS.getBytes(UTF_8));
DBCheckpoint checkpoint = mock(DBCheckpoint.class);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
index 8b1b2adfdf3..90b5daabada 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
@@ -43,6 +43,7 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -60,14 +61,14 @@
* Setting a timeout for every test method to 300 seconds.
*/
@Timeout(value = 300)
-public class TestOzoneFsSnapshot {
+class TestOzoneFsSnapshot {
private static MiniOzoneCluster cluster;
private static final String OM_SERVICE_ID = "om-service-test1";
private static OzoneManager ozoneManager;
private static OzoneFsShell shell;
private static final String VOLUME =
- "vol-" + RandomStringUtils.randomNumeric(5);;
+ "vol-" + RandomStringUtils.randomNumeric(5);
private static final String BUCKET =
"buck-" + RandomStringUtils.randomNumeric(5);
private static final String KEY =
@@ -80,7 +81,7 @@ public class TestOzoneFsSnapshot {
BUCKET_PATH + OM_KEY_PREFIX + KEY;
@BeforeAll
- public static void initClass() throws Exception {
+ static void initClass() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
// Enable filesystem snapshot feature for the test regardless of the default
conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true);
@@ -106,7 +107,7 @@ public static void initClass() throws Exception {
}
@AfterAll
- public static void shutdown() throws IOException {
+ static void shutdown() throws IOException {
shell.close();
if (cluster != null) {
cluster.shutdown();
@@ -129,7 +130,7 @@ private static void createVolBuckKey()
}
@Test
- public void testCreateSnapshotDuplicateName() throws Exception {
+ void testCreateSnapshotDuplicateName() throws Exception {
String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5);
int res = ToolRunner.run(shell,
@@ -144,7 +145,7 @@ public void testCreateSnapshotDuplicateName() throws Exception {
}
@Test
- public void testCreateSnapshotWithSubDirInput() throws Exception {
+ void testCreateSnapshotWithSubDirInput() throws Exception {
// Test that:
// $ ozone fs -createSnapshot ofs://om/vol1/buck2/dir3/ snap1
//
@@ -185,7 +186,7 @@ public void testCreateSnapshotWithSubDirInput() throws Exception {
@ValueSource(strings = {"snap-1",
"snap75795657617173401188448010125899089001363595171500499231286",
"sn1"})
- public void testCreateSnapshotSuccess(String snapshotName)
+ void testCreateSnapshotSuccess(String snapshotName)
throws Exception {
int res = ToolRunner.run(shell,
new String[]{"-createSnapshot", BUCKET_PATH, snapshotName});
@@ -241,7 +242,7 @@ private static Stream<Arguments> createSnapshotFailureScenarios() {
@ParameterizedTest(name = "{0}")
@MethodSource("createSnapshotFailureScenarios")
- public void testCreateSnapshotFailure(String description,
+ void testCreateSnapshotFailure(String description,
String paramBucketPath,
String snapshotName,
String expectedMessage,
@@ -258,12 +259,12 @@ public void testCreateSnapshotFailure(String description,
* Test list snapshot and snapshot keys with "ozone fs -ls".
*/
@Test
- public void testFsLsSnapshot() throws Exception {
+ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception {
String newKey = "key-" + RandomStringUtils.randomNumeric(5);
String newKeyPath = BUCKET_PATH + OM_KEY_PREFIX + newKey;
// Write a non-zero byte key.
- Path tempFile = Files.createTempFile("testFsLsSnapshot-", "any-suffix");
+ Path tempFile = tempDir.resolve("testFsLsSnapshot-any-suffix");
FileUtils.write(tempFile.toFile(), "random data", UTF_8);
execShellCommandAndGetOutput(0,
new String[]{"-put", tempFile.toString(), newKeyPath});
@@ -294,7 +295,7 @@ public void testFsLsSnapshot() throws Exception {
}
@Test
- public void testDeleteBucketWithSnapshot() throws Exception {
+ void testDeleteBucketWithSnapshot() throws Exception {
String snapshotName = createSnapshot();
String snapshotPath = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH
@@ -326,7 +327,7 @@ public void testDeleteBucketWithSnapshot() throws Exception {
}
@Test
- public void testSnapshotDeleteSuccess() throws Exception {
+ void testSnapshotDeleteSuccess() throws Exception {
String snapshotName = createSnapshot();
// Delete the created snapshot
int res = ToolRunner.run(shell,
@@ -372,7 +373,7 @@ private static Stream<Arguments> deleteSnapshotFailureScenarios() {
@ParameterizedTest(name = "{0}")
@MethodSource("deleteSnapshotFailureScenarios")
- public void testSnapshotDeleteFailure(String description,
+ void testSnapshotDeleteFailure(String description,
String paramBucketPath,
String snapshotName,
String expectedMessage,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
index b180b224755..a8a8fba852b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
@@ -25,11 +25,12 @@
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -44,7 +45,6 @@
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.commons.io.FileUtils;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConsts.MULTIPART_FORM_DATA_BOUNDARY;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
@@ -54,6 +54,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -154,67 +155,66 @@ public void shutdown() {
@ParameterizedTest
@MethodSource("getHttpMethods")
- public void testEndpoint(String httpMethod)
+ void testEndpoint(String httpMethod, @TempDir Path tempDir)
throws ServletException, IOException, InterruptedException {
this.method = httpMethod;
- File tempFile = null;
- try {
- List<String> toExcludeList = new ArrayList<>();
- toExcludeList.add("sstFile1.sst");
- toExcludeList.add("sstFile2.sst");
-
- setupHttpMethod(toExcludeList);
-
- doNothing().when(responseMock).setContentType("application/x-tgz");
- doNothing().when(responseMock).setHeader(Mockito.anyString(),
- Mockito.anyString());
-
- tempFile = File.createTempFile("testEndpoint_" + System
- .currentTimeMillis(), ".tar");
-
- FileOutputStream fileOutputStream = new FileOutputStream(tempFile);
- when(responseMock.getOutputStream()).thenReturn(
- new ServletOutputStream() {
- @Override
- public boolean isReady() {
- return true;
- }
-
- @Override
- public void setWriteListener(WriteListener writeListener) {
- }
-
- @Override
- public void write(int b) throws IOException {
- fileOutputStream.write(b);
- }
- });
-
- when(scmDbCheckpointServletMock.getBootstrapStateLock()).thenReturn(
- new DBCheckpointServlet.Lock());
- scmDbCheckpointServletMock.init();
- long initialCheckpointCount =
- scmMetrics.getDBCheckpointMetrics().getNumCheckpoints();
-
- doEndpoint();
-
- Assertions.assertTrue(tempFile.length() > 0);
- Assertions.assertTrue(
- scmMetrics.getDBCheckpointMetrics().
- getLastCheckpointCreationTimeTaken() > 0);
- Assertions.assertTrue(
- scmMetrics.getDBCheckpointMetrics().
- getLastCheckpointStreamingTimeTaken() > 0);
- Assertions.assertTrue(scmMetrics.getDBCheckpointMetrics().
- getNumCheckpoints() > initialCheckpointCount);
-
- Mockito.verify(scmDbCheckpointServletMock).writeDbDataToStream(any(),
- any(), any(), eq(toExcludeList), any(), any());
- } finally {
- FileUtils.deleteQuietly(tempFile);
- }
+ List<String> toExcludeList = new ArrayList<>();
+ toExcludeList.add("sstFile1.sst");
+ toExcludeList.add("sstFile2.sst");
+
+ setupHttpMethod(toExcludeList);
+
+ doNothing().when(responseMock).setContentType("application/x-tgz");
+ doNothing().when(responseMock).setHeader(Mockito.anyString(),
+ Mockito.anyString());
+
+ final Path outputPath = tempDir.resolve("testEndpoint.tar");
+ when(responseMock.getOutputStream()).thenReturn(
+ new ServletOutputStream() {
+ private final OutputStream fileOutputStream = Files.newOutputStream(outputPath);
+
+ @Override
+ public boolean isReady() {
+ return true;
+ }
+ @Override
+ public void setWriteListener(WriteListener writeListener) {
+ }
+
+ @Override
+ public void close() throws IOException {
+ fileOutputStream.close();
+ super.close();
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ fileOutputStream.write(b);
+ }
+ });
+
+ when(scmDbCheckpointServletMock.getBootstrapStateLock()).thenReturn(
+ new DBCheckpointServlet.Lock());
+ scmDbCheckpointServletMock.init();
+ long initialCheckpointCount =
+ scmMetrics.getDBCheckpointMetrics().getNumCheckpoints();
+
+ doEndpoint();
+
+ Assertions.assertTrue(outputPath.toFile().length() > 0);
+ Assertions.assertTrue(
+ scmMetrics.getDBCheckpointMetrics().
+ getLastCheckpointCreationTimeTaken() > 0);
+ Assertions.assertTrue(
+ scmMetrics.getDBCheckpointMetrics().
+ getLastCheckpointStreamingTimeTaken() > 0);
+ Assertions.assertTrue(scmMetrics.getDBCheckpointMetrics().
+ getNumCheckpoints() > initialCheckpointCount);
+
+ Mockito.verify(scmDbCheckpointServletMock).writeDbDataToStream(any(),
+ any(), any(), eq(toExcludeList), any(), any());
}
@Test
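Here, as in the OM checkpoint servlet test updated below, the response is
routed through an anonymous ServletOutputStream backed by
Files.newOutputStream; the added close() override is what releases the
underlying file handle before the @TempDir directory is cleaned up. A
stripped-down sketch of the wrapper (helper and variable names are
illustrative):

    static ServletOutputStream fileBackedStream(Path outputPath) throws IOException {
      final OutputStream fileOut = Files.newOutputStream(outputPath);
      return new ServletOutputStream() {
        @Override
        public boolean isReady() {
          return true;
        }

        @Override
        public void setWriteListener(WriteListener writeListener) {
          // not needed for the test
        }

        @Override
        public void write(int b) throws IOException {
          fileOut.write(b);          // forward every byte to the temp file
        }

        @Override
        public void close() throws IOException {
          fileOut.close();           // release the file before cleanup
          super.close();
        }
      };
    }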
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index d4f1f777877..a835944eefe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -29,6 +29,7 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
@@ -67,7 +68,6 @@
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.commons.io.FileUtils;
import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdds.utils.HddsServerUtil.OZONE_RATIS_SNAPSHOT_COMPLETE_FLAG_NAME;
@@ -140,9 +140,10 @@ public class TestOMDbCheckpointServlet {
private Path compactionDirPath;
private DBCheckpoint dbCheckpoint;
private String method;
- private File folder;
+ @TempDir
+ private Path folder;
private static final String FABRICATED_FILE_NAME = "fabricatedFile.sst";
- private FileOutputStream fileOutputStream;
+
/**
* Create a MiniDFSCluster for testing.
*
@@ -151,16 +152,15 @@ public class TestOMDbCheckpointServlet {
* @throws Exception
*/
@BeforeEach
- public void init(@TempDir File tempDir) throws Exception {
- folder = tempDir;
+ void init() throws Exception {
conf = new OzoneConfiguration();
- tempFile = File.createTempFile("temp_" + System
- .currentTimeMillis(), ".tar");
-
- fileOutputStream = new FileOutputStream(tempFile);
+ final Path tempPath = folder.resolve("temp.tar");
+ tempFile = tempPath.toFile();
servletOutputStream = new ServletOutputStream() {
+ private final OutputStream fileOutputStream = Files.newOutputStream(tempPath);
+
@Override
public boolean isReady() {
return true;
@@ -170,6 +170,12 @@ public boolean isReady() {
public void setWriteListener(WriteListener writeListener) {
}
+ @Override
+ public void close() throws IOException {
+ fileOutputStream.close();
+ super.close();
+ }
+
@Override
public void write(int b) throws IOException {
fileOutputStream.write(b);
@@ -185,7 +191,6 @@ public void shutdown() throws InterruptedException {
if (cluster != null) {
cluster.shutdown();
}
- FileUtils.deleteQuietly(tempFile);
}
private void setupCluster() throws Exception {
@@ -458,7 +463,7 @@ public void testWriteDbDataToStream() throws Exception {
dbCheckpoint = realCheckpoint.get();
// Untar the file into a temp folder to be examined.
- String testDirName = folder.getAbsolutePath();
+ String testDirName = folder.resolve("testDir").toString();
int testDirLength = testDirName.length() + 1;
String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME;
int newDbDirLength = newDbDirName.length() + 1;
@@ -556,14 +561,14 @@ public void testWriteDbDataWithoutOmSnapshot()
.thenReturn(null);
// Get the tarball.
- Path tmpdir = Files.createTempDirectory("bootstrapData");
+ Path tmpdir = folder.resolve("bootstrapData");
try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock,
fileOutputStream, new ArrayList<>(), new ArrayList<>(), tmpdir);
}
// Untar the file into a temp folder to be examined.
- String testDirName = folder.getAbsolutePath();
+ String testDirName = folder.resolve("testDir").toString();
int testDirLength = testDirName.length() + 1;
FileUtil.unTar(tempFile, new File(testDirName));
@@ -603,14 +608,14 @@ public void testWriteDbDataWithToExcludeFileList()
.thenReturn(null);
// Get the tarball.
- Path tmpdir = Files.createTempDirectory("bootstrapData");
+ Path tmpdir = folder.resolve("bootstrapData");
try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock,
fileOutputStream, toExcludeList, excludedList, tmpdir);
}
// Untar the file into a temp folder to be examined.
- String testDirName = folder.getAbsolutePath();
+ String testDirName = folder.resolve("testDir").toString();
int testDirLength = testDirName.length() + 1;
FileUtil.unTar(tempFile, new File(testDirName));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index cd932f6efde..093f1107b5f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -202,7 +202,7 @@ public void shutdown() {
@ValueSource(ints = {100})
// tried up to 1000 snapshots and this test works, but some of the
// timeouts have to be increased.
- public void testInstallSnapshot(int numSnapshotsToCreate) throws Exception {
+ void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir) throws Exception {
// Get the leader OM
String leaderOMNodeId = OmFailoverProxyUtil
.getFailoverProxyProvider(objectStore.getClientProxy())
@@ -221,7 +221,7 @@ public void testInstallSnapshot(int numSnapshotsToCreate) throws Exception {
FaultInjector faultInjector =
new SnapshotMaxSizeInjector(leaderOM,
followerOM.getOmSnapshotProvider().getSnapshotDir(),
- sstSetList);
+ sstSetList, tempDir);
followerOM.getOmSnapshotProvider().setInjector(faultInjector);
// Create some snapshots, each with new keys
@@ -1186,11 +1186,11 @@ private static class SnapshotMaxSizeInjector extends FaultInjector {
private final List<Set<String>> sstSetList;
private final Path tempDir;
SnapshotMaxSizeInjector(OzoneManager om, File snapshotDir,
- List<Set<String>> sstSetList) throws IOException {
+ List<Set<String>> sstSetList, Path tempDir) {
this.om = om;
this.snapshotDir = snapshotDir;
this.sstSetList = sstSetList;
- this.tempDir = Files.createTempDirectory("tmpDirPrefix");
+ this.tempDir = tempDir;
init();
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
index e5d9605711a..d25cdf298ed 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
@@ -39,7 +39,6 @@
import org.mockito.Mockito;
import java.io.IOException;
-import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
@@ -65,7 +64,7 @@ public class TestOzoneManagerRatisRequest {
public void testRequestWithNonExistentBucket() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
- Files.createTempDirectory(folder, "om").toString());
+ folder.resolve("om").toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration,
ozoneManager);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
@@ -106,7 +105,7 @@ public void testUnknownRequestHandling()
ozoneManager = Mockito.mock(OzoneManager.class);
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
- Files.createTempDirectory(folder, "om").toString());
+ folder.resolve("om").toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration,
ozoneManager);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index 3b70d8af1a5..08358054fcc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -59,7 +59,6 @@
import org.slf4j.event.Level;
import java.io.IOException;
-import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
@@ -188,7 +187,7 @@ public void setUp() throws IOException {
omMetrics = OMMetrics.create();
conf.set(OMConfigKeys.OZONE_OM_DB_DIRS,
- Files.createTempDirectory(folder.toAbsolutePath(), "om").toString());
+ folder.resolve("om").toAbsolutePath().toString());
// No need to conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, ...) here
// as we did the trick earlier with mockito.
omMetadataManager = new OmMetadataManagerImpl(conf, ozoneManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 5229ea46fbc..28af68e2539 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -101,7 +101,6 @@
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -207,6 +206,8 @@ public class TestSnapshotDiffManager {
private final OMMetrics omMetrics = OMMetrics.create();
@TempDir
private File dbDir;
+ @TempDir
+ private File snapDiffDir;
@Mock
private RocksDBCheckpointDiffer differ;
@Mock
@@ -431,7 +432,7 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException {
UUID snap1 = UUID.randomUUID();
UUID snap2 = UUID.randomUUID();
- String diffDir = Files.createTempDirectory("snapdiff_dir").toString();
+ String diffDir = snapDiffDir.getAbsolutePath();
Set<String> randomStrings = IntStream.range(0, numberOfFiles)
.mapToObj(i -> RandomStringUtils.randomAlphabetic(10))
.collect(Collectors.toSet());
@@ -526,8 +527,7 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles,
toSnapshotInfo,
false,
Collections.emptyMap(),
- Files.createTempDirectory("snapdiff_dir").toAbsolutePath()
- .toString());
+ snapDiffDir.getAbsolutePath());
assertEquals(deltaStrings, deltaFiles);
}
}
@@ -591,8 +591,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles)
toSnapshotInfo,
false,
Collections.emptyMap(),
- Files.createTempDirectory("snapdiff_dir").toAbsolutePath()
- .toString());
+ snapDiffDir.getAbsolutePath());
assertEquals(deltaStrings, deltaFiles);
rcFromSnapshot.close();
From be2e19948b6c8fb3f3b9a0c69ee6dfd88967f840 Mon Sep 17 00:00:00 2001
From: Zhaohui Wang <32935220+wzhallright@users.noreply.github.com>
Date: Wed, 20 Dec 2023 19:03:36 +0800
Subject: [PATCH 15/28] HDDS-9942. Move BufferAllocator to test (#5836)
---
.../java/org/apache/ozone/erasurecode/BufferAllocator.java | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename hadoop-hdds/erasurecode/src/{main => test}/java/org/apache/ozone/erasurecode/BufferAllocator.java (100%)
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/BufferAllocator.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/BufferAllocator.java
similarity index 100%
rename from hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/BufferAllocator.java
rename to hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/BufferAllocator.java
From aa2aa742762d70b45e085b72b96b75cedb8f4f64 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Dec 2023 13:38:00 +0100
Subject: [PATCH 16/28] HDDS-9969. Bump maven-compiler-plugin to 3.9.0 (#5774)
---
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pom.xml b/pom.xml
index 638da75d439..359f86451b4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -261,7 +261,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
${maven-surefire-plugin.version}
3.3.2
- <maven-compiler-plugin.version>3.1</maven-compiler-plugin.version>
+ <maven-compiler-plugin.version>3.9.0</maven-compiler-plugin.version>
3.1.1
3.1.0
3.5.1
From 477c8decfd4ca5c2fce69d9a4e89d00a4f0e472c Mon Sep 17 00:00:00 2001
From: Zhaohui Wang <32935220+wzhallright@users.noreply.github.com>
Date: Wed, 20 Dec 2023 22:12:35 +0800
Subject: [PATCH 17/28] HDDS-9948. Compose annotation for tests parameterized
with ContainerLayoutVersion (#5839)
---
.../common/impl/TestContainerDataYaml.java | 34 +---
.../TestContainerDeletionChoosingPolicy.java | 36 ++--
.../common/impl/TestContainerSet.java | 28 +--
.../common/impl/TestHddsDispatcher.java | 12 +-
.../TestCloseContainerCommandHandler.java | 33 +---
.../keyvalue/ContainerLayoutTestInfo.java | 20 +-
.../TestKeyValueContainerMarkUnhealthy.java | 25 +--
.../keyvalue/TestKeyValueHandler.java | 37 ++--
.../TestReplicationSupervisor.java | 178 ++++++++----------
.../client/rpc/read/TestChunkInputStream.java | 12 +-
.../client/rpc/read/TestKeyInputStream.java | 8 +-
11 files changed, 163 insertions(+), 260 deletions(-)
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 93f2a11a59e..4bd2ece41eb 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -29,11 +29,8 @@
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.junit.jupiter.api.Assertions;
import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.io.IOException;
@@ -43,6 +40,7 @@
import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -69,10 +67,6 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) {
this.layoutVersion = layoutVersion;
}
- private static Iterable