From d71904d2d40e226151c8a56e8555b7753fb40e3c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 19 Dec 2023 07:02:52 +0100 Subject: [PATCH 01/28] HDDS-6152. Migrate TestOzoneFileSystem to JUnit5 (#5795) --- .../dev-support/findbugsExcludeFile.xml | 4 - ....java => AbstractOzoneFileSystemTest.java} | 302 +++++++----------- ...> AbstractOzoneFileSystemTestWithFSO.java} | 168 ++++------ .../org/apache/hadoop/fs/ozone/TestO3FS.java | 28 ++ .../hadoop/fs/ozone/TestO3FSWithFSO.java | 27 ++ .../fs/ozone/TestO3FSWithFSOAndOMRatis.java | 27 ++ .../hadoop/fs/ozone/TestO3FSWithFSPaths.java | 28 ++ .../ozone/TestO3FSWithFSPathsAndOMRatis.java | 28 ++ .../hadoop/fs/ozone/TestO3FSWithOMRatis.java | 28 ++ 9 files changed, 337 insertions(+), 303 deletions(-) rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/{TestOzoneFileSystem.java => AbstractOzoneFileSystemTest.java} (87%) rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/{TestOzoneFileSystemWithFSO.java => AbstractOzoneFileSystemTestWithFSO.java} (80%) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FS.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSO.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml index 098d27980c3..99ca98f85ed 100644 --- a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml @@ -125,10 +125,6 @@ - - - - diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java similarity index 87% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index b5d83704833..ba55b2afcf7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -64,15 +64,11 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.TestClock; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -106,20 +102,20 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeFalse; /** * Ozone file system tests that are not covered by contract tests. */ -@RunWith(Parameterized.class) -public class TestOzoneFileSystem { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +abstract class AbstractOzoneFileSystemTest { private static final float TRASH_INTERVAL = 0.05f; // 3 seconds @@ -132,58 +128,31 @@ public class TestOzoneFileSystem { private static final PathFilter EXCLUDE_TRASH = p -> !p.toUri().getPath().startsWith(TRASH_ROOT.toString()); - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList( - new Object[]{true, true}, - new Object[]{true, false}, - new Object[]{false, true}, - new Object[]{false, false}); - } - - public TestOzoneFileSystem(boolean setDefaultFs, boolean enableOMRatis) { - // Checking whether 'defaultFS' and 'omRatis' flags represents next - // parameter index values. This is to ensure that initialize - // TestOzoneFileSystem#init() function will be invoked only at the - // beginning of every new set of Parameterized.Parameters. - if (enabledFileSystemPaths != setDefaultFs || - omRatisEnabled != enableOMRatis || cluster == null) { - enabledFileSystemPaths = setDefaultFs; - omRatisEnabled = enableOMRatis; - try { - teardown(); - init(); - } catch (Exception e) { - LOG.info("Unexpected exception", e); - fail("Unexpected exception:" + e.getMessage()); - } - } + AbstractOzoneFileSystemTest(boolean setDefaultFs, boolean enableOMRatis, BucketLayout layout) { + enabledFileSystemPaths = setDefaultFs; + omRatisEnabled = enableOMRatis; + bucketLayout = layout; } - /** - * Set a timeout for each test. 
- */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(600)); - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneFileSystem.class); - - private static BucketLayout bucketLayout = BucketLayout.LEGACY; - private static boolean enabledFileSystemPaths; - private static boolean omRatisEnabled; - - private static MiniOzoneCluster cluster; - private static OzoneClient client; - private static OzoneManagerProtocol writeClient; - private static FileSystem fs; - private static OzoneFileSystem o3fs; - private static OzoneBucket ozoneBucket; - private static String volumeName; - private static String bucketName; - private static Trash trash; - - private void init() throws Exception { + LoggerFactory.getLogger(AbstractOzoneFileSystemTest.class); + + private final BucketLayout bucketLayout; + private final boolean enabledFileSystemPaths; + private final boolean omRatisEnabled; + + private MiniOzoneCluster cluster; + private OzoneClient client; + private OzoneManagerProtocol writeClient; + private FileSystem fs; + private OzoneFileSystem o3fs; + private OzoneBucket ozoneBucket; + private String volumeName; + private String bucketName; + private Trash trash; + + @BeforeAll + void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); @@ -224,8 +193,8 @@ private void init() throws Exception { o3fs = (OzoneFileSystem) fs; } - @AfterClass - public static void teardown() { + @AfterAll + void teardown() { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); @@ -233,7 +202,7 @@ public static void teardown() { IOUtils.closeQuietly(fs); } - @After + @AfterEach public void cleanup() { try { deleteRootDir(); @@ -243,28 +212,24 @@ public void cleanup() { } } - public static MiniOzoneCluster getCluster() { + public MiniOzoneCluster getCluster() { return cluster; } - public static FileSystem getFs() { + public FileSystem getFs() { return fs; } - public static void setBucketLayout(BucketLayout bLayout) { - bucketLayout = bLayout; - } - - public static String getBucketName() { + public String getBucketName() { return bucketName; } - public static String getVolumeName() { + public String getVolumeName() { return volumeName; } public BucketLayout getBucketLayout() { - return BucketLayout.DEFAULT; + return bucketLayout; } @Test @@ -284,7 +249,7 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() Path parent = new Path("/d1/d2/d3/d4/"); Path file1 = new Path(parent, "key1"); try (FSDataOutputStream outputStream = fs.create(file1, false)) { - assertNotNull("Should be able to create file", outputStream); + assertNotNull(outputStream, "Should be able to create file"); } Path dir1 = new Path("/d1/d2/d3/d4/key2"); @@ -297,7 +262,7 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() Path file2 = new Path("/d1/d2/d3/d4/key3"); try (FSDataOutputStream outputStream2 = fs.create(file2, false)) { - assertNotNull("Should be able to create file", outputStream2); + assertNotNull(outputStream2, "Should be able to create file"); } try { fs.mkdirs(file2); @@ -316,10 +281,8 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() // Directory FileStatus fileStatus = fs.getFileStatus(parent); - assertEquals("FileStatus did not return the directory", - "/d1/d2/d3/d4", fileStatus.getPath().toUri().getPath()); - assertTrue("FileStatus did not return the directory", - 
fileStatus.isDirectory()); + assertEquals("/d1/d2/d3/d4", fileStatus.getPath().toUri().getPath()); + assertTrue(fileStatus.isDirectory()); // invalid sub directory try { @@ -351,12 +314,12 @@ public void testMakeDirsWithAnExistingDirectoryPath() throws Exception { Path parent = new Path("/d1/d2/d3/d4/"); Path file1 = new Path(parent, "key1"); try (FSDataOutputStream outputStream = fs.create(file1, false)) { - assertNotNull("Should be able to create file", outputStream); + assertNotNull(outputStream, "Should be able to create file"); } Path subdir = new Path("/d1/d2/"); boolean status = fs.mkdirs(subdir); - assertTrue("Shouldn't send error if dir exists", status); + assertTrue(status, "Shouldn't send error if dir exists"); } @Test @@ -412,9 +375,8 @@ private void checkInvalidPath(Path path) { public void testOzoneFsServiceLoader() throws IOException { assumeFalse(FILE_SYSTEM_OPTIMIZED.equals(getBucketLayout())); - assertEquals( - FileSystem.getFileSystemClass(OzoneConsts.OZONE_URI_SCHEME, null), - OzoneFileSystem.class); + assertEquals(OzoneFileSystem.class, + FileSystem.getFileSystemClass(OzoneConsts.OZONE_URI_SCHEME, null)); } @Test @@ -435,10 +397,8 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { } // List status on the parent should show the child file - assertEquals("List status of parent should include the 1 child file", 1L, - fs.listStatus(parent).length); - assertTrue("Parent directory does not appear to be a directory", - fs.getFileStatus(parent).isDirectory()); + assertEquals(1L, fs.listStatus(parent).length, "List status of parent should include the 1 child file"); + assertTrue(fs.getFileStatus(parent).isDirectory(), "Parent directory does not appear to be a directory"); } @Test @@ -601,22 +561,19 @@ public void testListStatus() throws Exception { Path file2 = new Path(parent, "key2"); FileStatus[] fileStatuses = o3fs.listStatus(ROOT, EXCLUDE_TRASH); - assertEquals("Should be empty", 0, fileStatuses.length); + assertEquals(0, fileStatuses.length, "Should be empty"); ContractTestUtils.touch(fs, file1); ContractTestUtils.touch(fs, file2); fileStatuses = o3fs.listStatus(ROOT, EXCLUDE_TRASH); - assertEquals("Should have created parent", - 1, fileStatuses.length); - assertEquals("Parent path doesn't match", - fileStatuses[0].getPath().toUri().getPath(), parent.toString()); + assertEquals(1, fileStatuses.length, "Should have created parent"); + assertEquals(fileStatuses[0].getPath().toUri().getPath(), parent.toString(), "Parent path doesn't match"); // ListStatus on a directory should return all subdirs along with // files, even if there exists a file and sub-dir with the same name. fileStatuses = o3fs.listStatus(parent); - assertEquals("FileStatus did not return all children of the directory", - 2, fileStatuses.length); + assertEquals(2, fileStatuses.length, "FileStatus did not return all children of the directory"); // ListStatus should return only the immediate children of a directory. 
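// For example: with key1, key2, dir1/key3 and dir1/key4 all created under
// the parent, listStatus(parent) should report three entries (key1, key2
// and the directory dir1), not the keys nested inside dir1.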
Path file3 = new Path(parent, "dir1/key3"); @@ -624,8 +581,7 @@ public void testListStatus() throws Exception { ContractTestUtils.touch(fs, file3); ContractTestUtils.touch(fs, file4); fileStatuses = o3fs.listStatus(parent); - assertEquals("FileStatus did not return all children of the directory", - 3, fileStatuses.length); + assertEquals(3, fileStatuses.length, "FileStatus did not return all children of the directory"); } @Test @@ -661,7 +617,7 @@ public void testListStatusWithIntermediateDir() throws Exception { FileStatus[] fileStatuses = fs.listStatus(ROOT, EXCLUDE_TRASH); // the number of immediate children of root is 1 - assertEquals(Arrays.toString(fileStatuses), 1, fileStatuses.length); + assertEquals(1, fileStatuses.length, Arrays.toString(fileStatuses)); writeClient.deleteKey(keyArgs); } @@ -694,8 +650,7 @@ public void testListStatusWithIntermediateDirWithECEnabled() FileStatus[] fileStatuses = fs.listStatus(ROOT, EXCLUDE_TRASH); // the number of immediate children of root is 1 assertEquals(1, fileStatuses.length); - assertEquals(fileStatuses[0].isErasureCoded(), - !bucketLayout.isFileSystemOptimized()); + assertEquals(fileStatuses[0].isErasureCoded(), !bucketLayout.isFileSystemOptimized()); fileStatuses = fs.listStatus(new Path( fileStatuses[0].getPath().toString() + "/object-name1")); assertEquals(1, fileStatuses.length); @@ -718,8 +673,7 @@ public void testListStatusOnRoot() throws Exception { // exist) and dir2 only. dir12 is not an immediate child of root and // hence should not be listed. FileStatus[] fileStatuses = o3fs.listStatus(ROOT, EXCLUDE_TRASH); - assertEquals("FileStatus should return only the immediate children", - 2, fileStatuses.length); + assertEquals(2, fileStatuses.length, "FileStatus should return only the immediate children"); // Verify that dir12 is not included in the result of the listStatus on root String fileStatus1 = fileStatuses[0].getPath().toUri().getPath(); @@ -767,9 +721,7 @@ public void testListStatusOnLargeDirectory() throws Exception { LOG.info("actualPathList: {}", actualPathList); } } - assertEquals( - "Total directories listed do not match the existing directories", - numDirs, fileStatuses.length); + assertEquals(numDirs, fileStatuses.length, "Total directories listed do not match the existing directories"); for (int i = 0; i < numDirs; i++) { assertTrue(paths.contains(fileStatuses[i].getPath().getName())); @@ -802,8 +754,7 @@ public void testListStatusOnKeyNameContainDelimiter() throws Exception { fileStatuses = fs.listStatus(new Path("/dir1/dir2")); assertEquals(1, fileStatuses.length); - assertEquals("/dir1/dir2/key1", - fileStatuses[0].getPath().toUri().getPath()); + assertEquals("/dir1/dir2/key1", fileStatuses[0].getPath().toUri().getPath()); assertTrue(fileStatuses[0].isFile()); } @@ -824,11 +775,11 @@ protected void deleteRootDir() throws IOException, InterruptedException { for (FileStatus fileStatus : fileStatuses) { LOG.error("Unexpected file, should have been deleted: {}", fileStatus); } - assertEquals("Delete root failed!", 0, fileStatuses.length); + assertEquals(0, fileStatuses.length, "Delete root failed!"); } } - private static void deleteRootRecursively(FileStatus[] fileStatuses) + private void deleteRootRecursively(FileStatus[] fileStatuses) throws IOException { for (FileStatus fStatus : fileStatuses) { fs.delete(fStatus.getPath(), true); @@ -861,8 +812,7 @@ public void testListStatusOnSubDirs() throws Exception { fs.mkdirs(dir2); FileStatus[] fileStatuses = o3fs.listStatus(dir1); - assertEquals("FileStatus should return only 
the immediate children", 2, - fileStatuses.length); + assertEquals(2, fileStatuses.length, "FileStatus should return only the immediate children"); // Verify that the two children of /dir1 returned by listStatus operation // are /dir1/dir11 and /dir1/dir12. @@ -894,8 +844,7 @@ public void testListStatusIteratorWithDir() throws Exception { while (it.hasNext()) { FileStatus fileStatus = it.next(); assertNotNull(fileStatus); - assertEquals("Parent path doesn't match", - fileStatus.getPath().toUri().getPath(), parent.toString()); + assertEquals(fileStatus.getPath().toUri().getPath(), parent.toString(), "Parent path doesn't match"); } // Iterator on a directory should return all subdirs along with // files, even if there exists a file and sub-dir with the same name. @@ -906,9 +855,7 @@ public void testListStatusIteratorWithDir() throws Exception { FileStatus fileStatus = it.next(); assertNotNull(fileStatus); } - assertEquals( - "Iterator did not return all the file status", - 2, iCount); + assertEquals(2, iCount, "Iterator did not return all the file status"); // Iterator should return file status for only the // immediate children of a directory. Path file3 = new Path(parent, "dir1/key3"); @@ -923,8 +870,8 @@ public void testListStatusIteratorWithDir() throws Exception { FileStatus fileStatus = it.next(); assertNotNull(fileStatus); } - assertEquals("Iterator did not return file status " + - "of all the children of the directory", 3, iCount); + assertEquals(3, iCount, "Iterator did not return file status " + + "of all the children of the directory"); } finally { // Cleanup @@ -955,11 +902,9 @@ public void testListStatusIteratorOnRoot() throws Exception { assertNotNull(fileStatus); // Verify that dir12 is not included in the result // of the listStatusIterator on root. 
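// (Same layout as testListStatusOnRoot above: dir12 sits one level below
// the root, so only the two top-level directories count as immediate
// children.)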
- assertNotEquals(fileStatus.getPath().toUri().getPath(), - dir12.toString()); + assertNotEquals(fileStatus.getPath().toUri().getPath(), dir12.toString()); } - assertEquals("FileStatus should return only the immediate children", - 2, iCount); + assertEquals(2, iCount, "FileStatus should return only the immediate children"); } finally { // Cleanup fs.delete(dir2, true); @@ -1010,8 +955,7 @@ public void testListStatusIteratorOnSubDirs() throws Exception { equals(dir11.toString()) || fileStatus.getPath().toUri().getPath() .equals(dir12.toString())); } - assertEquals("FileStatus should return only the immediate children", 2, - iCount); + assertEquals(2, iCount, "FileStatus should return only the immediate children"); } finally { // Cleanup fs.delete(dir2, true); @@ -1035,8 +979,7 @@ public void testSeekOnFileLength() throws IOException { fs.open(fileNotExists); fail("Should throw FileNotFoundException as file doesn't exist!"); } catch (FileNotFoundException fnfe) { - assertTrue("Expected KEY_NOT_FOUND error", - fnfe.getMessage().contains("KEY_NOT_FOUND")); + assertTrue(fnfe.getMessage().contains("KEY_NOT_FOUND"), "Expected KEY_NOT_FOUND error"); } } @@ -1059,14 +1002,12 @@ public void testAllocateMoreThanOneBlock() throws IOException { FileStatus fileStatus = fs.getFileStatus(file); long blkSize = fileStatus.getBlockSize(); long fileLength = fileStatus.getLen(); - assertTrue("Block allocation should happen", - fileLength > blkSize); + assertTrue(fileLength > blkSize, "Block allocation should happen"); long newNumBlockAllocations = cluster.getOzoneManager().getMetrics().getNumBlockAllocates(); - assertTrue("Block allocation should happen", - (newNumBlockAllocations > numBlockAllocationsOrg)); + assertTrue((newNumBlockAllocations > numBlockAllocationsOrg), "Block allocation should happen"); stream.seek(fileLength); assertEquals(-1, stream.read()); @@ -1097,11 +1038,10 @@ public void testNonExplicitlyCreatedPathExistsAfterItsLeafsWereRemoved() // after rename listStatus for interimPath should succeed and // interimPath should have no children FileStatus[] statuses = fs.listStatus(interimPath); - assertNotNull("liststatus returns a null array", statuses); - assertEquals("Statuses array is not empty", 0, statuses.length); + assertNotNull(statuses, "liststatus returns a null array"); + assertEquals(0, statuses.length, "Statuses array is not empty"); FileStatus fileStatus = fs.getFileStatus(interimPath); - assertEquals("FileStatus does not point to interimPath", - interimPath.getName(), fileStatus.getPath().getName()); + assertEquals(interimPath.getName(), fileStatus.getPath().getName(), "FileStatus does not point to interimPath"); } /** @@ -1120,8 +1060,7 @@ public void testRenameWithNonExistentSource() throws Exception { LOG.info("Created destin dir: {}", destin); LOG.info("Rename op-> source:{} to destin:{}}", source, destin); - assertFalse("Expected to fail rename as src doesn't exist", - fs.rename(source, destin)); + assertFalse(fs.rename(source, destin), "Expected to fail rename as src doesn't exist"); } /** @@ -1188,12 +1127,12 @@ public void testRenameToExistingDir() throws Exception { fs.mkdirs(acPath); // Rename from /a to /b. 
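// Destination /b already exists, so the source tree is expected to move
// underneath it: /a and /a/c should resurface as /b/a and /b/a/c.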
- assertTrue("Rename failed", fs.rename(aSourcePath, bDestinPath)); + assertTrue(fs.rename(aSourcePath, bDestinPath), "Rename failed"); final Path baPath = new Path(fs.getUri().toString() + "/b/a"); final Path bacPath = new Path(fs.getUri().toString() + "/b/a/c"); - assertTrue("Rename failed", fs.exists(baPath)); - assertTrue("Rename failed", fs.exists(bacPath)); + assertTrue(fs.exists(baPath), "Rename failed"); + assertTrue(fs.exists(bacPath), "Rename failed"); } /** @@ -1219,8 +1158,7 @@ public void testRenameToNewSubDirShouldNotExist() throws Exception { final Path baPath = new Path(fs.getUri().toString() + "/b/a/c"); fs.mkdirs(baPath); - assertFalse("New destin sub-path /b/a already exists", - fs.rename(aSourcePath, bDestinPath)); + assertFalse(fs.rename(aSourcePath, bDestinPath), "New destin sub-path /b/a already exists"); // Case-5.b) Rename file from /a/b/c/file1 to /a. // Should be failed since /a/file1 exists. @@ -1234,8 +1172,7 @@ public void testRenameToNewSubDirShouldNotExist() throws Exception { final Path aDestinPath = new Path(fs.getUri().toString() + "/a"); - assertFalse("New destin sub-path /b/a already exists", - fs.rename(abcFile1, aDestinPath)); + assertFalse(fs.rename(abcFile1, aDestinPath), "New destin sub-path /b/a already exists"); } /** @@ -1251,8 +1188,7 @@ public void testRenameDirToFile() throws Exception { ContractTestUtils.touch(fs, file1Destin); Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c"); fs.mkdirs(abcRootPath); - assertFalse("key already exists /root_dir/file1", - fs.rename(abcRootPath, file1Destin)); + assertFalse(fs.rename(abcRootPath, file1Destin), "key already exists /root_dir/file1"); } /** @@ -1268,8 +1204,8 @@ public void testRenameFile() throws Exception { + "/file1_Copy"); ContractTestUtils.touch(fs, file1Source); Path file1Destin = new Path(fs.getUri().toString() + root + "/file1"); - assertTrue("Renamed failed", fs.rename(file1Source, file1Destin)); - assertTrue("Renamed failed: /root/file1", fs.exists(file1Destin)); + assertTrue(fs.rename(file1Source, file1Destin), "Renamed failed"); + assertTrue(fs.exists(file1Destin), "Renamed failed: /root/file1"); /** * Reading several times, this is to verify that OmKeyInfo#keyName cached @@ -1278,8 +1214,8 @@ public void testRenameFile() throws Exception { */ for (int i = 0; i < 10; i++) { FileStatus[] fStatus = fs.listStatus(rootPath); - assertEquals("Renamed failed", 1, fStatus.length); - assertEquals("Wrong path name!", file1Destin, fStatus[0].getPath()); + assertEquals(1, fStatus.length, "Renamed failed"); + assertEquals(file1Destin, fStatus[0].getPath(), "Wrong path name!"); } } @@ -1296,9 +1232,9 @@ public void testRenameFileToDir() throws Exception { ContractTestUtils.touch(fs, file1Destin); Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c"); fs.mkdirs(abcRootPath); - assertTrue("Renamed failed", fs.rename(file1Destin, abcRootPath)); - assertTrue("Renamed filed: /a/b/c/file1", fs.exists(new Path(abcRootPath, - "file1"))); + assertTrue(fs.rename(file1Destin, abcRootPath), "Renamed failed"); + assertTrue(fs.exists(new Path(abcRootPath, + "file1")), "Renamed filed: /a/b/c/file1"); } @Test @@ -1374,18 +1310,16 @@ public void testRenameToParentDir() throws Exception { ContractTestUtils.touch(fs, file1Source); // rename source directory to its parent directory(destination). 
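// After the rename, the source directory should be addressable directly
// under the destination root as .../dir2, and the file renamed afterwards
// as .../file2.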
- assertTrue("Rename failed", fs.rename(dir2SourcePath, destRootPath)); + assertTrue(fs.rename(dir2SourcePath, destRootPath), "Rename failed"); final Path expectedPathAfterRename = new Path(fs.getUri().toString() + root + "/dir2"); - assertTrue("Rename failed", - fs.exists(expectedPathAfterRename)); + assertTrue(fs.exists(expectedPathAfterRename), "Rename failed"); // rename source file to its parent directory(destination). - assertTrue("Rename failed", fs.rename(file1Source, destRootPath)); + assertTrue(fs.rename(file1Source, destRootPath), "Rename failed"); final Path expectedFilePathAfterRename = new Path(fs.getUri().toString() + root + "/file2"); - assertTrue("Rename failed", - fs.exists(expectedFilePathAfterRename)); + assertTrue(fs.exists(expectedFilePathAfterRename), "Rename failed"); } @Test @@ -1399,11 +1333,10 @@ public void testRenameDir() throws Exception { LOG.info("Created dir {}", subdir); LOG.info("Will move {} to {}", source, dest); fs.rename(source, dest); - assertTrue("Directory rename failed", fs.exists(dest)); + assertTrue(fs.exists(dest), "Directory rename failed"); // Verify that the subdir is also renamed i.e. keys corresponding to the // sub-directories of the renamed directory have also been renamed. - assertTrue("Keys under the renamed directory not renamed", - fs.exists(new Path(dest, "sub_dir1"))); + assertTrue(fs.exists(new Path(dest, "sub_dir1")), "Keys under the renamed directory not renamed"); // Test if one path belongs to other FileSystem. IllegalArgumentException exception = assertThrows( @@ -1440,8 +1373,7 @@ public void testGetDirectoryModificationTime() FileStatus[] fileStatuses = o3fs.listStatus(mdir11); // Above listStatus result should only have one entry: mdir111 assertEquals(1, fileStatuses.length); - assertEquals(mdir111.toString(), - fileStatuses[0].getPath().toUri().getPath()); + assertEquals(mdir111.toString(), fileStatuses[0].getPath().toUri().getPath()); assertTrue(fileStatuses[0].isDirectory()); // The dir key is actually created on server, // so modification time should always be the same value. @@ -1457,8 +1389,7 @@ public void testGetDirectoryModificationTime() fileStatuses = o3fs.listStatus(mdir1); // Above listStatus result should only have one entry: mdir11 assertEquals(1, fileStatuses.length); - assertEquals(mdir11.toString(), - fileStatuses[0].getPath().toUri().getPath()); + assertEquals(mdir11.toString(), fileStatuses[0].getPath().toUri().getPath()); assertTrue(fileStatuses[0].isDirectory()); // Since the dir key doesn't exist on server, the modification time is // set to current time upon every listStatus request. 
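Nearly every hunk in this file applies the same mechanical change: the JUnit 4 org.junit.Assert overloads take the failure message as the first argument, while the JUnit 5 org.junit.jupiter.api.Assertions overloads take it as the last. A minimal before/after sketch of the pattern (messages taken from this file, variable names illustrative):

    // JUnit 4 (org.junit.Assert): message first
    assertEquals("Should be empty", 0, fileStatuses.length);
    assertTrue("Rename failed", fs.rename(source, dest));

    // JUnit 5 (org.junit.jupiter.api.Assertions): message last
    assertEquals(0, fileStatuses.length, "Should be empty");
    assertTrue(fs.rename(source, dest), "Rename failed");

The class-level change is complementary: the Parameterized runner is replaced by an abstract base class taking the former parameters as constructor arguments, and @TestInstance(TestInstance.Lifecycle.PER_CLASS) lets @BeforeAll and @AfterAll be instance methods, so each concrete subclass (one per parameter combination) starts and stops its own cluster.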
@@ -1538,9 +1469,8 @@ private void createKeyAndAssertKeyType(OzoneBucket bucket, OzoneFileSystem o3FS, Path keyPath, ReplicationType expectedType) throws IOException { o3FS.createFile(keyPath).build().close(); - assertEquals(expectedType.name(), - bucket.getKey(o3FS.pathToKey(keyPath)).getReplicationConfig() - .getReplicationType().name()); + assertEquals(expectedType.name(), bucket.getKey(o3FS.pathToKey(keyPath)).getReplicationConfig() + .getReplicationType().name()); } @Test @@ -1554,8 +1484,7 @@ public void testGetTrashRoots() throws IOException { fs.mkdirs(userTrash); res = o3fs.getTrashRoots(false); assertEquals(1, res.size()); - res.forEach(e -> assertEquals( - userTrash.toString(), e.getPath().toUri().getPath())); + res.forEach(e -> assertEquals(userTrash.toString(), e.getPath().toUri().getPath())); // Only have one user trash for now res = o3fs.getTrashRoots(true); assertEquals(1, res.size()); @@ -1572,8 +1501,7 @@ public void testGetTrashRoots() throws IOException { // allUsers = false should still return current user trash res = o3fs.getTrashRoots(false); assertEquals(1, res.size()); - res.forEach(e -> assertEquals( - userTrash.toString(), e.getPath().toUri().getPath())); + res.forEach(e -> assertEquals(userTrash.toString(), e.getPath().toUri().getPath())); // allUsers = true should return all user trash res = o3fs.getTrashRoots(true); assertEquals(6, res.size()); @@ -1663,8 +1591,7 @@ public void testListStatusOnLargeDirectoryForACLCheck() throws Exception { cluster.getOzoneManager().getKeyManager()); fail("Non-existent key name!"); } catch (OMException ome) { - assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, - ome.getResult()); + assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult()); } OzonePrefixPathImpl ozonePrefixPath = @@ -1678,7 +1605,7 @@ public void testListStatusOnLargeDirectoryForACLCheck() throws Exception { Iterator pathItr = ozonePrefixPath.getChildren(keyName); - assertTrue("Failed to list keyPath:" + keyName, pathItr.hasNext()); + assertTrue(pathItr.hasNext(), "Failed to list keyPath:" + keyName); Set actualPaths = new TreeSet<>(); while (pathItr.hasNext()) { @@ -1689,17 +1616,15 @@ public void testListStatusOnLargeDirectoryForACLCheck() throws Exception { Iterator subPathItr = ozonePrefixPath.getChildren(pathname); assertNotNull(subPathItr); - assertFalse("Failed to list keyPath: " + pathname, - subPathItr.hasNext()); + assertFalse(subPathItr.hasNext(), "Failed to list keyPath: " + pathname); } - assertEquals("ListStatus failed", paths.size(), - actualPaths.size()); + assertEquals(paths.size(), actualPaths.size(), "ListStatus failed"); for (String pathname : actualPaths) { paths.remove(pathname); } - assertTrue("ListStatus failed:" + paths, paths.isEmpty()); + assertTrue(paths.isEmpty(), "ListStatus failed:" + paths); } @Test @@ -1760,8 +1685,7 @@ public void testLoopInLinkBuckets() throws Exception { fail("Should throw Exception due to loop in Link Buckets"); } catch (OMException oe) { // Expected exception - assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, - oe.getResult()); + assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, oe.getResult()); } finally { volume.deleteBucket(linkBucket1Name); volume.deleteBucket(linkBucket2Name); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java similarity index 80% rename from 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithFSO.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java index d2d2fd6b816..2d4c310c886 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java @@ -33,61 +33,41 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.ozone.test.GenericTestUtils; -import org.junit.After; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Ozone file system tests that are not covered by contract tests, * - prefix layout. 
* */ -@RunWith(Parameterized.class) -public class TestOzoneFileSystemWithFSO extends TestOzoneFileSystem { - - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList( - new Object[]{true, true}, - new Object[]{true, false}); - } - - @BeforeClass - public static void init() { - setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED); - } +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +abstract class AbstractOzoneFileSystemTestWithFSO extends AbstractOzoneFileSystemTest { - public TestOzoneFileSystemWithFSO(boolean setDefaultFs, - boolean enableOMRatis) { - super(setDefaultFs, enableOMRatis); - } + private static final Logger LOG = + LoggerFactory.getLogger(AbstractOzoneFileSystemTestWithFSO.class); - @After - @Override - public void cleanup() { - super.cleanup(); + AbstractOzoneFileSystemTestWithFSO(boolean enableOMRatis) { + super(true, enableOMRatis, BucketLayout.FILE_SYSTEM_OPTIMIZED); } - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneFileSystemWithFSO.class); - @Test public void testListStatusWithoutRecursiveSearch() throws Exception { /* @@ -102,32 +82,27 @@ public void testListStatusWithoutRecursiveSearch() throws Exception { Path key1 = new Path("/key1"); try (FSDataOutputStream outputStream = getFs().create(key1, false)) { - assertNotNull("Should be able to create file: key1", - outputStream); + assertNotNull(outputStream, "Should be able to create file: key1"); } Path d1 = new Path("/d1"); Path dir1Key1 = new Path(d1, "key1"); try (FSDataOutputStream outputStream = getFs().create(dir1Key1, false)) { - assertNotNull("Should be able to create file: " + dir1Key1, - outputStream); + assertNotNull(outputStream, "Should be able to create file: " + dir1Key1); } Path d2 = new Path("/d2"); Path dir2Key1 = new Path(d2, "key1"); try (FSDataOutputStream outputStream = getFs().create(dir2Key1, false)) { - assertNotNull("Should be able to create file: " + dir2Key1, - outputStream); + assertNotNull(outputStream, "Should be able to create file: " + dir2Key1); } Path dir1Dir2 = new Path("/d1/d2/"); Path dir1Dir2Key1 = new Path(dir1Dir2, "key1"); try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Key1, false)) { - assertNotNull("Should be able to create file: " + dir1Dir2Key1, - outputStream); + assertNotNull(outputStream, "Should be able to create file: " + dir1Dir2Key1); } Path d1Key2 = new Path(d1, "key2"); try (FSDataOutputStream outputStream = getFs().create(d1Key2, false)) { - assertNotNull("Should be able to create file: " + d1Key2, - outputStream); + assertNotNull(outputStream, "Should be able to create file: " + d1Key2); } Path dir1Dir3 = new Path("/d1/d3/"); @@ -141,8 +116,7 @@ public void testListStatusWithoutRecursiveSearch() throws Exception { // Root Directory FileStatus[] fileStatusList = getFs().listStatus(new Path("/")); - assertEquals("FileStatus should return files and directories", - 3, fileStatusList.length); + assertEquals(3, fileStatusList.length, "FileStatus should return files and directories"); ArrayList expectedPaths = new ArrayList<>(); expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1"); expectedPaths.add("o3fs://" + bucketName + "." 
+ volumeName + "/d2"); @@ -150,13 +124,11 @@ public void testListStatusWithoutRecursiveSearch() throws Exception { for (FileStatus fileStatus : fileStatusList) { expectedPaths.remove(fileStatus.getPath().toString()); } - assertEquals("Failed to return the filestatus[]" + expectedPaths, - 0, expectedPaths.size()); + assertEquals(0, expectedPaths.size(), "Failed to return the filestatus[]" + expectedPaths); // level-1 sub-dirs fileStatusList = getFs().listStatus(new Path("/d1")); - assertEquals("FileStatus should return files and directories", - 5, fileStatusList.length); + assertEquals(5, fileStatusList.length, "FileStatus should return files and directories"); expectedPaths = new ArrayList<>(); expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2"); expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d3"); @@ -166,34 +138,29 @@ public void testListStatusWithoutRecursiveSearch() throws Exception { for (FileStatus fileStatus : fileStatusList) { expectedPaths.remove(fileStatus.getPath().toString()); } - assertEquals("Failed to return the filestatus[]" + expectedPaths, - 0, expectedPaths.size()); + assertEquals(0, expectedPaths.size(), "Failed to return the filestatus[]" + expectedPaths); // level-2 sub-dirs fileStatusList = getFs().listStatus(new Path("/d1/d2")); - assertEquals("FileStatus should return files and directories", - 1, fileStatusList.length); + assertEquals(1, fileStatusList.length, "FileStatus should return files and directories"); expectedPaths = new ArrayList<>(); expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" + "key1"); for (FileStatus fileStatus : fileStatusList) { expectedPaths.remove(fileStatus.getPath().toString()); } - assertEquals("Failed to return the filestatus[]" + expectedPaths, - 0, expectedPaths.size()); + assertEquals(0, expectedPaths.size(), "Failed to return the filestatus[]" + expectedPaths); // level-2 key2 fileStatusList = getFs().listStatus(new Path("/d1/d2/key1")); - assertEquals("FileStatus should return files and directories", - 1, fileStatusList.length); + assertEquals(1, fileStatusList.length, "FileStatus should return files and directories"); expectedPaths = new ArrayList<>(); expectedPaths.add("o3fs://" + bucketName + "." 
+ volumeName + "/d1/d2/" + "key1"); for (FileStatus fileStatus : fileStatusList) { expectedPaths.remove(fileStatus.getPath().toString()); } - assertEquals("Failed to return the filestatus[]" + expectedPaths, - 0, expectedPaths.size()); + assertEquals(0, expectedPaths.size(), "Failed to return the filestatus[]" + expectedPaths); // invalid root key try { @@ -222,24 +189,21 @@ public void testListFilesRecursive() throws Exception { Path dir1Dir1Dir2Key1 = new Path("/d1/d1/d2/key1"); try (FSDataOutputStream outputStream = getFs().create(dir1Dir1Dir2Key1, false)) { - assertNotNull("Should be able to create file: " + dir1Dir1Dir2Key1, - outputStream); + assertNotNull(outputStream, "Should be able to create file: " + dir1Dir1Dir2Key1); } Path key1 = new Path("/key1"); try (FSDataOutputStream outputStream = getFs().create(key1, false)) { - assertNotNull("Should be able to create file: " + key1, - outputStream); + assertNotNull(outputStream, "Should be able to create file: " + key1); } Path key2 = new Path("/key2"); try (FSDataOutputStream outputStream = getFs().create(key2, false)) { - assertNotNull("Should be able to create file: key2", - outputStream); + assertNotNull(outputStream, "Should be able to create file: key2"); } Path dir1Dir2Dir1Dir2Key1 = new Path("/d1/d2/d1/d2/key1"); try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Dir1Dir2Key1, false)) { - assertNotNull("Should be able to create file: " - + dir1Dir2Dir1Dir2Key1, outputStream); + assertNotNull(outputStream, "Should be able to create file: " + + dir1Dir2Dir1Dir2Key1); } RemoteIterator fileStatusItr = getFs().listFiles( new Path("/"), true); @@ -256,10 +220,8 @@ public void testListFilesRecursive() throws Exception { expectedPaths.remove(status.getPath().toString()); actualCount++; } - assertEquals("Failed to get all the files: " + expectedPaths, - expectedFilesCount, actualCount); - assertEquals("Failed to get all the files: " + expectedPaths, 0, - expectedPaths.size()); + assertEquals(expectedFilesCount, actualCount, "Failed to get all the files: " + expectedPaths); + assertEquals(0, expectedPaths.size(), "Failed to get all the files: " + expectedPaths); // Recursive=false fileStatusItr = getFs().listFiles(new Path("/"), false); @@ -273,10 +235,8 @@ public void testListFilesRecursive() throws Exception { expectedPaths.remove(status.getPath().toString()); actualCount++; } - assertEquals("Failed to get all the files: " + expectedPaths, 0, - expectedPaths.size()); - assertEquals("Failed to get all the files: " + expectedPaths, - expectedFilesCount, actualCount); + assertEquals(0, expectedPaths.size(), "Failed to get all the files: " + expectedPaths); + assertEquals(expectedFilesCount, actualCount, "Failed to get all the files: " + expectedPaths); } /** @@ -431,8 +391,7 @@ public void testMultiLevelDirs() throws Exception { // reset metrics long numKeys = getCluster().getOzoneManager().getMetrics().getNumKeys(); getCluster().getOzoneManager().getMetrics().decNumKeys(numKeys); - assertEquals(0, - getCluster().getOzoneManager().getMetrics().getNumKeys()); + assertEquals(0, getCluster().getOzoneManager().getMetrics().getNumKeys()); // Op 1. create dir -> /d1/d2/d3/d4/ // Op 2. 
create dir -> /d1/d2/d3/d4/d5 @@ -444,7 +403,7 @@ public void testMultiLevelDirs() throws Exception { getCluster().getOzoneManager().getMetadataManager(); OmBucketInfo omBucketInfo = omMgr.getBucketTable() .get(omMgr.getBucketKey(getVolumeName(), getBucketName())); - assertNotNull("Failed to find bucketInfo", omBucketInfo); + assertNotNull(omBucketInfo, "Failed to find bucketInfo"); final long volumeId = omMgr.getVolumeId(getVolumeName()); final long bucketId = omMgr.getBucketId(getVolumeName(), getBucketName()); @@ -462,8 +421,7 @@ public void testMultiLevelDirs() throws Exception { verifyDirKey(volumeId, bucketId, d3ObjectID, "d4", "/d1/d2/d3/d4", dirKeys, omMgr); - assertEquals("Wrong OM numKeys metrics", 4, - getCluster().getOzoneManager().getMetrics().getNumKeys()); + assertEquals(4, getCluster().getOzoneManager().getMetrics().getNumKeys(), "Wrong OM numKeys metrics"); // create sub-dirs under same parent Path subDir5 = new Path("/d1/d2/d3/d4/d5"); @@ -476,15 +434,14 @@ public void testMultiLevelDirs() throws Exception { long d6ObjectID = verifyDirKey(volumeId, bucketId, d4ObjectID, "d6", "/d1/d2/d3/d4/d6", dirKeys, omMgr); - assertTrue( - "Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID - + "/d6] of same parent!", d5ObjectID != d6ObjectID); + assertTrue(d5ObjectID != d6ObjectID, "Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID + + "/d6] of same parent!"); - assertEquals("Wrong OM numKeys metrics", 6, - getCluster().getOzoneManager().getMetrics().getNumKeys()); + assertEquals(6, getCluster().getOzoneManager().getMetrics().getNumKeys(), "Wrong OM numKeys metrics"); } @Test + @Order(1) public void testCreateFile() throws Exception { // Op 1. create dir -> /d1/d2/d3/d4/ Path parent = new Path("/d1/d2/"); @@ -496,7 +453,7 @@ public void testCreateFile() throws Exception { getCluster().getOzoneManager().getMetadataManager(); OmBucketInfo omBucketInfo = omMgr.getBucketTable() .get(omMgr.getBucketKey(getVolumeName(), getBucketName())); - assertNotNull("Failed to find bucketInfo", omBucketInfo); + assertNotNull(omBucketInfo, "Failed to find bucketInfo"); ArrayList dirKeys = new ArrayList<>(); @@ -516,7 +473,7 @@ public void testCreateFile() throws Exception { outputStream.close(); OmKeyInfo omKeyInfo = omMgr.getKeyTable(getBucketLayout()).get(openFileKey); - assertNotNull("Invalid Key!", omKeyInfo); + assertNotNull(omKeyInfo, "Invalid Key!"); verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID); // wait for DB updates @@ -571,11 +528,10 @@ public void testFSDeleteLogWarnNoExist() throws Exception { private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName, long parentID) { - assertEquals("Wrong keyName", fileName, omKeyInfo.getKeyName()); - assertEquals("Wrong parentID", parentID, - omKeyInfo.getParentObjectID()); + assertEquals(fileName, omKeyInfo.getKeyName(), "Wrong keyName"); + assertEquals(parentID, omKeyInfo.getParentObjectID(), "Wrong parentID"); String dbKey = parentID + OzoneConsts.OM_KEY_PREFIX + fileName; - assertEquals("Wrong path format", dbKey, omKeyInfo.getPath()); + assertEquals(dbKey, omKeyInfo.getPath(), "Wrong path format"); } long verifyDirKey(long volumeId, long bucketId, long parentId, @@ -586,21 +542,13 @@ long verifyDirKey(long volumeId, long bucketId, long parentId, parentId + "/" + dirKey; dirKeys.add(dbKey); OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey); - assertNotNull("Failed to find " + absolutePath + - " using dbKey: " + dbKey, dirInfo); - assertEquals("Parent Id mismatches", 
parentId, - dirInfo.getParentObjectID()); - assertEquals("Mismatches directory name", dirKey, - dirInfo.getName()); - assertTrue("Mismatches directory creation time param", - dirInfo.getCreationTime() > 0); - assertEquals("Mismatches directory modification time param", - dirInfo.getCreationTime(), dirInfo.getModificationTime()); + assertNotNull(dirInfo, "Failed to find " + absolutePath + + " using dbKey: " + dbKey); + assertEquals(parentId, dirInfo.getParentObjectID(), "Parent Id mismatches"); + assertEquals(dirKey, dirInfo.getName(), "Mismatches directory name"); + assertTrue(dirInfo.getCreationTime() > 0, "Mismatches directory creation time param"); + assertEquals(dirInfo.getCreationTime(), dirInfo.getModificationTime()); return dirInfo.getObjectID(); } - @Override - public BucketLayout getBucketLayout() { - return BucketLayout.FILE_SYSTEM_OPTIMIZED; - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FS.java new file mode 100644 index 00000000000..5fdab6fe95d --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FS.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.junit.jupiter.api.TestInstance; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestO3FS extends AbstractOzoneFileSystemTest { + TestO3FS() { + super(false, false, BucketLayout.LEGACY); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSO.java new file mode 100644 index 00000000000..0d6be62b4fc --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSO.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.junit.jupiter.api.TestInstance; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestO3FSWithFSO extends AbstractOzoneFileSystemTestWithFSO { + TestO3FSWithFSO() { + super(false); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java new file mode 100644 index 00000000000..d616d08e328 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.junit.jupiter.api.TestInstance; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestO3FSWithFSOAndOMRatis extends AbstractOzoneFileSystemTestWithFSO { + TestO3FSWithFSOAndOMRatis() { + super(true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java new file mode 100644 index 00000000000..5fffd9df7f4 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.junit.jupiter.api.TestInstance; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestO3FSWithFSPaths extends AbstractOzoneFileSystemTest { + TestO3FSWithFSPaths() { + super(true, false, BucketLayout.LEGACY); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java new file mode 100644 index 00000000000..461961c3e73 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.junit.jupiter.api.TestInstance; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestO3FSWithFSPathsAndOMRatis extends AbstractOzoneFileSystemTest { + TestO3FSWithFSPathsAndOMRatis() { + super(true, true, BucketLayout.LEGACY); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java new file mode 100644 index 00000000000..a02f3812e04 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.junit.jupiter.api.TestInstance; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestO3FSWithOMRatis extends AbstractOzoneFileSystemTest { + TestO3FSWithOMRatis() { + super(false, true, BucketLayout.LEGACY); + } +} From d83f434274b9c2d3fe4b7bdcff0b1862a8291c7d Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 18 Dec 2023 23:17:40 -0800 Subject: [PATCH 02/28] HDDS-9423. Throw appropriate error messages when deleting a file in .snapshot path (#5814) --- .../java/org/apache/hadoop/ozone/OmUtils.java | 31 +++++++-- .../om/request/key/OMKeyDeleteRequest.java | 4 +- .../request/file/TestOMFileCreateRequest.java | 64 +++++++------------ .../request/key/TestOMKeyDeleteRequest.java | 40 +++++++++--- .../BasicRootedOzoneClientAdapterImpl.java | 7 +- .../ozone/shell/keys/DeleteKeyHandler.java | 9 +++ 6 files changed, 95 insertions(+), 60 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index babeb305487..f23a703bd0d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -629,15 +629,36 @@ public static void verifyKeyNameWithSnapshotReservedWord(String keyName) if (keyName.substring(OM_SNAPSHOT_INDICATOR.length()) .startsWith(OM_KEY_PREFIX)) { throw new OMException( - "Cannot create key under path reserved for " - + "snapshot: " + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX, + "Cannot create key under path reserved for snapshot: " + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX, OMException.ResultCodes.INVALID_KEY_NAME); } } else { - // We checked for startsWith OM_SNAPSHOT_INDICATOR and the length is + // We checked for startsWith OM_SNAPSHOT_INDICATOR, and the length is // the same, so it must be equal OM_SNAPSHOT_INDICATOR. - throw new OMException( - "Cannot create key with reserved name: " + OM_SNAPSHOT_INDICATOR, + throw new OMException("Cannot create key with reserved name: " + OM_SNAPSHOT_INDICATOR, + OMException.ResultCodes.INVALID_KEY_NAME); + } + } + } + + /** + * Verify if key name contains snapshot reserved word. + * This is similar to verifyKeyNameWithSnapshotReservedWord. The only difference is exception message. + */ + public static void verifyKeyNameWithSnapshotReservedWordForDeletion(String keyName) throws OMException { + if (keyName != null && + keyName.startsWith(OM_SNAPSHOT_INDICATOR)) { + if (keyName.length() > OM_SNAPSHOT_INDICATOR.length()) { + if (keyName.substring(OM_SNAPSHOT_INDICATOR.length()) + .startsWith(OM_KEY_PREFIX)) { + throw new OMException( + "Cannot delete key under path reserved for snapshot: " + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX, + OMException.ResultCodes.INVALID_KEY_NAME); + } + } else { + // We checked for startsWith OM_SNAPSHOT_INDICATOR, and the length is + // the same, so it must be equal OM_SNAPSHOT_INDICATOR. 
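// (OM_SNAPSHOT_INDICATOR is the reserved ".snapshot" name; a key named
// exactly ".snapshot" is rejected here, mirroring the create-path check in
// verifyKeyNameWithSnapshotReservedWord.)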
+ throw new OMException("Cannot delete key with reserved name: " + OM_SNAPSHOT_INDICATOR, OMException.ResultCodes.INVALID_KEY_NAME); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 9fefd70a2da..0998d001756 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -22,6 +22,7 @@ import java.nio.file.InvalidPathException; import java.util.Map; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -76,8 +77,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { Preconditions.checkNotNull(deleteKeyRequest); OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); - String keyPath = keyArgs.getKeyName(); + + OmUtils.verifyKeyNameWithSnapshotReservedWordForDeletion(keyPath); keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), keyPath, getBucketLayout()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index dbd2a80964b..0a7a352b382 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -23,8 +23,6 @@ import java.util.List; import java.util.UUID; import java.util.stream.Collectors; -import java.util.Map; -import java.util.HashMap; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.OzoneAcl; @@ -41,15 +39,12 @@ import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateFileRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS; @@ -481,40 +476,25 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, } } - @Test - public void 
testPreExecuteWithInvalidKeyPrefix() throws Exception { - Map invalidKeyScenarios = new HashMap() { - { - put(OM_SNAPSHOT_INDICATOR + "/" + keyName, - "Cannot create key under path reserved for snapshot: " - + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX); - put(OM_SNAPSHOT_INDICATOR + "/a/" + keyName, - "Cannot create key under path reserved for snapshot: " - + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX); - put(OM_SNAPSHOT_INDICATOR + "/a/b" + keyName, - "Cannot create key under path reserved for snapshot: " - + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX); - put(OM_SNAPSHOT_INDICATOR, - "Cannot create key with reserved name: " + OM_SNAPSHOT_INDICATOR); - } - }; - - for (Map.Entry entry : invalidKeyScenarios.entrySet()) { - String invalidKeyName = entry.getKey(); - String expectedErrorMessage = entry.getValue(); + @ParameterizedTest + @CsvSource(value = { + ".snapshot/keyName,Cannot create key under path reserved for snapshot: .snapshot/", + ".snapshot/a/keyName,Cannot create key under path reserved for snapshot: .snapshot/", + ".snapshot/a/b/keyName,Cannot create key under path reserved for snapshot: .snapshot/", + ".snapshot,Cannot create key with reserved name: .snapshot"}) + public void testPreExecuteWithInvalidKeyPrefix(String invalidKeyName, + String expectedErrorMessage) { - OMRequest omRequest = createFileRequest(volumeName, bucketName, - invalidKeyName, HddsProtos.ReplicationFactor.ONE, - HddsProtos.ReplicationType.RATIS, false, false); - - OMFileCreateRequest omFileCreateRequest = - getOMFileCreateRequest(omRequest); + OMRequest omRequest = createFileRequest(volumeName, bucketName, + invalidKeyName, HddsProtos.ReplicationFactor.ONE, + HddsProtos.ReplicationType.RATIS, false, false); - OMException ex = Assertions.assertThrows(OMException.class, - () -> omFileCreateRequest.preExecute(ozoneManager)); + OMFileCreateRequest omFileCreateRequest = + getOMFileCreateRequest(omRequest); - Assertions.assertTrue(ex.getMessage().contains(expectedErrorMessage)); - } + OMException ex = Assertions.assertThrows(OMException.class, + () -> omFileCreateRequest.preExecute(ozoneManager)); + Assertions.assertTrue(ex.getMessage().contains(expectedErrorMessage)); } protected void testNonRecursivePath(String key, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index fe84e3cfbe7..907022ceddb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -20,6 +20,7 @@ import java.util.UUID; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Assertions; @@ -28,21 +29,36 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; /** * Tests OmKeyDelete request. */ public class TestOMKeyDeleteRequest extends TestOMKeyRequest { - @Test - public void testPreExecute() throws Exception { - doPreExecute(createDeleteKeyRequest()); + @ParameterizedTest + @ValueSource(strings = {"keyName", "a/b/keyName", "a/.snapshot/keyName", "a.snapshot/b/keyName"}) + public void testPreExecute(String testKeyName) throws Exception { + doPreExecute(createDeleteKeyRequest(testKeyName)); + } + + @ParameterizedTest + @CsvSource(value = {".snapshot,Cannot delete key with reserved name: .snapshot", + ".snapshot/snapName,Cannot delete key under path reserved for snapshot: .snapshot/", + ".snapshot/snapName/keyName,Cannot delete key under path reserved for snapshot: .snapshot/"}) + public void testPreExecuteFailure(String testKeyName, + String expectedExceptionMessage) { + OMKeyDeleteRequest deleteKeyRequest = + getOmKeyDeleteRequest(createDeleteKeyRequest(testKeyName)); + OMException omException = Assertions.assertThrows(OMException.class, + () -> deleteKeyRequest.preExecute(ozoneManager)); + Assertions.assertEquals(expectedExceptionMessage, omException.getMessage()); + Assertions.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, omException.getResult()); } @Test @@ -154,8 +170,12 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { * @return OMRequest */ private OMRequest createDeleteKeyRequest() { + return createDeleteKeyRequest(keyName); + } + + private OMRequest createDeleteKeyRequest(String testKeyName) { KeyArgs keyArgs = KeyArgs.newBuilder().setBucketName(bucketName) - .setVolumeName(volumeName).setKeyName(keyName).build(); + .setVolumeName(volumeName).setKeyName(testKeyName).build(); DeleteKeyRequest deleteKeyRequest = DeleteKeyRequest.newBuilder().setKeyArgs(keyArgs).build(); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index e565c2bedf3..193e080f0e0 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.PathPermissionException; import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -545,13 +546,15 @@ public boolean deleteObject(String path, boolean recursive) bucket.deleteDirectory(keyName, recursive); return true; } catch (OMException ome) { - LOG.error("delete key failed {}", ome.getMessage()); + LOG.error("Delete key failed. 
{}", ome.getMessage()); if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) { throw new PathIsNotEmptyDirectoryException(ome.getMessage()); + } else if (OMException.ResultCodes.INVALID_KEY_NAME == ome.getResult()) { + throw new PathPermissionException(ome.getMessage()); } return false; } catch (IOException ioe) { - LOG.error("delete key failed " + ioe.getMessage()); + LOG.error("Delete key failed. {}", ioe.getMessage()); return false; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java index 5e56cda4780..d1a6a4e156f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java @@ -19,11 +19,13 @@ package org.apache.hadoop.ozone.shell.keys; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.om.OMConfigKeys; @@ -59,6 +61,13 @@ protected void execute(OzoneClient client, OzoneAddress address) OzoneBucket bucket = vol.getBucket(bucketName); String keyName = address.getKeyName(); + try { + OmUtils.verifyKeyNameWithSnapshotReservedWordForDeletion(keyName); + } catch (OMException omException) { + out().printf("Operation not permitted: %s %n", omException.getMessage()); + return; + } + if (bucket.getBucketLayout().isFileSystemOptimized()) { // Handle FSO delete key which supports trash also deleteFSOKey(bucket, keyName); From 6b86d93e1bb0e6333fae8b8a979c61251f8c5d15 Mon Sep 17 00:00:00 2001 From: Raju Balpande <146973984+raju-balpande@users.noreply.github.com> Date: Tue, 19 Dec 2023 14:10:26 +0530 Subject: [PATCH 03/28] HDDS-9809. 
Migrate assertions in integration tests to JUnit5 (#5815) --- .../org/apache/hadoop/fs/ozone/TestHSync.java | 9 +- .../scm/TestSCMDatanodeProtocolServer.java | 9 +- .../hdds/scm/TestSCMInstallSnapshot.java | 30 +++-- .../TestContainerStateManagerIntegration.java | 89 +++++++------ .../scm/pipeline/TestLeaderChoosePolicy.java | 15 ++- .../hdds/scm/pipeline/TestPipelineClose.java | 19 +-- .../TestRatisPipelineCreateAndDestroy.java | 24 ++-- .../scm/storage/TestContainerCommandsEC.java | 59 ++++----- .../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 45 ++++--- .../hdds/upgrade/TestHddsUpgradeUtils.java | 56 ++++---- .../apache/hadoop/ozone/OzoneTestUtils.java | 10 +- .../hadoop/ozone/TestDelegationToken.java | 23 ++-- .../TestContainerStateMachineFailures.java | 103 +++++++-------- .../rpc/TestFailureHandlingByClient.java | 87 ++++++------- .../rpc/TestOzoneRpcClientWithRatis.java | 33 ++--- .../ozone/client/rpc/TestWatchForCommit.java | 82 ++++++------ .../hadoop/ozone/container/TestHelper.java | 28 ++-- .../commandhandler/TestBlockDeletion.java | 3 +- .../TestCloseContainerByPipeline.java | 40 +++--- .../ozoneimpl/TestOzoneContainer.java | 121 +++++++++--------- .../container/server/TestContainerServer.java | 16 +-- .../ozone/freon/TestRandomKeyGenerator.java | 55 ++++---- .../ozone/om/TestAddRemoveOzoneManager.java | 60 +++++---- .../ozone/om/TestOMDbCheckpointServlet.java | 5 +- ...estReconInsightsForDeletedDirectories.java | 36 +++--- .../ozone/scm/TestFailoverWithSCMHA.java | 44 ++++--- ...estSCMContainerPlacementPolicyMetrics.java | 10 +- .../scm/TestSCMInstallSnapshotWithHA.java | 32 ++--- .../hadoop/ozone/shell/TestOzoneShellHA.java | 5 +- 29 files changed, 574 insertions(+), 574 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index b313aa80fb5..559b8da4982 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -68,7 +68,6 @@ import org.apache.hadoop.util.Time; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -84,7 +83,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; -import static org.junit.Assert.assertThrows; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -187,8 +187,7 @@ public void testKeyHSyncThenClose() throws Exception { RepeatedOmKeyInfo val = kv.getValue(); LOG.error("Unexpected deletedTable entry: key = {}, val = {}", key, val); - Assertions.fail("deletedTable should not have such entry. key = " + - key); + fail("deletedTable should not have such entry. 
key = " + key); } } } @@ -332,7 +331,7 @@ static void runTestHSync(FileSystem fs, Path file, int offset = 0; try (FSDataInputStream in = fs.open(file)) { final long skipped = in.skip(length); - Assertions.assertEquals(length, skipped); + assertEquals(length, skipped); for (; ;) { final int n = in.read(buffer, 0, buffer.length); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java index 4e329ad305e..fee608c05b9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDatanodeProtocolServer.java @@ -20,13 +20,14 @@ import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.junit.Assert; import org.junit.jupiter.api.Test; import org.mockito.Mockito; import java.io.IOException; import java.util.concurrent.TimeoutException; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test for StorageContainerDatanodeProtocolProtos. */ @@ -44,9 +45,9 @@ public void ensureTermAndDeadlineOnCommands() StorageContainerDatanodeProtocolProtos.SCMCommandProto proto = SCMDatanodeProtocolServer.getCommandResponse(command, scm); - Assert.assertEquals(StorageContainerDatanodeProtocolProtos.SCMCommandProto + assertEquals(StorageContainerDatanodeProtocolProtos.SCMCommandProto .Type.replicateContainerCommand, proto.getCommandType()); - Assert.assertEquals(5L, proto.getTerm()); - Assert.assertEquals(1234L, proto.getDeadlineMsSinceEpoch()); + assertEquals(5L, proto.getTerm()); + assertEquals(1234L, proto.getDeadlineMsSinceEpoch()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index a37d3c47564..53f07abc91e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -39,7 +39,6 @@ import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; -import org.junit.Assert; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -52,6 +51,11 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; /** * Class to test install snapshot feature for SCM HA. 
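The change that repeats throughout this patch is mechanical: the JUnit 4 Assert class goes away and the JUnit 5 methods come in as static imports, so call sites keep the same (expected, actual) argument order but lose the Assert. prefix. A minimal sketch of the before/after shape, with computeSum() as a hypothetical call that is not taken from this patch:

    // JUnit 4 style, as removed above
    import org.junit.Assert;
    Assert.assertEquals(4, computeSum());

    // JUnit 5 style, as introduced above
    import static org.junit.jupiter.api.Assertions.assertEquals;
    assertEquals(4, computeSum());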
@@ -115,9 +119,9 @@ private DBCheckpoint downloadSnapshot() throws Exception { String snapshotDir = conf.get(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_DIR); final File[] files = FileUtil.listFiles(provider.getScmSnapshotDir()); - Assert.assertTrue(files[0].getName().startsWith( + assertTrue(files[0].getName().startsWith( OzoneConsts.SCM_DB_NAME + "-" + scmNodeDetails.getNodeId())); - Assert.assertTrue(files[0].getAbsolutePath().startsWith(snapshotDir)); + assertTrue(files[0].getAbsolutePath().startsWith(snapshotDir)); return checkpoint; } @@ -133,7 +137,7 @@ public void testInstallCheckPoint() throws Exception { // Hack the transaction index in the checkpoint so as to ensure the // checkpointed transaction index is higher than when it was downloaded // from. - Assert.assertNotNull(db); + assertNotNull(db); HAUtils.getTransactionInfoTable(db, new SCMDBDefinition()) .put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.builder() .setCurrentTerm(10).setTransactionIndex(100).build()); @@ -144,9 +148,9 @@ public void testInstallCheckPoint() throws Exception { scm.getPipelineManager().getPipelines().get(0).getId(); scm.getScmMetadataStore().getPipelineTable().delete(pipelineID); scm.getContainerManager().deleteContainer(cid); - Assert.assertNull( + assertNull( scm.getScmMetadataStore().getPipelineTable().get(pipelineID)); - Assert.assertFalse(scm.getContainerManager().containerExist(cid)); + assertFalse(scm.getContainerManager().containerExist(cid)); SCMStateMachine sm = scm.getScmHAManager().getRatisServer().getSCMStateMachine(); @@ -154,16 +158,14 @@ public void testInstallCheckPoint() throws Exception { sm.setInstallingSnapshotData(checkpoint, null); sm.reinitialize(); - Assert.assertNotNull( - scm.getScmMetadataStore().getPipelineTable().get(pipelineID)); - Assert.assertNotNull( - scm.getScmMetadataStore().getContainerTable().get(cid)); - Assert.assertTrue(scm.getPipelineManager().containsPipeline(pipelineID)); - Assert.assertTrue(scm.getContainerManager().containerExist(cid)); - Assert.assertEquals(100, scm.getScmMetadataStore(). + assertNotNull(scm.getScmMetadataStore().getPipelineTable().get(pipelineID)); + assertNotNull(scm.getScmMetadataStore().getContainerTable().get(cid)); + assertTrue(scm.getPipelineManager().containsPipeline(pipelineID)); + assertTrue(scm.getContainerManager().containerExist(cid)); + assertEquals(100, scm.getScmMetadataStore(). 
getTransactionInfoTable().get(OzoneConsts.TRANSACTION_INFO_KEY) .getTransactionIndex()); - Assert.assertEquals(100, + assertEquals(100, scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo() .getTermIndex().getIndex()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java index d7f11566909..72d1ebf4381 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.jupiter.api.AfterEach; -import org.junit.Assert; import org.junit.jupiter.api.BeforeEach; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.Test; @@ -51,6 +50,12 @@ import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests for ContainerStateManager. @@ -100,21 +105,21 @@ public void testAllocateContainer() throws IOException { ContainerInfo info = containerManager .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE, container1.getPipeline()); - Assert.assertNotEquals(container1.getContainerInfo().getContainerID(), + assertNotEquals(container1.getContainerInfo().getContainerID(), info.getContainerID()); - Assert.assertEquals(OzoneConsts.OZONE, info.getOwner()); - Assert.assertEquals(SCMTestUtils.getReplicationType(conf), + assertEquals(OzoneConsts.OZONE, info.getOwner()); + assertEquals(SCMTestUtils.getReplicationType(conf), info.getReplicationType()); - Assert.assertEquals(SCMTestUtils.getReplicationFactor(conf), + assertEquals(SCMTestUtils.getReplicationFactor(conf), ReplicationConfig.getLegacyFactor(info.getReplicationConfig())); - Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, info.getState()); + assertEquals(HddsProtos.LifeCycleState.OPEN, info.getState()); // Check there are two containers in ALLOCATED state after allocation ContainerWithPipeline container2 = scm.getClientProtocolServer() .allocateContainer( SCMTestUtils.getReplicationType(conf), SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE); - Assert.assertNotEquals(container1.getContainerInfo().getContainerID(), + assertNotEquals(container1.getContainerInfo().getContainerID(), container2.getContainerInfo().getContainerID()); } @@ -128,7 +133,7 @@ public void testAllocateContainerWithDifferentOwner() throws IOException { ContainerInfo info = containerManager .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE, container1.getPipeline()); - Assert.assertNotNull(info); + assertNotNull(info); String newContainerOwner = "OZONE_NEW"; ContainerWithPipeline container2 = scm.getClientProtocolServer() @@ -137,9 +142,9 @@ public void testAllocateContainerWithDifferentOwner() throws 
IOException { ContainerInfo info2 = containerManager .getMatchingContainer(OzoneConsts.GB * 3, newContainerOwner, container1.getPipeline()); - Assert.assertNotNull(info2); + assertNotNull(info2); - Assert.assertNotEquals(info.containerID(), info2.containerID()); + assertNotEquals(info.containerID(), info2.containerID()); } @Test @@ -179,7 +184,7 @@ public void testContainerStateManagerRestart() throws IOException, .filter(info -> info.getState() == HddsProtos.LifeCycleState.OPEN) .count(); - Assert.assertEquals(5, matchCount); + assertEquals(5, matchCount); matchCount = result.stream() .filter(info -> info.getOwner().equals(OzoneConsts.OZONE)) @@ -191,7 +196,7 @@ public void testContainerStateManagerRestart() throws IOException, .filter(info -> info.getState() == HddsProtos.LifeCycleState.CLOSING) .count(); - Assert.assertEquals(5, matchCount); + assertEquals(5, matchCount); } @Test @@ -209,7 +214,7 @@ public void testGetMatchingContainer() throws IOException { ContainerInfo info = containerManager .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE, container1.getPipeline()); - Assert.assertTrue(info.getContainerID() > cid); + assertTrue(info.getContainerID() > cid); cid = info.getContainerID(); } @@ -218,7 +223,7 @@ public void testGetMatchingContainer() throws IOException { ContainerInfo info = containerManager .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE, container1.getPipeline()); - Assert.assertEquals(container1.getContainerInfo().getContainerID(), + assertEquals(container1.getContainerInfo().getContainerID(), info.getContainerID()); } @@ -248,7 +253,7 @@ public void testGetMatchingContainerMultipleThreads() // make sure pipeline has has numContainerPerOwnerInPipeline number of // containers. - Assert.assertEquals(scm.getPipelineManager() + assertEquals(scm.getPipelineManager() .getNumberOfContainers(container1.getPipeline().getId()), numContainerPerOwnerInPipeline); Thread.sleep(5000); @@ -259,7 +264,7 @@ public void testGetMatchingContainerMultipleThreads() // TODO: #CLUTIL Look at the division of block allocations in different // containers. LOG.error("Total allocated block = " + matchedCount); - Assert.assertTrue(matchedCount <= + assertTrue(matchedCount <= numBlockAllocates / container2MatchedCount.size() + threshold && matchedCount >= numBlockAllocates / container2MatchedCount.size() - threshold); @@ -272,7 +277,7 @@ public void testUpdateContainerState() throws IOException, Set containerList = containerStateManager .getContainerIDs(HddsProtos.LifeCycleState.OPEN); int containers = containerList == null ? 
0 : containerList.size(); - Assert.assertEquals(0, containers); + assertEquals(0, containers); // Allocate container1 and update its state from // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED @@ -282,35 +287,35 @@ public void testUpdateContainerState() throws IOException, SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE); containerList = containerStateManager .getContainerIDs(HddsProtos.LifeCycleState.OPEN); - Assert.assertEquals(1, containerList.size()); + assertEquals(1, containerList.size()); containerManager .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.FINALIZE); containerList = containerStateManager .getContainerIDs(HddsProtos.LifeCycleState.CLOSING); - Assert.assertEquals(1, containerList.size()); + assertEquals(1, containerList.size()); containerManager .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CLOSE); containerList = containerStateManager .getContainerIDs(HddsProtos.LifeCycleState.CLOSED); - Assert.assertEquals(1, containerList.size()); + assertEquals(1, containerList.size()); containerManager .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.DELETE); containerList = containerStateManager .getContainerIDs(HddsProtos.LifeCycleState.DELETING); - Assert.assertEquals(1, containerList.size()); + assertEquals(1, containerList.size()); containerManager .updateContainerState(container1.getContainerInfo().containerID(), HddsProtos.LifeCycleEvent.CLEANUP); containerList = containerStateManager .getContainerIDs(HddsProtos.LifeCycleState.DELETED); - Assert.assertEquals(1, containerList.size()); + assertEquals(1, containerList.size()); // Allocate container1 and update its state from // OPEN -> CLOSING -> CLOSED @@ -329,7 +334,7 @@ public void testUpdateContainerState() throws IOException, HddsProtos.LifeCycleEvent.CLOSE); containerList = containerStateManager .getContainerIDs(HddsProtos.LifeCycleState.CLOSED); - Assert.assertEquals(1, containerList.size()); + assertEquals(1, containerList.size()); } @@ -346,7 +351,7 @@ public void testReplicaMap() throws Exception { ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong()); Set replicaSet = containerStateManager.getContainerReplicas(containerID); - Assert.assertNull(replicaSet); + assertNull(replicaSet); ContainerWithPipeline container = scm.getClientProtocolServer() .allocateContainer( @@ -369,44 +374,44 @@ public void testReplicaMap() throws Exception { containerStateManager.updateContainerReplica(id, replicaOne); containerStateManager.updateContainerReplica(id, replicaTwo); replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); + assertEquals(2, replicaSet.size()); + assertTrue(replicaSet.contains(replicaOne)); + assertTrue(replicaSet.contains(replicaTwo)); // Test 3: Remove one replica node and then test containerStateManager.removeContainerReplica(id, replicaOne); replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(1, replicaSet.size()); - Assert.assertFalse(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); + assertEquals(1, replicaSet.size()); + assertFalse(replicaSet.contains(replicaOne)); + assertTrue(replicaSet.contains(replicaTwo)); // Test 3: Remove second replica node and then test containerStateManager.removeContainerReplica(id, replicaTwo); 
replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(0, replicaSet.size()); - Assert.assertFalse(replicaSet.contains(replicaOne)); - Assert.assertFalse(replicaSet.contains(replicaTwo)); + assertEquals(0, replicaSet.size()); + assertFalse(replicaSet.contains(replicaOne)); + assertFalse(replicaSet.contains(replicaTwo)); // Test 4: Re-insert dn1 containerStateManager.updateContainerReplica(id, replicaOne); replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(1, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertFalse(replicaSet.contains(replicaTwo)); + assertEquals(1, replicaSet.size()); + assertTrue(replicaSet.contains(replicaOne)); + assertFalse(replicaSet.contains(replicaTwo)); // Re-insert dn2 containerStateManager.updateContainerReplica(id, replicaTwo); replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); + assertEquals(2, replicaSet.size()); + assertTrue(replicaSet.contains(replicaOne)); + assertTrue(replicaSet.contains(replicaTwo)); // Re-insert dn1 containerStateManager.updateContainerReplica(id, replicaOne); replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); + assertEquals(2, replicaSet.size()); + assertTrue(replicaSet.contains(replicaOne)); + assertTrue(replicaSet.contains(replicaTwo)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index bcd00d07934..a695038d444 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -25,7 +25,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; import org.apache.ozone.test.tag.Unhealthy; -import org.junit.Assert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -41,6 +40,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests for LeaderChoosePolicy. @@ -93,9 +94,9 @@ private void checkLeaderBalance(int dnNum, int leaderNumOfEachDn) leaderCount.put(leader, leaderCount.get(leader) + 1); } - Assert.assertTrue(leaderCount.size() == dnNum); + assertTrue(leaderCount.size() == dnNum); for (Map.Entry entry: leaderCount.entrySet()) { - Assert.assertTrue(leaderCount.get(entry.getKey()) == leaderNumOfEachDn); + assertTrue(leaderCount.get(entry.getKey()) == leaderNumOfEachDn); } } @@ -114,7 +115,7 @@ public void testRestoreSuggestedLeader() throws Exception { // make sure two pipelines are created waitForPipelines(pipelineNum); // No Factor ONE pipeline is auto created. 
- Assert.assertEquals(0, + assertEquals(0, pipelineManager.getPipelines(RatisReplicationConfig.getInstance( ReplicationFactor.ONE)).size()); @@ -132,7 +133,7 @@ public void testRestoreSuggestedLeader() throws Exception { cluster.getStorageContainerManager().getPipelineManager() .getPipelines(); - Assert.assertEquals( + assertEquals( pipelinesBeforeRestart.size(), pipelinesAfterRestart.size()); for (Pipeline p : pipelinesBeforeRestart) { @@ -144,7 +145,7 @@ public void testRestoreSuggestedLeader() throws Exception { } } - Assert.assertTrue(equal); + assertTrue(equal); } } @@ -163,7 +164,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception { // make sure pipelines are created waitForPipelines(pipelineNum); // No Factor ONE pipeline is auto created. - Assert.assertEquals(0, pipelineManager.getPipelines( + assertEquals(0, pipelineManager.getPipelines( RatisReplicationConfig.getInstance( ReplicationFactor.ONE)).size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index b823f15798f..6c66ecf3185 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -46,7 +46,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.protocol.RaftGroupId; import org.junit.jupiter.api.AfterEach; -import org.junit.Assert; import org.junit.jupiter.api.BeforeEach; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.Test; @@ -62,6 +61,9 @@ import java.util.concurrent.TimeoutException; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests for Pipeline Closing. 
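One ordering trap in this migration shows up in the TestPipelineClose hunks below: JUnit 4 takes the failure message as the first argument, Assert.assertTrue(String message, boolean condition), whereas JUnit 5 takes it last, assertTrue(boolean condition, String message), so the arguments must be swapped rather than only re-imported. A minimal sketch, with pipelineExists as a hypothetical flag:

    // JUnit 4: message first
    Assert.assertTrue("pipeline should exist", pipelineExists);
    // JUnit 5: message last
    assertTrue(pipelineExists, "pipeline should exist");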
@@ -126,8 +128,8 @@ public void testPipelineCloseWithClosedContainer() throws IOException, .getContainersInPipeline(ratisContainer.getPipeline().getId()); ContainerID cId = ratisContainer.getContainerInfo().containerID(); - Assert.assertEquals(1, set.size()); - set.forEach(containerID -> Assert.assertEquals(containerID, cId)); + assertEquals(1, set.size()); + set.forEach(containerID -> assertEquals(containerID, cId)); // Now close the container and it should not show up while fetching // containers by pipeline @@ -138,13 +140,13 @@ public void testPipelineCloseWithClosedContainer() throws IOException, Set setClosed = pipelineManager .getContainersInPipeline(ratisContainer.getPipeline().getId()); - Assert.assertEquals(0, setClosed.size()); + assertEquals(0, setClosed.size()); pipelineManager.closePipeline(ratisContainer.getPipeline().getId()); pipelineManager.deletePipeline(ratisContainer.getPipeline().getId()); for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) { // Assert that the pipeline has been removed from Node2PipelineMap as well - Assert.assertFalse(scm.getScmNodeManager().getPipelines(dn) + assertFalse(scm.getScmNodeManager().getPipelines(dn) .contains(ratisContainer.getPipeline().getId())); } } @@ -154,7 +156,7 @@ public void testPipelineCloseWithOpenContainer() throws IOException, TimeoutException, InterruptedException { Set setOpen = pipelineManager.getContainersInPipeline( ratisContainer.getPipeline().getId()); - Assert.assertEquals(1, setOpen.size()); + assertEquals(1, setOpen.size()); pipelineManager .closePipeline(ratisContainer.getPipeline(), false); @@ -230,7 +232,7 @@ public void testPipelineCloseWithLogFailure() try { pipelineManager.getPipeline(openPipeline.getId()); } catch (PipelineNotFoundException e) { - Assert.assertTrue("pipeline should exist", false); + assertTrue(false, "pipeline should exist"); } DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0); @@ -277,8 +279,7 @@ private boolean verifyCloseForPipeline(Pipeline pipeline, } } - Assert.assertTrue("SCM did not receive a Close action for the Pipeline", - found); + assertTrue(found, "SCM did not receive a Close action for the Pipeline"); return found; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 7e88f45025c..22ec99d2a6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -30,11 +30,10 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; -import org.junit.Assert; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; @@ -81,7 +80,7 @@ public void testAutomaticPipelineCreationOnPipelineDestroy() init(numOfDatanodes); // make sure two pipelines are created waitForPipelines(2); - Assert.assertEquals(numOfDatanodes, pipelineManager.getPipelines( + Assertions.assertEquals(numOfDatanodes, pipelineManager.getPipelines( RatisReplicationConfig.getInstance( ReplicationFactor.ONE)).size()); @@ -103,7 +102,7 @@ public 
void testAutomaticPipelineCreationDisablingFactorONE() // make sure two pipelines are created waitForPipelines(2); // No Factor ONE pipeline is auto created. - Assert.assertEquals(0, pipelineManager.getPipelines( + Assertions.assertEquals(0, pipelineManager.getPipelines( RatisReplicationConfig.getInstance( ReplicationFactor.ONE)).size()); @@ -140,17 +139,12 @@ public void testPipelineCreationOnNodeRestart() throws Exception { 100, 10 * 1000); // try creating another pipeline now - try { - pipelineManager.createPipeline(RatisReplicationConfig.getInstance( - ReplicationFactor.THREE)); - Assert.fail("pipeline creation should fail after shutting down pipeline"); - } catch (IOException ioe) { - // As now all datanodes are shutdown, they move to stale state, there - // will be no sufficient datanodes to create the pipeline. - Assert.assertTrue(ioe instanceof SCMException); - Assert.assertEquals(SCMException.ResultCodes.FAILED_TO_FIND_HEALTHY_NODES, - ((SCMException) ioe).getResult()); - } + SCMException ioe = Assertions.assertThrows(SCMException.class, () -> + pipelineManager.createPipeline(RatisReplicationConfig.getInstance( + ReplicationFactor.THREE)), + "pipeline creation should fail after shutting down pipeline"); + Assertions.assertEquals( + SCMException.ResultCodes.FAILED_TO_FIND_HEALTHY_NODES, ioe.getResult()); // make sure pipelines is destroyed waitForPipelines(0); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index f2fe3fa31a1..8ab74422516 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -77,10 +77,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.ozone.test.GenericTestUtils; -import org.junit.Assert; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -114,6 +112,10 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE; import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * This class tests container commands on EC containers. 
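The TestRatisPipelineCreateAndDestroy hunk above also retires the try/fail/catch idiom in favor of JUnit 5's assertThrows, which asserts the exception type and returns the instance for follow-up checks. A minimal sketch of that rewrite, where doWork() and EXPECTED_CODE are placeholders rather than names from this patch:

    // before: try/fail/catch
    try {
      doWork();
      Assert.fail("expected an SCMException");
    } catch (SCMException e) {
      Assert.assertEquals(EXPECTED_CODE, e.getResult());
    }

    // after: assertThrows hands back the thrown exception
    SCMException e = assertThrows(SCMException.class, () -> doWork(),
        "expected an SCMException");
    assertEquals(EXPECTED_CODE, e.getResult());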
@@ -382,8 +384,8 @@ public void testOrphanBlock() throws Exception { .filter(bd -> bd.getBlockID().getLocalID() == localID) .count(); - Assert.assertEquals(0L, count); - Assert.assertEquals(0, response.getBlockDataList().size()); + assertEquals(0L, count); + assertEquals(0, response.getBlockDataList().size()); } } @@ -410,25 +412,24 @@ public void testListBlock() throws Exception { .map(expectedChunksFunc::apply).sum(); if (minNumExpectedBlocks == 0) { final int j = i; - Throwable t = Assertions.assertThrows(StorageContainerException.class, + Throwable t = assertThrows(StorageContainerException.class, () -> ContainerProtocolCalls .listBlock(clients.get(j), containerID, null, minNumExpectedBlocks + 1, containerToken)); - Assertions - .assertEquals("ContainerID " + containerID + " does not exist", + assertEquals("ContainerID " + containerID + " does not exist", t.getMessage()); continue; } ListBlockResponseProto response = ContainerProtocolCalls .listBlock(clients.get(i), containerID, null, Integer.MAX_VALUE, containerToken); - Assertions.assertTrue( + assertTrue( minNumExpectedBlocks <= response.getBlockDataList().stream().filter( k -> k.getChunksCount() > 0 && k.getChunks(0).getLen() > 0) .collect(Collectors.toList()).size(), "blocks count should be same or more than min expected" + " blocks count on DN " + i); - Assertions.assertTrue( + assertTrue( minNumExpectedChunks <= response.getBlockDataList().stream() .mapToInt(BlockData::getChunksCount).sum(), "chunks count should be same or more than min expected" + @@ -492,10 +493,10 @@ public void testCreateRecoveryContainer() throws Exception { ContainerProtos.ReadContainerResponseProto readContainerResponseProto = ContainerProtocolCalls.readContainer(dnClient, container.containerID().getProtobuf().getId(), encodedToken); - Assert.assertEquals(ContainerProtos.ContainerDataProto.State.RECOVERING, + assertEquals(ContainerProtos.ContainerDataProto.State.RECOVERING, readContainerResponseProto.getContainerData().getState()); // Container at SCM should be still in closed state. 
- Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED, + assertEquals(HddsProtos.LifeCycleState.CLOSED, scm.getContainerManager().getContainerStateManager() .getContainer(container.containerID()).getState()); // close container call @@ -505,7 +506,7 @@ public void testCreateRecoveryContainer() throws Exception { readContainerResponseProto = ContainerProtocolCalls .readContainer(dnClient, container.containerID().getProtobuf().getId(), encodedToken); - Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, + assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, readContainerResponseProto.getContainerData().getState()); ContainerProtos.ReadChunkResponseProto readChunkResponseProto = ContainerProtocolCalls.readChunk(dnClient, @@ -514,10 +515,10 @@ public void testCreateRecoveryContainer() throws Exception { ByteBuffer[] readOnlyByteBuffersArray = BufferUtils .getReadOnlyByteBuffersArray( readChunkResponseProto.getDataBuffers().getBuffersList()); - Assert.assertEquals(readOnlyByteBuffersArray[0].limit(), data.length); + assertEquals(readOnlyByteBuffersArray[0].limit(), data.length); byte[] readBuff = new byte[readOnlyByteBuffersArray[0].limit()]; readOnlyByteBuffersArray[0].get(readBuff, 0, readBuff.length); - Assert.assertArrayEquals(data, readBuff); + assertArrayEquals(data, readBuff); } finally { xceiverClientManager.releaseClient(dnClient, false); } @@ -563,7 +564,7 @@ public void testCreateRecoveryContainerAfterDNRestart() throws Exception { cluster.restartHddsDatanode(targetDN, true); // Recovering container state after DN restart should be UNHEALTHY. - Assert.assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY, + assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY, cluster.getHddsDatanode(targetDN) .getDatanodeStateMachine() .getContainer() @@ -590,7 +591,7 @@ public void testCreateRecoveryContainerAfterDNRestart() throws Exception { try { dnClient.sendCommand(writeChunkRequest); } catch (StorageContainerException e) { - Assert.assertEquals(CONTAINER_UNHEALTHY, e.getResult()); + assertEquals(CONTAINER_UNHEALTHY, e.getResult()); } } finally { @@ -635,7 +636,7 @@ static Stream> recoverableMissingIndexes() { @Test public void testECReconstructionCoordinatorWithMissingIndexes135() { InsufficientLocationsException exception = - Assert.assertThrows(InsufficientLocationsException.class, () -> { + assertThrows(InsufficientLocationsException.class, () -> { testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3); }); @@ -643,7 +644,7 @@ public void testECReconstructionCoordinatorWithMissingIndexes135() { "There are insufficient datanodes to read the EC block"; String actualMessage = exception.getMessage(); - Assert.assertEquals(expectedMessage, actualMessage); + assertEquals(expectedMessage, actualMessage); } private void testECReconstructionCoordinator(List missingIndexes, @@ -707,7 +708,7 @@ private void testECReconstructionCoordinator(List missingIndexes, } } - Assert.assertEquals(missingIndexes.size(), targetNodes.size()); + assertEquals(missingIndexes.size(), targetNodes.size()); List blockDataArrList = new ArrayList<>(); @@ -766,7 +767,7 @@ private void testECReconstructionCoordinator(List missingIndexes, .listBlock(conID, newTargetPipeline.getFirstNode(), (ECReplicationConfig) newTargetPipeline .getReplicationConfig(), cToken); - Assert.assertEquals(blockDataArrList.get(i).length, + assertEquals(blockDataArrList.get(i).length, reconstructedBlockData.length); checkBlockData(blockDataArrList.get(i), reconstructedBlockData); XceiverClientSpi 
client = xceiverClientManager.acquireClient( @@ -776,14 +777,14 @@ private void testECReconstructionCoordinator(List missingIndexes, ContainerProtocolCalls.readContainer( client, conID, cToken.encodeToUrlString()); - Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, + assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, readContainerResponse.getContainerData().getState()); } finally { xceiverClientManager.releaseClient(client, false); } i++; } - Assertions.assertEquals(metrics.getReconstructionTotal(), 1L); + assertEquals(1L, metrics.getReconstructionTotal()); } } } @@ -796,7 +797,7 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket, try (OzoneOutputStream out = bucket.createKey(keyString, 4096, new ECReplicationConfig(3, 2, EcCodec.RS, EC_CHUNK_SIZE), new HashMap<>())) { - Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream); + assertTrue(out.getOutputStream() instanceof KeyOutputStream); for (int i = 0; i < numChunks; i++) { out.write(inputChunks[i]); } @@ -856,7 +857,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() MockDatanodeDetails.randomDatanodeDetails(); targetNodeMap.put(3, invalidTargetNode); - Assert.assertThrows(IOException.class, () -> { + assertThrows(IOException.class, () -> { try (ECReconstructionCoordinator coordinator = new ECReconstructionCoordinator(config, certClient, secretKeyClient, @@ -868,14 +869,14 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() }); final DatanodeDetails targetDNToCheckContainerCLeaned = goodTargetNode; StorageContainerException ex = - Assert.assertThrows(StorageContainerException.class, () -> { + assertThrows(StorageContainerException.class, () -> { try (ECContainerOperationClient client = new ECContainerOperationClient(config, certClient)) { client.listBlock(conID, targetDNToCheckContainerCLeaned, new ECReplicationConfig(3, 2), cToken); } }); - Assert.assertEquals("ContainerID 1 does not exist", ex.getMessage()); + assertEquals("ContainerID 1 does not exist", ex.getMessage()); } private void closeContainer(long conID) @@ -905,7 +906,7 @@ private void checkBlockData( // let's ignore the empty chunks continue; } - Assert.assertEquals(chunkInfo, newBlockDataChunks.get(j)); + assertEquals(chunkInfo, newBlockDataChunks.get(j)); } } } @@ -963,10 +964,10 @@ public static void prepareData(int[][] ranges) throws Exception { .stream() .map(ContainerInfo::containerID) .collect(Collectors.toList()); - Assertions.assertEquals(1, containerIDs.size()); + assertEquals(1, containerIDs.size()); containerID = containerIDs.get(0).getId(); List pipelines = scm.getPipelineManager().getPipelines(repConfig); - Assertions.assertEquals(1, pipelines.size()); + assertEquals(1, pipelines.size()); pipeline = pipelines.get(0); datanodeDetails = pipeline.getNodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 928de5990ed..caf9cadb165 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -34,6 +34,10 @@ import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE; import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_REQUIRED; import static 
org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.STARTING_FINALIZATION; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -82,7 +86,6 @@ import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; -import org.junit.Assert; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.apache.ozone.test.tag.Slow; @@ -229,12 +232,12 @@ private void testPostUpgradePipelineCreation() throws IOException, TimeoutException { Pipeline ratisPipeline1 = scmPipelineManager.createPipeline(RATIS_THREE); scmPipelineManager.openPipeline(ratisPipeline1.getId()); - Assert.assertEquals(0, + assertEquals(0, scmPipelineManager.getNumberOfContainers(ratisPipeline1.getId())); PipelineID pid = scmContainerManager.allocateContainer(RATIS_THREE, "Owner1").getPipelineID(); - Assert.assertEquals(1, scmPipelineManager.getNumberOfContainers(pid)); - Assert.assertEquals(pid, ratisPipeline1.getId()); + assertEquals(1, scmPipelineManager.getNumberOfContainers(pid)); + assertEquals(pid, ratisPipeline1.getId()); } /* @@ -290,7 +293,7 @@ public void testFinalizationFromInitialVersionToLatestVersion() // Trigger Finalization on the SCM StatusAndMessages status = scm.getFinalizationManager().finalizeUpgrade( "xyz"); - Assert.assertEquals(STARTING_FINALIZATION, status.status()); + assertEquals(STARTING_FINALIZATION, status.status()); // Wait for the Finalization to complete on the SCM. TestHddsUpgradeUtils.waitForFinalizationFromClient( @@ -308,7 +311,7 @@ public void testFinalizationFromInitialVersionToLatestVersion() .stream() .filter(postUpgradeOpenPipelines::contains) .count(); - Assert.assertEquals(0, numPreUpgradeOpenPipelines); + assertEquals(0, numPreUpgradeOpenPipelines); // Verify Post-Upgrade conditions on the SCM. 
TestHddsUpgradeUtils.testPostUpgradeConditionsSCM( @@ -447,7 +450,7 @@ public void run() { }); } catch (Exception e) { LOG.info("DataNode Restart Failed!"); - Assert.fail(e.getMessage()); + fail(e.getMessage()); } return t; } @@ -515,7 +518,7 @@ public void testScmFailuresBeforeScmPreFinalizeUpgrade() BEFORE_PRE_FINALIZE_UPGRADE, this::injectSCMFailureDuringSCMUpgrade); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -534,7 +537,7 @@ public void testScmFailuresAfterScmPreFinalizeUpgrade() AFTER_PRE_FINALIZE_UPGRADE, this::injectSCMFailureDuringSCMUpgrade); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -553,7 +556,7 @@ public void testScmFailuresAfterScmCompleteFinalization() AFTER_COMPLETE_FINALIZATION, () -> this.injectSCMFailureDuringSCMUpgrade()); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -572,7 +575,7 @@ public void testScmFailuresAfterScmPostFinalizeUpgrade() AFTER_POST_FINALIZE_UPGRADE, () -> this.injectSCMFailureDuringSCMUpgrade()); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -591,7 +594,7 @@ public void testAllDataNodeFailuresBeforeScmPreFinalizeUpgrade() BEFORE_PRE_FINALIZE_UPGRADE, this::injectDataNodeFailureDuringSCMUpgrade); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -610,7 +613,7 @@ public void testAllDataNodeFailuresAfterScmPreFinalizeUpgrade() AFTER_PRE_FINALIZE_UPGRADE, this::injectDataNodeFailureDuringSCMUpgrade); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -629,7 +632,7 @@ public void testAllDataNodeFailuresAfterScmCompleteFinalization() AFTER_COMPLETE_FINALIZATION, this::injectDataNodeFailureDuringSCMUpgrade); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -648,7 +651,7 @@ public void testAllDataNodeFailuresAfterScmPostFinalizeUpgrade() AFTER_POST_FINALIZE_UPGRADE, this::injectDataNodeFailureDuringSCMUpgrade); testFinalizationWithFailureInjectionHelper(null); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); } /* @@ -683,7 +686,7 @@ public void testDataNodeFailuresDuringDataNodeUpgrade() .getUpgradeFinalizer()) .setFinalizationExecutor(dataNodeFinalizationExecutor); testFinalizationWithFailureInjectionHelper(failureInjectionThread); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); synchronized (cluster) { shutdown(); init(); @@ -736,7 +739,7 @@ public void testAllPossibleDataNodeFailuresAndSCMFailures() .setFinalizationExecutor(dataNodeFinalizationExecutor); testFinalizationWithFailureInjectionHelper( dataNodefailureInjectionThread); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); synchronized (cluster) { shutdown(); init(); @@ -777,7 +780,7 @@ public void testDataNodeAndSCMFailuresTogetherDuringSCMUpgrade() scm.getFinalizationManager().getUpgradeFinalizer() .setFinalizationExecutor(finalizationExecutor); testFinalizationWithFailureInjectionHelper(helpingFailureInjectionThread); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); synchronized (cluster) { shutdown(); init(); @@ -817,7 +820,7 @@ public void 
testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade() .getUpgradeFinalizer()) .setFinalizationExecutor(dataNodeFinalizationExecutor); testFinalizationWithFailureInjectionHelper(helpingFailureInjectionThread); - Assert.assertTrue(testPassed.get()); + assertTrue(testPassed.get()); synchronized (cluster) { shutdown(); init(); @@ -843,7 +846,7 @@ public void testFinalizationWithFailureInjectionHelper( // Trigger Finalization on the SCM StatusAndMessages status = scm.getFinalizationManager().finalizeUpgrade("xyz"); - Assert.assertEquals(STARTING_FINALIZATION, status.status()); + assertEquals(STARTING_FINALIZATION, status.status()); // Make sure that any outstanding thread created by failure injection // has completed its job. @@ -907,7 +910,7 @@ public void testFinalizationWithFailureInjectionHelper( DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine(); Set pipelines = scm.getScmNodeManager().getPipelines(dsm.getDatanodeDetails()); - Assert.assertTrue(pipelines != null); + assertNotNull(pipelines); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java index 10617e8a1b4..6fc964fd0ab 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java @@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; -import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,6 +49,10 @@ import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN; import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.ALREADY_FINALIZED; import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Helper methods for testing HDDS upgrade finalization in integration tests. @@ -73,7 +76,7 @@ public static void waitForFinalizationFromClient( .queryUpgradeFinalizationProgress(clientID, true, true) .status(); LOG.info("Waiting for upgrade finalization to complete from client." 
+ - " Current status is {}.", status); + " Current status is {}.", status); return status == FINALIZATION_DONE || status == ALREADY_FINALIZED; }); } @@ -84,11 +87,11 @@ public static void waitForFinalizationFromClient( public static void testPreUpgradeConditionsSCM( List scms) { for (StorageContainerManager scm : scms) { - Assert.assertEquals(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), + assertEquals(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), scm.getLayoutVersionManager().getMetadataLayoutVersion()); for (ContainerInfo ci : scm.getContainerManager() .getContainers()) { - Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, ci.getState()); + assertEquals(HddsProtos.LifeCycleState.OPEN, ci.getState()); } } } @@ -106,15 +109,15 @@ public static void testPostUpgradeConditionsSCM( } public static void testPostUpgradeConditionsSCM(StorageContainerManager scm, - int numContainers, int numDatanodes) { + int numContainers, int numDatanodes) { - Assert.assertTrue(scm.getScmContext().getFinalizationCheckpoint() + assertTrue(scm.getScmContext().getFinalizationCheckpoint() .hasCrossed(FinalizationCheckpoint.FINALIZATION_COMPLETE)); HDDSLayoutVersionManager scmVersionManager = scm.getLayoutVersionManager(); - Assert.assertEquals(scmVersionManager.getSoftwareLayoutVersion(), + assertEquals(scmVersionManager.getSoftwareLayoutVersion(), scmVersionManager.getMetadataLayoutVersion()); - Assert.assertTrue(scmVersionManager.getMetadataLayoutVersion() >= 1); + assertTrue(scmVersionManager.getMetadataLayoutVersion() >= 1); // SCM should not return from finalization until there is at least one // pipeline to use. @@ -124,7 +127,7 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm, () -> scmPipelineManager.getPipelines(RATIS_THREE, OPEN).size() >= 1, 500, 60000); } catch (TimeoutException | InterruptedException e) { - Assert.fail("Timeout waiting for Upgrade to complete on SCM."); + fail("Timeout waiting for Upgrade to complete on SCM."); } // SCM will not return from finalization until there is at least one @@ -137,26 +140,26 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm, HddsProtos.LifeCycleState ciState = ci.getState(); LOG.info("testPostUpgradeConditionsSCM: container state is {}", ciState.name()); - Assert.assertTrue((ciState == HddsProtos.LifeCycleState.CLOSED) || + assertTrue((ciState == HddsProtos.LifeCycleState.CLOSED) || (ciState == HddsProtos.LifeCycleState.CLOSING) || (ciState == HddsProtos.LifeCycleState.DELETING) || (ciState == HddsProtos.LifeCycleState.DELETED) || (ciState == HddsProtos.LifeCycleState.QUASI_CLOSED)); countContainers++; } - Assert.assertTrue(countContainers >= numContainers); + assertTrue(countContainers >= numContainers); } /* * Helper function to test Pre-Upgrade conditions on all the DataNodes. */ public static void testPreUpgradeConditionsDataNodes( - List datanodes) { + List datanodes) { for (HddsDatanodeService dataNode : datanodes) { DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine(); HDDSLayoutVersionManager dnVersionManager = dsm.getLayoutVersionManager(); - Assert.assertEquals(0, dnVersionManager.getMetadataLayoutVersion()); + assertEquals(0, dnVersionManager.getMetadataLayoutVersion()); } int countContainers = 0; @@ -165,12 +168,12 @@ public static void testPreUpgradeConditionsDataNodes( // Also verify that all the existing containers are open. 
for (Container container : dsm.getContainer().getController().getContainers()) { - Assert.assertSame(container.getContainerState(), + assertSame(container.getContainerState(), ContainerProtos.ContainerDataProto.State.OPEN); countContainers++; } } - Assert.assertTrue(countContainers >= 1); + assertTrue(countContainers >= 1); } /* @@ -204,7 +207,7 @@ public static void testPostUpgradeConditionsDataNodes( return true; }, 500, 60000); } catch (TimeoutException | InterruptedException e) { - Assert.fail("Timeout waiting for Upgrade to complete on Data Nodes."); + fail("Timeout waiting for Upgrade to complete on Data Nodes."); } int countContainers = 0; @@ -212,21 +215,20 @@ public static void testPostUpgradeConditionsDataNodes( DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine(); HDDSLayoutVersionManager dnVersionManager = dsm.getLayoutVersionManager(); - Assert.assertEquals(dnVersionManager.getSoftwareLayoutVersion(), + assertEquals(dnVersionManager.getSoftwareLayoutVersion(), dnVersionManager.getMetadataLayoutVersion()); - Assert.assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1); + assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1); // Also verify that all the existing containers are closed. for (Container container : - dsm.getContainer().getController().getContainers()) { - Assert.assertTrue("Container had unexpected state " + - container.getContainerState(), - closeStates.stream().anyMatch( - state -> container.getContainerState().equals(state))); + dsm.getContainer().getController().getContainers()) { + assertTrue(closeStates.stream().anyMatch( + state -> container.getContainerState().equals(state)), + "Container had unexpected state " + container.getContainerState()); countContainers++; } } - Assert.assertTrue(countContainers >= numContainers); + assertTrue(countContainers >= numContainers); } public static void testDataNodesStateOnSCM(List scms, @@ -251,14 +253,14 @@ public static void testDataNodesStateOnSCM(StorageContainerManager scm, try { HddsProtos.NodeState dnState = scm.getScmNodeManager().getNodeStatus(dn).getHealth(); - Assert.assertTrue((dnState == state) || + assertTrue((dnState == state) || (alternateState != null && dnState == alternateState)); } catch (NodeNotFoundException e) { e.printStackTrace(); - Assert.fail("Node not found"); + fail("Node not found"); } ++countNodes; } - Assert.assertEquals(expectedDatanodeCount, countNodes); + assertEquals(expectedDatanodeCount, countNodes); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index 59e95e7c213..d89e6a6c360 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -34,9 +34,9 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils.VoidCallable; - import org.apache.ratis.util.function.CheckedConsumer; -import org.junit.Assert; +import org.junit.jupiter.api.Assertions; + /** * Helper class for Tests. 
@@ -92,7 +92,7 @@ public static void closeContainers( .updateContainerState(ContainerID.valueOf(blockID.getContainerID()), HddsProtos.LifeCycleEvent.CLOSE); } - Assert.assertFalse(scm.getContainerManager() + Assertions.assertFalse(scm.getContainerManager() .getContainer(ContainerID.valueOf(blockID.getContainerID())) .isOpen()); }, omKeyLocationInfoGroups); @@ -144,9 +144,9 @@ public static void expectOmException( throws Exception { try { eval.call(); - Assert.fail("OMException is expected"); + Assertions.fail("OMException is expected"); } catch (OMException ex) { - Assert.assertEquals(code, ex.getResult()); + Assertions.assertEquals(code, ex.getResult()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index cb6bbc9dd0d..da806ac2a3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -56,9 +56,10 @@ import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.GenericTestUtils.LogCapturer; - +import org.apache.ratis.util.ExitUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomStringUtils; + import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; @@ -85,13 +86,13 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.slf4j.event.Level.INFO; -import org.apache.ratis.util.ExitUtils; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -101,7 +102,6 @@ import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.slf4j.event.Level.INFO; /** * Test class for security-enabled Ozone cluster. @@ -362,10 +362,9 @@ public void testDelegationToken(boolean useIp) throws Exception { "Auth successful for " + username + " (auth:TOKEN)")); OzoneTestUtils.expectOmException(VOLUME_NOT_FOUND, () -> omClient.deleteVolume("vol1")); - assertTrue( - "Log file doesn't contain successful auth for user " + username, - logs.getOutput().contains("Auth successful for " - + username + " (auth:TOKEN)")); + assertTrue(logs.getOutput().contains("Auth successful for " + + username + " (auth:TOKEN)"), + "Log file doesn't contain successful auth for user " + username); // Case 4: Test failure of token renewal.
// Call to renewDelegationToken will fail but it will confirm that diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 717304a5d0a..55e16989a88 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -86,18 +86,22 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.hamcrest.core.Is.is; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.hamcrest.MatcherAssert.assertThat; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.storage.FileInfo; import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; -import static org.hamcrest.core.Is.is; import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -212,7 +216,7 @@ public void testContainerStateMachineCloseOnMissingPipeline() getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); @@ -268,7 +272,7 @@ public void testContainerStateMachineFailures() throws Exception { (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster); @@ -287,7 +291,7 @@ public void testContainerStateMachineFailures() throws Exception { long containerID = omKeyLocationInfo.getContainerID(); // Make sure the container is marked unhealthy - Assert.assertTrue( + assertTrue( dn.getDatanodeStateMachine() .getContainer().getContainerSet() .getContainer(containerID) @@ -305,7 +309,7 @@ public void testContainerStateMachineFailures() throws Exception { cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); ozoneContainer = cluster.getHddsDatanodes().get(index) .getDatanodeStateMachine().getContainer(); - Assert.assertNull(ozoneContainer.getContainerSet(). + assertNull(ozoneContainer.getContainerSet(). 
getContainer(containerID)); } @@ -323,7 +327,7 @@ public void testUnhealthyContainer() throws Exception { .getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster); @@ -332,7 +336,7 @@ public void testUnhealthyContainer() throws Exception { .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()) .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); + assertTrue(containerData instanceof KeyValueContainerData); KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData; // delete the container db file @@ -348,7 +352,7 @@ public void testUnhealthyContainer() throws Exception { long containerID = omKeyLocationInfo.getContainerID(); // Make sure the container is marked unhealthy - Assert.assertTrue( + assertTrue( dn.getDatanodeStateMachine() .getContainer().getContainerSet().getContainer(containerID) .getContainerState() @@ -388,7 +392,7 @@ public void testUnhealthyContainer() throws Exception { request.setCloseContainer( ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); request.setDatanodeUuid(dnService.getDatanodeDetails().getUuidString()); - Assert.assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY, + assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY, dispatcher.dispatch(request.build(), null) .getResult()); } @@ -408,7 +412,7 @@ public void testApplyTransactionFailure() throws Exception { getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster); @@ -417,7 +421,7 @@ public void testApplyTransactionFailure() throws Exception { .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()) .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); + assertTrue(containerData instanceof KeyValueContainerData); KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData; key.close(); @@ -431,8 +435,8 @@ public void testApplyTransactionFailure() throws Exception { final Path parentPath = snapshot.getPath(); // Since the snapshot threshold is set to 1, since there are // applyTransactions, we should see snapshots - Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); - Assert.assertNotNull(snapshot); + assertTrue(parentPath.getParent().toFile().listFiles().length > 0); + assertNotNull(snapshot); long containerID = omKeyLocationInfo.getContainerID(); // delete the container db file FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath())); @@ -452,14 +456,14 @@ public void testApplyTransactionFailure() throws Exception { try { xceiverClient.sendCommand(request.build()); - Assert.fail("Expected exception not thrown"); + fail("Expected exception not thrown"); } catch (IOException e) { // Exception should be thrown } finally { xceiverClientManager.releaseClient(xceiverClient, false); } // Make sure the container is marked unhealthy - Assert.assertTrue(dn.getDatanodeStateMachine() + assertTrue(dn.getDatanodeStateMachine() 
.getContainer().getContainerSet().getContainer(containerID) .getContainerState() == ContainerProtos.ContainerDataProto.State.UNHEALTHY); @@ -467,16 +471,16 @@ public void testApplyTransactionFailure() throws Exception { // try to take a new snapshot, ideally it should just fail stateMachine.takeSnapshot(); } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof StateMachineException); + assertTrue(ioe instanceof StateMachineException); } if (snapshot.getPath().toFile().exists()) { // Make sure the latest snapshot is same as the previous one try { final FileInfo latestSnapshot = getSnapshotFileInfo(storage); - Assert.assertTrue(snapshot.getPath().equals(latestSnapshot.getPath())); + assertTrue(snapshot.getPath().equals(latestSnapshot.getPath())); } catch (Throwable e) { - Assert.assertFalse(snapshot.getPath().toFile().exists()); + assertFalse(snapshot.getPath().toFile().exists()); } } @@ -500,7 +504,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer() KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster); @@ -508,7 +512,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer() .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()) .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); + assertTrue(containerData instanceof KeyValueContainerData); key.close(); ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(dn, @@ -518,8 +522,8 @@ public void testApplyTransactionIdempotencyWithClosedContainer() final FileInfo snapshot = getSnapshotFileInfo(storage); final Path parentPath = snapshot.getPath(); stateMachine.takeSnapshot(); - Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); - Assert.assertNotNull(snapshot); + assertTrue(parentPath.getParent().toFile().listFiles().length > 0); + assertNotNull(snapshot); long markIndex1 = StatemachineImplTestUtil.findLatestSnapshot(storage) .getIndex(); long containerID = omKeyLocationInfo.getContainerID(); @@ -537,19 +541,19 @@ public void testApplyTransactionIdempotencyWithClosedContainer() try { xceiverClient.sendCommand(request.build()); } catch (IOException e) { - Assert.fail("Exception should not be thrown"); + fail("Exception should not be thrown"); } - Assert.assertTrue( + assertTrue( TestHelper.getDatanodeService(omKeyLocationInfo, cluster) .getDatanodeStateMachine() .getContainer().getContainerSet().getContainer(containerID) .getContainerState() == ContainerProtos.ContainerDataProto.State.CLOSED); - Assert.assertTrue(stateMachine.isStateMachineHealthy()); + assertTrue(stateMachine.isStateMachineHealthy()); try { stateMachine.takeSnapshot(); } catch (IOException ioe) { - Assert.fail("Exception should not be thrown"); + fail("Exception should not be thrown"); } finally { xceiverClientManager.releaseClient(xceiverClient, false); } @@ -566,7 +570,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer() } }), 1000, 30000); final FileInfo latestSnapshot = getSnapshotFileInfo(storage); - Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath())); + assertFalse(snapshot.getPath().equals(latestSnapshot.getPath())); } // The test injects multiple write 
chunk requests along with closed container @@ -590,7 +594,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() .getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster); @@ -599,7 +603,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()) .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); + assertTrue(containerData instanceof KeyValueContainerData); key.close(); ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(dn, @@ -611,8 +615,8 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() stateMachine.takeSnapshot(); // Since the snapshot threshold is set to 1, since there are // applyTransactions, we should see snapshots - Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); - Assert.assertNotNull(snapshot); + assertTrue(parentPath.getParent().toFile().listFiles().length > 0); + assertNotNull(snapshot); long containerID = omKeyLocationInfo.getContainerID(); Pipeline pipeline = cluster.getStorageContainerLocationClient() .getContainerWithPipeline(containerID).getPipeline(); @@ -653,10 +657,9 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() failCount.incrementAndGet(); } String message = e.getMessage(); - Assert.assertFalse(message, - message.contains("hello")); - Assert.assertTrue(message, - message.contains(HddsUtils.REDACTED.toStringUtf8())); + assertFalse(message.contains("hello"), message); + assertTrue(message.contains(HddsUtils.REDACTED.toStringUtf8()), + message); } }; @@ -681,21 +684,21 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() if (failCount.get() > 0) { fail("testWriteStateMachineDataIdempotencyWithClosedContainer failed"); } - Assert.assertTrue( + assertTrue( TestHelper.getDatanodeService(omKeyLocationInfo, cluster) .getDatanodeStateMachine() .getContainer().getContainerSet().getContainer(containerID) .getContainerState() == ContainerProtos.ContainerDataProto.State.CLOSED); - Assert.assertTrue(stateMachine.isStateMachineHealthy()); + assertTrue(stateMachine.isStateMachineHealthy()); try { stateMachine.takeSnapshot(); } catch (IOException ioe) { - Assert.fail("Exception should not be thrown"); + fail("Exception should not be thrown"); } final FileInfo latestSnapshot = getSnapshotFileInfo(storage); - Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath())); + assertFalse(snapshot.getPath().equals(latestSnapshot.getPath())); r2.run(); } finally { @@ -720,7 +723,7 @@ public void testContainerStateMachineSingleFailureRetry() getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); @@ -733,7 +736,7 @@ public void testContainerStateMachineSingleFailureRetry() key.close(); } catch (Exception ioe) { // Should not fail.. 
- Assert.fail("Exception " + ioe.getMessage()); + fail("Exception " + ioe.getMessage()); } validateData("ratis1", 2, "ratisratisratisratis"); } @@ -755,7 +758,7 @@ public void testContainerStateMachineDualFailureRetry() getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); @@ -768,7 +771,7 @@ public void testContainerStateMachineDualFailureRetry() key.close(); } catch (Exception ioe) { // Should not fail.. - Assert.fail("Exception " + ioe.getMessage()); + fail("Exception " + ioe.getMessage()); } validateData("ratis1", 2, "ratisratisratisratis"); } @@ -794,7 +797,7 @@ private void induceFollowerFailure(OmKeyLocationInfo omKeyLocationInfo, ContainerData containerData = container .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); + assertTrue(containerData instanceof KeyValueContainerData); KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData; FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath())); @@ -817,7 +820,7 @@ private void validateData(String key, int locationCount, String payload) { try { keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs); - Assert.assertEquals(locationCount, + assertEquals(locationCount, keyInfo.getLatestVersionLocations().getLocationListCount()); byte[] buffer = new byte[1024]; try (OzoneInputStream o = objectStore.getVolume(volumeName) @@ -828,9 +831,9 @@ private void validateData(String key, int locationCount, String payload) { String response = new String(buffer, 0, end, StandardCharsets.UTF_8); - Assert.assertEquals(payload, response); + assertEquals(payload, response); } catch (IOException e) { - Assert.fail("Exception not expected " + e.getMessage()); + fail("Exception not expected " + e.getMessage()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index 911650390fa..deeb4214011 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.client.rpc; import java.io.IOException; -import static java.nio.charset.StandardCharsets.UTF_8; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; @@ -63,11 +62,14 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; -import org.junit.Assert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -173,16 +175,16 @@ public void testBlockWritesWithDnFailures() 
throws Exception { key.write(data); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + assertTrue(key.getOutputStream() instanceof KeyOutputStream); // assert that the exclude list's expire time equals to // default value 600000 ms in OzoneClientConfig.java - Assert.assertEquals(((KeyOutputStream) key.getOutputStream()) + assertEquals(((KeyOutputStream) key.getOutputStream()) .getExcludeList().getExpiryTime(), 600000); KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertTrue(locationInfoList.size() == 1); + assertEquals(1, locationInfoList.size()); long containerId = locationInfoList.get(0).getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() .getContainerManager() @@ -204,7 +206,7 @@ public void testBlockWritesWithDnFailures() throws Exception { .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(data.length, keyInfo.getDataSize()); + assertEquals(data.length, keyInfo.getDataSize()); validateData(keyName, data); // Verify that the block information is updated correctly in the DB on @@ -277,12 +279,11 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception { .getLocalID())); // The first Block could have 1 or 2 chunkSize of data int block1NumChunks = blockData1.getChunks().size(); - Assert.assertTrue(block1NumChunks >= 1); + assertTrue(block1NumChunks >= 1); - Assert.assertEquals(chunkSize * block1NumChunks, blockData1.getSize()); - Assert.assertEquals(1, containerData1.getBlockCount()); - Assert.assertEquals(chunkSize * block1NumChunks, - containerData1.getBytesUsed()); + assertEquals(chunkSize * block1NumChunks, blockData1.getSize()); + assertEquals(1, containerData1.getBlockCount()); + assertEquals(chunkSize * block1NumChunks, containerData1.getBytesUsed()); } // Verify that the second block has the remaining 0.5*chunkSize of data @@ -295,17 +296,17 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception { containerData2.getBlockKey(locationList.get(1).getBlockID() .getLocalID())); // The second Block should have 0.5 chunkSize of data - Assert.assertEquals(block2ExpectedChunkCount, + assertEquals(block2ExpectedChunkCount, blockData2.getChunks().size()); - Assert.assertEquals(1, containerData2.getBlockCount()); + assertEquals(1, containerData2.getBlockCount()); int expectedBlockSize; if (block2ExpectedChunkCount == 1) { expectedBlockSize = chunkSize / 2; } else { expectedBlockSize = chunkSize + chunkSize / 2; } - Assert.assertEquals(expectedBlockSize, blockData2.getSize()); - Assert.assertEquals(expectedBlockSize, containerData2.getBytesUsed()); + assertEquals(expectedBlockSize, blockData2.getSize()); + assertEquals(expectedBlockSize, containerData2.getBytesUsed()); } } @@ -319,7 +320,7 @@ public void testWriteSmallFile() throws Exception { .getFixedLengthString(keyString, chunkSize / 2); key.write(data.getBytes(UTF_8)); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = @@ -347,11 +348,10 @@ public void testWriteSmallFile() throws Exception { OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); // Make sure a new block is written - Assert.assertNotEquals( + assertNotEquals( 
keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(data.getBytes(UTF_8).length, - keyInfo.getDataSize()); + assertEquals(data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, data.getBytes(UTF_8)); } @@ -367,14 +367,14 @@ public void testContainerExclusionWithClosedContainerException() .getFixedLengthString(keyString, chunkSize); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List streamEntryList = keyOutputStream.getStreamEntries(); // Assert that 1 block will be preallocated - Assert.assertEquals(1, streamEntryList.size()); + assertEquals(1, streamEntryList.size()); key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); @@ -391,12 +391,10 @@ public void testContainerExclusionWithClosedContainerException() key.write(data.getBytes(UTF_8)); key.flush(); - Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds() + assertTrue(keyOutputStream.getExcludeList().getContainerIds() .contains(ContainerID.valueOf(containerId))); - Assert.assertTrue( - keyOutputStream.getExcludeList().getDatanodes().isEmpty()); - Assert.assertTrue( - keyOutputStream.getExcludeList().getPipelineIds().isEmpty()); + assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty()); + assertTrue(keyOutputStream.getExcludeList().getPipelineIds().isEmpty()); // The close will just write to the buffer key.close(); @@ -408,11 +406,10 @@ public void testContainerExclusionWithClosedContainerException() OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); // Make sure a new block is written - Assert.assertNotEquals( + assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(2 * data.getBytes(UTF_8).length, - keyInfo.getDataSize()); + assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, data.concat(data).getBytes(UTF_8)); } @@ -426,14 +423,14 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { .getFixedLengthString(keyString, chunkSize); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List streamEntryList = keyOutputStream.getStreamEntries(); // Assert that 1 block will be preallocated - Assert.assertEquals(1, streamEntryList.size()); + assertEquals(1, streamEntryList.size()); key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); @@ -454,12 +451,10 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { key.write(data.getBytes(UTF_8)); key.flush(); - Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes() + assertTrue(keyOutputStream.getExcludeList().getDatanodes() .contains(datanodes.get(0))); - Assert.assertTrue( - keyOutputStream.getExcludeList().getContainerIds().isEmpty()); - Assert.assertTrue( - keyOutputStream.getExcludeList().getPipelineIds().isEmpty()); + assertTrue(keyOutputStream.getExcludeList().getContainerIds().isEmpty()); + assertTrue(keyOutputStream.getExcludeList().getPipelineIds().isEmpty()); // The close will 
just write to the buffer key.close(); @@ -471,10 +466,10 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); // Make sure a new block is written - Assert.assertNotEquals( + assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8)); } @@ -489,14 +484,14 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { .getFixedLengthString(keyString, chunkSize); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List streamEntryList = keyOutputStream.getStreamEntries(); // Assert that 1 block will be preallocated - Assert.assertEquals(1, streamEntryList.size()); + assertEquals(1, streamEntryList.size()); key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); @@ -517,12 +512,10 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { key.write(data.getBytes(UTF_8)); key.write(data.getBytes(UTF_8)); key.flush(); - Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() + assertTrue(keyOutputStream.getExcludeList().getPipelineIds() .contains(pipeline.getId())); - Assert.assertTrue( - keyOutputStream.getExcludeList().getContainerIds().isEmpty()); - Assert.assertTrue( - keyOutputStream.getExcludeList().getDatanodes().isEmpty()); + assertTrue(keyOutputStream.getExcludeList().getContainerIds().isEmpty()); + assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty()); // The close will just write to the buffer key.close(); @@ -534,10 +527,10 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); // Make sure a new block is written - Assert.assertNotEquals( + assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index c84f6f31419..54153744d7c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -52,15 +52,16 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; -import org.junit.Assert; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; -import static 
org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * This class is to test all the public facing APIs of Ozone Client with an @@ -132,7 +133,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = bucket.readKey(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read key should succeed"); } @@ -141,7 +142,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = bucket.readKey(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read file should succeed"); } @@ -156,7 +157,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = newBucket.readKey(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read key should succeed"); } @@ -165,7 +166,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = newBucket.readFile(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read file should succeed"); } @@ -197,9 +198,9 @@ public void testMultiPartUploadWithStream() throws IOException { assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(keyName, multipartInfo.getKeyName()); assertNotNull(multipartInfo.getUploadID()); OzoneDataStreamOutput ozoneStreamOutput = bucket.createMultipartStreamKey( @@ -211,11 +212,11 @@ public void testMultiPartUploadWithStream() throws IOException { OzoneMultipartUploadPartListParts parts = bucket.listParts(keyName, uploadID, 0, 1); - Assert.assertEquals(parts.getPartInfoList().size(), 1); + assertEquals(parts.getPartInfoList().size(), 1); OzoneMultipartUploadPartListParts.PartInfo partInfo = parts.getPartInfoList().get(0); - Assert.assertEquals(valueLength, partInfo.getSize()); + assertEquals(valueLength, partInfo.getSize()); } @@ -269,8 +270,8 @@ public void testUploadWithStreamAndMemoryMappedBuffer() throws IOException { // verify the key details final OzoneKeyDetails keyDetails = bucket.getKey(keyName); - Assertions.assertEquals(keyName, keyDetails.getName()); - Assertions.assertEquals(data.length, keyDetails.getDataSize()); + assertEquals(keyName, keyDetails.getName()); + assertEquals(data.length, 
keyDetails.getDataSize()); // verify the key content final byte[] buffer = new byte[data.length]; @@ -283,6 +284,6 @@ public void testUploadWithStreamAndMemoryMappedBuffer() throws IOException { off += n; } } - Assertions.assertArrayEquals(data, buffer); + assertArrayEquals(data, buffer); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index 5a46a0edf4e..9289d4fb6df 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -65,8 +65,12 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + import org.apache.ratis.protocol.exceptions.GroupMismatchException; -import org.junit.Assert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -183,43 +187,42 @@ public void testWatchForCommitWithKeyWrite() throws Exception { ContainerTestHelper.getFixedLengthString(keyString, dataLength) .getBytes(UTF_8); key.write(data1); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); OutputStream stream = keyOutputStream.getStreamEntries().get(0) .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); + assertTrue(stream instanceof BlockOutputStream); RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream; // we have just written data more than flushSize (2 chunks), at this time // buffer pool will have 3 buffers allocated worth of chunk size - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); + assertEquals(4, blockOutputStream.getBufferPool().getSize()); // writtenDataLength as well as flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - Assert.assertEquals(maxFlushSize, + assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); + assertEquals(maxFlushSize, blockOutputStream.getTotalDataFlushedLength()); // since data equal to maxBufferSize is written, this will be a blocking // call and hence will wait for at least flushSize worth of data to get // acked by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); + assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); // watchForCommit will clean up at least one entry from the map where each // entry corresponds to flushSize worth of data - Assert.assertTrue( + assertTrue( blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); // Now do a flush. This will flush the data and update the flush length and // the map.
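Besides the package move, several assertions in the surrounding hunks are tightened from a boolean comparison wrapped in assertTrue to assertEquals, which reports the expected and actual values on failure instead of a bare "expected true". The next hunk applies this to the retry counter; the shape of the change, using identifiers from that hunk:

    // Before: a failure says only that the condition was false
    Assert.assertTrue(keyOutputStream.getRetryCount() == 0);

    // After: a failure reports expected <0> alongside the actual count
    assertEquals(0, keyOutputStream.getRetryCount());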
key.flush(); // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); + assertEquals(4, blockOutputStream.getBufferPool().getSize()); + assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); + assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); // flush will make sure one more entry gets updated in the map - Assert.assertTrue( + assertTrue( blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); XceiverClientRatis raftClient = (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); + assertEquals(3, raftClient.getCommitInfoMap().size()); Pipeline pipeline = raftClient.getPipeline(); cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); @@ -234,16 +237,13 @@ public void testWatchForCommitWithKeyWrite() throws Exception { // and one flush for partial chunk key.flush(); // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); + assertEquals(0, keyOutputStream.getRetryCount()); // now close the stream, it will update the ack length after watchForCommit key.close(); - Assert - .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); + assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().isEmpty()); + assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); + assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().isEmpty()); validateData(keyName, data1); } @@ -257,9 +257,8 @@ public void testWatchForCommitForRetryfailure() throws Exception { HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE); XceiverClientSpi xceiverClient = clientManager .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); + assertEquals(1, xceiverClient.getRefcount()); + assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); Pipeline pipeline = xceiverClient.getPipeline(); TestHelper.createPipelineOnDatanode(pipeline, cluster); XceiverClientReply reply = xceiverClient.sendCommandAsync( @@ -280,19 +279,18 @@ public void testWatchForCommitForRetryfailure() throws Exception { // The basic idea here is just to test if it throws an exception.
xceiverClient .watchForCommit(index + new Random().nextInt(100) + 10); - Assert.fail("expected exception not thrown"); + fail("expected exception not thrown"); } catch (Exception e) { - Assert.assertTrue(e instanceof ExecutionException); + assertTrue(e instanceof ExecutionException); // since the timeout value is quite long, the watch request will either // fail with NotReplicated exception, RetryFailureException or // RuntimeException - Assert.assertFalse(HddsClientUtils + assertFalse(HddsClientUtils .checkForException(e) instanceof TimeoutException); // client should not attempt to watch with // MAJORITY_COMMITTED replication level, except the grpc IO issue if (!logCapturer.getOutput().contains("Connection refused")) { - Assert.assertFalse( - e.getMessage().contains("Watch-MAJORITY_COMMITTED")); + assertFalse(e.getMessage().contains("Watch-MAJORITY_COMMITTED")); } } clientManager.releaseClient(xceiverClient, false); @@ -310,9 +308,8 @@ public void test2WayCommitForTimeoutException() throws Exception { HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE); XceiverClientSpi xceiverClient = clientManager .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); + assertEquals(1, xceiverClient.getRefcount()); + assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); Pipeline pipeline = xceiverClient.getPipeline(); TestHelper.createPipelineOnDatanode(pipeline, cluster); XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; @@ -321,7 +318,7 @@ public void test2WayCommitForTimeoutException() throws Exception { container1.getContainerInfo().getContainerID(), xceiverClient.getPipeline())); reply.getResponse().get(); - Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); + assertEquals(3, ratisClient.getCommitInfoMap().size()); List nodesInPipeline = pipeline.getNodes(); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { // shutdown the ratis follower @@ -338,12 +335,12 @@ public void test2WayCommitForTimeoutException() throws Exception { xceiverClient.watchForCommit(reply.getLogIndex()); // commitInfo Map will be reduced to 2 here - Assert.assertEquals(2, ratisClient.getCommitInfoMap().size()); + assertEquals(2, ratisClient.getCommitInfoMap().size()); clientManager.releaseClient(xceiverClient, false); String output = logCapturer.getOutput(); - Assert.assertTrue(output.contains("3 way commit failed")); - Assert.assertTrue(output.contains("TimeoutException")); - Assert.assertTrue(output.contains("Committed by majority")); + assertTrue(output.contains("3 way commit failed")); + assertTrue(output.contains("TimeoutException")); + assertTrue(output.contains("Committed by majority")); } logCapturer.stopCapturing(); } @@ -356,9 +353,8 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE); XceiverClientSpi xceiverClient = clientManager .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); + assertEquals(1, xceiverClient.getRefcount()); + assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); Pipeline pipeline = xceiverClient.getPipeline(); XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; long containerId = container1.getContainerInfo().getContainerID(); @@ -366,7 +362,7 @@ public void
testWatchForCommitForGroupMismatchException() throws Exception { ContainerTestHelper.getCreateContainerRequest(containerId, xceiverClient.getPipeline())); reply.getResponse().get(); - Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); + assertEquals(3, ratisClient.getCommitInfoMap().size()); List pipelineList = new ArrayList<>(); pipelineList.add(pipeline); TestHelper.waitForPipelineClose(pipelineList, cluster); @@ -377,9 +373,9 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { xceiverClient .watchForCommit(reply.getLogIndex() + new Random().nextInt(100) + 10); - Assert.fail("Expected exception not thrown"); + fail("Expected exception not thrown"); } catch (Exception e) { - Assert.assertTrue(HddsClientUtils + assertTrue(HddsClientUtils .checkForException(e) instanceof GroupMismatchException); } clientManager.releaseClient(xceiverClient, false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java index 2a7423b15d3..1a1e9bb4c56 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java @@ -62,11 +62,13 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.RaftServer; import org.apache.ratis.statemachine.StateMachine; -import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static java.util.stream.Collectors.toList; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Helpers for container tests. 
@@ -186,7 +188,7 @@ public static void validateData(String keyName, byte[] data, sha1.update(data); MessageDigest sha2 = MessageDigest.getInstance(OzoneConsts.FILE_HASH); sha2.update(readData); - Assert.assertTrue(Arrays.equals(sha1.digest(), sha2.digest())); + assertTrue(Arrays.equals(sha1.digest(), sha2.digest())); } } @@ -203,7 +205,7 @@ public static void waitForContainerClose(OzoneOutputStream outputStream, containerIdList.add(id); } } - Assert.assertTrue(!containerIdList.isEmpty()); + assertTrue(!containerIdList.isEmpty()); waitForContainerClose(cluster, containerIdList.toArray(new Long[0])); } @@ -221,7 +223,7 @@ public static void waitForContainerClose(OzoneDataStreamOutput outputStream, containerIdList.add(id); } } - Assert.assertTrue(!containerIdList.isEmpty()); + assertTrue(!containerIdList.isEmpty()); waitForContainerClose(cluster, containerIdList.toArray(new Long[0])); } @@ -239,7 +241,7 @@ public static void waitForPipelineClose(OzoneOutputStream outputStream, containerIdList.add(id); } } - Assert.assertTrue(!containerIdList.isEmpty()); + assertFalse(containerIdList.isEmpty()); waitForPipelineClose(cluster, waitForContainerCreation, containerIdList.toArray(new Long[0])); } @@ -268,10 +270,10 @@ public static void waitForPipelineClose(MiniOzoneCluster cluster, GenericTestUtils .waitFor(() -> isContainerPresent(cluster, containerID, details), 500, 100 * 1000); - Assert.assertTrue(isContainerPresent(cluster, containerID, details)); + assertTrue(isContainerPresent(cluster, containerID, details)); // make sure the container gets created first - Assert.assertFalse(isContainerClosed(cluster, containerID, details)); + assertFalse(isContainerClosed(cluster, containerID, details)); } } } @@ -294,7 +296,7 @@ public static void waitForPipelineClose(List pipelineList, XceiverServerSpi server = cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn)) .getDatanodeStateMachine().getContainer().getWriteChannel(); - Assert.assertTrue(server instanceof XceiverServerRatis); + assertTrue(server instanceof XceiverServerRatis); GenericTestUtils.waitFor(() -> !server.isExist(pipelineId), 100, 30_000); } @@ -311,7 +313,7 @@ public static void createPipelineOnDatanode(Pipeline pipeline, cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn)) .getDatanodeStateMachine().getContainer() .getWriteChannel(); - Assert.assertTrue(server instanceof XceiverServerRatis); + assertTrue(server instanceof XceiverServerRatis); try { server.addGroup(pipeline.getId().getProtobuf(), Collections. 
unmodifiableList(pipeline.getNodes())); @@ -343,10 +345,10 @@ public static void waitForContainerClose(MiniOzoneCluster cluster, GenericTestUtils .waitFor(() -> isContainerPresent(cluster, containerID, details), 500, 100 * 1000); - Assert.assertTrue(isContainerPresent(cluster, containerID, details)); + assertTrue(isContainerPresent(cluster, containerID, details)); // make sure the container gets created first - Assert.assertFalse(isContainerClosed(cluster, containerID, details)); + assertFalse(isContainerClosed(cluster, containerID, details)); // send the order to close the container cluster.getStorageContainerManager().getEventQueue() .fireEvent(SCMEvents.CLOSE_CONTAINER, @@ -366,7 +368,7 @@ public static void waitForContainerClose(MiniOzoneCluster cluster, 15 * 1000); //double check if it's really closed // (waitFor also throws an exception) - Assert.assertTrue( + assertTrue( isContainerClosed(cluster, containerID, datanodeDetails)); } index++; @@ -410,7 +412,7 @@ public static Set getDatanodeServices( services.add(service); } } - Assert.assertEquals(pipelineNodes.size(), services.size()); + assertEquals(pipelineNodes.size(), services.size()); return services; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index a0841980723..b2c0f47997e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -70,7 +70,6 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.GenericTestUtils.LogCapturer; -import org.junit.Assert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; @@ -631,7 +630,7 @@ public void testContainerDeleteWithInvalidKeyCount() = scm.getContainerManager().getContainerReplicas(containerId); // Ensure for all replica isEmpty are false in SCM - Assert.assertTrue(scm.getContainerManager().getContainerReplicas( + Assertions.assertTrue(scm.getContainerManager().getContainerReplicas( containerId).stream(). 
allMatch(replica -> !replica.isEmpty())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 4948fd23a0a..c62f943ee87 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -45,7 +45,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterAll; -import org.junit.Assert; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -59,6 +58,9 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test container closing. @@ -131,12 +133,11 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); - Assert.assertEquals(datanodes.size(), 1); + assertEquals(1, datanodes.size()); DatanodeDetails datanodeDetails = datanodes.get(0); HddsDatanodeService datanodeService = null; - Assert - .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); + assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) { if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) { datanodeService = datanodeServiceItr; @@ -158,7 +159,7 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails), 500, 5 * 1000); // Make sure the closeContainerCommandHandler is Invoked - Assert.assertTrue( + assertTrue( closeContainerHandler.getInvocationCount() > lastInvocationCount); } @@ -190,11 +191,10 @@ public void testCloseContainerViaStandAlone() Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); - Assert.assertEquals(datanodes.size(), 1); + assertEquals(1, datanodes.size()); DatanodeDetails datanodeDetails = datanodes.get(0); - Assert - .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); + assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); // Send the order to close the container, give random pipeline id so that // the container will not be closed via RATIS @@ -211,13 +211,13 @@ public void testCloseContainerViaStandAlone() GenericTestUtils .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails), 500, 5 * 1000); - Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); + assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); 
cluster.getStorageContainerManager().getPipelineManager() .closePipeline(pipeline, false); Thread.sleep(5000); // Pipeline close should not affect a container in CLOSED state - Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); + assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); } @Test @@ -247,11 +247,11 @@ public void testCloseContainerViaRatis() throws IOException, Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); - Assert.assertEquals(3, datanodes.size()); + assertEquals(3, datanodes.size()); List metadataStores = new ArrayList<>(datanodes.size()); for (DatanodeDetails details : datanodes) { - Assert.assertFalse(isContainerClosed(cluster, containerID, details)); + assertFalse(isContainerClosed(cluster, containerID, details)); //send the order to close the container SCMCommand command = new CloseContainerCommand( containerID, pipeline.getId()); @@ -270,8 +270,7 @@ public void testCloseContainerViaRatis() throws IOException, } // There should be as many rocks db as the number of datanodes in pipeline. - Assert.assertEquals(datanodes.size(), - metadataStores.stream().distinct().count()); + assertEquals(datanodes.size(), metadataStores.stream().distinct().count()); // Make sure that it is CLOSED for (DatanodeDetails datanodeDetails : datanodes) { @@ -279,8 +278,7 @@ public void testCloseContainerViaRatis() throws IOException, () -> isContainerClosed(cluster, containerID, datanodeDetails), 500, 15 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assert.assertTrue(isContainerClosed(cluster, - containerID, datanodeDetails)); + assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); } } @@ -313,11 +311,10 @@ public void testQuasiCloseTransitionViaRatis() Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); - Assert.assertEquals(datanodes.size(), 1); + assertEquals(1, datanodes.size()); DatanodeDetails datanodeDetails = datanodes.get(0); - Assert - .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); + assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); // close the pipeline cluster.getStorageContainerManager() @@ -328,7 +325,7 @@ public void testQuasiCloseTransitionViaRatis() GenericTestUtils.waitFor( () -> isContainerQuasiClosed(cluster, containerID, datanodeDetails), 500, 5 * 1000); - Assert.assertTrue( + assertTrue( isContainerQuasiClosed(cluster, containerID, datanodeDetails)); // Send close container command from SCM to datanode with forced flag as @@ -342,8 +339,7 @@ public void testQuasiCloseTransitionViaRatis() GenericTestUtils .waitFor(() -> isContainerClosed( cluster, containerID, datanodeDetails), 500, 5 * 1000); - Assert.assertTrue( - isContainerClosed(cluster, containerID, datanodeDetails)); + assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); } private Boolean isContainerClosed(MiniOzoneCluster ozoneCluster, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 4eb57003df0..c055aaf1060 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -30,13 +30,12 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; -import org.junit.Assert; -import org.junit.Rule; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.migrationsupport.rules.EnableRuleMigrationSupport; -import org.junit.rules.TemporaryFolder; +import java.io.File; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -47,6 +46,11 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests ozone containers. @@ -55,11 +59,9 @@ @Timeout(300) public class TestOzoneContainer { - @Rule - public TemporaryFolder tempFolder = new TemporaryFolder(); - @Test - public void testCreateOzoneContainer() throws Exception { + public void testCreateOzoneContainer( + @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception { long containerID = ContainerTestHelper.getTestContainerID(); OzoneConfiguration conf = newOzoneConfiguration(); OzoneContainer container = null; @@ -67,8 +69,8 @@ public void testCreateOzoneContainer() throws Exception { // We don't start Ozone Container via data node, we will do it // independently in our test path. 
Pipeline pipeline = MockPipeline.createSingleNodePipeline(); - conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath()); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath()); + conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); + conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); @@ -91,14 +93,15 @@ public void testCreateOzoneContainer() throws Exception { } @Test - public void testOzoneContainerStart() throws Exception { + public void testOzoneContainerStart( + @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception { OzoneConfiguration conf = newOzoneConfiguration(); OzoneContainer container = null; try { Pipeline pipeline = MockPipeline.createSingleNodePipeline(); - conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath()); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath()); + conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); + conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); @@ -112,14 +115,14 @@ public void testOzoneContainerStart() throws Exception { try { container.start(clusterId); } catch (Exception e) { - Assert.fail(); + fail(); } container.stop(); try { container.stop(); } catch (Exception e) { - Assert.fail(); + fail(); } } finally { @@ -180,8 +183,8 @@ public static void runTestOzoneContainerViaDataNode( pipeline, writeChunkRequest.getWriteChunk()); response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Put Block putBlockRequest = ContainerTestHelper.getPutBlockRequest( @@ -189,8 +192,8 @@ public static void runTestOzoneContainerViaDataNode( response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Get Block request = ContainerTestHelper. 
@@ -210,8 +213,8 @@ public static void runTestOzoneContainerViaDataNode( updateRequest1 = ContainerTestHelper.getUpdateContainerRequest( testContainerID, containerUpdate); updateResponse1 = client.sendCommand(updateRequest1); - Assert.assertNotNull(updateResponse1); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, + assertNotNull(updateResponse1); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); //Update an non-existing container @@ -220,7 +223,7 @@ public static void runTestOzoneContainerViaDataNode( updateRequest2 = ContainerTestHelper.getUpdateContainerRequest( nonExistingContinerID, containerUpdate); updateResponse2 = client.sendCommand(updateRequest2); - Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, updateResponse2.getResult()); } finally { if (client != null) { @@ -230,13 +233,14 @@ public static void runTestOzoneContainerViaDataNode( } @Test - public void testBothGetandPutSmallFile() throws Exception { + public void testBothGetandPutSmallFile( + @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception { MiniOzoneCluster cluster = null; XceiverClientGrpc client = null; try { OzoneConfiguration conf = newOzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath()); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath()); + conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); + conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) .build(); @@ -267,22 +271,22 @@ static void runTestBothGetandPutSmallFile( .toByteArray(); ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(smallFileRequest); - Assert.assertNotNull(response); + assertNotNull(response); final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest = ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(), smallFileRequest.getPutSmallFile().getBlock()); response = client.sendCommand(getSmallFileRequest); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); ContainerProtos.ReadChunkResponseProto chunkResponse = response.getGetSmallFile().getData(); if (chunkResponse.hasDataBuffers()) { - Assert.assertArrayEquals(requestBytes, + assertArrayEquals(requestBytes, chunkResponse.getDataBuffers().toByteArray()); } else { - Assert.assertArrayEquals(requestBytes, + assertArrayEquals(requestBytes, chunkResponse.getData().toByteArray()); } } finally { @@ -295,7 +299,8 @@ static void runTestBothGetandPutSmallFile( @Test - public void testCloseContainer() throws Exception { + public void testCloseContainer( + @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception { MiniOzoneCluster cluster = null; XceiverClientGrpc client = null; ContainerProtos.ContainerCommandResponseProto response; @@ -304,8 +309,8 @@ public void testCloseContainer() throws Exception { try { OzoneConfiguration conf = newOzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath()); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath()); + conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); + conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) .build(); @@ -324,16 +329,16 @@ public void testCloseContainer() throws Exception { client.getPipeline(), writeChunkRequest.getWriteChunk()); // Put 
block before closing. response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Close the container. request = ContainerTestHelper.getCloseContainer( client.getPipeline(), containerID); response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Assert that none of the write operations are working after close. @@ -341,21 +346,21 @@ public void testCloseContainer() throws Exception { // Write chunks should fail now. response = client.sendCommand(writeChunkRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO, + assertNotNull(response); + assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO, response.getResult()); // Read chunk must work on a closed container. request = ContainerTestHelper.getReadChunkRequest(client.getPipeline(), writeChunkRequest.getWriteChunk()); response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Put block will fail on a closed container. response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO, + assertNotNull(response); + assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO, response.getResult()); // Get block must work on the closed container. @@ -376,7 +381,8 @@ public void testCloseContainer() throws Exception { } @Test - public void testDeleteContainer() throws Exception { + public void testDeleteContainer( + @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception { MiniOzoneCluster cluster = null; XceiverClientGrpc client = null; ContainerProtos.ContainerCommandResponseProto response; @@ -384,8 +390,8 @@ public void testDeleteContainer() throws Exception { writeChunkRequest, putBlockRequest; try { OzoneConfiguration conf = newOzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath()); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath()); + conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); + conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) .build(); @@ -403,8 +409,8 @@ public void testDeleteContainer() throws Exception { client.getPipeline(), writeChunkRequest.getWriteChunk()); // Put key before deleting. 
response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); // Container cannot be deleted because force flag is set to false and @@ -413,8 +419,8 @@ public void testDeleteContainer() throws Exception { client.getPipeline(), containerID, false); response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER, + assertNotNull(response); + assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER, response.getResult()); // Container can be deleted, by setting force flag, even with out closing @@ -422,8 +428,8 @@ public void testDeleteContainer() throws Exception { client.getPipeline(), containerID, true); response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); } finally { @@ -466,7 +472,7 @@ static void runAsyncTests( combinedFuture.get(); // Assert that all futures are indeed done. for (CompletableFuture future : computeResults) { - Assert.assertTrue(future.isDone()); + assertTrue(future.isDone()); } } finally { if (client != null) { @@ -476,13 +482,14 @@ static void runAsyncTests( } @Test - public void testXcieverClientAsync() throws Exception { + public void testXcieverClientAsync( + @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception { MiniOzoneCluster cluster = null; XceiverClientGrpc client = null; try { OzoneConfiguration conf = newOzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, tempFolder.newFolder().getPath()); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.newFolder().getPath()); + conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); + conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) .build(); @@ -513,7 +520,7 @@ public static void createContainerForTesting(XceiverClientSpi client, containerID, client.getPipeline()); ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(request); - Assert.assertNotNull(response); + assertNotNull(response); } public static ContainerProtos.ContainerCommandRequestProto @@ -526,8 +533,8 @@ public static void createContainerForTesting(XceiverClientSpi client, blockID, dataLen); ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(writeChunkRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); return writeChunkRequest; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 7e5db1f8e57..0451ba5c98e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -62,23 +62,19 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.ozone.test.GenericTestUtils; - import 
com.google.common.collect.Maps; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; - import org.apache.ratis.rpc.RpcType; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import org.apache.ratis.util.function.CheckedBiConsumer; import org.apache.ratis.util.function.CheckedBiFunction; -import org.junit.Assert; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.ratis.rpc.SupportedRpcType.GRPC; /** * Test Containers. @@ -174,7 +170,7 @@ static void runTestClientServer( ContainerTestHelper .getCreateContainerRequest( ContainerTestHelper.getTestContainerID(), pipeline); - Assert.assertNotNull(request.getTraceID()); + Assertions.assertNotNull(request.getTraceID()); client.sendCommand(request); } finally { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java index 20c17279389..cd42a153925 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.Assert; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -96,9 +95,9 @@ void testDefault() { "--num-of-buckets", "5", "--num-of-keys", "10"); - Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); - Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); randomKeyGenerator.printStats(System.out); } @@ -114,9 +113,9 @@ void testECKey() { "--type", "EC" ); - Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); - Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); } @Test @@ -133,9 +132,9 @@ void testMultiThread() { "--type", "RATIS" ); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated()); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); - Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); } 
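The TestOzoneContainer hunks above replace the JUnit 4 @Rule TemporaryFolder with injected @TempDir parameters. A minimal sketch of the pattern, with hypothetical names; note that getting a separate directory per parameter, as those hunks rely on, requires JUnit 5.8 or newer:

    import java.io.File;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    import static org.junit.jupiter.api.Assertions.assertTrue;

    class TempDirSketch {

      // On JUnit 5.8+, each annotated parameter receives its own fresh
      // directory, which Jupiter deletes again after the test.
      @Test
      void injectsFreshDirectories(@TempDir File metaDir,
          @TempDir File dataDir) {
        assertTrue(metaDir.isDirectory());
        assertTrue(dataDir.isDirectory());
        // A real test would now wire these in, e.g.
        // conf.set(OZONE_METADATA_DIRS, metaDir.getPath());
      }
    }
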
@Test @@ -152,9 +151,9 @@ void testRatisKey() { "--type", "RATIS" ); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated()); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); - Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); } @Test @@ -172,10 +171,10 @@ void testKeyLargerThan2GB() { "--validate-writes" ); - Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); - Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); - Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); - Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount()); + assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount()); } @Test @@ -193,10 +192,10 @@ void testZeroSizeKey() { "--validate-writes" ); - Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); - Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); - Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); - Assert.assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount()); + assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(1, randomKeyGenerator.getSuccessfulValidationCount()); } @Test @@ -212,8 +211,8 @@ void testThreadPoolSize() { "--type", "RATIS" ); - Assert.assertEquals(10, randomKeyGenerator.getThreadPoolSize()); - Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(10, randomKeyGenerator.getThreadPoolSize()); + assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); } @Test @@ -230,10 +229,10 @@ void cleanObjectsTest() { "--clean-objects" ); - Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); - Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); - Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCleaned()); - Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCleaned()); + assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(2, randomKeyGenerator.getNumberOfVolumesCleaned()); + assertEquals(10, randomKeyGenerator.getNumberOfBucketsCleaned()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index f2d6a0d80d2..d438ad09fc3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -49,7 +49,6 @@ import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.grpc.server.GrpcLogAppender; import org.apache.ratis.server.leader.FollowerInfo; -import org.junit.Assert; 
import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -58,6 +57,11 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCM_DUMMY_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.TestOzoneManagerHA.createKey; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Test for OM bootstrap process. @@ -123,14 +127,14 @@ private void assertNewOMExistsInPeerList(String nodeId) throws Exception { // Check that new peer exists in all OMs peers list and also in their Ratis // server's peer list for (OzoneManager om : cluster.getOzoneManagersList()) { - Assert.assertTrue("New OM node " + nodeId + " not present in Peer list " + - "of OM " + om.getOMNodeId(), om.doesPeerExist(nodeId)); - Assert.assertTrue("New OM node " + nodeId + " not present in Peer list " + - "of OM " + om.getOMNodeId() + " RatisServer", - om.getOmRatisServer().doesPeerExist(nodeId)); - Assert.assertTrue("New OM node " + nodeId + " not present in " + - "OM " + om.getOMNodeId() + "RatisServer's RaftConf", - om.getOmRatisServer().getCurrentPeersFromRaftConf().contains(nodeId)); + assertTrue(om.doesPeerExist(nodeId), "New OM node " + nodeId + + " not present in Peer list of OM " + om.getOMNodeId()); + assertTrue(om.getOmRatisServer().doesPeerExist(nodeId), "New OM node " + nodeId + + " not present in Peer list of OM " + om.getOMNodeId() + " RatisServer"); + assertTrue( + om.getOmRatisServer().getCurrentPeersFromRaftConf().contains(nodeId), + "New OM node " + nodeId + " not present in " + "OM " + + om.getOMNodeId() + "RatisServer's RaftConf"); } OzoneManager newOM = cluster.getOzoneManager(nodeId); @@ -140,8 +144,7 @@ private void assertNewOMExistsInPeerList(String nodeId) throws Exception { // Check Ratis Dir for log files File[] logFiles = getRatisLogFiles(newOM); - Assert.assertTrue("There are no ratis logs in new OM ", - logFiles.length > 0); + assertTrue(logFiles.length > 0, "There are no ratis logs in new OM "); } private File[] getRatisLogFiles(OzoneManager om) { @@ -194,8 +197,9 @@ public void testBootstrap() throws Exception { GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, 30000); OzoneManager omLeader = cluster.getOMLeader(); - Assert.assertTrue("New Bootstrapped OM not elected Leader even though " + - "other OMs are down", newOMNodeIds.contains(omLeader.getOMNodeId())); + assertTrue(newOMNodeIds.contains(omLeader.getOMNodeId()), + "New Bootstrapped OM not elected Leader even though" + + " other OMs are down"); // Perform some read and write operations with new OM leader IOUtils.closeQuietly(client); @@ -206,7 +210,7 @@ public void testBootstrap() throws Exception { OzoneBucket bucket = volume.getBucket(BUCKET_NAME); String key = createKey(bucket); - Assert.assertNotNull(bucket.getKey(key)); + assertNotNull(bucket.getKey(key)); } /** @@ -236,16 +240,16 @@ public void testBootstrapWithoutConfigUpdate() throws Exception { String newNodeId = "omNode-bootstrap-1"; try { cluster.bootstrapOzoneManager(newNodeId, false, false); - Assert.fail("Bootstrap should have failed as configs are not updated on" + + fail("Bootstrap should have failed as configs are not updated on" + " all OMs."); } catch 
(Exception e) { - Assert.assertEquals(OmUtils.getOMAddressListPrintString( + assertEquals(OmUtils.getOMAddressListPrintString( Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or" + " have incorrect information of the bootstrapping OM. Update their " + "ozone-site.xml before proceeding.", e.getMessage()); - Assert.assertTrue(omLog.getOutput().contains("Remote OM config check " + + assertTrue(omLog.getOutput().contains("Remote OM config check " + "failed on OM " + existingOMNodeId)); - Assert.assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId + + assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId + " - System Exit")); } @@ -264,14 +268,14 @@ public void testBootstrapWithoutConfigUpdate() throws Exception { try { cluster.bootstrapOzoneManager(newNodeId, false, true); } catch (IOException e) { - Assert.assertTrue(omLog.getOutput().contains("Couldn't add OM " + + assertTrue(omLog.getOutput().contains("Couldn't add OM " + newNodeId + " to peer list.")); - Assert.assertTrue(miniOzoneClusterLog.getOutput().contains( + assertTrue(miniOzoneClusterLog.getOutput().contains( existingOMNodeId + " - System Exit: There is no OM configuration " + "for node ID " + newNodeId + " in ozone-site.xml.")); // Verify that the existing OM has stopped. - Assert.assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning()); + assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning()); } } @@ -310,18 +314,18 @@ public void testForceBootstrap() throws Exception { String newNodeId = "omNode-bootstrap-1"; try { cluster.bootstrapOzoneManager(newNodeId, true, false); - Assert.fail("Bootstrap should have failed as configs are not updated on" + + fail("Bootstrap should have failed as configs are not updated on" + " all OMs."); } catch (IOException e) { - Assert.assertEquals(OmUtils.getOMAddressListPrintString( + assertEquals(OmUtils.getOMAddressListPrintString( Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " + "have incorrect information of the bootstrapping OM. 
Update their " + "ozone-site.xml before proceeding.", e.getMessage()); - Assert.assertTrue(omLog.getOutput().contains("Remote OM " + downOMNodeId + + assertTrue(omLog.getOutput().contains("Remote OM " + downOMNodeId + " configuration returned null")); - Assert.assertTrue(omLog.getOutput().contains("Remote OM config check " + + assertTrue(omLog.getOutput().contains("Remote OM config check " + "failed on OM " + downOMNodeId)); - Assert.assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId + + assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId + " - System Exit")); } @@ -338,7 +342,7 @@ public void testForceBootstrap() throws Exception { OzoneManager newOM = cluster.getOzoneManager(newNodeId); // Verify that the newly bootstrapped OM is running - Assert.assertTrue(newOM.isRunning()); + assertTrue(newOM.isRunning()); } /** @@ -375,7 +379,7 @@ public void testDecommission() throws Exception { OzoneBucket bucket = volume.getBucket(BUCKET_NAME); String key = createKey(bucket); - Assert.assertNotNull(bucket.getKey(key)); + assertNotNull(bucket.getKey(key)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index d140e0aeaf6..d4f1f777877 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -87,7 +87,6 @@ import org.apache.ozone.test.GenericTestUtils; -import org.junit.Assert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; @@ -931,7 +930,7 @@ private void confirmServletLocksOutOtherHandler(BootstrapStateHandler handler, ExecutorService executorService) { Future test = checkLock(handler, executorService); // Handler should fail to take the lock because the servlet has taken it. - Assert.assertThrows(TimeoutException.class, + Assertions.assertThrows(TimeoutException.class, () -> test.get(500, TimeUnit.MILLISECONDS)); } @@ -943,7 +942,7 @@ private void confirmOtherHandlerLocksOutServlet(BootstrapStateHandler handler, handler.getBootstrapStateLock().lock()) { Future test = checkLock(servlet, executorService); // Servlet should fail to lock when other handler has taken it. 
- Assert.assertThrows(TimeoutException.class, + Assertions.assertThrows(TimeoutException.class, () -> test.get(500, TimeUnit.MILLISECONDS)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index ce89f8ffe41..74751dde6de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.apache.ozone.test.GenericTestUtils; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; -import org.junit.Assert; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterAll; @@ -60,12 +59,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; - import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; - -import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; /** @@ -136,7 +132,7 @@ public void cleanup() { fs.delete(fileStatus.getPath(), true); } } catch (IOException ex) { - fail("Failed to cleanup files."); + Assertions.fail("Failed to cleanup files."); } } @@ -205,19 +201,19 @@ public void testGetDeletedDirectoryInfo() } if (directoryObjectId == null) { - fail("directoryObjectId is null. Test case cannot proceed."); + Assertions.fail("directoryObjectId is null. Test case cannot proceed."); + } else { + // Retrieve Namespace Summary for dir1 from Recon. + ReconNamespaceSummaryManagerImpl namespaceSummaryManager = + (ReconNamespaceSummaryManagerImpl) cluster.getReconServer() + .getReconNamespaceSummaryManager(); + NSSummary summary = + namespaceSummaryManager.getNSSummary(directoryObjectId); + // Assert that the directory dir1 has 10 sub-files and size of 1000 bytes. + Assertions.assertEquals(10, summary.getNumOfFiles()); + Assertions.assertEquals(10, summary.getSizeOfFiles()); } - // Retrieve Namespace Summary for dir1 from Recon. - ReconNamespaceSummaryManagerImpl namespaceSummaryManager = - (ReconNamespaceSummaryManagerImpl) cluster.getReconServer() - .getReconNamespaceSummaryManager(); - NSSummary summary = - namespaceSummaryManager.getNSSummary(directoryObjectId); - // Assert that the directory dir1 has 10 sub-files and size of 1000 bytes. - Assert.assertEquals(10, summary.getNumOfFiles()); - Assert.assertEquals(10, summary.getSizeOfFiles()); - // Delete the entire directory dir1. fs.delete(dir1, true); syncDataFromOM(); @@ -242,7 +238,7 @@ public void testGetDeletedDirectoryInfo() KeyInsightInfoResponse entity = (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 10. - Assert.assertEquals(10, entity.getUnreplicatedDataSize()); + Assertions.assertEquals(10, entity.getUnreplicatedDataSize()); // Cleanup the tables. 
cleanupTables(); @@ -331,7 +327,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories() KeyInsightInfoResponse entity = (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 3. - Assert.assertEquals(3, entity.getUnreplicatedDataSize()); + Assertions.assertEquals(3, entity.getUnreplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -393,7 +389,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories() KeyInsightInfoResponse entity = (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 100. - Assert.assertEquals(100, entity.getUnreplicatedDataSize()); + Assertions.assertEquals(100, entity.getUnreplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -475,7 +471,7 @@ private boolean assertTableRowCount(int expectedCount, LOG.info("{} actual row count={}, expectedCount={}", table.getName(), count, expectedCount); } catch (IOException ex) { - fail("Test failed with: " + ex); + Assertions.fail("Test failed with: " + ex); } return count == expectedCount; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java index 906b2aaf702..e1d1ba31d74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java @@ -42,8 +42,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.ozone.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -57,6 +55,10 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBalancerConfigurationProto; import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests failover with SCM HA setup. 
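A reviewer note before the TestFailoverWithSCMHA hunks below: they migrate Assert.assertEquals(scmClientConfig.getRetryCount(), 15) verbatim, keeping an (actual, expected) argument order that other files in this patch (for example TestCloseContainerByPipeline above) already corrected. A hypothetical sketch of the intended ordering, including the JUnit 5 message-last convention used elsewhere in the series:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class ArgumentOrderSketch {

      void examples(int retryCount, boolean peerExists, String nodeId) {
        // Expected value first, actual second, so a failure reads
        // "expected: <15> but was: <1>".
        assertEquals(15, retryCount);

        // JUnit 4 took the failure message as the first argument;
        // JUnit 5 takes it as the last one.
        assertTrue(peerExists,
            "New OM node " + nodeId + " not present in peer list");
      }
    }
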
@@ -113,10 +115,10 @@ public void testFailover() throws Exception { scmClientConfig.setRetryCount(1); scmClientConfig.setRetryInterval(100); scmClientConfig.setMaxRetryTimeout(1500); - Assert.assertEquals(scmClientConfig.getRetryCount(), 15); + assertEquals(scmClientConfig.getRetryCount(), 15); conf.setFromObject(scmClientConfig); StorageContainerManager scm = getLeader(cluster); - Assert.assertNotNull(scm); + assertNotNull(scm); SCMBlockLocationFailoverProxyProvider failoverProxyProvider = new SCMBlockLocationFailoverProxyProvider(conf); failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId()); @@ -131,7 +133,7 @@ public void testFailover() throws Exception { .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class, conf); scmBlockLocationProtocol.getScmInfo(); - Assert.assertTrue(logCapture.getOutput() + assertTrue(logCapture.getOutput() .contains("Performing failover to suggested leader")); scm = getLeader(cluster); SCMContainerLocationFailoverProxyProvider proxyProvider = @@ -148,7 +150,7 @@ public void testFailover() throws Exception { scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, "ozone"); - Assert.assertTrue(logCapture.getOutput() + assertTrue(logCapture.getOutput() .contains("Performing failover to suggested leader")); } @@ -159,10 +161,10 @@ public void testMoveFailover() throws Exception { scmClientConfig.setRetryCount(1); scmClientConfig.setRetryInterval(100); scmClientConfig.setMaxRetryTimeout(1500); - Assert.assertEquals(scmClientConfig.getRetryCount(), 15); + assertEquals(scmClientConfig.getRetryCount(), 15); conf.setFromObject(scmClientConfig); StorageContainerManager scm = getLeader(cluster); - Assert.assertNotNull(scm); + assertNotNull(scm); final ContainerID id = getContainer(HddsProtos.LifeCycleState.CLOSED).containerID(); @@ -190,19 +192,19 @@ public void testMoveFailover() throws Exception { .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class, conf); scmBlockLocationProtocol.getScmInfo(); - Assert.assertTrue(logCapture.getOutput() + assertTrue(logCapture.getOutput() .contains("Performing failover to suggested leader")); scm = getLeader(cluster); - Assert.assertNotNull(scm); + assertNotNull(scm); //switch to the new leader successfully, new leader should //get the same inflightMove Map inflightMove = scm.getReplicationManager().getMoveScheduler().getInflightMove(); - Assert.assertTrue(inflightMove.containsKey(id)); + assertTrue(inflightMove.containsKey(id)); MoveDataNodePair mp = inflightMove.get(id); - Assert.assertTrue(dn2.equals(mp.getTgt())); - Assert.assertTrue(dn1.equals(mp.getSrc())); + assertTrue(dn2.equals(mp.getTgt())); + assertTrue(dn1.equals(mp.getSrc())); //complete move in the new leader scm.getReplicationManager().getMoveScheduler() @@ -223,17 +225,17 @@ public void testMoveFailover() throws Exception { scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, "ozone"); - Assert.assertTrue(logCapture.getOutput() + assertTrue(logCapture.getOutput() .contains("Performing failover to suggested leader")); //switch to the new leader successfully, new leader should //get the same inflightMove , which should not contains //that container. 
scm = getLeader(cluster); - Assert.assertNotNull(scm); + assertNotNull(scm); inflightMove = scm.getReplicationManager() .getMoveScheduler().getInflightMove(); - Assert.assertFalse(inflightMove.containsKey(id)); + assertFalse(inflightMove.containsKey(id)); } /** @@ -257,14 +259,14 @@ public void testContainerBalancerPersistsConfigurationInAllSCMs() conf.getObject(SCMClientConfig.class); scmClientConfig.setRetryInterval(100); scmClientConfig.setMaxRetryTimeout(1500); - Assertions.assertEquals(15, scmClientConfig.getRetryCount()); + assertEquals(15, scmClientConfig.getRetryCount()); conf.setFromObject(scmClientConfig); StorageContainerManager leader = getLeader(cluster); - Assertions.assertNotNull(leader); + assertNotNull(leader); ScmClient scmClient = new ContainerOperationClient(conf); // assert that container balancer is not running right now - Assertions.assertFalse(scmClient.getContainerBalancerStatus()); + assertFalse(scmClient.getContainerBalancerStatus()); ContainerBalancerConfiguration balancerConf = conf.getObject(ContainerBalancerConfiguration.class); ContainerBalancer containerBalancer = leader.getContainerBalancer(); @@ -278,7 +280,7 @@ public void testContainerBalancerPersistsConfigurationInAllSCMs() // assert that balancer has stopped since the cluster is already balanced GenericTestUtils.waitFor(() -> !containerBalancer.isBalancerRunning(), 10, 500); - Assertions.assertFalse(containerBalancer.isBalancerRunning()); + assertFalse(containerBalancer.isBalancerRunning()); ByteString byteString = leader.getScmMetadataStore().getStatefulServiceConfigTable().get( @@ -315,7 +317,7 @@ public void testContainerBalancerPersistsConfigurationInAllSCMs() containerBalancer.getServiceName()); ContainerBalancerConfigurationProto protobuf = ContainerBalancerConfigurationProto.parseFrom(byteString); - Assertions.assertFalse(protobuf.getShouldRun()); + assertFalse(protobuf.getShouldRun()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java index cf8730a9651..c00840c835d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.junit.jupiter.api.AfterEach; -import org.junit.Assert; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -63,6 +62,7 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test cases to verify the metrics exposed by SCMPipelineManager. 
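The TestSCMContainerPlacementPolicyMetrics hunk below rewrites assertTrue(x == 0) as assertEquals(0, x). A small hypothetical sketch, not part of the patch, of why that form is preferred:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class CounterAssertionSketch {

      // assertEquals(0, value) prints the offending value on failure
      // ("expected: <0> but was: <3>"); assertTrue(value == 0) only reports
      // that a condition evaluated to false.
      void verifyNoReplicationHappened(long totalRequest, long tryCount) {
        assertEquals(0, totalRequest);
        assertEquals(0, tryCount);
      }
    }
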
@@ -151,10 +151,10 @@ public void test() throws IOException, TimeoutException { getLongCounter("DatanodeChooseFallbackCount", metrics); // Seems no under-replicated closed containers get replicated - Assert.assertTrue(totalRequest == 0); - Assert.assertTrue(tryCount == 0); - Assert.assertTrue(sucessCount == 0); - Assert.assertTrue(compromiseCount == 0); + assertEquals(0, totalRequest); + assertEquals(0, tryCount); + assertEquals(0, sucessCount); + assertEquals(0, compromiseCount); } @AfterEach diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java index 74868bee2af..ab9b687dcec 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java @@ -48,11 +48,13 @@ import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.server.protocol.TermIndex; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.ratis.util.LifeCycle; -import org.junit.Assert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -123,7 +125,7 @@ public void shutdown() { public void testInstallSnapshot() throws Exception { // Get the leader SCM StorageContainerManager leaderSCM = getLeader(cluster); - Assert.assertNotNull(leaderSCM); + assertNotNull(leaderSCM); // Find the inactive SCM String followerId = getInactiveSCM(cluster).getSCMNodeId(); @@ -155,7 +157,7 @@ public void testInstallSnapshot() throws Exception { // made while it was inactive. 
SCMMetadataStore followerMetaStore = followerSCM.getScmMetadataStore(); for (ContainerInfo containerInfo : containers) { - Assert.assertNotNull(followerMetaStore.getContainerTable() + assertNotNull(followerMetaStore.getContainerTable() .get(containerInfo.containerID())); } } @@ -206,12 +208,11 @@ public void testInstallOldCheckpointFailure() throws Exception { } String errorMsg = "Reloading old state of SCM"; - Assert.assertTrue(logCapture.getOutput().contains(errorMsg)); - Assert.assertNull(" installed checkpoint even though checkpoint " + - "logIndex is less than it's lastAppliedIndex", newTermIndex); - Assert.assertEquals(followerTermIndex, - followerSM.getLastAppliedTermIndex()); - Assert.assertFalse(followerSM.getLifeCycleState().isPausingOrPaused()); + assertTrue(logCapture.getOutput().contains(errorMsg)); + assertNull(newTermIndex, " installed checkpoint even though checkpoint " + + "logIndex is less than it's lastAppliedIndex"); + assertEquals(followerTermIndex, followerSM.getLastAppliedTermIndex()); + assertFalse(followerSM.getLifeCycleState().isPausingOrPaused()); } @Test @@ -235,7 +236,7 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation, new SCMDBDefinition()); - Assert.assertNotNull(leaderCheckpointLocation); + assertNotNull(leaderCheckpointLocation); // Take a backup of the current DB String dbBackupName = "SCM_CHECKPOINT_BACKUP" + termIndex.getIndex() + "_" + System @@ -272,17 +273,16 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { scmhaManager.installCheckpoint(leaderCheckpointLocation, leaderCheckpointTrxnInfo); - Assert.assertTrue(logCapture.getOutput() + assertTrue(logCapture.getOutput() .contains("Failed to reload SCM state and instantiate services.")); final LifeCycle.State s = followerSM.getLifeCycleState(); - Assert.assertTrue("Unexpected lifeCycle state: " + s, - s == LifeCycle.State.NEW || s.isPausingOrPaused()); + assertTrue(s == LifeCycle.State.NEW || s.isPausingOrPaused(), "Unexpected lifeCycle state: " + s); // Verify correct reloading followerSM.setInstallingSnapshotData( new RocksDBCheckpoint(checkpointBackup.toPath()), null); followerSM.reinitialize(); - Assert.assertEquals(followerSM.getLastAppliedTermIndex(), + assertEquals(followerSM.getLastAppliedTermIndex(), leaderCheckpointTrxnInfo.getTermIndex()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index d28ef3b2703..92381829f0b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -85,7 +85,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import org.junit.Assert; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -1879,7 +1878,7 @@ public void testLinkedAndNonLinkedBucketMetaData() final ArrayList> bucketListOut = parseOutputIntoArrayList(); - Assert.assertTrue(bucketListOut.size() == 1); + assertEquals(1, bucketListOut.size()); boolean link = String.valueOf(bucketListOut.get(0).get("link")).equals("false"); assertTrue(link); @@ -1898,7 +1897,7 @@ public void testLinkedAndNonLinkedBucketMetaData() final ArrayList> bucketListLinked = 
parseOutputIntoArrayList(); - Assert.assertTrue(bucketListLinked.size() == 2); + assertEquals(2, bucketListLinked.size()); link = String.valueOf(bucketListLinked.get(1).get("link")).equals("true"); assertTrue(link); From 9bdd9e223e9fd80746ac36dbdb1ec82c62e29536 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 19 Dec 2023 11:22:58 +0100 Subject: [PATCH 04/28] HDDS-9916. Useless execution of version-info in rocksdb-checkpoint-differ (#5784) --- hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 38 ------------------- 1 file changed, 38 deletions(-) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index a54f7bb0700..6da69338308 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -99,45 +99,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - ${basedir}/src/main/resources - - ozone-version-info.properties - - false - - - ${basedir}/src/main/resources - - ozone-version-info.properties - - true - - - - org.apache.hadoop - hadoop-maven-plugins - - - version-info - generate-resources - - version-info - - - - ${basedir}/../ - - */src/main/java/**/*.java - */src/main/proto/*.proto - - - - - - com.github.spotbugs spotbugs-maven-plugin From aa36940a80c1ed6276d597cede019ae9cf98e1a1 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Tue, 19 Dec 2023 11:56:56 +0100 Subject: [PATCH 05/28] HDDS-9962. Mark TestBlockDeletion#testBlockDeletion as flaky --- .../common/statemachine/commandhandler/TestBlockDeletion.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index b2c0f47997e..744f8286e6b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -70,6 +70,7 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.GenericTestUtils.LogCapturer; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; @@ -204,6 +205,7 @@ private static Stream replicationConfigs() { @ParameterizedTest @MethodSource("replicationConfigs") + @Flaky("HDDS-9962") public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); From 3066c495987d72b4a204cefd2904b2ac73924cc8 Mon Sep 17 00:00:00 2001 From: VarshaRavi <30603028+VarshaRaviCV@users.noreply.github.com> Date: Tue, 19 Dec 2023 17:29:18 +0530 Subject: [PATCH 06/28] HDDS-9776. 
Migrate simple client integration tests to JUnit5 (#5819) --- .../ozone/client/TestOzoneClientFactory.java | 10 +- .../rpc/AbstractTestECKeyOutputStream.java | 76 ++--- .../client/rpc/Test2WayCommitInRatis.java | 37 +-- .../hadoop/ozone/client/rpc/TestBCSID.java | 40 +-- .../client/rpc/TestBlockDataStreamOutput.java | 42 +-- .../TestCloseContainerHandlingByClient.java | 70 ++--- .../rpc/TestContainerReplicationEndToEnd.java | 28 +- .../client/rpc/TestContainerStateMachine.java | 50 ++- .../TestContainerStateMachineFailures.java | 263 ++++++++-------- .../TestContainerStateMachineFlushDelay.java | 41 +-- .../rpc/TestContainerStateMachineStream.java | 33 +- .../rpc/TestDeleteWithInAdequateDN.java | 68 ++-- .../rpc/TestDiscardPreallocatedBlocks.java | 56 ++-- .../client/rpc/TestECKeyOutputStream.java | 4 +- .../TestECKeyOutputStreamWithZeroCopy.java | 4 +- ...TestFailureHandlingByClientFlushDelay.java | 38 +-- .../rpc/TestHybridPipelineOnDatanode.java | 53 ++-- .../TestMultiBlockWritesWithDnFailures.java | 36 +-- ...TestOzoneClientMultipartUploadWithFSO.java | 297 +++++++++--------- ...oneClientRetriesOnExceptionFlushDelay.java | 39 +-- .../TestOzoneClientRetriesOnExceptions.java | 106 +++---- .../rpc/TestOzoneRpcClientForAclAuditLog.java | 28 +- .../rpc/TestOzoneRpcClientWithRatis.java | 14 +- .../rpc/TestValidateBCSIDOnRestart.java | 105 ++++--- 24 files changed, 721 insertions(+), 817 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index 5dc7e5f5e98..70ccf289453 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -20,8 +20,8 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -59,14 +59,14 @@ public void testRemoteException() { public Void run() throws IOException { conf.set("ozone.security.enabled", "true"); try (OzoneClient ozoneClient = - OzoneClientFactory.getRpcClient("localhost", - Integer.parseInt(omPort), conf)) { + OzoneClientFactory.getRpcClient("localhost", + Integer.parseInt(omPort), conf)) { ozoneClient.getObjectStore().listVolumes("/"); } return null; } }); - Assert.fail("Should throw exception here"); + Assertions.fail("Should throw exception here"); } catch (IOException | InterruptedException e) { assert e instanceof AccessControlException; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java index 518893aa0a0..9691a31efb1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java @@ -47,10 +47,10 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.TestHelper; import 
org.apache.ozone.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Arrays; @@ -132,7 +132,7 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { initInputChunks(); } - @BeforeClass + @BeforeAll public static void init() throws Exception { init(false); } @@ -140,7 +140,7 @@ public static void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -152,9 +152,9 @@ public static void shutdown() { public void testCreateKeyWithECReplicationConfig() throws Exception { try (OzoneOutputStream key = TestHelper .createKey(keyString, new ECReplicationConfig(3, 2, - ECReplicationConfig.EcCodec.RS, chunkSize), inputSize, + ECReplicationConfig.EcCodec.RS, chunkSize), inputSize, objectStore, volumeName, bucketName)) { - Assert.assertTrue(key.getOutputStream() instanceof ECKeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof ECKeyOutputStream); } } @@ -163,9 +163,9 @@ public void testCreateKeyWithOutBucketDefaults() throws Exception { OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket bucket = volume.getBucket(bucketName); try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) { - Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream); - for (int i = 0; i < inputChunks.length; i++) { - out.write(inputChunks[i]); + Assertions.assertTrue(out.getOutputStream() instanceof KeyOutputStream); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); } } } @@ -184,17 +184,17 @@ public void testCreateKeyWithBucketDefaults() throws Exception { OzoneBucket bucket = volume.getBucket(myBucket); try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) { - Assert.assertTrue(out.getOutputStream() instanceof ECKeyOutputStream); - for (int i = 0; i < inputChunks.length; i++) { - out.write(inputChunks[i]); + Assertions.assertTrue(out.getOutputStream() instanceof ECKeyOutputStream); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); } } byte[] buf = new byte[chunkSize]; try (OzoneInputStream in = bucket.readKey(keyString)) { - for (int i = 0; i < inputChunks.length; i++) { + for (byte[] inputChunk : inputChunks) { int read = in.read(buf, 0, chunkSize); - Assert.assertEquals(chunkSize, read); - Assert.assertTrue(Arrays.equals(buf, inputChunks[i])); + Assertions.assertEquals(chunkSize, read); + Assertions.assertArrayEquals(buf, inputChunk); } } } @@ -236,16 +236,16 @@ public void testOverwriteRatisKeyWithECKey() throws Exception { } private void createKeyAndCheckReplicationConfig(String keyName, - OzoneBucket bucket, ReplicationConfig replicationConfig) + OzoneBucket bucket, ReplicationConfig replicationConfig) throws IOException { try (OzoneOutputStream out = bucket .createKey(keyName, inputSize, replicationConfig, new HashMap<>())) { - for (int i = 0; i < inputChunks.length; i++) { - out.write(inputChunks[i]); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); } } OzoneKeyDetails key = bucket.getKey(keyName); - Assert.assertEquals(replicationConfig, key.getReplicationConfig()); + Assertions.assertEquals(replicationConfig, key.getReplicationConfig()); } @Test @@ -255,9 +255,9 @@ public void 
testCreateRatisKeyAndWithECBucketDefaults() throws Exception { "testCreateRatisKeyAndWithECBucketDefaults", 2000, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), new HashMap<>())) { - Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream); - for (int i = 0; i < inputChunks.length; i++) { - out.write(inputChunks[i]); + Assertions.assertTrue(out.getOutputStream() instanceof KeyOutputStream); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); } } } @@ -288,14 +288,14 @@ public void test21ChunksInSingleWriteOp() throws IOException { } private void testMultipleChunksInSingleWriteOp(int offset, - int bufferChunks, int numChunks) - throws IOException { + int bufferChunks, int numChunks) + throws IOException { byte[] inputData = getInputBytes(offset, bufferChunks, numChunks); final OzoneBucket bucket = getOzoneBucket(); String keyName = - String.format("testMultipleChunksInSingleWriteOpOffset" + - "%dBufferChunks%dNumChunks", offset, bufferChunks, - numChunks); + String.format("testMultipleChunksInSingleWriteOpOffset" + + "%dBufferChunks%dNumChunks", offset, bufferChunks, + numChunks); try (OzoneOutputStream out = bucket.createKey(keyName, 4096, new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize), new HashMap<>())) { @@ -303,7 +303,7 @@ private void testMultipleChunksInSingleWriteOp(int offset, } validateContent(offset, numChunks * chunkSize, inputData, bucket, - bucket.getKey(keyName)); + bucket.getKey(keyName)); } private void testMultipleChunksInSingleWriteOp(int numChunks) @@ -344,7 +344,7 @@ public void testECContainerKeysCountAndNumContainerReplicas() .getNumberOfKeys() == 1) && (containerOperationClient .getContainerReplicas(currentKeyContainerID).size() == 5); } catch (IOException exception) { - Assert.fail("Unexpected exception " + exception); + Assertions.fail("Unexpected exception " + exception); return false; } }, 100, 10000); @@ -358,12 +358,12 @@ private void validateContent(byte[] inputData, OzoneBucket bucket, private void validateContent(int offset, int length, byte[] inputData, OzoneBucket bucket, - OzoneKey key) throws IOException { + OzoneKey key) throws IOException { try (OzoneInputStream is = bucket.readKey(key.getName())) { byte[] fileContent = new byte[length]; - Assert.assertEquals(length, is.read(fileContent)); - Assert.assertEquals(new String(Arrays.copyOfRange(inputData, offset, - offset + length), UTF_8), + Assertions.assertEquals(length, is.read(fileContent)); + Assertions.assertEquals(new String(Arrays.copyOfRange(inputData, offset, + offset + length), UTF_8), new String(fileContent, UTF_8)); } } @@ -423,7 +423,7 @@ public void testWriteShouldSucceedWhenDNKilled() throws Exception { // Check the second blockGroup pipeline to make sure that the failed // node is not selected. - Assert.assertFalse(ecOut.getStreamEntries() + Assertions.assertFalse(ecOut.getStreamEntries() .get(1).getPipeline().getNodes().contains(nodeToKill)); } @@ -432,8 +432,8 @@ public void testWriteShouldSucceedWhenDNKilled() throws Exception { // data comes back. 
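      // With RS(3, 2) each block group has 3 data and 2 parity replicas,
      // so any 3 surviving replicas are enough to serve the reads below
      // even though one datanode was killed mid-write.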
for (int i = 0; i < 2; i++) { byte[] fileContent = new byte[inputData.length]; - Assert.assertEquals(inputData.length, is.read(fileContent)); - Assert.assertEquals(new String(inputData, UTF_8), + Assertions.assertEquals(inputData.length, is.read(fileContent)); + Assertions.assertEquals(new String(inputData, UTF_8), new String(fileContent, UTF_8)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index 3f5ede5478f..8e87f6207f4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB. - StorageContainerLocationProtocolClientSideTranslatorPB; + StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -41,31 +41,22 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.ozone.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.time.Duration; import java.util.concurrent.TimeUnit; -import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - OZONE_SCM_STALENODE_INTERVAL; + OZONE_SCM_STALENODE_INTERVAL; /** * This class tests the 2 way commit in Ratis. */ +@Timeout(300) public class Test2WayCommitInRatis { - - /** - * Set a timeout for each test. 
- */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneClient client; private ObjectStore objectStore; @@ -93,9 +84,9 @@ private void startCluster(OzoneConfiguration conf) throws Exception { // Make sure the pipeline does not get destroyed quickly conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, - 60, TimeUnit.SECONDS); + 60, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000, - TimeUnit.SECONDS); + TimeUnit.SECONDS); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -154,8 +145,8 @@ public void test2WayCommitForRetryfailure() throws Exception { HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE); XceiverClientSpi xceiverClient = clientManager .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), + Assertions.assertEquals(1, xceiverClient.getRefcount()); + Assertions.assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); Pipeline pipeline = xceiverClient.getPipeline(); XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; @@ -164,7 +155,7 @@ public void test2WayCommitForRetryfailure() throws Exception { container1.getContainerInfo().getContainerID(), xceiverClient.getPipeline())); reply.getResponse().get(); - Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); + Assertions.assertEquals(3, ratisClient.getCommitInfoMap().size()); // wait for the container to be created on all the nodes xceiverClient.watchForCommit(reply.getLogIndex()); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { @@ -181,10 +172,10 @@ public void test2WayCommitForRetryfailure() throws Exception { xceiverClient.watchForCommit(reply.getLogIndex()); // commitInfo Map will be reduced to 2 here - Assert.assertEquals(2, ratisClient.getCommitInfoMap().size()); + Assertions.assertEquals(2, ratisClient.getCommitInfoMap().size()); clientManager.releaseClient(xceiverClient, false); - Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed")); - Assert + Assertions.assertTrue(logCapturer.getOutput().contains("3 way commit failed")); + Assertions .assertTrue(logCapturer.getOutput().contains("Committed by majority")); logCapturer.stopCapturing(); shutdown(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index bbdc9d27d78..1917cf68fd5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.client.rpc; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -32,10 +33,11 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.ozone.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; 
+import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.File; import java.io.IOException; @@ -52,22 +54,11 @@ .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; - /** * Tests the validity BCSID of a container. */ +@Timeout(300) public class TestBCSID { - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private static OzoneConfiguration conf = new OzoneConfiguration(); private static MiniOzoneCluster cluster; private static OzoneClient client; @@ -80,7 +71,7 @@ public class TestBCSID { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { String path = GenericTestUtils .getTempPath(TestBCSID.class.getSimpleName()); @@ -110,7 +101,7 @@ public static void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -122,8 +113,9 @@ public static void shutdown() { public void testBCSID() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); key.write("ratis".getBytes(UTF_8)); key.close(); @@ -138,7 +130,7 @@ public void testBCSID() throws Exception { OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); List keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly(); - Assert.assertEquals(1, keyLocationInfos.size()); + Assertions.assertEquals(1, keyLocationInfos.size()); OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(0); long blockCommitSequenceId = @@ -146,16 +138,16 @@ public void testBCSID() throws Exception { .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()) .getContainerReport().getBlockCommitSequenceId(); - Assert.assertTrue(blockCommitSequenceId > 0); + Assertions.assertTrue(blockCommitSequenceId > 0); // make sure the persisted block Id in OM is same as that seen in the // container report to be reported to SCM. - Assert.assertEquals(blockCommitSequenceId, + Assertions.assertEquals(blockCommitSequenceId, omKeyLocationInfo.getBlockCommitSequenceId()); // verify that on restarting the datanode, it reloads the BCSID correctly. 
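    // The BCSID is persisted with the container metadata on disk, so the
    // restarted datanode should come back reporting the same value rather
    // than recomputing it.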
cluster.restartHddsDatanode(0, true); - Assert.assertEquals(blockCommitSequenceId, + Assertions.assertEquals(blockCommitSequenceId, cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 4d3d1c2c32c..a77218d8915 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -37,14 +37,11 @@ import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.TestHelper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.nio.ByteBuffer; @@ -57,13 +54,8 @@ /** * Tests BlockDataStreamOutput class. */ +@Timeout(300) public class TestBlockDataStreamOutput { - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); private static MiniOzoneCluster cluster; private static OzoneConfiguration conf = new OzoneConfiguration(); private static OzoneClient client; @@ -83,7 +75,7 @@ public class TestBlockDataStreamOutput { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { chunkSize = 100; flushSize = 2 * chunkSize; @@ -128,7 +120,7 @@ static String getKeyName() { /** * Shutdown MiniDFSCluster. 
   */
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     IOUtils.closeQuietly(client);
     if (cluster != null) {
@@ -184,7 +176,7 @@ private void testWriteWithFailure(int dataLength) throws Exception {
         (KeyDataStreamOutput) key.getByteBufStreamOutput();
     ByteBufferStreamOutput stream =
         keyDataStreamOutput.getStreamEntries().get(0).getByteBufStreamOutput();
-    Assert.assertTrue(stream instanceof BlockDataStreamOutput);
+    Assertions.assertTrue(stream instanceof BlockDataStreamOutput);
     TestHelper.waitForContainerClose(key, cluster);
     key.write(b);
     key.close();
@@ -208,21 +200,21 @@ public void testPutBlockAtBoundary() throws Exception {
         ContainerTestHelper.getFixedLengthString(keyString, dataLength)
             .getBytes(UTF_8);
     key.write(ByteBuffer.wrap(data));
-    Assert.assertTrue(
+    Assertions.assertTrue(
         metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
             <= pendingPutBlockCount + 1);
     key.close();
     // Since data length is 500, first putBlock will be at 400 (flush boundary)
     // and the other at 500
-    Assert.assertTrue(
-        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
-            == putBlockCount + 2);
+    Assertions.assertEquals(
+        putBlockCount + 2,
+        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
     validateData(keyName, data);
   }

   static OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
-      long size) throws Exception {
+      long size) throws Exception {
     return TestHelper.createStreamKey(
         keyName, type, size, objectStore, volumeName, bucketName);
   }
@@ -245,10 +237,10 @@ public void testMinPacketSize() throws Exception {
         .getBytes(UTF_8);
     key.write(ByteBuffer.wrap(data));
     // minPacketSize = 100, so first write of 50 won't trigger a writeChunk
-    Assert.assertEquals(writeChunkCount,
+    Assertions.assertEquals(writeChunkCount,
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
     key.write(ByteBuffer.wrap(data));
-    Assert.assertEquals(writeChunkCount + 1,
+    Assertions.assertEquals(writeChunkCount + 1,
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
     // now close the stream, it will update the key length.
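    // close() also sends the last PutBlock for the stream; that final commit
    // is what updates the key length recorded by the Ozone Manager.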
    key.close();
@@ -271,7 +263,7 @@ public void testTotalAckDataLength() throws Exception {
        keyDataStreamOutput.getStreamEntries().get(0);
    key.write(ByteBuffer.wrap(data));
    key.close();
-    Assert.assertEquals(dataLength, stream.getTotalAckDataLength());
+    Assertions.assertEquals(dataLength, stream.getTotalAckDataLength());
  }
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index c35abee17cf..63c9b275b38 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -53,26 +53,18 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;

 /**
  * Tests Close Container Exception handling by Ozone Client.
  */
+@Timeout(300)
 public class TestCloseContainerHandlingByClient {

-  /**
-   * Set a timeout for each test.
-   */
-  @Rule
-  public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf = new OzoneConfiguration();
   private static OzoneClient client;
@@ -90,7 +82,7 @@
    *
    * @throws IOException
    */
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     chunkSize = (int) OzoneConsts.MB;
     blockSize = 4 * chunkSize;
@@ -123,7 +115,7 @@ private String getKeyName() {
   /**
    * Shutdown MiniDFSCluster.
   */
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     IOUtils.closeQuietly(client);
     if (cluster != null) {
@@ -141,7 +133,7 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
         .getBytes(UTF_8);
     key.write(data);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+    Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     // get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName)
@@ -156,7 +148,7 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
     // read the key from OM again and match the length. The length will still
     // be equal to the original data size.
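    // (the same payload was written once before and once after the container
    // close, so the committed key length checked below is 2 * data.length)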
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
+    Assertions.assertEquals(2 * data.length, keyInfo.getDataSize());
    // Written the same data twice
    String dataString = new String(data, UTF_8);
@@ -174,7 +166,7 @@ public void testBlockWritesCloseConsistency() throws Exception {
        .getBytes(UTF_8);
    key.write(data);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+    Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    // get the name of a valid container
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
        .setBucketName(bucketName)
@@ -187,7 +179,7 @@ public void testBlockWritesCloseConsistency() throws Exception {
    // read the key from OM again and match the length. The length will still
    // be equal to the original data size.
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    Assert.assertEquals(data.length, keyInfo.getDataSize());
+    Assertions.assertEquals(data.length, keyInfo.getDataSize());
    validateData(keyName, data);
  }
@@ -200,15 +192,15 @@ public void testMultiBlockWrites() throws Exception {
    KeyOutputStream keyOutputStream =
        (KeyOutputStream) key.getOutputStream();
    // With the initial size provided, it should have preallocated 3 blocks
-    Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
+    Assertions.assertEquals(3, keyOutputStream.getStreamEntries().size());
    // write data more than 1 block
    byte[] data =
        ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
            .getBytes(UTF_8);
-    Assert.assertEquals(data.length, 3 * blockSize);
+    Assertions.assertEquals(3 * blockSize, data.length);
    key.write(data);
-    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+    Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    // get the name of a valid container
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
        .setBucketName(bucketName)
@@ -232,10 +224,10 @@ public void testMultiBlockWrites() throws Exception {
    // closeContainerException and remaining data in the chunkOutputStream
    // buffer will be copied into a different allocated block and will be
    // committed.
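    // i.e. the three preallocated blocks plus the one allocated after the
    // container close, each committed at exactly blockSize.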
- Assert.assertEquals(4, keyLocationInfos.size()); - Assert.assertEquals(4 * blockSize, keyInfo.getDataSize()); + Assertions.assertEquals(4, keyLocationInfos.size()); + Assertions.assertEquals(4 * blockSize, keyInfo.getDataSize()); for (OmKeyLocationInfo locationInfo : keyLocationInfos) { - Assert.assertEquals(blockSize, locationInfo.getLength()); + Assertions.assertEquals(blockSize, locationInfo.getLength()); } } @@ -247,9 +239,9 @@ public void testMultiBlockWrites2() throws Exception { KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); // With the initial size provided, it should have pre allocated 2 blocks - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); + Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size()); String dataString = ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize)); byte[] data = dataString.getBytes(UTF_8); @@ -289,7 +281,7 @@ public void testMultiBlockWrites2() throws Exception { String dataCommitted = dataString.concat(dataString2).concat(dataString3).concat(dataString4); - Assert.assertEquals(dataCommitted.getBytes(UTF_8).length, + Assertions.assertEquals(dataCommitted.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, dataCommitted.getBytes(UTF_8)); } @@ -303,16 +295,16 @@ public void testMultiBlockWrites3() throws Exception { KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); // With the initial size provided, it should have preallocated 4 blocks - Assert.assertEquals(4, keyOutputStream.getStreamEntries().size()); + Assertions.assertEquals(4, keyOutputStream.getStreamEntries().size()); // write data 4 blocks and one more chunk byte[] writtenData = ContainerTestHelper.getFixedLengthString(keyString, keyLen) .getBytes(UTF_8); byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize); - Assert.assertEquals(data.length, 3 * blockSize + chunkSize); + Assertions.assertEquals(data.length, 3 * blockSize + chunkSize); key.write(data); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); //get the name of a valid container OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) @@ -337,7 +329,7 @@ public void testMultiBlockWrites3() throws Exception { try (OzoneInputStream inputStream = bucket.readKey(keyName)) { inputStream.read(readData); } - Assert.assertArrayEquals(writtenData, readData); + Assertions.assertArrayEquals(writtenData, readData); // Though we have written only block initially, the close will hit // closeContainerException and remaining data in the chunkOutputStream @@ -347,7 +339,7 @@ public void testMultiBlockWrites3() throws Exception { for (OmKeyLocationInfo locationInfo : keyLocationInfos) { length += locationInfo.getLength(); } - Assert.assertEquals(4 * blockSize, length); + Assertions.assertEquals(4 * blockSize, length); } private void waitForContainerClose(OzoneOutputStream outputStream) @@ -357,7 +349,7 @@ private void waitForContainerClose(OzoneOutputStream outputStream) } private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { + long size) throws Exception { return TestHelper .createKey(keyName, type, size, objectStore, volumeName, bucketName); } @@ -383,7 +375,7 @@ public void 
testBlockWriteViaRatis() throws Exception { .setKeyName(keyName) .build(); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); waitForContainerClose(key); // Again Write the Data. This will throw an exception which will be handled // and new blocks will be allocated @@ -395,7 +387,7 @@ public void testBlockWriteViaRatis() throws Exception { OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); String dataString = new String(data, UTF_8); dataString = dataString.concat(dataString); - Assert.assertEquals(2 * data.length, keyInfo.getDataSize()); + Assertions.assertEquals(2 * data.length, keyInfo.getDataSize()); validateData(keyName, dataString.getBytes(UTF_8)); } @@ -409,7 +401,7 @@ public void testBlockWrites() throws Exception { .getBytes(UTF_8); key.write(data1); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); //get the name of a valid container OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) @@ -427,7 +419,7 @@ public void testBlockWrites() throws Exception { // read the key from OM again and match the length.The length will still // be the equal to the original data size. OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals((long) 5 * chunkSize, keyInfo.getDataSize()); + Assertions.assertEquals((long) 5 * chunkSize, keyInfo.getDataSize()); // Written the same data twice String dataString = new String(data1, UTF_8); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index 96f5ac586ca..9a351e77e9c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.client.rpc; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -43,10 +44,10 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.ozone.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.slf4j.LoggerFactory; import java.io.File; @@ -84,7 +85,7 @@ public class TestContainerReplicationEndToEnd { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); path = GenericTestUtils @@ -132,7 +133,7 @@ public static void init() throws Exception { /** * Shutdown MiniDFSCluster. 
*/ - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (xceiverClientManager != null) { @@ -151,8 +152,9 @@ public void testContainerReplication() throws Exception { String keyName = "testContainerReplication"; OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(keyName, 0, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); + .createKey(keyName, 0, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>()); byte[] testData = "ratis".getBytes(UTF_8); // First write and flush creates a container in the datanode key.write(testData); @@ -161,7 +163,7 @@ public void testContainerReplication() throws Exception { KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + Assertions.assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); long containerID = omKeyLocationInfo.getContainerID(); PipelineID pipelineID = @@ -203,9 +205,9 @@ public void testContainerReplication() throws Exception { } // wait for container to move to closed state in SCM Thread.sleep(2 * containerReportInterval); - Assert.assertTrue( + Assertions.assertSame( cluster.getStorageContainerManager().getContainerInfo(containerID) - .getState() == HddsProtos.LifeCycleState.CLOSED); + .getState(), HddsProtos.LifeCycleState.CLOSED); // shutdown the replica node cluster.shutdownHddsDatanode(oldReplicaNode); // now the container is under replicated and will be moved to a different dn @@ -219,14 +221,14 @@ public void testContainerReplication() throws Exception { } } - Assert.assertNotNull(dnService); + Assertions.assertNotNull(dnService); final HddsDatanodeService newReplicaNode = dnService; // wait for the container to get replicated GenericTestUtils.waitFor(() -> { return newReplicaNode.getDatanodeStateMachine().getContainer() .getContainerSet().getContainer(containerID) != null; }, 500, 100000); - Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer() + Assertions.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer() .getContainerSet().getContainer(containerID).getContainerData() .getBlockCommitSequenceId() > 0); // wait for SCM to update the replica Map diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 7c0fcd43722..1050fdd7f2b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -56,26 +57,18 @@ import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import 
org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Tests the containerStateMachine failure handling. */ +@Timeout(300) public class TestContainerStateMachine { - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; @@ -89,7 +82,7 @@ public class TestContainerStateMachine { * * @throws IOException */ - @Before + @BeforeEach public void setup() throws Exception { path = GenericTestUtils .getTempPath(TestContainerStateMachine.class.getSimpleName()); @@ -137,7 +130,7 @@ public void setup() throws Exception { /** * Shutdown MiniDFSCluster. */ - @After + @AfterEach public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -149,8 +142,9 @@ public void shutdown() { public void testContainerStateMachineFailures() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write("ratis".getBytes(UTF_8)); key.flush(); @@ -162,7 +156,7 @@ public void testContainerStateMachineFailures() throws Exception { List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + Assertions.assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); // delete the container dir @@ -174,7 +168,7 @@ public void testContainerStateMachineFailures() throws Exception { key.close(); // Make sure the container is marked unhealthy - Assert.assertEquals( + Assertions.assertEquals( ContainerProtos.ContainerDataProto.State.UNHEALTHY, cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() .getContainer().getContainerSet() @@ -189,14 +183,15 @@ public void testRatisSnapshotRetention() throws Exception { (ContainerStateMachine) TestHelper.getStateMachine(cluster); SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); - Assert.assertNull(StatemachineImplTestUtil.findLatestSnapshot(storage)); + Assertions.assertNull(StatemachineImplTestUtil.findLatestSnapshot(storage)); // Write 10 keys. Num snapshots should be equal to config value. 
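    // Each create/write/close below runs through the ContainerStateMachine as
    // Ratis transactions, so snapshots keep being taken as the log grows; only
    // getNumSnapshotsRetained() of them should survive on disk (asserted below
    // with a tolerance of one).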
for (int i = 1; i <= 10; i++) { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(("ratis" + i), 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + .createKey(("ratis" + i), 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write(("ratis" + i).getBytes(UTF_8)); key.flush(); @@ -212,15 +207,16 @@ public void testRatisSnapshotRetention() throws Exception { storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); Path parentPath = getSnapshotPath(storage); int numSnapshots = parentPath.getParent().toFile().listFiles().length; - Assert.assertTrue(Math.abs(ratisServerConfiguration + Assertions.assertTrue(Math.abs(ratisServerConfiguration .getNumSnapshotsRetained() - numSnapshots) <= 1); // Write 10 more keys. Num Snapshots should remain the same. for (int i = 11; i <= 20; i++) { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(("ratis" + i), 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + .createKey(("ratis" + i), 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write(("ratis" + i).getBytes(UTF_8)); key.flush(); @@ -232,7 +228,7 @@ public void testRatisSnapshotRetention() throws Exception { storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); parentPath = getSnapshotPath(storage); numSnapshots = parentPath.getParent().toFile().listFiles().length; - Assert.assertTrue(Math.abs(ratisServerConfiguration + Assertions.assertTrue(Math.abs(ratisServerConfiguration .getNumSnapshotsRetained() - numSnapshots) <= 1); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 55e16989a88..eb84e67398f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -35,6 +35,7 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; @@ -86,14 +87,14 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.hamcrest.core.Is.is; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; import static 
org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.hamcrest.MatcherAssert.assertThat; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.exceptions.StateMachineException; @@ -207,8 +208,9 @@ public void testContainerStateMachineCloseOnMissingPipeline() OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("testQuasiClosed1", 1024, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); + .createKey("testQuasiClosed1", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>()); key.write("ratis".getBytes(UTF_8)); key.flush(); @@ -250,9 +252,9 @@ public void testContainerStateMachineCloseOnMissingPipeline() for (HddsDatanodeService dn : datanodeSet) { LambdaTestUtils.await(20000, 1000, () -> (dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(containerID) - .getContainerState().equals(QUASI_CLOSED))); + .getContainer().getContainerSet() + .getContainer(containerID) + .getContainerState().equals(QUASI_CLOSED))); } key.close(); } @@ -260,27 +262,29 @@ public void testContainerStateMachineCloseOnMissingPipeline() @Test public void testContainerStateMachineFailures() throws Exception { OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor( + ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); byte[] testData = "ratis".getBytes(UTF_8); // First write and flush creates a container in the datanode key.write(testData); key.flush(); key.write(testData); KeyOutputStream groupOutputStream = - (KeyOutputStream) key.getOutputStream(); + (KeyOutputStream) key.getOutputStream(); List locationInfoList = - groupOutputStream.getLocationInfoList(); + groupOutputStream.getLocationInfoList(); assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, - cluster); + cluster); // delete the container dir FileUtil.fullyDelete(new File(dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()). - getContainerData().getContainerPath())); + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()). + getContainerData().getContainerPath())); try { // there is only 1 datanode in the pipeline, the pipeline will be closed // and allocation to new pipeline will fail as there is no other dn in @@ -291,24 +295,22 @@ public void testContainerStateMachineFailures() throws Exception { long containerID = omKeyLocationInfo.getContainerID(); // Make sure the container is marked unhealthy - assertTrue( - dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); + assertSame(dn.getDatanodeStateMachine() + .getContainer().getContainerSet() + .getContainer(containerID) + .getContainerState(), UNHEALTHY); OzoneContainer ozoneContainer; // restart the hdds datanode, container should not in the regular set OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) - + UUID.randomUUID(); + DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + + UUID.randomUUID(); config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); ozoneContainer = cluster.getHddsDatanodes().get(index) - .getDatanodeStateMachine().getContainer(); + .getDatanodeStateMachine().getContainer(); assertNull(ozoneContainer.getContainerSet(). getContainer(containerID)); } @@ -316,29 +318,31 @@ public void testContainerStateMachineFailures() throws Exception { @Test public void testUnhealthyContainer() throws Exception { OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor( + ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write("ratis".getBytes(UTF_8)); key.flush(); key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key - .getOutputStream(); + .getOutputStream(); List locationInfoList = - groupOutputStream.getLocationInfoList(); + groupOutputStream.getLocationInfoList(); assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, - cluster); + cluster); ContainerData containerData = - dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); + dn.getDatanodeStateMachine() + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()) + .getContainerData(); assertTrue(containerData instanceof KeyValueContainerData); KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) containerData; + (KeyValueContainerData) containerData; // delete the container db file FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath())); try { @@ -352,23 +356,21 @@ public void testUnhealthyContainer() throws Exception { long containerID = omKeyLocationInfo.getContainerID(); // Make sure the container is marked unhealthy - assertTrue( - dn.getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); + assertSame(dn.getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(containerID) + .getContainerState(), UNHEALTHY); // Check metadata in the .container file File containerFile = new File(keyValueContainerData.getMetadataPath(), - containerID + OzoneConsts.CONTAINER_EXTENSION); + containerID + OzoneConsts.CONTAINER_EXTENSION); keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); + .readContainerFile(containerFile); + assertEquals(keyValueContainerData.getState(), UNHEALTHY); OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) - + UUID.randomUUID(); + DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + + UUID.randomUUID(); config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); // restart the hdds datanode and see if the container is listed in the @@ -376,21 +378,21 @@ public void testUnhealthyContainer() throws Exception { cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); // make sure the container state is still marked unhealthy after restart keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); + .readContainerFile(containerFile); + assertEquals(keyValueContainerData.getState(), UNHEALTHY); OzoneContainer ozoneContainer; HddsDatanodeService dnService = cluster.getHddsDatanodes().get(index); ozoneContainer = dnService - .getDatanodeStateMachine().getContainer(); + .getDatanodeStateMachine().getContainer(); HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer - .getDispatcher(); + .getDispatcher(); ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); + ContainerProtos.ContainerCommandRequestProto.newBuilder(); request.setCmdType(ContainerProtos.Type.CloseContainer); request.setContainerID(containerID); request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); + ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); request.setDatanodeUuid(dnService.getDatanodeDetails().getUuidString()); assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY, dispatcher.dispatch(request.build(), null) @@ -401,35 +403,37 @@ public void testUnhealthyContainer() throws Exception { @Flaky("HDDS-6935") public void testApplyTransactionFailure() throws Exception { OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor( + ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write("ratis".getBytes(UTF_8)); key.flush(); key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key. - getOutputStream(); + getOutputStream(); List locationInfoList = - groupOutputStream.getLocationInfoList(); + groupOutputStream.getLocationInfoList(); assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, - cluster); + cluster); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); ContainerData containerData = dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()) + .getContainerData(); assertTrue(containerData instanceof KeyValueContainerData); KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) containerData; + (KeyValueContainerData) containerData; key.close(); ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(cluster. 
getHddsDatanodes().get(index), omKeyLocationInfo.getPipeline()); SimpleStateMachineStorage storage = - (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); + (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); stateMachine.takeSnapshot(); final FileInfo snapshot = getSnapshotFileInfo(storage); final Path parentPath = snapshot.getPath(); @@ -441,16 +445,16 @@ public void testApplyTransactionFailure() throws Exception { // delete the container db file FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath())); Pipeline pipeline = cluster.getStorageContainerLocationClient() - .getContainerWithPipeline(containerID).getPipeline(); + .getContainerWithPipeline(containerID).getPipeline(); XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); + xceiverClientManager.acquireClient(pipeline); ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); + ContainerProtos.ContainerCommandRequestProto.newBuilder(); request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); request.setCmdType(ContainerProtos.Type.CloseContainer); request.setContainerID(containerID); request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); + ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); // close container transaction will fail over Ratis and will initiate // a pipeline close action @@ -463,10 +467,9 @@ public void testApplyTransactionFailure() throws Exception { xceiverClientManager.releaseClient(xceiverClient, false); } // Make sure the container is marked unhealthy - assertTrue(dn.getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); + assertSame(dn.getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(containerID) + .getContainerState(), UNHEALTHY); try { // try to take a new snapshot, ideally it should just fail stateMachine.takeSnapshot(); @@ -478,12 +481,12 @@ public void testApplyTransactionFailure() throws Exception { // Make sure the latest snapshot is same as the previous one try { final FileInfo latestSnapshot = getSnapshotFileInfo(storage); - assertTrue(snapshot.getPath().equals(latestSnapshot.getPath())); + assertEquals(snapshot.getPath(), latestSnapshot.getPath()); } catch (Throwable e) { assertFalse(snapshot.getPath().toFile().exists()); } } - + // when remove pipeline, group dir including snapshot will be deleted LambdaTestUtils.await(10000, 500, () -> (!snapshot.getPath().toFile().exists())); @@ -492,33 +495,35 @@ public void testApplyTransactionFailure() throws Exception { @Test @Flaky("HDDS-6115") public void testApplyTransactionIdempotencyWithClosedContainer() - throws Exception { + throws Exception { OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor( + ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write("ratis".getBytes(UTF_8)); key.flush(); key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = - groupOutputStream.getLocationInfoList(); + 
groupOutputStream.getLocationInfoList(); assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, - cluster); + cluster); ContainerData containerData = dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()) + .getContainerData(); assertTrue(containerData instanceof KeyValueContainerData); key.close(); ContainerStateMachine stateMachine = - (ContainerStateMachine) TestHelper.getStateMachine(dn, - omKeyLocationInfo.getPipeline()); + (ContainerStateMachine) TestHelper.getStateMachine(dn, + omKeyLocationInfo.getPipeline()); SimpleStateMachineStorage storage = - (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); + (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); final FileInfo snapshot = getSnapshotFileInfo(storage); final Path parentPath = snapshot.getPath(); stateMachine.takeSnapshot(); @@ -528,27 +533,27 @@ public void testApplyTransactionIdempotencyWithClosedContainer() .getIndex(); long containerID = omKeyLocationInfo.getContainerID(); Pipeline pipeline = cluster.getStorageContainerLocationClient() - .getContainerWithPipeline(containerID).getPipeline(); + .getContainerWithPipeline(containerID).getPipeline(); XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); + xceiverClientManager.acquireClient(pipeline); ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); + ContainerProtos.ContainerCommandRequestProto.newBuilder(); request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); request.setCmdType(ContainerProtos.Type.CloseContainer); request.setContainerID(containerID); request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); + ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); try { xceiverClient.sendCommand(request.build()); } catch (IOException e) { fail("Exception should not be thrown"); } - assertTrue( - TestHelper.getDatanodeService(omKeyLocationInfo, cluster) - .getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.CLOSED); + assertSame( + TestHelper.getDatanodeService(omKeyLocationInfo, cluster) + .getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(containerID) + .getContainerState(), + ContainerProtos.ContainerDataProto.State.CLOSED); assertTrue(stateMachine.isStateMachineHealthy()); try { stateMachine.takeSnapshot(); @@ -570,7 +575,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer() } }), 1000, 30000); final FileInfo latestSnapshot = getSnapshotFileInfo(storage); - assertFalse(snapshot.getPath().equals(latestSnapshot.getPath())); + assertNotEquals(snapshot.getPath(), latestSnapshot.getPath()); } // The test injects multiple write chunk requests along with closed container @@ -581,35 +586,37 @@ public void testApplyTransactionIdempotencyWithClosedContainer() // closed here. 
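A note on the createKey changes in the hunks above: the deprecated (type, factor) overload is replaced by the ReplicationConfig variant everywhere in this patch. A minimal sketch of the pattern, with placeholder volume, bucket, and key names:

```java
// Sketch of the createKey migration applied throughout this patch.
// Volume, bucket and key names below are placeholders.
import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

class CreateKeyMigrationSketch {
  OzoneOutputStream createRatisKey(ObjectStore store) throws IOException {
    // Before: createKey("ratis", 1024, ReplicationType.RATIS,
    //     ReplicationFactor.ONE, new HashMap<>());
    return store.getVolume("vol1").getBucket("bucket1")
        .createKey("ratis", 1024,
            ReplicationConfig.fromTypeAndFactor(
                ReplicationType.RATIS, ReplicationFactor.ONE),
            new HashMap<>());
  }
}
```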
@Test public void testWriteStateMachineDataIdempotencyWithClosedContainer() - throws Exception { + throws Exception { OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis-1", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis-1", 1024, + ReplicationConfig.fromTypeAndFactor( + ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write("ratis".getBytes(UTF_8)); key.flush(); key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key - .getOutputStream(); + .getOutputStream(); List locationInfoList = - groupOutputStream.getLocationInfoList(); + groupOutputStream.getLocationInfoList(); assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, - cluster); + cluster); ContainerData containerData = - dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); + dn.getDatanodeStateMachine() + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()) + .getContainerData(); assertTrue(containerData instanceof KeyValueContainerData); key.close(); ContainerStateMachine stateMachine = - (ContainerStateMachine) TestHelper.getStateMachine(dn, - omKeyLocationInfo.getPipeline()); + (ContainerStateMachine) TestHelper.getStateMachine(dn, + omKeyLocationInfo.getPipeline()); SimpleStateMachineStorage storage = - (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); + (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); final FileInfo snapshot = getSnapshotFileInfo(storage); final Path parentPath = snapshot.getPath(); stateMachine.takeSnapshot(); @@ -619,22 +626,22 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() assertNotNull(snapshot); long containerID = omKeyLocationInfo.getContainerID(); Pipeline pipeline = cluster.getStorageContainerLocationClient() - .getContainerWithPipeline(containerID).getPipeline(); + .getContainerWithPipeline(containerID).getPipeline(); XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); + xceiverClientManager.acquireClient(pipeline); CountDownLatch latch = new CountDownLatch(100); int count = 0; AtomicInteger failCount = new AtomicInteger(0); Runnable r1 = () -> { try { ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); + ContainerProtos.ContainerCommandRequestProto.newBuilder(); request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); request.setCmdType(ContainerProtos.Type.CloseContainer); request.setContainerID(containerID); request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto. - getDefaultInstance()); + ContainerProtos.CloseContainerRequestProto. 
+ getDefaultInstance()); xceiverClient.sendCommand(request.build()); } catch (IOException e) { failCount.incrementAndGet(); @@ -647,13 +654,13 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() ContainerTestHelper.newWriteChunkRequestBuilder(pipeline, omKeyLocationInfo.getBlockID(), data.size()); writeChunkRequest.setWriteChunk(writeChunkRequest.getWriteChunkBuilder() - .setData(data)); + .setData(data)); xceiverClient.sendCommand(writeChunkRequest.build()); latch.countDown(); } catch (IOException e) { latch.countDown(); if (!(HddsClientUtils - .checkForException(e) instanceof ContainerNotOpenException)) { + .checkForException(e) instanceof ContainerNotOpenException)) { failCount.incrementAndGet(); } String message = e.getMessage(); @@ -682,14 +689,16 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() } if (failCount.get() > 0) { - fail("testWriteStateMachineDataIdempotencyWithClosedContainer failed"); + fail( + "testWriteStateMachineDataIdempotencyWithClosedContainer " + + "failed"); } - assertTrue( + assertSame( TestHelper.getDatanodeService(omKeyLocationInfo, cluster) .getDatanodeStateMachine() .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.CLOSED); + .getContainerState(), + ContainerProtos.ContainerDataProto.State.CLOSED); assertTrue(stateMachine.isStateMachineHealthy()); try { stateMachine.takeSnapshot(); @@ -698,7 +707,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() } final FileInfo latestSnapshot = getSnapshotFileInfo(storage); - assertFalse(snapshot.getPath().equals(latestSnapshot.getPath())); + assertNotEquals(snapshot.getPath(), latestSnapshot.getPath()); r2.run(); } finally { @@ -711,8 +720,9 @@ public void testContainerStateMachineSingleFailureRetry() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis1", 1024, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); + .createKey("ratis1", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>()); key.write("ratis".getBytes(UTF_8)); key.flush(); @@ -746,8 +756,9 @@ public void testContainerStateMachineDualFailureRetry() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis2", 1024, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); + .createKey("ratis2", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>()); key.write("ratis".getBytes(UTF_8)); key.flush(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index c24f209cdeb..fafba729e0d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ 
-38,14 +39,11 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.ozone.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.File; import java.io.IOException; @@ -63,14 +61,8 @@ /** * Tests the containerStateMachine failure handling by set flush delay. */ +@Timeout(300) public class TestContainerStateMachineFlushDelay { - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; @@ -89,7 +81,7 @@ public class TestContainerStateMachineFlushDelay { * * @throws IOException */ - @Before + @BeforeEach public void setup() throws Exception { chunkSize = 100; flushSize = 2 * chunkSize; @@ -140,7 +132,7 @@ public void setup() throws Exception { /** * Shutdown MiniDFSCluster. */ - @After + @AfterEach public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -152,14 +144,15 @@ public void shutdown() { public void testContainerStateMachineFailures() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // Now ozone.client.stream.buffer.flush.delay is currently enabled // by default. Here we written data(length 110) greater than chunk // Size(length 100), make sure flush will sync data. 
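Stepping back from this hunk briefly: the class-level @Timeout(300) introduced above is the JUnit 5 replacement for the deleted JUnit5AwareTimeout rule, and the same swap recurs in every test class below. A small sketch of the annotation's behavior:

```java
// Sketch of the timeout migration. @Timeout defaults to seconds and,
// placed on the class, applies to every test method in it.
import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

@Timeout(300)
class TimeoutMigrationSketch {

  @Test
  void coveredByClassTimeout() {
    // runs under the 300 second class-level limit
  }

  @Test
  @Timeout(value = 5, unit = TimeUnit.SECONDS)
  void perMethodOverride() {
    // a method-level @Timeout takes precedence for this test
  }
}
```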
byte[] data = ContainerTestHelper.getFixedLengthString(keyString, 110) - .getBytes(UTF_8); + .getBytes(UTF_8); // First write and flush creates a container in the datanode key.write(data); key.flush(); @@ -171,7 +164,7 @@ public void testContainerStateMachineFailures() throws Exception { List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + Assertions.assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); // delete the container dir @@ -183,12 +176,12 @@ public void testContainerStateMachineFailures() throws Exception { key.close(); // Make sure the container is marked unhealthy - Assert.assertTrue( + Assertions.assertSame( cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); + .getContainerState(), + ContainerProtos.ContainerDataProto.State.UNHEALTHY); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index aa755bf6939..ccb3fc992cd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -36,14 +36,11 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.ByteBuffer; @@ -60,14 +57,8 @@ /** * Tests the containerStateMachine stream handling. */ +@Timeout(300) public class TestContainerStateMachineStream { - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; @@ -85,7 +76,7 @@ public class TestContainerStateMachineStream { * * @throws IOException */ - @Before + @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); @@ -150,7 +141,7 @@ public void setup() throws Exception { /** * Shutdown MiniDFSCluster. */ - @After + @AfterEach public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -184,9 +175,9 @@ public void testContainerStateMachineForStreaming() throws Exception { long bytesUsed = dn.getDatanodeStateMachine() .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()). 
- getContainerData().getBytesUsed(); + getContainerData().getBytesUsed(); - Assert.assertTrue(bytesUsed == size); + Assertions.assertEquals(bytesUsed, size); } @@ -215,9 +206,9 @@ public void testContainerStateMachineForStreamingSmallFile() long bytesUsed = dn.getDatanodeStateMachine() .getContainer().getContainerSet() .getContainer(omKeyLocationInfo.getContainerID()). - getContainerData().getBytesUsed(); + getContainerData().getBytesUsed(); - Assert.assertTrue(bytesUsed == size); + Assertions.assertEquals(bytesUsed, size); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index 131ce705539..c9b1f7c1705 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; @@ -68,11 +69,11 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * Tests delete key operation with inadequate datanodes. 
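The TestDeleteWithInAdequateDN hunks below also migrate org.junit.Assume. Two details are worth spelling out: JUnit 5 moves the assumption message to the last parameter, and it offers no direct counterpart to Assume.assumeNotNull, which is why those call sites become hard assertions. A sketch with placeholder values:

```java
// Sketch of the Assume -> Assumptions migration used below; the variables
// stand in for the pipeline and datanode objects of the real test.
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;

class AssumeMigrationSketch {
  void preconditions(int locations, Object follower, Object leader) {
    // JUnit 4: Assume.assumeTrue("Expected exactly a single location, " +
    //     "but got: " + locations, 1 == locations);
    Assumptions.assumeTrue(1 == locations,
        "Expected exactly a single location, but got: " + locations);

    // Assume.assumeNotNull(follower, leader) has no one-to-one JUnit 5
    // equivalent; asserting instead turns a skipped test into a failure
    // when the nodes cannot be found.
    Assertions.assertNotNull(follower);
    Assertions.assertNotNull(leader);
  }
}
```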
@@ -94,7 +95,7 @@ public class TestDeleteWithInAdequateDN { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); path = GenericTestUtils @@ -111,7 +112,7 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1000, - TimeUnit.SECONDS); + TimeUnit.SECONDS); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 2000, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, @@ -133,7 +134,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - 1, TimeUnit.SECONDS); + 1, TimeUnit.SECONDS); ScmConfig scmConfig = conf.getObject(ScmConfig.class); scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1)); @@ -153,11 +154,11 @@ public static void init() throws Exception { conf.setQuietMode(false); int numOfDatanodes = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numOfDatanodes) - .setTotalPipelineNumLimit( - numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT) - .setHbInterval(100) - .build(); + .setNumDatanodes(numOfDatanodes) + .setTotalPipelineNumLimit( + numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT) + .setHbInterval(100) + .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(THREE, 60000); //the easiest way to create an open container is creating a key @@ -173,7 +174,7 @@ public static void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (xceiverClientManager != null) { @@ -199,8 +200,9 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { String keyName = "ratis"; OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(keyName, 0, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); + .createKey(keyName, 0, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>()); byte[] testData = "ratis".getBytes(UTF_8); // First write and flush creates a container in the datanode key.write(testData); @@ -209,8 +211,9 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assume.assumeTrue("Expected exactly a single location, but got: " + - locationInfoList.size(), 1 == locationInfoList.size()); + Assumptions.assumeTrue(1 == locationInfoList.size(), + "Expected exactly a single location, but got: " + + locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); long containerID = omKeyLocationInfo.getContainerID(); // A container is created on the datanode. 
Now figure out a follower node to @@ -221,7 +224,7 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { List pipelineList = cluster.getStorageContainerManager().getPipelineManager() .getPipelines(RatisReplicationConfig.getInstance(THREE)); - Assume.assumeTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT); + Assumptions.assumeTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT); Pipeline pipeline = pipelineList.get(0); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { if (RatisTestHelper.isRatisFollower(dn, pipeline)) { @@ -230,9 +233,10 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { leader = dn; } } - Assume.assumeNotNull(follower, leader); + Assertions.assertNotNull(follower); + Assertions.assertNotNull(leader); //ensure that the chosen follower is still a follower - Assume.assumeTrue(RatisTestHelper.isRatisFollower(follower, pipeline)); + Assumptions.assumeTrue(RatisTestHelper.isRatisFollower(follower, pipeline)); // shutdown the follower node cluster.shutdownHddsDatanode(follower.getDatanodeDetails()); key.write(testData); @@ -277,7 +281,7 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { keyValueHandler.getBlockManager().getBlock(container, blockID); //cluster.getOzoneManager().deleteKey(keyArgs); client.getObjectStore().getVolume(volumeName).getBucket(bucketName). - deleteKey("ratis"); + deleteKey("ratis"); // make sure the chunk was never deleted on the leader even though // deleteBlock handler is invoked try { @@ -287,12 +291,12 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { null); } } catch (IOException ioe) { - Assert.fail("Exception should not be thrown."); + Assertions.fail("Exception should not be thrown."); } long numReadStateMachineOps = stateMachine.getMetrics().getNumReadStateMachineOps(); - Assert.assertTrue( - stateMachine.getMetrics().getNumReadStateMachineFails() == 0); + Assertions.assertEquals(0, + stateMachine.getMetrics().getNumReadStateMachineFails()); stateMachine.evictStateMachineCache(); cluster.restartHddsDatanode(follower.getDatanodeDetails(), false); // wait for the raft server to come up and join the ratis ring @@ -300,10 +304,10 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { // Make sure the readStateMachine call got triggered after the follower // caught up - Assert.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps() + Assertions.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps() > numReadStateMachineOps); - Assert.assertTrue( - stateMachine.getMetrics().getNumReadStateMachineFails() == 0); + Assertions.assertEquals(0, + stateMachine.getMetrics().getNumReadStateMachineFails()); // wait for the chunk to get deleted now Thread.sleep(10000); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { @@ -317,11 +321,11 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { keyValueHandler.getChunkManager().readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo), null); } - Assert.fail("Expected exception is not thrown"); + Assertions.fail("Expected exception is not thrown"); } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof StorageContainerException); - Assert.assertTrue(((StorageContainerException) ioe).getResult() - == ContainerProtos.Result.UNABLE_TO_FIND_CHUNK); + Assertions.assertTrue(ioe instanceof StorageContainerException); + Assertions.assertSame(((StorageContainerException) ioe).getResult(), + ContainerProtos.Result.UNABLE_TO_FIND_CHUNK); } } } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java index 75089b3f55c..550c1841b3f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java @@ -49,26 +49,17 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Tests Close Container Exception handling by Ozone Client. */ +@Timeout(300) public class TestDiscardPreallocatedBlocks { - - /** - * Set a timeout for each test. - */ - - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); private static MiniOzoneCluster cluster; private static OzoneConfiguration conf = new OzoneConfiguration(); private static OzoneClient client; @@ -87,7 +78,7 @@ public class TestDiscardPreallocatedBlocks { * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { chunkSize = (int) OzoneConsts.MB; blockSize = 4 * chunkSize; @@ -119,10 +110,10 @@ private String getKeyName() { } /** - * Shutdown MiniDFSCluster. - */ + * Shutdown MiniDFSCluster. 
+ */ - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -137,14 +128,14 @@ public void testDiscardPreallocatedBlocks() throws Exception { createKey(keyName, ReplicationType.RATIS, 2 * blockSize); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); // With the initial size provided, it should have pre allocated 2 blocks - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); + Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size()); long containerID1 = keyOutputStream.getStreamEntries().get(0) - .getBlockID().getContainerID(); + .getBlockID().getContainerID(); long containerID2 = keyOutputStream.getStreamEntries().get(1) - .getBlockID().getContainerID(); - Assert.assertEquals(containerID1, containerID2); + .getBlockID().getContainerID(); + Assertions.assertEquals(containerID1, containerID2); String dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize)); byte[] data = dataString.getBytes(UTF_8); @@ -161,28 +152,27 @@ public void testDiscardPreallocatedBlocks() throws Exception { cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); - Assert.assertEquals(3, datanodes.size()); + Assertions.assertEquals(3, datanodes.size()); waitForContainerClose(key); dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize)); data = dataString.getBytes(UTF_8); key.write(data); - Assert.assertEquals(3, keyOutputStream.getStreamEntries().size()); + Assertions.assertEquals(3, keyOutputStream.getStreamEntries().size()); // the 1st block got written. Now all the containers are closed, so the 2nd // pre allocated block will be removed from the list and new block should // have been allocated - Assert.assertTrue( - keyOutputStream.getLocationInfoList().get(0).getBlockID() - .equals(locationInfos.get(0).getBlockID())); - Assert.assertFalse( - locationStreamInfos.get(1).getBlockID() - .equals(keyOutputStream.getLocationInfoList().get(1).getBlockID())); + Assertions.assertEquals( + keyOutputStream.getLocationInfoList().get(0).getBlockID(), + locationInfos.get(0).getBlockID()); + Assertions.assertNotEquals(locationStreamInfos.get(1).getBlockID(), + keyOutputStream.getLocationInfoList().get(1).getBlockID()); key.close(); } private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { + long size) throws Exception { return TestHelper .createKey(keyName, type, size, objectStore, volumeName, bucketName); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java index dc5622e1e8b..c5147ecfb01 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java @@ -17,14 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; -import org.junit.BeforeClass; +import org.junit.jupiter.api.BeforeAll; /** * Tests key output stream without zero-copy enabled. 
*/ public class TestECKeyOutputStream extends AbstractTestECKeyOutputStream { - @BeforeClass + @BeforeAll public static void init() throws Exception { init(false); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java index b9baeb2437f..47c94e03cb2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java @@ -17,14 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; -import org.junit.BeforeClass; +import org.junit.jupiter.api.BeforeAll; /** * Tests key output stream with zero-copy enabled. */ public class TestECKeyOutputStreamWithZeroCopy extends AbstractTestECKeyOutputStream { - @BeforeClass + @BeforeAll public static void init() throws Exception { init(true); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index c9183400032..3d10661f69c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -47,13 +47,10 @@ import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.junit.After; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.time.Duration; @@ -69,14 +66,9 @@ /** * Tests Exception handling by Ozone Client by set flush delay. */ +@Timeout(300) public class TestFailureHandlingByClientFlushDelay { - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneConfiguration conf; private OzoneClient client; @@ -132,7 +124,7 @@ private void init() throws Exception { conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( - Collections.singleton(HddsUtils.getHostName(conf))).get(0), + Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) @@ -160,7 +152,7 @@ private void startCluster() throws Exception { /** * Shutdown MiniDFSCluster. 
*/ - @After + @AfterEach public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -178,14 +170,14 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { .getFixedLengthString(keyString, chunkSize); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List streamEntryList = keyOutputStream.getStreamEntries(); // Assert that 1 block will be preallocated - Assert.assertEquals(1, streamEntryList.size()); + Assertions.assertEquals(1, streamEntryList.size()); key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); @@ -205,11 +197,11 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { key.write(data.getBytes(UTF_8)); key.flush(); - Assert.assertTrue( + Assertions.assertTrue( keyOutputStream.getExcludeList().getContainerIds().isEmpty()); - Assert.assertTrue( + Assertions.assertTrue( keyOutputStream.getExcludeList().getDatanodes().isEmpty()); - Assert.assertTrue( + Assertions.assertTrue( keyOutputStream.getExcludeList().getDatanodes().isEmpty()); key.write(data.getBytes(UTF_8)); // The close will just write to the buffer @@ -225,15 +217,15 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); // Make sure a new block is written - Assert.assertNotEquals( + Assertions.assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + Assertions.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8)); } private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { + long size) throws Exception { return TestHelper .createKey(keyName, type, size, objectStore, volumeName, bucketName); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java index e0e95239b0e..8b39e994b05 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.client.rpc; - +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -37,33 +37,23 @@ import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import static 
java.nio.charset.StandardCharsets.UTF_8; -import java.util.Arrays; import java.util.List; import java.util.UUID; import java.util.HashMap; -import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; /** * Tests Hybrid Pipeline Creation and IO on same set of Datanodes. */ +@Timeout(300) public class TestHybridPipelineOnDatanode { - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private static MiniOzoneCluster cluster; private static OzoneConfiguration conf; private static OzoneClient client; @@ -76,7 +66,7 @@ public class TestHybridPipelineOnDatanode { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3) @@ -90,7 +80,7 @@ public static void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -117,8 +107,9 @@ public void testHybridPipelineOnDatanode() throws IOException { // Write data into a key OzoneOutputStream out = bucket - .createKey(keyName1, data.length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + .createKey(keyName1, data.length, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); @@ -126,8 +117,9 @@ public void testHybridPipelineOnDatanode() throws IOException { // Write data into a key out = bucket - .createKey(keyName2, data.length, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); + .createKey(keyName2, data.length, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); @@ -151,17 +143,18 @@ public void testHybridPipelineOnDatanode() throws IOException { cluster.getStorageContainerManager().getPipelineManager() .getPipeline(pipelineID1); List dns = pipeline1.getNodes(); - Assert.assertTrue(dns.size() == 1); + Assertions.assertEquals(1, dns.size()); Pipeline pipeline2 = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(pipelineID2); - Assert.assertNotEquals(pipeline1, pipeline2); - Assert.assertTrue(pipeline1.getType() == HddsProtos.ReplicationType.RATIS); - Assert.assertTrue(pipeline1.getType() == pipeline2.getType()); + Assertions.assertNotEquals(pipeline1, pipeline2); + Assertions.assertSame(pipeline1.getType(), + HddsProtos.ReplicationType.RATIS); + Assertions.assertSame(pipeline1.getType(), pipeline2.getType()); // assert that the pipeline Id1 and pipelineId2 are on the same node // but different replication factor - Assert.assertTrue(pipeline2.getNodes().contains(dns.get(0))); + Assertions.assertTrue(pipeline2.getNodes().contains(dns.get(0))); byte[] b1 = new byte[data.length]; byte[] b2 = new byte[data.length]; // now try to read both the keys @@ -173,8 +166,8 @@ public void testHybridPipelineOnDatanode() throws IOException { is = bucket.readKey(keyName2); is.read(b2); is.close(); - Assert.assertTrue(Arrays.equals(b1, data)); - Assert.assertTrue(Arrays.equals(b1, b2)); + Assertions.assertArrayEquals(b1, data); + Assertions.assertArrayEquals(b1, b2); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index d7600fa2a85..8c8b0a269a8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -42,9 +42,10 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.time.Duration; @@ -52,11 +53,6 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; -import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; - import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -64,14 +60,8 @@ /** * Tests MultiBlock Writes with Dn failures by Ozone Client. */ +@Timeout(300) public class TestMultiBlockWritesWithDnFailures { - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneConfiguration conf; private OzoneClient client; @@ -137,7 +127,7 @@ private void startCluster(int datanodes) throws Exception { /** * Shutdown MiniDFSCluster. 
*/ - @After + @AfterEach public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -156,12 +146,12 @@ public void testMultiBlockWritesWithDnFailures() throws Exception { key.write(data.getBytes(UTF_8)); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertTrue(locationInfoList.size() == 2); + Assertions.assertEquals(2, locationInfoList.size()); long containerId = locationInfoList.get(1).getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() .getContainerManager() @@ -185,7 +175,7 @@ public void testMultiBlockWritesWithDnFailures() throws Exception { .setKeyName(keyName) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + Assertions.assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, data.concat(data).getBytes(UTF_8)); } @@ -201,14 +191,14 @@ public void testMultiBlockWritesWithIntermittentDnFailures() key.write(data.getBytes(UTF_8)); // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List streamEntryList = keyOutputStream.getStreamEntries(); // Assert that 6 block will be preallocated - Assert.assertEquals(6, streamEntryList.size()); + Assertions.assertEquals(6, streamEntryList.size()); key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); @@ -237,13 +227,13 @@ public void testMultiBlockWritesWithIntermittentDnFailures() .setKeyName(keyName) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + Assertions.assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, data.concat(data).concat(data).concat(data).getBytes(UTF_8)); } private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { + long size) throws Exception { return TestHelper .createKey(keyName, type, size, objectStore, volumeName, bucketName); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index cc7864a3b53..6eaf051ba45 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -61,15 +61,12 @@ import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; 
-import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.nio.file.Path; @@ -86,13 +83,14 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * This test verifies all the S3 multipart client apis - prefix layout. */ +@Timeout(300) public class TestOzoneClientMultipartUploadWithFSO { private static ObjectStore store = null; @@ -100,12 +98,6 @@ public class TestOzoneClientMultipartUploadWithFSO { private static OzoneClient ozClient = null; private static String scmId = UUID.randomUUID().toString(); - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(new Timeout(300000)); private String volumeName; private String bucketName; private String keyName; @@ -119,7 +111,7 @@ public class TestOzoneClientMultipartUploadWithFSO { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); @@ -129,7 +121,7 @@ public static void init() throws Exception { /** * Close OzoneClient and shutdown MiniOzoneCluster. */ - @AfterClass + @AfterAll public static void shutdown() throws IOException { shutdownCluster(); } @@ -142,10 +134,10 @@ public static void shutdown() throws IOException { */ static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) - .setScmId(scmId) - .build(); + .setNumDatanodes(5) + .setTotalPipelineNumLimit(10) + .setScmId(scmId) + .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); store = ozClient.getObjectStore(); @@ -163,8 +155,8 @@ static void shutdownCluster() throws IOException { cluster.shutdown(); } } - - @Before + + @BeforeEach public void preTest() throws Exception { volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); @@ -178,7 +170,7 @@ public void preTest() throws Exception { @Test public void testInitiateMultipartUploadWithReplicationInformationSet() throws - IOException { + IOException { String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType.RATIS, ONE); @@ -186,31 +178,31 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws // generate a new uploadID. 
String uploadIDNew = initiateMultipartUpload(bucket, keyName, ReplicationType.RATIS, ONE); - Assert.assertNotEquals(uploadIDNew, uploadID); + Assertions.assertNotEquals(uploadIDNew, uploadID); } @Test public void testInitiateMultipartUploadWithDefaultReplication() throws - IOException { + IOException { OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName); - Assert.assertNotNull(multipartInfo); + Assertions.assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - Assert.assertNotNull(multipartInfo.getUploadID()); + Assertions.assertEquals(volumeName, multipartInfo.getVolumeName()); + Assertions.assertEquals(bucketName, multipartInfo.getBucketName()); + Assertions.assertEquals(keyName, multipartInfo.getKeyName()); + Assertions.assertNotNull(multipartInfo.getUploadID()); // Call initiate multipart upload for the same key again, this should // generate a new uploadID. multipartInfo = bucket.initiateMultipartUpload(keyName); - Assert.assertNotNull(multipartInfo); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID); - Assert.assertNotNull(multipartInfo.getUploadID()); + Assertions.assertNotNull(multipartInfo); + Assertions.assertEquals(volumeName, multipartInfo.getVolumeName()); + Assertions.assertEquals(bucketName, multipartInfo.getBucketName()); + Assertions.assertEquals(keyName, multipartInfo.getKeyName()); + Assertions.assertNotEquals(multipartInfo.getUploadID(), uploadID); + Assertions.assertNotNull(multipartInfo.getUploadID()); } @Test @@ -220,15 +212,15 @@ public void testUploadPartWithNoOverride() throws IOException { ReplicationType.RATIS, ONE); OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, - sampleData.length(), 1, uploadID); + sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream - .getCommitUploadPartInfo(); + .getCommitUploadPartInfo(); - Assert.assertNotNull(commitUploadPartInfo); - Assert.assertNotNull(commitUploadPartInfo.getPartName()); + Assertions.assertNotNull(commitUploadPartInfo); + Assertions.assertNotNull(commitUploadPartInfo.getPartName()); } @Test @@ -253,13 +245,12 @@ public void testUploadPartOverrideWithRatis() throws Exception { // So, when a part is override partNames will still be same irrespective // of content in ozone s3. This will make S3 Mpu completeMPU pass when // comparing part names and large file uploads work using aws cp. 
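The next hunk shows the other argument-order change in this migration: JUnit 4 placed the failure message first, JUnit 5 places it last and also accepts a Supplier so the message is built only on failure. Sketched:

```java
// Sketch of the message-position change applied in the hunk below.
import static org.junit.jupiter.api.Assertions.assertEquals;

class MessageOrderSketch {
  void compareParts(String partName, String partNameNew) {
    // JUnit 4: Assert.assertEquals("Part names should be same",
    //     partName, partNameNew);
    assertEquals(partName, partNameNew, "Part names should be same");

    // Lazy variant: the string is only constructed if the check fails.
    assertEquals(partName, partNameNew,
        () -> "Part names should be same, got: " + partNameNew);
  }
}
```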
- Assert.assertEquals("Part names should be same", partName, - partNameNew); + Assertions.assertEquals(partName, partNameNew, "Part names should be same"); // old part bytes written needs discard and have only // new part bytes in quota for this bucket long byteWritten = "name".length() * 3; // data written with replication - Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), + Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(), byteWritten); } @@ -277,14 +268,14 @@ public void testUploadTwiceWithEC() throws IOException { String partName = uploadPart(bucket, keyName, uploadID, partNumber, data); - + Map partsMap = new HashMap<>(); partsMap.put(partNumber, partName); bucket.completeMultipartUpload(keyName, uploadID, partsMap); long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); - Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), + Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(), replicatedSize); //upload same key again @@ -299,7 +290,7 @@ public void testUploadTwiceWithEC() throws IOException { bucket.completeMultipartUpload(keyName, uploadID, partsMap); // used sized should remain same, overwrite previous upload - Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), + Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(), replicatedSize); } @@ -316,16 +307,16 @@ public void testUploadAbortWithEC() throws IOException { String uploadID = multipartInfo.getUploadID(); int partNumber = 1; uploadPart(bucket, keyName, uploadID, partNumber, data); - + long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); - Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), + Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(), replicatedSize); bucket.abortMultipartUpload(keyName, uploadID); // used size should become zero after aport upload - Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), 0); + Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(), 0); } private OzoneBucket getOzoneECBucket(String myBucket) @@ -339,22 +330,22 @@ private OzoneBucket getOzoneECBucket(String myBucket) volume.createBucket(myBucket, bucketArgs.build()); return volume.getBucket(myBucket); } - + @Test public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { // Initiate multipart upload String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, - ONE); + ONE); // Upload Parts Map partsMap = new TreeMap<>(); // Uploading part 1 with less than min size String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); + "data".getBytes(UTF_8)); partsMap.put(1, partName); partName = uploadPart(bucket, keyName, uploadID, 2, - "data".getBytes(UTF_8)); + "data".getBytes(UTF_8)); partsMap.put(2, partName); // Complete multipart upload @@ -389,14 +380,14 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() // the unused part size should be discarded from the bucket size, // 30000000 - 10000000 = 20000000 long bucketSize = volume.getBucket(bucketName).getUsedBytes(); - Assert.assertEquals(bucketSize, data.length * 2); + Assertions.assertEquals(bucketSize, data.length * 2); } @Test public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() - throws Exception { + throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, - ONE); + ONE); // We have not uploaded any parts, but passing some list it should throw // error. 
@@ -409,9 +400,9 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() @Test public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() - throws Exception { + throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, - ONE); + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -426,7 +417,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() @Test public void testMultipartUploadWithMissingParts() throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, - ONE); + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -461,35 +452,35 @@ public void testMultipartPartNumberExceedingAllowedRange() throws Exception { @Test public void testCommitPartAfterCompleteUpload() throws Exception { String parentDir = "a/b/c/d/"; - keyName = parentDir + UUID.randomUUID().toString(); + keyName = parentDir + UUID.randomUUID(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - Assert.assertEquals(volume.getBucket(bucketName).getUsedNamespace(), 4); + Assertions.assertEquals(volume.getBucket(bucketName).getUsedNamespace(), 4); // upload part 1. byte[] data = generateData(5 * 1024 * 1024, - (byte) RandomUtils.nextLong()); + (byte) RandomUtils.nextLong()); OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, - data.length, 1, uploadID); + data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = - ozoneOutputStream.getCommitUploadPartInfo(); + ozoneOutputStream.getCommitUploadPartInfo(); // Do not close output stream for part 2. ozoneOutputStream = bucket.createMultipartKey(keyName, - data.length, 2, uploadID); + data.length, 2, uploadID); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = - bucket.completeMultipartUpload(keyName, - uploadID, partsMap); - Assert.assertNotNull(omMultipartUploadCompleteInfo); + bucket.completeMultipartUpload(keyName, + uploadID, partsMap); + Assertions.assertNotNull(omMultipartUploadCompleteInfo); - Assert.assertNotNull(omMultipartCommitUploadPartInfo); + Assertions.assertNotNull(omMultipartCommitUploadPartInfo); byte[] fileContent = new byte[data.length]; try (OzoneInputStream inputStream = bucket.readKey(keyName)) { @@ -500,15 +491,15 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Combine all parts data, and check is it matching with get key data. 
String part1 = new String(data, UTF_8); sb.append(part1); - Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8)); + Assertions.assertEquals(sb.toString(), new String(fileContent, UTF_8)); try { ozoneOutputStream.close(); - Assert.fail("testCommitPartAfterCompleteUpload failed"); + Assertions.fail("testCommitPartAfterCompleteUpload failed"); } catch (IOException ex) { - Assert.assertTrue(ex instanceof OMException); - Assert.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, - ((OMException) ex).getResult()); + Assertions.assertTrue(ex instanceof OMException); + Assertions.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, + ((OMException) ex).getResult()); } } @@ -521,7 +512,7 @@ public void testAbortUploadFail() throws Exception { @Test public void testAbortUploadFailWithInProgressPartUpload() throws Exception { String parentDir = "a/b/c/d/"; - keyName = parentDir + UUID.randomUUID().toString(); + keyName = parentDir + UUID.randomUUID(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); @@ -548,7 +539,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception { @Test public void testAbortUploadSuccessWithOutAnyParts() throws Exception { String parentDir = "a/b/c/d/"; - keyName = parentDir + UUID.randomUUID().toString(); + keyName = parentDir + UUID.randomUUID(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); @@ -558,7 +549,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception { @Test public void testAbortUploadSuccessWithParts() throws Exception { String parentDir = "a/b/c/d/"; - keyName = parentDir + UUID.randomUUID().toString(); + keyName = parentDir + UUID.randomUUID(); OzoneManager ozoneManager = cluster.getOzoneManager(); String buckKey = ozoneManager.getMetadataManager() @@ -585,8 +576,8 @@ public void testAbortUploadSuccessWithParts() throws Exception { metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey); OmMultipartKeyInfo omMultipartKeyInfo = metadataMgr.getMultipartInfoTable().get(multipartKey); - Assert.assertNull(omKeyInfo); - Assert.assertNull(omMultipartKeyInfo); + Assertions.assertNull(omKeyInfo); + Assertions.assertNull(omMultipartKeyInfo); // Since deleteTable operation is performed via // batchOp - Table.putWithBatch(), which is an async operation and @@ -616,22 +607,22 @@ public void testListMultipartUploadParts() throws Exception { OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); - Assert.assertEquals( + Assertions.assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), ozoneMultipartUploadPartListParts.getReplicationConfig()); - Assert.assertEquals(3, + Assertions.assertEquals(3, ozoneMultipartUploadPartListParts.getPartInfoList().size()); verifyPartNamesInDB(partsMap, ozoneMultipartUploadPartListParts, uploadID); - Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); + Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); } private void verifyPartNamesInDB(Map partsMap, - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts, - String uploadID) throws IOException { + OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts, + String uploadID) throws IOException { List listPartNames = new ArrayList<>(); String keyPartName = verifyPartNames(partsMap, 0, @@ -652,7 +643,7 @@ private void verifyPartNamesInDB(Map partsMap, keyName, uploadID); OmMultipartKeyInfo omMultipartKeyInfo = 
metadataMgr.getMultipartInfoTable().get(multipartKey); - Assert.assertNotNull(omMultipartKeyInfo); + Assertions.assertNotNull(omMultipartKeyInfo); for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo : omMultipartKeyInfo.getPartKeyInfoMap()) { @@ -663,21 +654,21 @@ private void verifyPartNamesInDB(Map partsMap, metadataMgr.getOzoneKey(volumeName, bucketName, keyName); // partKeyName format in DB - partKeyName + ClientID - Assert.assertTrue("Invalid partKeyName format in DB: " + partKeyName - + ", expected name:" + fullKeyPartName, - partKeyName.startsWith(fullKeyPartName)); + Assertions.assertTrue(partKeyName.startsWith(fullKeyPartName), + "Invalid partKeyName format in DB: " + partKeyName + + ", expected name:" + fullKeyPartName); listPartNames.remove(partKeyName); } - Assert.assertTrue("Wrong partKeyName format in DB!", - listPartNames.isEmpty()); + Assertions.assertTrue(listPartNames.isEmpty(), + "Wrong partKeyName format in DB!"); } private String verifyPartNames(Map partsMap, int index, - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts) { + OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts) { - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts + Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts .getPartInfoList().get(index).getPartNumber()), ozoneMultipartUploadPartListParts.getPartInfoList().get(index) .getPartName()); @@ -707,37 +698,37 @@ public void testListMultipartUploadPartsWithContinuation() OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); - Assert.assertEquals( + Assertions.assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), ozoneMultipartUploadPartListParts.getReplicationConfig()); - Assert.assertEquals(2, + Assertions.assertEquals(2, ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts + Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts .getPartInfoList().get(0).getPartNumber()), ozoneMultipartUploadPartListParts.getPartInfoList().get(0) .getPartName()); - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts + Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts .getPartInfoList().get(1).getPartNumber()), ozoneMultipartUploadPartListParts.getPartInfoList().get(1) .getPartName()); // Get remaining - Assert.assertTrue(ozoneMultipartUploadPartListParts.isTruncated()); + Assertions.assertTrue(ozoneMultipartUploadPartListParts.isTruncated()); ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, ozoneMultipartUploadPartListParts.getNextPartNumberMarker(), 2); - Assert.assertEquals(1, + Assertions.assertEquals(1, ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts + Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts .getPartInfoList().get(0).getPartNumber()), ozoneMultipartUploadPartListParts.getPartInfoList().get(0) .getPartName()); // As we don't have any parts for this, we should get false here - Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); + Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); } @@ -745,7 +736,7 @@ public void testListMultipartUploadPartsWithContinuation() public void testListPartsInvalidPartMarker() throws Exception { try { bucket.listParts(keyName, "random", -1, 2); - Assert.fail("Should throw 
exception as partNumber is an invalid number!"); + Assertions.fail("Should throw exception as partNumber is an invalid number!"); } catch (IllegalArgumentException ex) { GenericTestUtils.assertExceptionContains("Should be greater than or " + "equal to zero", ex); @@ -756,7 +747,7 @@ public void testListPartsInvalidPartMarker() throws Exception { public void testListPartsInvalidMaxParts() throws Exception { try { bucket.listParts(keyName, "random", 1, -1); - Assert.fail("Should throw exception as max parts is an invalid number!"); + Assertions.fail("Should throw exception as max parts is an invalid number!"); } catch (IllegalArgumentException ex) { GenericTestUtils.assertExceptionContains("Max Parts Should be greater " + "than zero", ex); @@ -777,15 +768,15 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() // Should return empty - Assert.assertEquals(0, + Assertions.assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals( + Assertions.assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), ozoneMultipartUploadPartListParts.getReplicationConfig()); // As we don't have any parts with greater than partNumberMarker and list // is not truncated, so it should return false here. - Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); + Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); } @@ -824,53 +815,53 @@ public void testListMultipartUpload() throws Exception { uploadPart(bucket, key3, uploadID3, 1, "data".getBytes(UTF_8)); OzoneMultipartUploadList listMPUs = bucket.listMultipartUploads("dir1"); - Assert.assertEquals(3, listMPUs.getUploads().size()); + Assertions.assertEquals(3, listMPUs.getUploads().size()); List expectedList = new ArrayList<>(keys); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { expectedList.remove(mpu.getKeyName()); } - Assert.assertEquals(0, expectedList.size()); + Assertions.assertEquals(0, expectedList.size()); listMPUs = bucket.listMultipartUploads("dir1/dir2"); - Assert.assertEquals(2, listMPUs.getUploads().size()); + Assertions.assertEquals(2, listMPUs.getUploads().size()); expectedList = new ArrayList<>(); expectedList.add(key2); expectedList.add(key3); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { expectedList.remove(mpu.getKeyName()); } - Assert.assertEquals(0, expectedList.size()); + Assertions.assertEquals(0, expectedList.size()); listMPUs = bucket.listMultipartUploads("dir1/dir2/dir3"); - Assert.assertEquals(1, listMPUs.getUploads().size()); + Assertions.assertEquals(1, listMPUs.getUploads().size()); expectedList = new ArrayList<>(); expectedList.add(key3); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { expectedList.remove(mpu.getKeyName()); } - Assert.assertEquals(0, expectedList.size()); + Assertions.assertEquals(0, expectedList.size()); // partial key listMPUs = bucket.listMultipartUploads("d"); - Assert.assertEquals(3, listMPUs.getUploads().size()); + Assertions.assertEquals(3, listMPUs.getUploads().size()); expectedList = new ArrayList<>(keys); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { expectedList.remove(mpu.getKeyName()); } - Assert.assertEquals(0, expectedList.size()); + Assertions.assertEquals(0, expectedList.size()); // partial key listMPUs = bucket.listMultipartUploads(""); - Assert.assertEquals(3, listMPUs.getUploads().size()); + Assertions.assertEquals(3, listMPUs.getUploads().size()); expectedList = new ArrayList<>(keys); for (OzoneMultipartUpload mpu : listMPUs.getUploads()) { 
expectedList.remove(mpu.getKeyName()); } - Assert.assertEquals(0, expectedList.size()); + Assertions.assertEquals(0, expectedList.size()); } private String verifyUploadedPart(String uploadID, String partName, - OMMetadataManager metadataMgr) throws IOException { + OMMetadataManager metadataMgr) throws IOException { OzoneManager ozoneManager = cluster.getOzoneManager(); String buckKey = ozoneManager.getMetadataManager() .getBucketKey(volumeName, bucketName); @@ -888,28 +879,28 @@ private String verifyUploadedPart(String uploadID, String partName, OmMultipartKeyInfo omMultipartKeyInfo = metadataMgr.getMultipartInfoTable().get(multipartKey); - Assert.assertNotNull(omKeyInfo); - Assert.assertNotNull(omMultipartKeyInfo); - Assert.assertEquals(OzoneFSUtils.getFileName(keyName), + Assertions.assertNotNull(omKeyInfo); + Assertions.assertNotNull(omMultipartKeyInfo); + Assertions.assertEquals(OzoneFSUtils.getFileName(keyName), omKeyInfo.getKeyName()); - Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID()); + Assertions.assertEquals(uploadID, omMultipartKeyInfo.getUploadID()); for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo : omMultipartKeyInfo.getPartKeyInfoMap()) { OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo()); - Assert.assertEquals(keyName, currentKeyPartInfo.getKeyName()); + Assertions.assertEquals(keyName, currentKeyPartInfo.getKeyName()); // verify dbPartName - Assert.assertEquals(partName, partKeyInfo.getPartName()); + Assertions.assertEquals(partName, partKeyInfo.getPartName()); } return multipartKey; } private String getMultipartOpenKey(String multipartUploadID, - String volName, String buckName, String kName, - OMMetadataManager omMetadataManager) throws IOException { + String volName, String buckName, String kName, + OMMetadataManager omMetadataManager) throws IOException { String fileName = OzoneFSUtils.getFileName(kName); final long volumeId = omMetadataManager.getVolumeId(volName); @@ -919,13 +910,13 @@ private String getMultipartOpenKey(String multipartUploadID, omMetadataManager); String multipartKey = omMetadataManager.getMultipartKey(volumeId, bucketId, - parentID, fileName, multipartUploadID); + parentID, fileName, multipartUploadID); return multipartKey; } private long getParentID(String volName, String buckName, - String kName, OMMetadataManager omMetadataManager) throws IOException { + String kName, OMMetadataManager omMetadataManager) throws IOException { Iterator pathComponents = Paths.get(kName).iterator(); final long volumeId = omMetadataManager.getVolumeId(volName); final long bucketId = omMetadataManager.getBucketId(volName, @@ -935,17 +926,17 @@ private long getParentID(String volName, String buckName, } private String initiateMultipartUpload(OzoneBucket oBucket, String kName, - ReplicationType replicationType, ReplicationFactor replicationFactor) - throws IOException { + ReplicationType replicationType, ReplicationFactor replicationFactor) + throws IOException { OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName, - replicationType, replicationFactor); + replicationType, replicationFactor); - Assert.assertNotNull(multipartInfo); + Assertions.assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(kName, multipartInfo.getKeyName()); - Assert.assertNotNull(multipartInfo.getUploadID()); + Assertions.assertEquals(volumeName, 
multipartInfo.getVolumeName()); + Assertions.assertEquals(bucketName, multipartInfo.getBucketName()); + Assertions.assertEquals(kName, multipartInfo.getKeyName()); + Assertions.assertNotNull(multipartInfo.getUploadID()); return uploadID; } @@ -954,32 +945,32 @@ private String uploadPart(OzoneBucket oBucket, String kName, String uploadID, int partNumber, byte[] data) throws IOException { OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName, - data.length, partNumber, uploadID); + data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, - data.length); + data.length); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = - ozoneOutputStream.getCommitUploadPartInfo(); + ozoneOutputStream.getCommitUploadPartInfo(); - Assert.assertNotNull(omMultipartCommitUploadPartInfo); - Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); + Assertions.assertNotNull(omMultipartCommitUploadPartInfo); + Assertions.assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); return omMultipartCommitUploadPartInfo.getPartName(); } private void completeMultipartUpload(OzoneBucket oBucket, String kName, - String uploadID, Map partsMap) throws Exception { + String uploadID, Map partsMap) throws Exception { OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = oBucket - .completeMultipartUpload(kName, uploadID, partsMap); - - Assert.assertNotNull(omMultipartUploadCompleteInfo); - Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket - .getName()); - Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket - .getVolumeName()); - Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), kName); - Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash()); + .completeMultipartUpload(kName, uploadID, partsMap); + + Assertions.assertNotNull(omMultipartUploadCompleteInfo); + Assertions.assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket + .getName()); + Assertions.assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket + .getVolumeName()); + Assertions.assertEquals(omMultipartUploadCompleteInfo.getKey(), kName); + Assertions.assertNotNull(omMultipartUploadCompleteInfo.getHash()); } private byte[] generateData(int size, byte val) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index aa048537663..f1d18f4629b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -47,27 +47,18 @@ import static java.nio.charset.StandardCharsets.UTF_8; import org.apache.ratis.protocol.exceptions.GroupMismatchException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Tests failure detection and handling in BlockOutputStream Class by set * flush delay. 
*/ +@Timeout(300) public class TestOzoneClientRetriesOnExceptionFlushDelay { - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; @@ -88,7 +79,7 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay { * * @throws IOException */ - @Before + @BeforeEach public void init() throws Exception { chunkSize = 100; flushSize = 2 * chunkSize; @@ -133,7 +124,7 @@ private String getKeyName() { /** * Shutdown MiniDFSCluster. */ - @After + @AfterEach public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -152,12 +143,12 @@ public void testGroupMismatchExceptionHandling() throws Exception { byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength) .getBytes(UTF_8); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); long containerID = keyOutputStream.getStreamEntries().get(0). getBlockID().getContainerID(); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); + Assertions.assertEquals(1, keyOutputStream.getStreamEntries().size()); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() .getContainer(ContainerID.valueOf(containerID)); @@ -172,17 +163,17 @@ public void testGroupMismatchExceptionHandling() throws Exception { key.write(data1); OutputStream stream = keyOutputStream.getStreamEntries().get(0) .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); + Assertions.assertTrue(stream instanceof BlockOutputStream); BlockOutputStream blockOutputStream = (BlockOutputStream) stream; TestHelper.waitForPipelineClose(key, cluster, false); key.flush(); - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream + Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream .getIoException()) instanceof GroupMismatchException); - Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() + Assertions.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() .contains(pipeline.getId())); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2); + Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size()); key.close(); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0); + Assertions.assertEquals(0, keyOutputStream.getStreamEntries().size()); validateData(keyName, data1); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index 7bc8ca509fe..16f8ef1398f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -51,29 +51,21 @@ import static java.nio.charset.StandardCharsets.UTF_8; import org.apache.ratis.protocol.exceptions.GroupMismatchException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; 
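The timeout conversion shown above is the template repeated across this patch series: the JUnit 4 rule field is removed and a class-level JUnit 5 annotation replaces it. The before-and-after shape, taken from these hunks:

    // JUnit 4 (removed): per-test timeout via a rule field
    @Rule
    public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));

    // JUnit 5 (added): class-level annotation; the unit defaults to seconds
    @Timeout(300)
    public class TestOzoneClientRetriesOnExceptionFlushDelay { /* ... */ }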
-import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Tests failure detection and handling in BlockOutputStream Class. */ +@Timeout(300) public class TestOzoneClientRetriesOnExceptions { private static final int MAX_RETRIES = 3; - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; @@ -94,7 +86,7 @@ public class TestOzoneClientRetriesOnExceptions { * * @throws IOException */ - @Before + @BeforeEach public void init() throws Exception { chunkSize = 100; flushSize = 2 * chunkSize; @@ -141,7 +133,7 @@ private String getKeyName() { /** * Shutdown MiniDFSCluster. */ - @After + @AfterEach public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -154,42 +146,42 @@ public void testGroupMismatchExceptionHandling() throws Exception { String keyName = getKeyName(); int dataLength = maxFlushSize + 50; OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, - dataLength); + dataLength); // write data more than 1 chunk byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + ContainerTestHelper.getFixedLengthString(keyString, dataLength) + .getBytes(UTF_8); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); long containerID = - keyOutputStream.getStreamEntries().get(0). - getBlockID().getContainerID(); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); + keyOutputStream.getStreamEntries().get(0). 
+ getBlockID().getContainerID(); + Assertions.assertEquals(1, keyOutputStream.getStreamEntries().size()); ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueOf(containerID)); + cluster.getStorageContainerManager().getContainerManager() + .getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); + cluster.getStorageContainerManager().getPipelineManager() + .getPipeline(container.getPipelineID()); XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); + xceiverClientManager.acquireClient(pipeline); xceiverClient.sendCommand(ContainerTestHelper - .getCreateContainerRequest(containerID, pipeline)); + .getCreateContainerRequest(containerID, pipeline)); xceiverClientManager.releaseClient(xceiverClient, false); key.write(data1); OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); + .getOutputStream(); + Assertions.assertTrue(stream instanceof BlockOutputStream); BlockOutputStream blockOutputStream = (BlockOutputStream) stream; TestHelper.waitForPipelineClose(key, cluster, false); key.flush(); - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof GroupMismatchException); - Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() - .contains(pipeline.getId())); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2); + Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream + .getIoException()) instanceof GroupMismatchException); + Assertions.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() + .contains(pipeline.getId())); + Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size()); key.close(); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0); + Assertions.assertEquals(0, keyOutputStream.getStreamEntries().size()); validateData(keyName, data1); } @@ -198,10 +190,10 @@ public void testMaxRetriesByOzoneClient() throws Exception { String keyName = getKeyName(); OzoneOutputStream key = createKey( keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); + Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List entries = keyOutputStream.getStreamEntries(); - Assert.assertEquals((MAX_RETRIES + 1), + Assertions.assertEquals((MAX_RETRIES + 1), keyOutputStream.getStreamEntries().size()); int dataLength = maxFlushSize + 50; // write data more than 1 chunk @@ -220,7 +212,7 @@ public void testMaxRetriesByOzoneClient() throws Exception { .getPipeline(container.getPipelineID()); XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline); - Assume.assumeFalse(containerList.contains(containerID)); + Assumptions.assumeFalse(containerList.contains(containerID)); containerList.add(containerID); xceiverClient.sendCommand(ContainerTestHelper .getCreateContainerRequest(containerID, pipeline)); @@ -228,47 +220,47 @@ public void testMaxRetriesByOzoneClient() throws Exception { } key.write(data1); OutputStream stream = entries.get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); + Assertions.assertTrue(stream instanceof BlockOutputStream); BlockOutputStream blockOutputStream = 
(BlockOutputStream) stream; TestHelper.waitForContainerClose(key, cluster); // Ensure that blocks for the key have been allocated to at least N+1 // containers so that write request will be tried on N+1 different blocks // of N+1 different containers and it will finally fail as it will hit // the max retry count of N. - Assume.assumeTrue(containerList.size() + " <= " + MAX_RETRIES, - containerList.size() > MAX_RETRIES); + Assumptions.assumeTrue(containerList.size() > MAX_RETRIES, + containerList.size() + " <= " + MAX_RETRIES); try { key.write(data1); // ensure that write is flushed to dn key.flush(); - Assert.fail("Expected exception not thrown"); + Assertions.fail("Expected exception not thrown"); } catch (IOException ioe) { - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof ContainerNotOpenException); - Assert.assertTrue(ioe. - getMessage().contains( + Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream + .getIoException()) instanceof ContainerNotOpenException); + Assertions.assertTrue(ioe. + getMessage().contains( "Retry request failed. " + - "retries get failed due to exceeded maximum " + - "allowed retries number: " + MAX_RETRIES)); + "retries get failed due to exceeded maximum " + + "allowed retries number: " + MAX_RETRIES)); } try { key.flush(); - Assert.fail("Expected exception not thrown"); + Assertions.fail("Expected exception not thrown"); } catch (IOException ioe) { - Assert.assertTrue(ioe.getMessage().contains("Stream is closed")); + Assertions.assertTrue(ioe.getMessage().contains("Stream is closed")); } try { key.close(); } catch (IOException ioe) { - Assert.fail("Expected should not be thrown"); + Assertions.fail("Exception should not be thrown"); } } private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { + long size) throws Exception { return TestHelper - .createKey(keyName, type, ReplicationFactor.ONE, - size, objectStore, volumeName, bucketName); + .createKey(keyName, type, ReplicationFactor.ONE, + size, objectStore, volumeName, bucketName); } private void validateData(String keyName, byte[] data) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index ace1af6d3ca..3f7c590bf6e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -39,13 +39,13 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.UnhealthyTest; import org.apache.ozone.test.tag.Unhealthy; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.FixMethodOrder; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Test; import org.junit.experimental.categories.Category; -import org.junit.runners.MethodSorters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,7 +64,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static 
org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * This class is to test audit logs for xxxACL APIs of Ozone Client. @@ -75,7 +75,7 @@ * all assertion based test in this class. */ @NotThreadSafe -@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@TestMethodOrder(MethodOrderer.MethodName.class) @Category(UnhealthyTest.class) @Unhealthy("Fix this after adding audit support for HA Acl code. This will " + "be fixed by HDDS-2038") @@ -86,10 +86,10 @@ public class TestOzoneRpcClientForAclAuditLog { private static UserGroupInformation ugi; private static final OzoneAcl USER_ACL = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS); + "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS); private static final OzoneAcl USER_ACL_2 = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "jane", IAccessAuthorizer.ACLType.ALL, ACCESS); + "jane", IAccessAuthorizer.ACLType.ALL, ACCESS); private static List aclListToAdd = new ArrayList<>(); private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; @@ -106,7 +106,7 @@ public class TestOzoneRpcClientForAclAuditLog { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { System.setProperty("log4j.configurationFile", "auditlog.properties"); ugi = UserGroupInformation.getCurrentUser(); @@ -141,7 +141,7 @@ private static void startCluster(OzoneConfiguration conf) throws Exception { /** * Close OzoneClient and shutdown MiniOzoneCluster. */ - @AfterClass + @AfterAll public static void teardown() throws IOException { shutdownCluster(); deleteAuditLog(); @@ -195,7 +195,7 @@ public void testXXXAclSuccessAudits() throws Exception { OzoneVolume retVolumeinfo = store.getVolume(volumeName); verifyLog(OMAction.READ_VOLUME.name(), volumeName, AuditEventStatus.SUCCESS.name()); - Assert.assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName)); + Assertions.assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName)); OzoneObj volObj = new OzoneObjInfo.Builder() .setVolumeName(volumeName) @@ -207,7 +207,7 @@ public void testXXXAclSuccessAudits() throws Exception { List acls = store.getAcl(volObj); verifyLog(OMAction.GET_ACL.name(), volumeName, AuditEventStatus.SUCCESS.name()); - Assert.assertTrue(acls.size() > 0); + Assertions.assertTrue(acls.size() > 0); //Testing addAcl store.addAcl(volObj, USER_ACL); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index 54153744d7c..a8343c7512f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -24,7 +24,6 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import java.util.Arrays; import java.util.HashMap; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; @@ -60,7 +59,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static 
org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; /** @@ -118,8 +116,8 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { // Write data into a key try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, ReplicationType.RATIS, - THREE, new HashMap<>())) { + value.getBytes(UTF_8).length, ReplicationConfig.fromTypeAndFactor( + ReplicationType.RATIS, THREE), new HashMap<>())) { out.write(value.getBytes(UTF_8)); } @@ -133,7 +131,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = bucket.readKey(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertArrayEquals(b, value.getBytes(UTF_8)); } catch (OzoneChecksumException e) { fail("Read key should succeed"); } @@ -142,7 +140,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = bucket.readKey(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertArrayEquals(b, value.getBytes(UTF_8)); } catch (OzoneChecksumException e) { fail("Read file should succeed"); } @@ -157,7 +155,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = newBucket.readKey(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertArrayEquals(b, value.getBytes(UTF_8)); } catch (OzoneChecksumException e) { fail("Read key should succeed"); } @@ -166,7 +164,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { try (OzoneInputStream is = newBucket.readFile(keyName)) { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); + assertArrayEquals(b, value.getBytes(UTF_8)); } catch (OzoneChecksumException e) { fail("Read file should succeed"); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index 31175a06b4d..1dcc8adadda 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; @@ -61,10 +62,10 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * Tests the containerStateMachine failure handling. 
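Alongside the JUnit swap, these client tests also move off the older createKey overload that took a ReplicationType and a ReplicationFactor as separate arguments; the pair now travels as one ReplicationConfig. The call-shape change, as applied in the hunks above:

    // Before: type and factor passed separately
    bucket.createKey(keyName, size, ReplicationType.RATIS, THREE, new HashMap<>());

    // After: a single ReplicationConfig built from the same pair
    bucket.createKey(keyName, size,
        ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, THREE),
        new HashMap<>());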
@@ -84,7 +85,7 @@ public class TestValidateBCSIDOnRestart { * * @throws IOException */ - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); @@ -93,14 +94,14 @@ public static void init() throws Exception { conf.setFromObject(clientConfig); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); + TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); + TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); + TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1, - TimeUnit.SECONDS); + TimeUnit.SECONDS); RatisClientConfig ratisClientConfig = conf.getObject(RatisClientConfig.class); @@ -121,9 +122,9 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). - setHbInterval(200) - .build(); + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). + setHbInterval(200) + .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); //the easiest way to create an open container is creating a key @@ -139,7 +140,7 @@ public static void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -150,29 +151,31 @@ public static void shutdown() { @Test public void testValidateBCSIDOnDnRestart() throws Exception { OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor( + ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write("ratis".getBytes(UTF_8)); key.flush(); key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + groupOutputStream.getLocationInfoList(); + Assertions.assertEquals(1, locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, - cluster); + cluster); ContainerData containerData = - TestHelper.getDatanodeService(omKeyLocationInfo, cluster) - .getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); + TestHelper.getDatanodeService(omKeyLocationInfo, cluster) + .getDatanodeStateMachine() + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()) + .getContainerData(); + Assertions.assertTrue(containerData instanceof KeyValueContainerData); KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) containerData; + (KeyValueContainerData) containerData; key.close(); long containerID = omKeyLocationInfo.getContainerID(); @@ -182,49 +185,50 @@ public void testValidateBCSIDOnDnRestart() throws Exception { HddsDatanodeService dnService = 
cluster.getHddsDatanodes().get(index); OzoneContainer ozoneContainer = - dnService.getDatanodeStateMachine() - .getContainer(); + dnService.getDatanodeStateMachine() + .getContainer(); ozoneContainer.getContainerSet().removeContainer(containerID); ContainerStateMachine stateMachine = - (ContainerStateMachine) TestHelper.getStateMachine(cluster. - getHddsDatanodes().get(index), - omKeyLocationInfo.getPipeline()); + (ContainerStateMachine) TestHelper.getStateMachine(cluster. + getHddsDatanodes().get(index), + omKeyLocationInfo.getPipeline()); SimpleStateMachineStorage storage = - (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); + (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); stateMachine.takeSnapshot(); final Path parentPath = StatemachineImplTestUtil.findLatestSnapshot(storage) .getFile().getPath(); stateMachine.buildMissingContainerSet(parentPath.toFile()); // Since the snapshot threshold is set to 1, since there are // applyTransactions, we should see snapshots - Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); + Assertions.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); // make sure the missing containerSet is not empty HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher(); - Assert.assertTrue(!dispatcher.getMissingContainerSet().isEmpty()); - Assert - .assertTrue(dispatcher.getMissingContainerSet() - .contains(containerID)); + Assertions.assertFalse(dispatcher.getMissingContainerSet().isEmpty()); + Assertions + .assertTrue(dispatcher.getMissingContainerSet() + .contains(containerID)); // write a new key key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + .createKey("ratis", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.ONE), new HashMap<>()); // First write and flush creates a container in the datanode key.write("ratis1".getBytes(UTF_8)); key.flush(); groupOutputStream = (KeyOutputStream) key.getOutputStream(); locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + Assertions.assertEquals(1, locationInfoList.size()); omKeyLocationInfo = locationInfoList.get(0); key.close(); containerID = omKeyLocationInfo.getContainerID(); dn = TestHelper.getDatanodeService(omKeyLocationInfo, - cluster); + cluster); containerData = dn.getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()) + .getContainerData(); + Assertions.assertTrue(containerData instanceof KeyValueContainerData); keyValueContainerData = (KeyValueContainerData) containerData; try (DBHandle db = BlockUtils.getDB(keyValueContainerData, conf)) { @@ -239,11 +243,10 @@ public void testValidateBCSIDOnDnRestart() throws Exception { index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); cluster.restartHddsDatanode(dn.getDatanodeDetails(), true); // Make sure the container is marked unhealthy - Assert.assertTrue( - cluster.getHddsDatanodes().get(index) - .getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); + Assertions.assertSame(cluster.getHddsDatanodes().get(index) + 
.getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(containerID) + .getContainerState(), + ContainerProtos.ContainerDataProto.State.UNHEALTHY); } } From 594b900af537e5893773381ee27922a916cd89f7 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 19 Dec 2023 13:08:21 +0100 Subject: [PATCH 07/28] HDDS-9953. Simplify assertions in hadoop-hdds (#5817) --- ...erverNotLeaderExceptionMessageParsing.java | 6 +-- .../common/helpers/TestExcludeList.java | 10 ++--- .../hadoop/hdds/utils/TestResourceCache.java | 4 +- .../TestChecksumImplsComputeSameValues.java | 6 +-- .../ozone/TestHddsSecureDatanodeInit.java | 20 ++++------ .../common/TestDatanodeStoreCache.java | 2 +- ...leRecoveringContainerScrubbingService.java | 2 +- .../common/impl/TestContainerPersistence.java | 7 ++-- .../endpoint/TestHeartbeatEndpointTask.java | 8 ++-- .../keyvalue/impl/TestBlockManagerImpl.java | 5 +-- .../hdds/utils/db/TestRDBTableStore.java | 14 ++----- .../balancer/TestContainerBalancer.java | 40 +++++++------------ .../TestSCMContainerPlacementRackAware.java | 5 +-- .../replication/TestReplicationManager.java | 5 ++- .../hdds/scm/node/TestNodeReportHandler.java | 12 +++--- .../scm/pipeline/TestPipelineManagerImpl.java | 3 +- .../server/TestSCMBlockProtocolServer.java | 10 ++--- 17 files changed, 67 insertions(+), 92 deletions(-) diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java index 05ad9700578..00c290bc8d6 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestServerNotLeaderExceptionMessageParsing.java @@ -56,8 +56,7 @@ public void testServerNotLeaderException() { "at org.apache.hadoop.hdds.ratis.ServerNotLeaderException" + ".convertToNotLeaderException(ServerNotLeaderException.java:96)"; snle = new ServerNotLeaderException(message); - Assertions.assertEquals(null, - snle.getSuggestedLeader()); + Assertions.assertNull(snle.getSuggestedLeader()); message = "Server:7fdd7170-75cc-4e11-b343-c2657c2f2f39 is not the " + "leader.Suggested leader is Server:localhost:98634:8988 \n" + @@ -72,8 +71,7 @@ public void testServerNotLeaderException() { "at org.apache.hadoop.hdds.ratis.ServerNotLeaderException" + ".convertToNotLeaderException(ServerNotLeaderException.java)"; snle = new ServerNotLeaderException(message); - Assertions.assertEquals(null, - snle.getSuggestedLeader()); + Assertions.assertNull(snle.getSuggestedLeader()); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java index d5330749aec..c878124cd19 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java @@ -39,9 +39,9 @@ public void excludeNodesShouldBeCleanedBasedOnGivenTime() { .setIpAddress("127.0.0.1").setHostName("localhost").addPort( DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) .build()); - Assertions.assertTrue(list.getDatanodes().size() == 1); + Assertions.assertEquals(1, 
list.getDatanodes().size()); clock.fastForward(11); - Assertions.assertTrue(list.getDatanodes().size() == 0); + Assertions.assertEquals(0, list.getDatanodes().size()); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) .setIpAddress("127.0.0.2").setHostName("localhost").addPort( DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) @@ -50,7 +50,7 @@ public void excludeNodesShouldBeCleanedBasedOnGivenTime() { .setIpAddress("127.0.0.3").setHostName("localhost").addPort( DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) .build()); - Assertions.assertTrue(list.getDatanodes().size() == 2); + Assertions.assertEquals(2, list.getDatanodes().size()); } @Test @@ -60,8 +60,8 @@ public void excludeNodeShouldNotBeCleanedIfExpiryTimeIsZero() { .setIpAddress("127.0.0.1").setHostName("localhost").addPort( DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) .build()); - Assertions.assertTrue(list.getDatanodes().size() == 1); + Assertions.assertEquals(1, list.getDatanodes().size()); clock.fastForward(1); - Assertions.assertTrue(list.getDatanodes().size() == 1); + Assertions.assertEquals(1, list.getDatanodes().size()); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java index 54d59af03d1..3acaee85fa4 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceCache.java @@ -49,7 +49,7 @@ public void testResourceCache() throws InterruptedException { // put to cache with removing old element "6" as eviction FIFO resourceCache.put(1, "a"); Assertions.assertNull(resourceCache.get(6)); - Assertions.assertTrue(count.get() == 1); + Assertions.assertEquals(1, count.get()); // add 5 should be success with no removal resourceCache.put(5, "a"); @@ -58,7 +58,7 @@ public void testResourceCache() throws InterruptedException { // remove and check queue resourceCache.remove(4); Assertions.assertNull(resourceCache.get(4)); - Assertions.assertTrue(count.get() == 1); + Assertions.assertEquals(1, count.get()); } @Test diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java index 5e02ceaf083..eeba2a8e422 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.zip.CRC32; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests to verify that different checksum implementations compute the same @@ -52,7 +52,7 @@ public void testCRC32ImplsMatch() { if (NativeCRC32Wrapper.isAvailable()) { impls.add(new ChecksumByteBufferImpl(new NativeCheckSumCRC32(1, bpc))); } - assertEquals(true, validateImpls(data, impls, bpc)); + assertTrue(validateImpls(data, impls, bpc)); } } @@ -74,7 +74,7 @@ public void testCRC32CImplsMatch() { if (NativeCRC32Wrapper.isAvailable()) { impls.add(new ChecksumByteBufferImpl(new NativeCheckSumCRC32(2, bpc))); } - assertEquals(true, validateImpls(data, impls, bpc)); + assertTrue(validateImpls(data, impls, bpc)); } } diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java index 8c3558879ae..beca5b2ee6e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java @@ -53,6 +53,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOKEN_CHECKS_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.mockito.ArgumentMatchers.anyObject; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; @@ -342,8 +343,7 @@ public void testCertificateRotation() throws Exception { when(scmClient.getAllRootCaCertificates()).thenReturn(rootCaList); // check that the new cert ID does not equal the current cert ID String certId = newCertHolder.getSerialNumber().toString(); - Assertions.assertFalse(certId.equals( - client.getCertificate().getSerialNumber().toString())); + assertNotEquals(certId, client.getCertificate().getSerialNumber().toString()); // start monitor task to renew key and cert client.startCertificateRenewerService(); @@ -382,12 +382,10 @@ public void testCertificateRotation() throws Exception { String newCertId = client.getCertificate().getSerialNumber().toString(); return newCertId.equals(certId2); }, 1000, CERT_LIFETIME * 1000); - Assertions.assertFalse(client.getPrivateKey().equals(privateKey1)); - Assertions.assertFalse(client.getPublicKey().equals(publicKey1)); - Assertions.assertFalse(client.getCACertificate().getSerialNumber() - .toString().equals(caCertId1)); - Assertions.assertFalse(client.getRootCACertificate().getSerialNumber() - .toString().equals(rootCaCertId1)); + assertNotEquals(privateKey1, client.getPrivateKey()); + assertNotEquals(publicKey1, client.getPublicKey()); + assertNotEquals(caCertId1, client.getCACertificate().getSerialNumber().toString()); + assertNotEquals(rootCaCertId1, client.getRootCACertificate().getSerialNumber().toString()); } /** @@ -417,16 +415,14 @@ public void testCertificateRotationRecoverableFailure() throws Exception { // check that the new cert ID does not equal the current cert ID String certId = newCertHolder.getSerialNumber().toString(); - Assertions.assertFalse(certId.equals( - client.getCertificate().getSerialNumber().toString())); + assertNotEquals(certId, client.getCertificate().getSerialNumber().toString()); // start monitor task to renew key and cert client.startCertificateRenewerService(); // certificate failed to renew; client still holds the old expired cert. 
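These rewrites buy better diagnostics, not just shorter code: Assertions.assertFalse(a.equals(b)) can only report that a condition was unexpectedly true, while assertNotEquals reports the offending value on failure. The pattern as applied in this hunk:

    // Before: a failure says only "expected: <false> but was: <true>"
    Assertions.assertFalse(certId.equals(
        client.getCertificate().getSerialNumber().toString()));

    // After: a failure prints the value that was unexpectedly equal
    assertNotEquals(certId, client.getCertificate().getSerialNumber().toString());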
Thread.sleep(CERT_LIFETIME * 1000); - Assertions.assertFalse(certId.equals( - client.getCertificate().getSerialNumber().toString())); + assertNotEquals(certId, client.getCertificate().getSerialNumber().toString()); try { client.getCertificate().checkValidity(); } catch (Exception e) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java index 546fcf5155d..6cf3b2cee5a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java @@ -61,7 +61,7 @@ public void testBasicOperations() throws IOException { Assertions.assertEquals(2, cache.size()); // test get, test reference the same object using == - Assertions.assertTrue(store1 == cache.getDB(dbPath1, conf).getStore()); + Assertions.assertSame(store1, cache.getDB(dbPath1, conf).getStore()); // test remove cache.removeDB(dbPath1); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java index 5076fed0b69..0979b2fe6f2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java @@ -162,7 +162,7 @@ public void testScrubbingStaleRecoveringContainers( testClock.fastForward(1000L); srcss.runPeriodicalTaskNow(); //closed container should not be scrubbed - Assertions.assertTrue(containerSet.containerCount() == 5); + Assertions.assertEquals(5, containerSet.containerCount()); containerStateMap.putAll(createTestContainers(containerSet, 5, RECOVERING).stream() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index de5d8f64561..602e9d82873 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -92,6 +92,7 @@ import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeFalse; @@ -614,7 +615,7 @@ private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException { .getVolume().getCommittedBytes(); commitDecrement = commitBytesBefore - commitBytesAfter; // did we decrement commit bytes by the amount of data we wrote? 
- Assertions.assertTrue(commitDecrement == info.getLen()); + assertEquals(commitDecrement, info.getLen()); return info; } @@ -810,7 +811,7 @@ public void testPutBlockWithInvalidBCSId(ContainerTestVersionInfo versionInfo) getBlock(container, blockID1); Assertions.fail("Expected exception not thrown"); } catch (StorageContainerException sce) { - Assertions.assertTrue(sce.getResult() == UNKNOWN_BCSID); + assertSame(UNKNOWN_BCSID, sce.getResult()); } try { @@ -821,7 +822,7 @@ public void testPutBlockWithInvalidBCSId(ContainerTestVersionInfo versionInfo) getBlock(container, blockID1); Assertions.fail("Expected exception not thrown"); } catch (StorageContainerException sce) { - Assertions.assertTrue(sce.getResult() == BCSID_MISMATCH); + assertSame(BCSID_MISMATCH, sce.getResult()); } readBlockData = blockManager. getBlock(container, blockData.getBlockID()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java index d0b9e80f3cc..96643789f4e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java @@ -134,7 +134,7 @@ public void testheartbeatWithoutReports() throws Exception { Assertions.assertTrue(heartbeat.hasDatanodeDetails()); Assertions.assertFalse(heartbeat.hasNodeReport()); Assertions.assertFalse(heartbeat.hasContainerReport()); - Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); + Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount()); Assertions.assertFalse(heartbeat.hasContainerActions()); OptionalLong termInDatanode = context.getTermOfLeaderSCM(); Assertions.assertTrue(termInDatanode.isPresent()); @@ -169,7 +169,7 @@ public void testheartbeatWithNodeReports() throws Exception { Assertions.assertTrue(heartbeat.hasDatanodeDetails()); Assertions.assertTrue(heartbeat.hasNodeReport()); Assertions.assertFalse(heartbeat.hasContainerReport()); - Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); + Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount()); Assertions.assertFalse(heartbeat.hasContainerActions()); } @@ -201,7 +201,7 @@ public void testheartbeatWithContainerReports() throws Exception { Assertions.assertTrue(heartbeat.hasDatanodeDetails()); Assertions.assertFalse(heartbeat.hasNodeReport()); Assertions.assertTrue(heartbeat.hasContainerReport()); - Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); + Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount()); Assertions.assertFalse(heartbeat.hasContainerActions()); } @@ -266,7 +266,7 @@ public void testheartbeatWithContainerActions() throws Exception { Assertions.assertTrue(heartbeat.hasDatanodeDetails()); Assertions.assertFalse(heartbeat.hasNodeReport()); Assertions.assertFalse(heartbeat.hasContainerReport()); - Assertions.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); + Assertions.assertEquals(0, heartbeat.getCommandStatusReportsCount()); Assertions.assertTrue(heartbeat.hasContainerActions()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java index a8e4cb81410..a7d6364a967 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java @@ -46,7 +46,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; @@ -202,7 +201,7 @@ public void testListBlock(ContainerTestVersionInfo versionInfo) List listBlockData = blockManager.listBlock( keyValueContainer, 1, 10); assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 1); + assertEquals(1, listBlockData.size()); for (long i = 2; i <= 10; i++) { blockID = new BlockID(1L, i); @@ -221,6 +220,6 @@ public void testListBlock(ContainerTestVersionInfo versionInfo) listBlockData = blockManager.listBlock( keyValueContainer, 1, 10); assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 10); + assertEquals(10, listBlockData.size()); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java index df6fb5795fd..3ff9ece6b8c 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java @@ -562,8 +562,7 @@ public void testPrefixedIterator() throws Exception { int keyCount = 0; while (iter.hasNext()) { // iterator should only meet keys with samplePrefix - assertArrayEquals( - Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), samplePrefix); + assertArrayEquals(samplePrefix, Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH)); keyCount++; } @@ -573,8 +572,7 @@ public void testPrefixedIterator() throws Exception { // iterator should be able to seekToFirst iter.seekToFirst(); assertTrue(iter.hasNext()); - assertArrayEquals(Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), - samplePrefix); + assertArrayEquals(samplePrefix, Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH)); } } } @@ -708,9 +706,7 @@ public void testDumpAndLoadBasic() throws Exception { int keyCount = 0; while (iter.hasNext()) { // check prefix - assertTrue(Arrays.equals( - Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), - samplePrefix)); + assertArrayEquals(Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), samplePrefix); keyCount++; } @@ -751,9 +747,7 @@ public void testDumpAndLoadEmpty() throws Exception { int keyCount = 0; while (iter.hasNext()) { // check prefix - assertTrue(Arrays.equals( - Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), - samplePrefix)); + assertArrayEquals(Arrays.copyOf(iter.next().getKey(), PREFIX_LENGTH), samplePrefix); keyCount++; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java index d5ae7457db4..1de5d6b1a0b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java @@ -45,6 +45,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -127,31 +128,26 @@ public void testStartBalancerStop() throws Exception { startBalancer(balancerConfiguration); try { containerBalancer.startBalancer(balancerConfiguration); - Assertions.assertTrue(false, - "Exception should be thrown when startBalancer again"); + Assertions.fail("Exception should be thrown when startBalancer again"); } catch (IllegalContainerBalancerStateException e) { // start failed again, valid case } try { containerBalancer.start(); - Assertions.assertTrue(false, - "Exception should be thrown when start again"); + Assertions.fail("Exception should be thrown when start again"); } catch (IllegalContainerBalancerStateException e) { // start failed again, valid case } - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.RUNNING); + assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus()); stopBalancer(); - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.STOPPED); + assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus()); try { containerBalancer.stopBalancer(); - Assertions.assertTrue(false, - "Exception should be thrown when stop again"); + Assertions.fail("Exception should be thrown when stop again"); } catch (Exception e) { // stop failed as already stopped, valid case } @@ -161,23 +157,19 @@ public void testStartBalancerStop() throws Exception { public void testStartStopSCMCalls() throws Exception { containerBalancer.saveConfiguration(balancerConfiguration, true, 0); containerBalancer.start(); - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.RUNNING); + assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus()); containerBalancer.notifyStatusChanged(); try { containerBalancer.start(); - Assertions.assertTrue(false, - "Exception should be thrown when start again"); + Assertions.fail("Exception should be thrown when start again"); } catch (IllegalContainerBalancerStateException e) { // start failed when triggered again, valid case } - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.RUNNING); + assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus()); containerBalancer.stop(); - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.STOPPED); + assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus()); containerBalancer.saveConfiguration(balancerConfiguration, false, 0); } @@ -186,20 +178,16 @@ public void testNotifyStateChangeStopStart() throws Exception { containerBalancer.startBalancer(balancerConfiguration); scm.getScmContext().updateLeaderAndTerm(false, 1); - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.RUNNING); + assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus()); containerBalancer.notifyStatusChanged(); - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == 
ContainerBalancerTask.Status.STOPPED); + assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus()); scm.getScmContext().updateLeaderAndTerm(true, 2); scm.getScmContext().setLeaderReady(); containerBalancer.notifyStatusChanged(); - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.RUNNING); + assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus()); containerBalancer.stop(); - Assertions.assertTrue(containerBalancer.getBalancerStatus() - == ContainerBalancerTask.Status.STOPPED); + assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus()); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 6016b2f14d3..92f05d772fe 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -423,7 +423,7 @@ public void testNoInfiniteLoop(int datanodeCount) { policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15); fail("Storage requested exceeds capacity, this call should fail"); } catch (Exception e) { - assertTrue(e.getClass().getSimpleName().equals("SCMException")); + assertEquals("SCMException", e.getClass().getSimpleName()); } // get metrics @@ -833,8 +833,7 @@ public void chooseNodeWithUsedAndFavouredNodesMultipleRack() // Favoured node should be returned, // as favoured node is in the different rack as used nodes. 
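assertSame, introduced in several of these hunks, checks reference identity (==) rather than equals. For enum constants such as ContainerBalancerTask.Status the two coincide because each constant is a singleton, but for value objects like the UUIDs compared in the rack-awareness hunk that follows, identity only holds when both sides return the very same instance. A small illustrative sketch, not from the patch:

```java
import java.util.UUID;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;

class SameVersusEqualsSketch {
  void demo() {
    UUID a = UUID.fromString("c81d4e2e-bcf2-11e6-869b-7df92533d2db");
    UUID b = UUID.fromString("c81d4e2e-bcf2-11e6-869b-7df92533d2db");
    assertEquals(a, b); // passes: equal by value
    assertSame(a, a);   // passes: same reference
    // assertSame(a, b) would fail: two distinct but equal instances
  }
}
```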
- Assertions.assertTrue(favouredNodes.get(0).getUuid() == - datanodeDetails.get(0).getUuid()); + Assertions.assertSame(favouredNodes.get(0).getUuid(), datanodeDetails.get(0).getUuid()); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index d2b2d18d358..a9093778793 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -84,6 +84,7 @@ import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createReplicasWithSameOrigin; import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.getNoNodesTestPlacementPolicy; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -518,7 +519,7 @@ public void testHealthyContainerStatus() throws ContainerNotFoundException { boolean result = replicationManager.checkContainerStatus( container, repReport); - assertEquals(false, result); + assertFalse(result); } @Test @@ -546,7 +547,7 @@ public void testUnderReplicatedContainerStatus() container, repReport); assertEquals(1, repReport.getStat( ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - assertEquals(true, result); + assertTrue(result); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index 6f800a4d15d..dd919548cb1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -96,9 +96,9 @@ public void testNodeReport() throws IOException { Arrays.asList(metaStorageOne)).getReport(), null); nodeMetric = nodeManager.getNodeStat(dn); - Assertions.assertTrue(nodeMetric.get().getCapacity().get() == 100); - Assertions.assertTrue(nodeMetric.get().getRemaining().get() == 90); - Assertions.assertTrue(nodeMetric.get().getScmUsed().get() == 10); + Assertions.assertEquals(100, (long) nodeMetric.get().getCapacity().get()); + Assertions.assertEquals(90, (long) nodeMetric.get().getRemaining().get()); + Assertions.assertEquals(10, (long) nodeMetric.get().getScmUsed().get()); StorageReportProto storageTwo = HddsTestUtils .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); @@ -107,9 +107,9 @@ public void testNodeReport() throws IOException { Arrays.asList(metaStorageOne)), this); nodeMetric = nodeManager.getNodeStat(dn); - Assertions.assertTrue(nodeMetric.get().getCapacity().get() == 200); - Assertions.assertTrue(nodeMetric.get().getRemaining().get() == 180); - Assertions.assertTrue(nodeMetric.get().getScmUsed().get() == 20); + Assertions.assertEquals(200, (long) nodeMetric.get().getCapacity().get()); + Assertions.assertEquals(180, (long) nodeMetric.get().getRemaining().get()); + Assertions.assertEquals(20, (long) nodeMetric.get().getScmUsed().get()); } diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 48f82b5cc95..ce6c78f5b90 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -930,8 +930,7 @@ public void testWaitForAllocatedPipeline() ContainerInfo c = provider.getContainer(1, repConfig, owner, new ExcludeList()); - Assertions.assertTrue(c.equals(container), - "Expected container was returned"); + Assertions.assertEquals(c, container, "Expected container was returned"); // Confirm that waitOnePipelineReady was called on allocated pipelines ArgumentCaptor> captor = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java index 38f5dee109f..0bed0337d6f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java @@ -250,7 +250,7 @@ public void testSortDatanodes() throws Exception { System.out.println("client = " + client); datanodeDetails.stream().forEach( node -> System.out.println(node.toString())); - Assertions.assertTrue(datanodeDetails.size() == NODE_COUNT); + Assertions.assertEquals(NODE_COUNT, datanodeDetails.size()); // illegal client 1 client += "X"; @@ -258,14 +258,14 @@ public void testSortDatanodes() throws Exception { System.out.println("client = " + client); datanodeDetails.stream().forEach( node -> System.out.println(node.toString())); - Assertions.assertTrue(datanodeDetails.size() == NODE_COUNT); + Assertions.assertEquals(NODE_COUNT, datanodeDetails.size()); // illegal client 2 client = "/default-rack"; datanodeDetails = server.sortDatanodes(nodes, client); System.out.println("client = " + client); datanodeDetails.stream().forEach( node -> System.out.println(node.toString())); - Assertions.assertTrue(datanodeDetails.size() == NODE_COUNT); + Assertions.assertEquals(NODE_COUNT, datanodeDetails.size()); // unknown node to sort nodes.add(UUID.randomUUID().toString()); @@ -278,7 +278,7 @@ public void testSortDatanodes() throws Exception { .build(); ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp = service.sortDatanodes(request, ClientVersion.CURRENT_VERSION); - Assertions.assertTrue(resp.getNodeList().size() == NODE_COUNT); + Assertions.assertEquals(NODE_COUNT, resp.getNodeList().size()); System.out.println("client = " + client); resp.getNodeList().stream().forEach( node -> System.out.println(node.getNetworkName())); @@ -295,7 +295,7 @@ public void testSortDatanodes() throws Exception { .build(); resp = service.sortDatanodes(request, ClientVersion.CURRENT_VERSION); System.out.println("client = " + client); - Assertions.assertTrue(resp.getNodeList().size() == 0); + Assertions.assertEquals(0, resp.getNodeList().size()); resp.getNodeList().stream().forEach( node -> System.out.println(node.getNetworkName())); } From 71019a866b17c34fe30d3a90d019d47ddebcd5b7 Mon Sep 17 00:00:00 2001 From: Christos Bisias Date: Tue, 19 Dec 2023 19:41:59 +0200 Subject: [PATCH 08/28] HDDS-9933. 
Recon datanode 'Last Heartbeat' should print relative values (#5801) --- .../src/views/datanodes/datanodes.tsx | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index e418bf2fefd..6a6118494fa 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -42,7 +42,7 @@ interface IDatanodeResponse { hostname: string; state: DatanodeState; opState: DatanodeOpState; - lastHeartbeat: number; + lastHeartbeat: string; storageReport: IStorageReport; pipelines: IPipeline[]; containers: number; @@ -182,7 +182,7 @@ const COLUMNS = [ isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.lastHeartbeat - b.lastHeartbeat, render: (heartbeat: number) => { - return heartbeat > 0 ? moment(heartbeat).format('ll LTS') : 'NA'; + return heartbeat > 0 ? getTimeDiffFromTimestamp(heartbeat) : 'NA'; } }, { @@ -303,6 +303,26 @@ const defaultColumns: IOption[] = COLUMNS.map(column => ({ value: column.key })); +const getTimeDiffFromTimestamp = (timestamp: number): string => { + const timestampDate = new Date(timestamp); + const currentDate = new Date(); + + let elapsedTime = ""; + let duration: moment.Duration = moment.duration( + moment(currentDate).diff(moment(timestampDate)) + ) + + const durationKeys = ["seconds", "minutes", "hours", "days", "months", "years"] + durationKeys.forEach((k) => { + let time = duration["_data"][k] + if (time !== 0){ + elapsedTime = time + `${k.substring(0, 1)} ` + elapsedTime + } + }) + + return elapsedTime.trim().length === 0 ? "Just now" : elapsedTime.trim() + " ago"; +} + let cancelSignal: AbortController; export class Datanodes extends React.Component, IDatanodesState> { From 42ded03e41c49648e51c1fd9001de85db44b9eab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 20:03:29 +0100 Subject: [PATCH 09/28] HDDS-9966. Bump maven-shade-plugin to 3.5.1 (#5823) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index f922e7480c0..95a0be71875 100644 --- a/pom.xml +++ b/pom.xml @@ -265,7 +265,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.1 3.1.1 3.1.0 - 3.4.1 + 3.5.1 2.5 3.4.0 3.3.0 From dabdedd5addc3d2c2c94cd210e202c3cb89a8acc Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Wed, 20 Dec 2023 02:15:44 +0530 Subject: [PATCH 10/28] HDDS-5604. 
Intermittent failure in TestPipelineClose (#5825) --- .../apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index 6c66ecf3185..99dd1d1768d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -47,7 +47,6 @@ import org.apache.ratis.protocol.RaftGroupId; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.ArgumentCaptor; @@ -209,10 +208,8 @@ public void testPipelineCloseWithPipelineAction() throws Exception { } @Test - @Flaky("HDDS-5604") public void testPipelineCloseWithLogFailure() throws IOException, TimeoutException { - EventQueue eventQ = (EventQueue) scm.getEventQueue(); PipelineActionHandler pipelineActionTest = Mockito.mock(PipelineActionHandler.class); @@ -247,9 +244,7 @@ public void testPipelineCloseWithLogFailure() * This is expected to trigger an immediate pipeline actions report to SCM */ xceiverRatis.handleNodeLogFailure(groupId, null); - - // verify SCM receives a pipeline action report "immediately" - Mockito.verify(pipelineActionTest, Mockito.timeout(100)) + Mockito.verify(pipelineActionTest, Mockito.timeout(1500).atLeastOnce()) .onMessage( actionCaptor.capture(), Mockito.any(EventPublisher.class)); From b55437f06b4ad524ebf4af42b31a92541160bede Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 20 Dec 2023 00:07:32 +0100 Subject: [PATCH 11/28] HDDS-9829. 
Bump `jaxb-api` to 2.3.3, `jaxb-runtime` to 2.3.9 (#5777) --- hadoop-hdds/container-service/pom.xml | 4 ++-- hadoop-ozone/datanode/pom.xml | 8 +++---- .../dist/src/main/license/bin/LICENSE.txt | 7 +----- .../dist/src/main/license/jar-report.txt | 8 ++----- hadoop-ozone/httpfsgateway/pom.xml | 4 ++-- hadoop-ozone/insight/pom.xml | 8 +++---- hadoop-ozone/recon/pom.xml | 10 ++++++-- hadoop-ozone/s3gateway/pom.xml | 8 +++---- hadoop-ozone/tools/pom.xml | 8 +++---- pom.xml | 24 +++++++++---------- 10 files changed, 43 insertions(+), 46 deletions(-) diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 0c271508e3b..079847f7c49 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -109,8 +109,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test - javax.xml.bind - jaxb-api + jakarta.xml.bind + jakarta.xml.bind-api org.glassfish.jaxb diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 8c5843d382d..2f219334889 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -48,16 +48,16 @@ hdds-container-service - javax.xml.bind - jaxb-api + jakarta.xml.bind + jakarta.xml.bind-api org.glassfish.jaxb jaxb-runtime - javax.activation - activation + jakarta.activation + jakarta.activation-api diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index a361067ae7f..465f663bc55 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -210,6 +210,7 @@ See licenses/ for text of these licenses. EDL 1.0 ===================== + com.sun.activation:jakarta.activation jakarta.activation:jakarta.activation-api jakarta.xml.bind:jakarta.xml.bind-api @@ -232,14 +233,12 @@ CDDL 1.1 + GPLv2 with classpath exception com.sun.jersey:jersey-json com.sun.jersey:jersey-server com.sun.jersey:jersey-servlet - javax.activation:activation javax.annotation:javax.annotation-api javax.el:javax.el-api javax.interceptor:javax.interceptor-api javax.servlet:javax.servlet-api javax.servlet.jsp:jsp-api javax.ws.rs:jsr311-api - javax.xml.bind:jaxb-api org.glassfish.hk2.external:aopalliance-repackaged org.glassfish.hk2.external:jakarta.inject org.glassfish.hk2.external:javax.inject @@ -249,8 +248,6 @@ CDDL 1.1 + GPLv2 with classpath exception org.glassfish.hk2:hk2-utils org.glassfish.hk2:osgi-resource-locator org.glassfish.jaxb:jaxb-runtime - org.glassfish.jaxb:jaxb-core - org.glassfish.jaxb:jaxb-runtime org.glassfish.jaxb:txw2 org.glassfish.jersey.containers:jersey-container-servlet org.glassfish.jersey.containers:jersey-container-servlet-core @@ -262,7 +259,6 @@ CDDL 1.1 + GPLv2 with classpath exception org.glassfish.jersey.inject:jersey-hk2 org.glassfish.jersey.media:jersey-media-jaxb org.glassfish.jersey.media:jersey-media-json-jackson - org.jvnet.staxex:stax-ex Apache License 2.0 @@ -301,7 +297,6 @@ Apache License 2.0 com.nimbusds:nimbus-jose-jwt com.squareup.okhttp3:okhttp com.squareup.okio:okio - com.sun.xml.fastinfoset:FastInfoset commons-beanutils:commons-beanutils commons-cli:commons-cli commons-codec:commons-codec diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index e9a781862d3..8792390b2c2 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -1,4 +1,3 @@ -share/ozone/lib/activation.jar 
share/ozone/lib/animal-sniffer-annotations.jar share/ozone/lib/annotations.jar share/ozone/lib/annotations.jar @@ -40,7 +39,6 @@ share/ozone/lib/disruptor.jar share/ozone/lib/dnsjava.jar share/ozone/lib/error_prone_annotations.jar share/ozone/lib/failureaccess.jar -share/ozone/lib/FastInfoset.jar share/ozone/lib/gethostname4j.jar share/ozone/lib/grpc-api.jar share/ozone/lib/grpc-context.jar @@ -107,6 +105,7 @@ share/ozone/lib/jaeger-client.jar share/ozone/lib/jaeger-core.jar share/ozone/lib/jaeger-thrift.jar share/ozone/lib/jaeger-tracerresolver.jar +share/ozone/lib/jakarta.activation.jar share/ozone/lib/jakarta.activation-api.jar share/ozone/lib/jakarta.annotation-api.jar share/ozone/lib/jakarta.inject.jar @@ -119,8 +118,6 @@ share/ozone/lib/javax.el-api.jar share/ozone/lib/javax.inject.jar share/ozone/lib/javax.interceptor-api.jar share/ozone/lib/javax.servlet-api.jar -share/ozone/lib/jaxb-api.jar -share/ozone/lib/jaxb-core.jar share/ozone/lib/jaxb-runtime.jar share/ozone/lib/jcip-annotations.jar share/ozone/lib/jersey-cdi1x.jar @@ -266,11 +263,10 @@ share/ozone/lib/spring-jdbc.jar share/ozone/lib/spring-tx.jar share/ozone/lib/sqlite-jdbc.jar share/ozone/lib/stax2-api.jar -share/ozone/lib/stax-ex.jar share/ozone/lib/txw2.jar share/ozone/lib/vault-java-driver.jar share/ozone/lib/weld-servlet-shaded.Final.jar share/ozone/lib/woodstox-core.jar share/ozone/lib/zookeeper.jar share/ozone/lib/zookeeper-jute.jar -share/ozone/lib/zstd-jni.jar \ No newline at end of file +share/ozone/lib/zstd-jni.jar diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index 1ce25c2beac..f56b4006d85 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -150,8 +150,8 @@ - javax.xml.bind - jaxb-api + jakarta.xml.bind + jakarta.xml.bind-api org.glassfish.jaxb diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index c8106b10829..4be02577e09 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -66,16 +66,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-tools - javax.xml.bind - jaxb-api + jakarta.xml.bind + jakarta.xml.bind-api org.glassfish.jaxb jaxb-runtime - javax.activation - activation + jakarta.activation + jakarta.activation-api io.dropwizard.metrics diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 11f3dee917a..4985bb99374 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -338,6 +338,12 @@ org.jooq jooq ${jooq.version} + + + javax.xml.bind + jaxb-api + + org.jooq @@ -373,8 +379,8 @@ - javax.activation - activation + jakarta.activation + jakarta.activation-api org.javassist diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index e22f0dc9ac1..a8e72a362fa 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -87,16 +87,16 @@ cdi-api - javax.xml.bind - jaxb-api + jakarta.xml.bind + jakarta.xml.bind-api org.glassfish.jaxb jaxb-runtime - javax.activation - activation + jakarta.activation + jakarta.activation-api io.grpc diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index b09b92d0e2e..c413d691181 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -74,16 +74,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - javax.xml.bind - jaxb-api + jakarta.xml.bind + jakarta.xml.bind-api org.glassfish.jaxb jaxb-runtime - javax.activation - activation + jakarta.activation + jakarta.activation-api io.dropwizard.metrics diff --git a/pom.xml b/pom.xml 
index 95a0be71875..638da75d439 100644 --- a/pom.xml +++ b/pom.xml @@ -148,9 +148,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.2.4 0.8.5 3.21.0-GA - 1.1.1 - 2.3.0 - 2.3.0.1 + 1.2.2 + 2.3.3 + 2.3.9 0.1.54 2.0 3.1.0 @@ -202,7 +202,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.0.0 3.1.12 2.1.7 - 1.2.2 4.12.0 4.2.2 2.6.1 @@ -806,11 +805,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs zstd-jni ${zstd-jni.version} - - javax.activation - activation - ${javax-activation.version} - javax.annotation javax.annotation-api @@ -1393,14 +1387,20 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${findbugs.version} - javax.xml.bind - jaxb-api + jakarta.xml.bind + jakarta.xml.bind-api ${jaxb-api.version} org.glassfish.jaxb jaxb-runtime ${jaxb-runtime.version} + + + javax.xml.bind + jaxb-api + + com.sun.jersey @@ -1478,7 +1478,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs jakarta.activation jakarta.activation-api - ${jakarta.activation.version} + ${activation-api.version} com.squareup.okhttp3 From fdf8b6a93b7d7d0e93349c7632e07848dcd69f22 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 20 Dec 2023 02:56:34 +0100 Subject: [PATCH 12/28] HDDS-9885. Checkstyle check passing despite config error (#5755) --- hadoop-ozone/dev-support/checks/checkstyle.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index 97ddfa698df..cb8b6f8f915 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -30,13 +30,14 @@ declare -i rc mvn ${MAVEN_OPTIONS} checkstyle:check > "${REPORT_DIR}/output.log" rc=$? if [[ ${rc} -ne 0 ]]; then - mvn ${MAVEN_OPTIONS} clean test-compile checkstyle:check + mvn ${MAVEN_OPTIONS} clean test-compile checkstyle:check > output.log rc=$? mkdir -p "$REPORT_DIR" # removed by mvn clean -else - cat "${REPORT_DIR}/output.log" + mv output.log "${REPORT_DIR}"/ fi +cat "${REPORT_DIR}/output.log" + #Print out the exact violations with parsing XML results with sed find "." -name checkstyle-errors.xml -print0 \ | xargs -0 sed '$!N; //g" \ | tee "$REPORT_FILE" +# check if Maven failed due to some error other than checkstyle violation +if [[ ${rc} -ne 0 ]] && [[ ! -s "${REPORT_FILE}" ]]; then + grep -m1 -F '[ERROR]' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" +fi + ## generate counter grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures" From faa19906f664f3a68ccd3b1b9d6347dced279605 Mon Sep 17 00:00:00 2001 From: Siddhant Sangwan Date: Wed, 20 Dec 2023 11:54:06 +0530 Subject: [PATCH 13/28] HDDS-9592. 
Replication Manager: Save UNHEALTHY replicas with highest BCSID for a QUASI_CLOSED container (#5794) --- .../replication/ContainerHealthResult.java | 9 + .../ECUnderReplicationHandler.java | 2 +- .../LegacyRatisContainerReplicaCount.java | 9 +- .../replication/LegacyReplicationManager.java | 10 +- .../replication/MisReplicationHandler.java | 2 +- .../RatisContainerReplicaCount.java | 64 ++++-- .../RatisUnderReplicationHandler.java | 109 ++++++++- .../replication/ReplicationManager.java | 4 +- .../replication/ReplicationManagerUtil.java | 78 +++++-- .../VulnerableUnhealthyReplicasHandler.java | 102 ++++++++ .../scm/node/DatanodeAdminMonitorImpl.java | 16 +- .../TestRatisUnderReplicationHandler.java | 70 ++++++ .../replication/TestReplicationManager.java | 59 +++++ .../TestReplicationManagerUtil.java | 94 +++++++- ...estVulnerableUnhealthyReplicasHandler.java | 217 ++++++++++++++++++ .../node/DatanodeAdminMonitorTestUtil.java | 2 +- .../scm/node/TestDatanodeAdminMonitor.java | 75 ++++++ 17 files changed, 864 insertions(+), 58 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java index a2262cdafdd..0abe8f6ea34 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java @@ -113,6 +113,7 @@ public static class UnderReplicatedHealthResult private boolean hasUnReplicatedOfflineIndexes = false; private boolean offlineIndexesOkAfterPending = false; private int requeueCount = 0; + private boolean hasVulnerableUnhealthy = false; public UnderReplicatedHealthResult(ContainerInfo containerInfo, int remainingRedundancy, boolean dueToOutOfService, @@ -269,6 +270,14 @@ public boolean isMissing() { return isMissing; } + public void setHasVulnerableUnhealthy(boolean hasVulnerableUnhealthy) { + this.hasVulnerableUnhealthy = hasVulnerableUnhealthy; + } + + public boolean hasVulnerableUnhealthy() { + return hasVulnerableUnhealthy; + } + @Override public String toString() { StringBuilder sb = new StringBuilder("UnderReplicatedHealthResult{") diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java index daae24f7f2e..07d38c05dab 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java @@ -128,7 +128,7 @@ public int processAndSendCommands( container.containerID(), replicas); ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes = - ReplicationManagerUtil.getExcludedAndUsedNodes( + ReplicationManagerUtil.getExcludedAndUsedNodes(container, new ArrayList<>(replicas), Collections.emptySet(), pendingOps, replicationManager); List excludedNodes diff 
--git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java index f708ae1ead9..f491e2bd6f5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import java.util.List; import java.util.Set; @@ -130,6 +131,12 @@ && getReplicas().stream() public boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode, NodeManager nodeManager) { return super.isSufficientlyReplicated() && - super.getVulnerableUnhealthyReplicas(nodeManager).isEmpty(); + super.getVulnerableUnhealthyReplicas(dn -> { + try { + return nodeManager.getNodeStatus(dn); + } catch (NodeNotFoundException e) { + return null; + } + }).isEmpty(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java index 07a8f730ec0..04862e0d317 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java @@ -558,7 +558,15 @@ protected void processContainer(ContainerInfo container, * match the container's Sequence ID. 
*/ List vulnerableUnhealthy = - replicaSet.getVulnerableUnhealthyReplicas(nodeManager); + replicaSet.getVulnerableUnhealthyReplicas(dn -> { + try { + return nodeManager.getNodeStatus(dn); + } catch (NodeNotFoundException e) { + LOG.warn("Exception for datanode {} while getting vulnerable replicas for container {}, with all " + + "replicas {}.", dn, container, replicas, e); + return null; + } + }); if (!vulnerableUnhealthy.isEmpty()) { report.incrementAndSample(HealthState.UNDER_REPLICATED, container.containerID()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java index 70b2a444276..636b0e9589a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/MisReplicationHandler.java @@ -148,7 +148,7 @@ public int processAndSendCommands( .collect(Collectors.toMap(Function.identity(), sources::contains))); ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes - = ReplicationManagerUtil.getExcludedAndUsedNodes( + = ReplicationManagerUtil.getExcludedAndUsedNodes(container, new ArrayList(replicas), replicasToBeReplicated, Collections.emptyList(), replicationManager); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java index bec3b1090e4..d23934184eb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult.OverReplicatedHealthResult; import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult.UnderReplicatedHealthResult; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import java.util.ArrayList; import java.util.Collections; @@ -34,6 +34,7 @@ import java.util.List; import java.util.Set; import java.util.UUID; +import java.util.function.Function; import java.util.stream.Collectors; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED; @@ -423,9 +424,48 @@ public boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode, return isSufficientlyReplicated(); } + /** + * Checks if all replicas (except UNHEALTHY) on in-service nodes are in the + * same health state as the container. This is similar to what + * {@link ContainerReplicaCount#isHealthy()} does. The difference is in how + * both methods treat UNHEALTHY replicas. + *
<p>
+ * This method is the interface between the decommissioning flow and + * Replication Manager. Callers can use it to check whether replicas of a + * container are in the same state as the container before a datanode is + * taken offline. + * </p>
+ * <p>
+ * Note that this method's purpose is to only compare the replica state with + * the container state. It does not check if the container has sufficient + * number of replicas - that is the job of {@link ContainerReplicaCount + * #isSufficientlyReplicatedForOffline(DatanodeDetails, NodeManager)}. + * @return true if the container is healthy enough, which is determined by + * various checks + * </p>
+ */ @Override public boolean isHealthyEnoughForOffline() { - return isHealthy(); + long countInService = getReplicas().stream() + .filter(r -> r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE) + .count(); + if (countInService == 0) { + /* + Having no in-service nodes is unexpected and SCM shouldn't allow this + to happen in the first place. Return false here just to be safe. + */ + return false; + } + + HddsProtos.LifeCycleState containerState = getContainer().getState(); + return (containerState == HddsProtos.LifeCycleState.CLOSED + || containerState == HddsProtos.LifeCycleState.QUASI_CLOSED) + && getReplicas().stream() + .filter(r -> r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE) + .filter(r -> r.getState() != + ContainerReplicaProto.State.UNHEALTHY) + .allMatch(r -> ReplicationManager.compareState( + containerState, r.getState())); } /** @@ -435,14 +475,14 @@ public boolean isHealthyEnoughForOffline() { * to save at least one copy of each such UNHEALTHY replica. This method * finds such UNHEALTHY replicas. * - * @param nodeManager an instance of NodeManager + * @param nodeStatusFn a function used to check the {@link NodeStatus} of a node, + * accepting a {@link DatanodeDetails} and returning {@link NodeStatus} * @return List of UNHEALTHY replicas with the greatest Sequence ID that * need to be replicated to other nodes. Empty list if this container is not * QUASI_CLOSED, doesn't have a mix of healthy and UNHEALTHY replicas, or * if there are no replicas that need to be saved. */ - List getVulnerableUnhealthyReplicas( - NodeManager nodeManager) { + public List getVulnerableUnhealthyReplicas(Function nodeStatusFn) { if (container.getState() != HddsProtos.LifeCycleState.QUASI_CLOSED) { // this method is only relevant for QUASI_CLOSED containers return Collections.emptyList(); @@ -456,7 +496,7 @@ List getVulnerableUnhealthyReplicas( } if (replica.getSequenceId() == container.getSequenceId()) { - if (replica.getState() == ContainerReplicaProto.State.UNHEALTHY) { + if (replica.getState() == ContainerReplicaProto.State.UNHEALTHY && !replica.isEmpty()) { unhealthyReplicas.add(replica); } else if (replica.getState() == ContainerReplicaProto.State.QUASI_CLOSED) { @@ -474,20 +514,16 @@ List getVulnerableUnhealthyReplicas( unhealthyReplicas.removeIf( replica -> { - try { - return !nodeManager.getNodeStatus(replica.getDatanodeDetails()) - .isHealthy(); - } catch (NodeNotFoundException e) { - return true; - } + NodeStatus status = nodeStatusFn.apply(replica.getDatanodeDetails()); + return status == null || !status.isHealthy(); }); /* - At this point, the list of unhealthyReplicas contains all UNHEALTHY + At this point, the list of unhealthyReplicas contains all UNHEALTHY non-empty replicas with the greatest Sequence ID that are on healthy Datanodes. Note that this also includes multiple copies of the same UNHEALTHY replica, that is, replicas with the same Origin ID. We need to consider the fact that replicas can be uniquely unhealthy. That is, 2 UNHEALTHY - replicas will difference Origin ID need not be exact copies of each other. + replicas with different Origin ID need not be exact copies of each other. 
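The getVulnerableUnhealthyReplicas change above replaces the hard NodeManager dependency with a function accepting a DatanodeDetails and returning a NodeStatus, so each caller decides how to look up node status and maps NodeNotFoundException to null. A minimal sketch of that callback pattern, using simplified placeholder types rather than the real SCM classes:

```java
import java.util.function.Function;

final class NodeStatusCallbackSketch {
  // Placeholder types standing in for DatanodeDetails and NodeStatus.
  interface Node { }
  interface Status { boolean isHealthy(); }

  // A null status means the lookup failed (the node is unknown); the
  // replica on that node is then treated as unusable, mirroring the
  // removeIf filter in the patch.
  static boolean onHealthyNode(Node dn, Function<Node, Status> statusFn) {
    Status status = statusFn.apply(dn);
    return status != null && status.isHealthy();
  }
}
```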
Replicas that don't have at least one instance (multiple instances of a replica will have the same Origin ID) on an IN_SERVICE node are diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java index 98c19d16ffc..4a823fb8eea 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.container.replication; +import com.google.common.collect.ImmutableList; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -98,6 +99,14 @@ public int processAndSendCommands( new RatisContainerReplicaCount(containerInfo, replicas, pendingOps, minHealthyForMaintenance, false); + if (result instanceof ContainerHealthResult.UnderReplicatedHealthResult) { + ContainerHealthResult.UnderReplicatedHealthResult + underReplicatedResult = (ContainerHealthResult.UnderReplicatedHealthResult) result; + if (underReplicatedResult.hasVulnerableUnhealthy()) { + return handleVulnerableUnhealthyReplicas(withUnhealthy, pendingOps); + } + } + // verify that this container is still under replicated and we don't have // sufficient replication after considering pending adds RatisContainerReplicaCount replicaCount = @@ -151,6 +160,104 @@ public int processAndSendCommands( return commandsSent; } + /** + * Sends a replicate command for each replica specified in + * vulnerableUnhealthy. + * @param replicaCount RatisContainerReplicaCount for this container + * @param pendingOps List of pending ops + * @return number of replicate commands sent + */ + private int handleVulnerableUnhealthyReplicas(RatisContainerReplicaCount replicaCount, + List pendingOps) throws NotLeaderException, CommandTargetOverloadedException, SCMException { + ContainerInfo container = replicaCount.getContainer(); + List vulnerableUnhealthy = replicaCount.getVulnerableUnhealthyReplicas(dn -> { + try { + return replicationManager.getNodeStatus(dn); + } catch (NodeNotFoundException e) { + LOG.warn("Exception for datanode {} while handling vulnerable replicas for container {}, with all replicas" + + " {}.", dn, container, replicaCount.getReplicas(), e); + return null; + } + }); + LOG.info("Handling vulnerable UNHEALTHY replicas {} for container {}.", vulnerableUnhealthy, container); + + int pendingAdds = 0; + for (ContainerReplicaOp op : pendingOps) { + if (op.getOpType() == ContainerReplicaOp.PendingOpType.ADD) { + pendingAdds++; + } + } + if (pendingAdds >= vulnerableUnhealthy.size()) { + LOG.debug("There are {} pending adds for container {}, while the number of UNHEALTHY replicas is {}.", + pendingAdds, container.containerID(), vulnerableUnhealthy.size()); + return 0; + } + + /* + Since we're replicating UNHEALTHY replicas, it's possible that replication keeps on failing. Shuffling gives + other replicas a chance to be replicated since there's a limit on in-flight adds. + */ + Collections.shuffle(vulnerableUnhealthy); + return replicateEachSource(replicaCount, vulnerableUnhealthy, pendingOps); + } + + /** + * Replicates each of the ContainerReplica specified in sources to new + * Datanodes. 
Will not consider Datanodes hosting existing replicas and + * Datanodes pending adds as targets. Note that this method simply skips + * a replica if its datanode is overloaded with commands, throwing an + * exception once all sources have been looked at. + * @param replicaCount RatisContainerReplicaCount for this container + * @param sources List containing replicas, each will be replicated + */ + private int replicateEachSource(RatisContainerReplicaCount replicaCount, List sources, + List pendingOps) throws NotLeaderException, SCMException, CommandTargetOverloadedException { + List allReplicas = replicaCount.getReplicas(); + ContainerInfo container = replicaCount.getContainer(); + + /* + We use the placement policy to get a target Datanode to which a vulnerable replica will be replicated. In + placement policy terms, a 'used node' is a Datanode which has a legit replica of this container. An 'excluded + node' is a Datanode that should not be considered to host a replica of this container, but other Datanodes in this + Datanode's rack are available. So, Datanodes of any vulnerable replicas should be excluded nodes while Datanodes + of other replicas, including UNHEALTHY replicas that are not pending delete (because they have unique origin), + should be used nodes. + */ + ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes = + ReplicationManagerUtil.getExcludedAndUsedNodes(container, allReplicas, Collections.emptySet(), pendingOps, + replicationManager); + + CommandTargetOverloadedException firstException = null; + int numCommandsSent = 0; + for (ContainerReplica replica : sources) { + // find a target for each source and send replicate command + final List target = + ReplicationManagerUtil.getTargetDatanodes(placementPolicy, 1, excludedAndUsedNodes.getUsedNodes(), + excludedAndUsedNodes.getExcludedNodes(), currentContainerSize, container); + int count = 0; + try { + count = sendReplicationCommands(container, ImmutableList.of(replica.getDatanodeDetails()), target); + } catch (CommandTargetOverloadedException e) { + LOG.info("Exception while replicating {} to target {} for container {}.", replica, target, container, e); + if (firstException == null) { + firstException = e; + } + } + + if (count == 1) { + // a command was sent to target, so it needs to be in the used nodes list because it's pending an add + excludedAndUsedNodes.getUsedNodes().add(target.get(0)); + } + numCommandsSent += count; + } + + if (firstException != null) { + throw firstException; + } + + return numCommandsSent; + } + private void removeUnhealthyReplicaIfPossible(ContainerInfo containerInfo, Set replicas, List pendingOps) throws NotLeaderException { @@ -337,7 +444,7 @@ private List getTargets( replicaCount.getContainer().containerID(), replicaCount.getReplicas()); ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes = - ReplicationManagerUtil.getExcludedAndUsedNodes( + ReplicationManagerUtil.getExcludedAndUsedNodes(replicaCount.getContainer(), replicaCount.getReplicas(), Collections.emptySet(), pendingOps, replicationManager); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index 3b9f66595f4..979cff799fa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hdds.scm.container.replication.health.QuasiClosedContainerHandler; import org.apache.hadoop.hdds.scm.container.replication.health.RatisReplicationCheckHandler; import org.apache.hadoop.hdds.scm.container.replication.health.RatisUnhealthyReplicationCheckHandler; +import org.apache.hadoop.hdds.scm.container.replication.health.VulnerableUnhealthyReplicasHandler; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMService; @@ -279,7 +280,8 @@ public ReplicationManager(final ConfigurationSource conf, .addNext(ratisReplicationCheckHandler) .addNext(new ClosedWithUnhealthyReplicasHandler(this)) .addNext(ecMisReplicationCheckHandler) - .addNext(new RatisUnhealthyReplicationCheckHandler()); + .addNext(new RatisUnhealthyReplicationCheckHandler()) + .addNext(new VulnerableUnhealthyReplicasHandler(this)); start(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java index 076a81e69b5..3dcd6aa23ba 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java @@ -32,6 +32,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.UUID; @@ -116,6 +117,7 @@ public static List getTargetDatanodes(PlacementPolicy policy, * @return ExcludedAndUsedNodes object containing the excluded and used lists */ public static ExcludedAndUsedNodes getExcludedAndUsedNodes( + ContainerInfo container, List replicas, Set toBeRemoved, List pendingReplicaOps, @@ -123,12 +125,37 @@ public static ExcludedAndUsedNodes getExcludedAndUsedNodes( List excludedNodes = new ArrayList<>(); List usedNodes = new ArrayList<>(); + List nonUniqueUnhealthy = null; + if (container.getState() == HddsProtos.LifeCycleState.QUASI_CLOSED) { + /* + An UNHEALTHY replica with unique origin node id of a QUASI_CLOSED container should be a used node (not excluded + node) because we preserve it. The following code will find non-unique UNHEALTHY replicas. Later in the method + this list will be used to determine whether an UNHEALTHY replica's DN should be a used node or excluded node. + */ + nonUniqueUnhealthy = + selectUnhealthyReplicasForDelete(container, new HashSet<>(replicas), 0, dn -> { + try { + return replicationManager.getNodeStatus(dn); + } catch (NodeNotFoundException e) { + LOG.warn("Exception for {} while selecting used and excluded nodes for container {}.", dn, container); + return null; + } + }); + } for (ContainerReplica r : replicas) { if (r.getState() == ContainerReplicaProto.State.UNHEALTHY) { - // Hosts with an Unhealthy replica cannot receive a new replica, but - // they are not considered used as they will be removed later. 
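Per the comments earlier in this hunk, a 'used node' already holds a replica that will be kept (it constrains placement of new targets), while an 'excluded node' merely cannot receive a new replica. The QUASI_CLOSED branch below therefore excludes only non-unique UNHEALTHY replicas, which are destined for deletion, and lets unique-origin ones fall through to the used list. A schematic sketch of that classification with simplified types, not the actual implementation:

```java
import java.util.List;
import java.util.Set;

final class UsedVersusExcludedSketch {
  enum ReplicaState { UNHEALTHY, QUASI_CLOSED }

  static final class Replica {
    final String datanode;
    final ReplicaState state;
    Replica(String datanode, ReplicaState state) {
      this.datanode = datanode;
      this.state = state;
    }
  }

  // Nodes of replicas that are kept are "used"; nodes of replicas that
  // will be deleted are only "excluded" as targets for new copies.
  static void classify(List<Replica> all, Set<Replica> nonUniqueUnhealthy,
      List<String> used, List<String> excluded) {
    for (Replica r : all) {
      if (r.state == ReplicaState.UNHEALTHY && nonUniqueUnhealthy.contains(r)) {
        excluded.add(r.datanode);
      } else {
        used.add(r.datanode);
      }
    }
  }
}
```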
- excludedNodes.add(r.getDatanodeDetails()); - continue; + if (container.getState() == HddsProtos.LifeCycleState.QUASI_CLOSED) { + // any unique UNHEALTHY will get added as used nodes in the catch-all at the end of the loop + if (nonUniqueUnhealthy != null && nonUniqueUnhealthy.contains(r)) { + excludedNodes.add(r.getDatanodeDetails()); + continue; + } + } else { + // Hosts with an UNHEALTHY replica (of a non QUASI_CLOSED container) cannot receive a new replica, but + // they are not considered used as they will be removed later. + excludedNodes.add(r.getDatanodeDetails()); + continue; + } } if (toBeRemoved.contains(r)) { // This node is currently present, but we plan to remove it so it is not @@ -195,22 +222,8 @@ public List getUsedNodes() { } } - /** - * This is intended to be call when a container is under replicated, but there - * are no spare nodes to create new replicas on, due to having too many - * unhealthy replicas or quasi-closed replicas which cannot be closed due to - * having a lagging sequence ID. The logic here will select a replica to - * delete, or return null if there are none which can be safely deleted. - * - * @param containerInfo The container to select a replica to delete from - * @param replicas The list of replicas for the container - * @param pendingDeletes number pending deletes for this container - * @return A replica to delete, or null if there are none which can be safely - * deleted. - */ - public static ContainerReplica selectUnhealthyReplicaForDelete( - ContainerInfo containerInfo, Set replicas, - int pendingDeletes, Function nodeStatusFn) { + public static List selectUnhealthyReplicasForDelete(ContainerInfo containerInfo, + Set replicas, int pendingDeletes, Function nodeStatusFn) { if (pendingDeletes > 0) { LOG.debug("Container {} has {} pending deletes which will free nodes.", containerInfo, pendingDeletes); @@ -261,18 +274,39 @@ public static ContainerReplica selectUnhealthyReplicaForDelete( deleteCandidates.sort( Comparator.comparingLong(ContainerReplica::getSequenceId)); if (containerInfo.getState() == HddsProtos.LifeCycleState.CLOSED) { - return deleteCandidates.size() > 0 ? deleteCandidates.get(0) : null; + return deleteCandidates.size() > 0 ? deleteCandidates : null; } if (containerInfo.getState() == HddsProtos.LifeCycleState.QUASI_CLOSED) { List nonUniqueOrigins = findNonUniqueDeleteCandidates(replicas, deleteCandidates, nodeStatusFn); - return nonUniqueOrigins.size() > 0 ? nonUniqueOrigins.get(0) : null; + return nonUniqueOrigins.size() > 0 ? nonUniqueOrigins : null; } return null; } + /** + * This is intended to be called when a container is under replicated, but there + * are no spare nodes to create new replicas on, due to having too many + * unhealthy replicas or quasi-closed replicas which cannot be closed due to + * having a lagging sequence ID. The logic here will select a replica to + * delete, or return null if there are none which can be safely deleted. + * + * @param containerInfo The container to select a replica to delete from + * @param replicas The list of replicas for the container + * @param pendingDeletes number pending deletes for this container + * @return A replica to delete, or null if there are none which can be safely + * deleted. 
+ */ + public static ContainerReplica selectUnhealthyReplicaForDelete( + ContainerInfo containerInfo, Set replicas, + int pendingDeletes, Function nodeStatusFn) { + List containerReplicas = + selectUnhealthyReplicasForDelete(containerInfo, replicas, pendingDeletes, nodeStatusFn); + return containerReplicas != null ? containerReplicas.get(0) : null; + } + /** * Given a list of all replicas (including deleteCandidates), finds and * returns replicas which don't have unique origin node IDs. This method diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java new file mode 100644 index 00000000000..21b2d8151d2 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/VulnerableUnhealthyReplicasHandler.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.replication.health; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import org.apache.hadoop.hdds.scm.container.replication.ContainerCheckRequest; +import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult; +import org.apache.hadoop.hdds.scm.container.replication.RatisContainerReplicaCount; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; + +/** + * A QUASI_CLOSED container may have some UNHEALTHY replicas with the + * same Sequence ID as the container. RM should try to maintain one + * copy of such replicas when there are no healthy replicas that + * match the container's Sequence ID. + */ +public class VulnerableUnhealthyReplicasHandler extends AbstractCheck { + public static final Logger LOG = LoggerFactory.getLogger(VulnerableUnhealthyReplicasHandler.class); + private final ReplicationManager replicationManager; + + public VulnerableUnhealthyReplicasHandler(ReplicationManager replicationManager) { + this.replicationManager = replicationManager; + } + + /** + * Checks if the container is QUASI_CLOSED and has some vulnerable UNHEALTHY replicas that need to be replicated to + other Datanodes.
These replicas have the same sequence ID as the container while other healthy replicas don't. + * If the node hosting such a replica is being taken offline, then the replica may have to be replicated to another + * node. + * @param request ContainerCheckRequest object representing the container + * @return true if some vulnerable UNHEALTHY replicas were found, else false + */ + @Override + public boolean handle(ContainerCheckRequest request) { + ContainerInfo container = request.getContainerInfo(); + if (container.getReplicationType() != RATIS) { + // This handler is only for Ratis containers. + return false; + } + if (container.getState() != HddsProtos.LifeCycleState.QUASI_CLOSED) { + return false; + } + Set replicas = request.getContainerReplicas(); + LOG.debug("Checking whether container {} with replicas {} has vulnerable UNHEALTHY replicas.", container, replicas); + RatisContainerReplicaCount replicaCount = + new RatisContainerReplicaCount(container, replicas, request.getPendingOps(), request.getMaintenanceRedundancy(), + true); + + List vulnerableUnhealthy = replicaCount.getVulnerableUnhealthyReplicas(dn -> { + try { + return replicationManager.getNodeStatus(dn); + } catch (NodeNotFoundException e) { + LOG.warn("Exception for datanode {} while handling vulnerable replicas for container {}, with all replicas" + + " {}.", dn, container, replicaCount.getReplicas(), e); + return null; + } + }); + + if (!vulnerableUnhealthy.isEmpty()) { + LOG.info("Found vulnerable UNHEALTHY replicas {} for container {}.", vulnerableUnhealthy, container); + ReplicationManagerReport report = request.getReport(); + report.incrementAndSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED, container.containerID()); + if (!request.isReadOnly()) { + ContainerHealthResult.UnderReplicatedHealthResult underRepResult = + replicaCount.toUnderHealthResult(); + underRepResult.setHasVulnerableUnhealthy(true); + request.getReplicationQueue().enqueue(underRepResult); + } + return true; + } + + return false; + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java index 455307c6be3..a7423a79dcc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java @@ -362,19 +362,7 @@ private boolean checkContainersReplicatedOnNode(DatanodeDetails dn) continue; } - boolean isHealthy; - /* - If LegacyReplicationManager is enabled, then use the - isHealthyEnoughForOffline API. ReplicationManager doesn't support this - API yet. - */ - boolean legacyEnabled = conf.getBoolean("hdds.scm.replication.enable" + - ".legacy", false); - if (legacyEnabled) { - isHealthy = replicaSet.isHealthyEnoughForOffline(); - } else { - isHealthy = replicaSet.isHealthy(); - } + boolean isHealthy = replicaSet.isHealthyEnoughForOffline(); if (!isHealthy) { if (LOG.isDebugEnabled()) { unClosedIDs.add(cid); @@ -391,6 +379,8 @@ private boolean checkContainersReplicatedOnNode(DatanodeDetails dn) // state, except for any which are unhealthy. As the container is closed, we can check // if it is sufficiently replicated using replicationManager, but this only works if the // legacy RM is not enabled. 
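/*
 * A minimal standalone sketch of the "vulnerable replica" predicate the
 * handler above depends on. The real check lives in
 * RatisContainerReplicaCount#getVulnerableUnhealthyReplicas and also consults
 * NodeStatus; the Replica class and its fields here are hypothetical
 * simplifications.
 */
import java.util.List;

final class VulnerabilityRule {
  static final class Replica {
    final boolean unhealthy;
    final boolean hostGoingOffline; // decommissioning or entering maintenance
    final long sequenceId;

    Replica(boolean unhealthy, boolean hostGoingOffline, long sequenceId) {
      this.unhealthy = unhealthy;
      this.hostGoingOffline = hostGoingOffline;
      this.sequenceId = sequenceId;
    }
  }

  // True if taking the hosting node offline could lose the only replica that
  // matches the container's sequence ID. The real code additionally requires
  // the replica's origin node ID to be unique among the copies.
  static boolean isVulnerable(long containerSequenceId, List<Replica> all, Replica r) {
    boolean healthyCopyMatches = all.stream()
        .anyMatch(x -> !x.unhealthy && x.sequenceId == containerSequenceId);
    return r.unhealthy
        && r.sequenceId == containerSequenceId
        && r.hostGoingOffline
        && !healthyCopyMatches;
  }
}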
+ boolean legacyEnabled = conf.getBoolean("hdds.scm.replication.enable" + + ".legacy", false); boolean replicatedOK; if (legacyEnabled) { replicatedOK = replicaSet.isSufficientlyReplicatedForOffline(dn, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java index 17548bc5fef..dd7747e1271 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java @@ -551,6 +551,76 @@ public void testUnderReplicationDueToQuasiClosedReplicaWithWrongSequenceID() command.getKey())); } + /** + * A QUASI_CLOSED container may end up having UNHEALTHY replicas with the correct sequence ID, while none of the + * healthy replicas have the correct sequence ID. If any of these UNHEALTHY replicas is unique and is being taken + * offline, then it needs to be replicated to another DN for decommission to progress. This test asserts that a + * replicate command is sent for one such replica. + */ + @Test + public void testUnderReplicationWithVulnerableReplicas() throws IOException { + final long sequenceID = 20; + container = ReplicationTestUtil.createContainerInfo(RATIS_REPLICATION_CONFIG, 1, + HddsProtos.LifeCycleState.QUASI_CLOSED, sequenceID); + + final Set replicas = new HashSet<>(4); + for (int i = 0; i < 3; i++) { + replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, + sequenceID - 1)); + } + final ContainerReplica unhealthyReplica = createContainerReplica(container.containerID(), 0, + DECOMMISSIONING, State.UNHEALTHY, sequenceID); + replicas.add(unhealthyReplica); + UnderReplicatedHealthResult result = getUnderReplicatedHealthResult(); + Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true); + + final Set>> commands = testProcessing(replicas, Collections.emptyList(), + result, 2, 1); + assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey()); + } + + /** + * In the push replication model, a replicate command is sent to the DN hosting the replica, and that DN is + * expected to "push" the replica to another DN. If the DN hosting the replica has too many commands already, an + * exception is thrown. This test asserts that other vulnerable UNHEALTHY replicas are still handled when an + * exception is caught for one of the replicas. Also asserts that the first thrown exception isn't lost and is + * actually rethrown once other replicas are processed, so that the container can be re-queued. + */ + @Test + public void testUnderReplicationWithVulnerableReplicasAndTargetOverloadedException() + throws NotLeaderException, CommandTargetOverloadedException { + final long sequenceID = 20; + container = ReplicationTestUtil.createContainerInfo(RATIS_REPLICATION_CONFIG, 1, + HddsProtos.LifeCycleState.QUASI_CLOSED, sequenceID); + + final Set replicas = new HashSet<>(5); + for (int i = 0; i < 3; i++) { + replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, + sequenceID - 1)); + } + + /* + Create 2 unhealthy vulnerable replicas. An exception is thrown for one of the replicas, but the other replica + should still be processed and 1 command should be sent. 
+ */ + final ContainerReplica unhealthyReplica = createContainerReplica(container.containerID(), 0, + DECOMMISSIONING, State.UNHEALTHY, sequenceID); + final ContainerReplica unhealthyReplica2 = createContainerReplica(container.containerID(), 0, + ENTERING_MAINTENANCE, State.UNHEALTHY, sequenceID); + replicas.add(unhealthyReplica); + replicas.add(unhealthyReplica2); + UnderReplicatedHealthResult result = getUnderReplicatedHealthResult(); + Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true); + ReplicationTestUtil.mockRMSendThrottleReplicateCommand(replicationManager, commandsSent, new AtomicBoolean(true)); + + RatisUnderReplicationHandler handler = new RatisUnderReplicationHandler(policy, conf, replicationManager); + assertThrows(CommandTargetOverloadedException.class, () -> handler.processAndSendCommands(replicas, + Collections.emptyList(), result, 2)); + assertEquals(1, commandsSent.size()); + DatanodeDetails dn = commandsSent.iterator().next().getKey(); + assertTrue(unhealthyReplica.getDatanodeDetails().equals(dn) || unhealthyReplica2.getDatanodeDetails().equals(dn)); + } + @Test public void testOnlyQuasiClosedReplicaWithWrongSequenceIdIsAvailable() throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index a9093778793..32463a5a6eb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.container.replication; +import com.google.common.collect.ImmutableList; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -91,6 +92,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; /** @@ -444,6 +446,63 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnUniqueOrigin() assertEquals(0, repQueue.overReplicatedQueueSize()); } + @Test + public void testQuasiClosedContainerWithVulnerableUnhealthyReplica() + throws IOException, NodeNotFoundException { + RatisReplicationConfig ratisRepConfig = + RatisReplicationConfig.getInstance(THREE); + long sequenceID = 10; + ContainerInfo container = createContainerInfo(ratisRepConfig, 1, + HddsProtos.LifeCycleState.QUASI_CLOSED, sequenceID); + + // this method creates replicas with same origin id and zero sequence id + Set replicas = + createReplicasWithSameOrigin(container.containerID(), + ContainerReplicaProto.State.QUASI_CLOSED, 0, 0, 0); + replicas.add(createContainerReplica(container.containerID(), 0, + IN_SERVICE, ContainerReplicaProto.State.UNHEALTHY, sequenceID)); + ContainerReplica decommissioning = + createContainerReplica(container.containerID(), 0, DECOMMISSIONING, + ContainerReplicaProto.State.UNHEALTHY, sequenceID); + replicas.add(decommissioning); + storeContainerAndReplicas(container, replicas); + Mockito.when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) + .thenAnswer(invocation -> { + DatanodeDetails dn = invocation.getArgument(0); + if 
(dn.equals(decommissioning.getDatanodeDetails())) { + return new NodeStatus(DECOMMISSIONING, HddsProtos.NodeState.HEALTHY); + } + + return NodeStatus.inServiceHealthy(); + }); + + replicationManager.processContainer(container, repQueue, repReport); + assertEquals(1, repReport.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + assertEquals(0, repReport.getStat( + ReplicationManagerReport.HealthState.OVER_REPLICATED)); + assertEquals(1, repQueue.underReplicatedQueueSize()); + assertEquals(0, repQueue.overReplicatedQueueSize()); + + Mockito.when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(), + anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails())); + Mockito.when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any())) + .thenAnswer(invocation -> { + Map map = new HashMap<>(); + map.put(SCMCommandProto.Type.replicateContainerCommand, 0); + map.put(SCMCommandProto.Type.reconstructECContainersCommand, 0); + return map; + }); + RatisUnderReplicationHandler handler = + new RatisUnderReplicationHandler(ratisPlacementPolicy, configuration, replicationManager); + + handler.processAndSendCommands(replicas, Collections.emptyList(), repQueue.dequeueUnderReplicatedContainer(), 2); + assertEquals(1, commandsSent.size()); + Pair> command = commandsSent.iterator().next(); + assertEquals(SCMCommandProto.Type.replicateContainerCommand, command.getValue().getType()); + assertEquals(decommissioning.getDatanodeDetails().getUuid(), command.getKey()); + } + /** * When there is Quasi Closed Replica with incorrect sequence id * for a Closed container, it's treated as unhealthy and deleted. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java index 3b81db7767c..c68130e79ee 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdds.scm.container.replication; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -37,6 +39,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; +import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainer; import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainerReplica; import static 
org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -55,7 +58,9 @@ public void setup() { @Test public void testGetExcludedAndUsedNodes() throws NodeNotFoundException { - ContainerID cid = ContainerID.valueOf(1L); + ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + ContainerID cid = container.containerID(); Set replicas = new HashSet<>(); ContainerReplica good = createContainerReplica(cid, 0, IN_SERVICE, ContainerReplicaProto.State.CLOSED, 1); @@ -108,7 +113,7 @@ public void testGetExcludedAndUsedNodes() throws NodeNotFoundException { }); ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes = - ReplicationManagerUtil.getExcludedAndUsedNodes( + ReplicationManagerUtil.getExcludedAndUsedNodes(container, new ArrayList<>(replicas), toBeRemoved, pending, replicationManager); @@ -131,4 +136,89 @@ public void testGetExcludedAndUsedNodes() throws NodeNotFoundException { .contains(pendingDelete)); } + @Test + public void testGetUsedAndExcludedNodesForQuasiClosedContainer() throws NodeNotFoundException { + ContainerInfo container = createContainer(HddsProtos.LifeCycleState.QUASI_CLOSED, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + ContainerID cid = container.containerID(); + Set replicas = new HashSet<>(); + ContainerReplica good = createContainerReplica(cid, 0, IN_SERVICE, + ContainerReplicaProto.State.QUASI_CLOSED, 1); + replicas.add(good); + + ContainerReplica remove = createContainerReplica(cid, 0, + IN_SERVICE, ContainerReplicaProto.State.QUASI_CLOSED, 1); + replicas.add(remove); + Set toBeRemoved = new HashSet<>(); + toBeRemoved.add(remove); + + // this replica should be on the used nodes list + ContainerReplica unhealthyWithUniqueOrigin = createContainerReplica( + cid, 0, IN_SERVICE, ContainerReplicaProto.State.UNHEALTHY, 1); + replicas.add(unhealthyWithUniqueOrigin); + + // this one should be on the excluded nodes list + ContainerReplica unhealthyWithNonUniqueOrigin = createContainerReplica(cid, 0, IN_SERVICE, + ContainerReplicaProto.State.UNHEALTHY, container.getNumberOfKeys(), container.getUsedBytes(), + MockDatanodeDetails.randomDatanodeDetails(), good.getOriginDatanodeId()); + replicas.add(unhealthyWithNonUniqueOrigin); + + ContainerReplica decommissioning = + createContainerReplica(cid, 0, + DECOMMISSIONING, ContainerReplicaProto.State.QUASI_CLOSED, 1); + replicas.add(decommissioning); + + ContainerReplica maintenance = + createContainerReplica(cid, 0, + IN_MAINTENANCE, ContainerReplicaProto.State.QUASI_CLOSED, 1); + replicas.add(maintenance); + + // Finally, add a pending add and delete. The add should go onto the used + // list and the delete added to the excluded nodes. 
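/*
 * Stepping back to the CommandTargetOverloadedException test earlier in this
 * patch: the behavior it verifies is the classic "remember the first failure,
 * keep processing, rethrow at the end" pattern. A minimal sketch with
 * hypothetical names; the real handler iterates vulnerable replicas and
 * rethrows the first exception so the container can be re-queued.
 */
import java.util.List;

final class FirstFailureKept {
  interface Step {
    void run() throws Exception;
  }

  static void processAll(List<Step> steps) throws Exception {
    Exception first = null;
    for (Step step : steps) {
      try {
        step.run();
      } catch (Exception e) {
        if (first == null) {
          first = e; // remember only the first failure
        }
      }
    }
    if (first != null) {
      throw first; // every step had its chance; now surface the failure
    }
  }
}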
+ DatanodeDetails pendingAdd = MockDatanodeDetails.randomDatanodeDetails(); + DatanodeDetails pendingDelete = MockDatanodeDetails.randomDatanodeDetails(); + List pending = new ArrayList<>(); + pending.add(ContainerReplicaOp.create( + ContainerReplicaOp.PendingOpType.ADD, pendingAdd, 0)); + pending.add(ContainerReplicaOp.create( + ContainerReplicaOp.PendingOpType.DELETE, pendingDelete, 0)); + + Mockito.when(replicationManager.getNodeStatus(Mockito.any())).thenAnswer( + invocation -> { + final DatanodeDetails dn = invocation.getArgument(0); + for (ContainerReplica r : replicas) { + if (r.getDatanodeDetails().equals(dn)) { + return new NodeStatus( + r.getDatanodeDetails().getPersistedOpState(), + HddsProtos.NodeState.HEALTHY); + } + } + throw new NodeNotFoundException(dn.getUuidString()); + }); + + ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes = + ReplicationManagerUtil.getExcludedAndUsedNodes(container, + new ArrayList<>(replicas), toBeRemoved, pending, + replicationManager); + + assertEquals(4, excludedAndUsedNodes.getUsedNodes().size()); + assertTrue(excludedAndUsedNodes.getUsedNodes() + .contains(good.getDatanodeDetails())); + assertTrue(excludedAndUsedNodes.getUsedNodes() + .contains(maintenance.getDatanodeDetails())); + assertTrue(excludedAndUsedNodes.getUsedNodes() + .contains(pendingAdd)); + assertTrue(excludedAndUsedNodes.getUsedNodes().contains(unhealthyWithUniqueOrigin.getDatanodeDetails())); + + assertEquals(4, excludedAndUsedNodes.getExcludedNodes().size()); + assertTrue(excludedAndUsedNodes.getExcludedNodes() + .contains(unhealthyWithNonUniqueOrigin.getDatanodeDetails())); + assertTrue(excludedAndUsedNodes.getExcludedNodes() + .contains(decommissioning.getDatanodeDetails())); + assertTrue(excludedAndUsedNodes.getExcludedNodes() + .contains(remove.getDatanodeDetails())); + assertTrue(excludedAndUsedNodes.getExcludedNodes() + .contains(pendingDelete)); + } + } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java new file mode 100644 index 00000000000..72a89f02862 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.scm.container.replication.health; + +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import org.apache.hadoop.hdds.scm.container.replication.ContainerCheckRequest; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationQueue; +import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainerInfo; +import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainerReplica; +import static org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createReplicas; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for {@link VulnerableUnhealthyReplicasHandler}. 
+ */ +public class TestVulnerableUnhealthyReplicasHandler { + private ReplicationManager replicationManager; + private ReplicationConfig repConfig; + private ReplicationQueue repQueue; + private ContainerCheckRequest.Builder requestBuilder; + private ReplicationManagerReport report; + private VulnerableUnhealthyReplicasHandler handler; + + @BeforeEach + public void setup() throws NodeNotFoundException { + replicationManager = Mockito.mock(ReplicationManager.class); + handler = new VulnerableUnhealthyReplicasHandler(replicationManager); + repConfig = RatisReplicationConfig.getInstance(THREE); + repQueue = new ReplicationQueue(); + report = new ReplicationManagerReport(); + requestBuilder = new ContainerCheckRequest.Builder() + .setReplicationQueue(repQueue) + .setMaintenanceRedundancy(2) + .setPendingOps(Collections.emptyList()) + .setReport(report); + + Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class))) + .thenReturn(NodeStatus.inServiceHealthy()); + } + + @Test + public void testReturnsFalseForECContainer() { + ContainerInfo container = createContainerInfo(new ECReplicationConfig(3, 2)); + Set replicas = createReplicas(container.containerID(), 1, 2, 3, 4); + requestBuilder.setContainerReplicas(replicas).setContainerInfo(container); + + assertFalse(handler.handle(requestBuilder.build())); + assertEquals(0, repQueue.underReplicatedQueueSize()); + assertEquals(0, repQueue.overReplicatedQueueSize()); + } + + @Test + public void testReturnsFalseForClosedContainer() { + ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.CLOSED); + Set replicas = createReplicas(container.containerID(), 0, 0, 0); + requestBuilder.setContainerReplicas(replicas).setContainerInfo(container); + + assertFalse(handler.handle(requestBuilder.build())); + assertEquals(0, repQueue.underReplicatedQueueSize()); + assertEquals(0, repQueue.overReplicatedQueueSize()); + } + + @Test + public void testReturnsFalseForQuasiClosedContainerWithNoUnhealthyReplicas() { + ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED); + Set replicas = createReplicas(container.containerID(), State.QUASI_CLOSED, 0, 0, 0); + requestBuilder.setContainerReplicas(replicas).setContainerInfo(container); + + assertFalse(handler.handle(requestBuilder.build())); + assertEquals(0, repQueue.underReplicatedQueueSize()); + assertEquals(0, repQueue.overReplicatedQueueSize()); + } + + @Test + public void testReturnsFalseForQuasiClosedContainerWithNoVulnerableReplicas() { + ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED); + Set replicas = createReplicas(container.containerID(), 0, 0, 0); + // create UNHEALTHY replica with unique origin id on an IN_SERVICE node + replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.UNHEALTHY)); + requestBuilder.setContainerReplicas(replicas).setContainerInfo(container); + + assertFalse(handler.handle(requestBuilder.build())); + assertEquals(0, repQueue.underReplicatedQueueSize()); + assertEquals(0, repQueue.overReplicatedQueueSize()); + } + + @Test + public void testReturnsTrueForQuasiClosedContainerWithVulnerableReplica() throws NodeNotFoundException { + long sequenceId = 10; + ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED, sequenceId); + Set replicas = new HashSet<>(4); + for (int i = 0; i < 3; i++) { + replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, + container.getSequenceId() - 
1)); + } + // create UNHEALTHY replica with unique origin id on a DECOMMISSIONING node + ContainerReplica unhealthy = + createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId); + replicas.add(unhealthy); + Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class))) + .thenAnswer(invocation -> { + DatanodeDetails dn = invocation.getArgument(0); + if (dn.equals(unhealthy.getDatanodeDetails())) { + return new NodeStatus(DECOMMISSIONING, HEALTHY); + } + return NodeStatus.inServiceHealthy(); + }); + requestBuilder.setContainerReplicas(replicas).setContainerInfo(container); + + assertTrue(handler.handle(requestBuilder.build())); + assertEquals(1, repQueue.underReplicatedQueueSize()); + assertEquals(0, repQueue.overReplicatedQueueSize()); + } + + @Test + public void testReturnsFalseForVulnerableReplicaWithAnotherCopy() throws NodeNotFoundException { + long sequenceId = 10; + ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED, sequenceId); + Set replicas = new HashSet<>(4); + for (int i = 0; i < 3; i++) { + replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, + container.getSequenceId() - 1)); + } + // create UNHEALTHY replica with a non-unique origin id on a DECOMMISSIONING node + ContainerReplica unhealthy = + createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId); + replicas.add(unhealthy); + Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class))) + .thenAnswer(invocation -> { + DatanodeDetails dn = invocation.getArgument(0); + if (dn.equals(unhealthy.getDatanodeDetails())) { + return new NodeStatus(DECOMMISSIONING, HEALTHY); + } + return NodeStatus.inServiceHealthy(); + }); + replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.UNHEALTHY, + container.getNumberOfKeys(), container.getUsedBytes(), MockDatanodeDetails.randomDatanodeDetails(), + unhealthy.getOriginDatanodeId(), container.getSequenceId())); + requestBuilder.setContainerReplicas(replicas).setContainerInfo(container); + + assertFalse(handler.handle(requestBuilder.build())); + assertEquals(0, repQueue.underReplicatedQueueSize()); + assertEquals(0, repQueue.overReplicatedQueueSize()); + } + + @Test + public void testDoesNotEnqueueForReadOnlyRequest() throws NodeNotFoundException { + long sequenceId = 10; + ContainerInfo container = createContainerInfo(repConfig, 1, LifeCycleState.QUASI_CLOSED, sequenceId); + Set replicas = new HashSet<>(4); + for (int i = 0; i < 3; i++) { + replicas.add(createContainerReplica(container.containerID(), 0, IN_SERVICE, State.QUASI_CLOSED, + container.getSequenceId() - 1)); + } + // create UNHEALTHY replica with unique origin id on a DECOMMISSIONING node + ContainerReplica unhealthy = + createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId); + replicas.add(unhealthy); + Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class))) + .thenAnswer(invocation -> { + DatanodeDetails dn = invocation.getArgument(0); + if (dn.equals(unhealthy.getDatanodeDetails())) { + return new NodeStatus(DECOMMISSIONING, HEALTHY); + } + return NodeStatus.inServiceHealthy(); + }); + requestBuilder.setContainerReplicas(replicas) + .setContainerInfo(container) + .setReadOnly(true); + + assertTrue(handler.handle(requestBuilder.build())); + assertEquals(0, repQueue.underReplicatedQueueSize()); + assertEquals(0, 
repQueue.overReplicatedQueueSize()); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java index 4433c0cb6f2..4ff937f98c5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java @@ -193,7 +193,7 @@ public static void mockGetContainerReplicaCountForEC( mockCheckContainerState(repManager, underReplicated); } - private static void mockCheckContainerState(ReplicationManager repManager, boolean underReplicated) + static void mockCheckContainerState(ReplicationManager repManager, boolean underReplicated) throws ContainerNotFoundException { Mockito.when(repManager.checkContainerStatus(Mockito.any(ContainerInfo.class), Mockito.any(ReplicationManagerReport.class))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 4b389fbcf2f..17107cfa958 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.replication.LegacyRatisContainerReplicaCount; +import org.apache.hadoop.hdds.scm.container.replication.RatisContainerReplicaCount; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager; import org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil; @@ -298,6 +299,80 @@ public void testDecommissionWaitsForUnhealthyReplicaToReplicate() nodeManager.getNodeStatus(dn1).getOperationalState()); } + /** + * Situation: A QUASI_CLOSED container has an UNHEALTHY replica with the + * greatest BCSID, and three QUASI_CLOSED replicas with a smaller BCSID. The + * UNHEALTHY replica is on a decommissioning node, and there are no other + * copies of this replica, that is, replicas with the same Origin ID as + * this replica. + * + * Expectation: Decommissioning should not complete until the UNHEALTHY + * replica has been replicated to another node.
+ */ + @Test + public void testDecommissionWaitsForUnhealthyReplicaToReplicateNewRM() + throws NodeNotFoundException, ContainerNotFoundException { + DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); + nodeManager.register(dn1, + new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING, + HddsProtos.NodeState.HEALTHY)); + + // create 3 QUASI_CLOSED replicas with containerID 1 and same origin ID + ContainerID containerID = ContainerID.valueOf(1); + Set replicas = + ReplicationTestUtil.createReplicasWithSameOrigin(containerID, + State.QUASI_CLOSED, 0, 0, 0); + + // the container's sequence id is greater than the healthy replicas' + ContainerInfo container = ReplicationTestUtil.createContainerInfo( + RatisReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.THREE), containerID.getId(), + HddsProtos.LifeCycleState.QUASI_CLOSED, + replicas.iterator().next().getSequenceId() + 1); + // UNHEALTHY replica is on a unique origin and has same sequence id as + // the container + ContainerReplica unhealthy = + ReplicationTestUtil.createContainerReplica(containerID, 0, + dn1.getPersistedOpState(), State.UNHEALTHY, + container.getNumberOfKeys(), container.getUsedBytes(), dn1, + dn1.getUuid(), container.getSequenceId()); + replicas.add(unhealthy); + nodeManager.setContainers(dn1, ImmutableSet.of(containerID)); + + Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID))) + .thenReturn(new RatisContainerReplicaCount(container, replicas, + Collections.emptyList(), 2, false)); + DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, true); + + // start monitoring dn1 + monitor.startMonitoring(dn1); + monitor.run(); + assertEquals(1, monitor.getTrackedNodeCount()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dn1).getOperationalState()); + + // Running the monitor again causes it to remain DECOMMISSIONING + // as nothing has changed. + monitor.run(); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dn1).getOperationalState()); + + // add a copy of the UNHEALTHY replica on a new node, dn1 should get + // decommissioned now + ContainerReplica copyOfUnhealthyOnNewNode = unhealthy.toBuilder() + .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) + .build(); + replicas.add(copyOfUnhealthyOnNewNode); + Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID))) + .thenReturn(new RatisContainerReplicaCount(container, replicas, + Collections.emptyList(), 2, false)); + DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, false); + monitor.run(); + assertEquals(0, monitor.getTrackedNodeCount()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED, + nodeManager.getNodeStatus(dn1).getOperationalState()); + } + /** * Consider a QUASI_CLOSED container with only UNHEALTHY replicas. If one * of its nodes is decommissioned, the decommissioning should succeed. From cd0c55e4ffa879b3e7cffe182237f0e1c824a7c0 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 20 Dec 2023 08:47:59 +0100 Subject: [PATCH 14/28] HDDS-9828. 
Do not use Files.createTempFile in tests (#5824) --- .../keyvalue/helpers/TestChunkUtils.java | 188 ++++++++---------- .../security/symmetric/TestLocalKeyStore.java | 18 +- .../hdds/utils/TestNativeLibraryLoader.java | 12 +- .../managed/TestManagedSSTDumpIterator.java | 11 +- .../util/TestManagedSstFileReader.java | 15 +- .../ha/TestInterSCMGrpcProtocolService.java | 8 +- .../hadoop/fs/ozone/TestOzoneFsSnapshot.java | 29 +-- .../hdds/scm/TestSCMDbCheckpointServlet.java | 120 +++++------ .../ozone/om/TestOMDbCheckpointServlet.java | 35 ++-- .../hadoop/ozone/om/TestOMRatisSnapshots.java | 8 +- .../ratis/TestOzoneManagerRatisRequest.java | 5 +- .../om/service/TestRangerBGSyncService.java | 3 +- .../om/snapshot/TestSnapshotDiffManager.java | 11 +- 13 files changed, 234 insertions(+), 229 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java index 037de863c00..bda8b7d5a9a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java @@ -28,7 +28,6 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.Arrays; -import java.util.LinkedList; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; @@ -55,22 +54,25 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Tests for {@link ChunkUtils}. 
*/ -public class TestChunkUtils { +class TestChunkUtils { private static final Logger LOG = LoggerFactory.getLogger(TestChunkUtils.class); - private static final String PREFIX = TestChunkUtils.class.getSimpleName(); private static final int BUFFER_CAPACITY = 1 << 20; private static final int MAPPED_BUFFER_THRESHOLD = 32 << 10; private static final Random RANDOM = new Random(); + @TempDir + private Path tempDir; + static ChunkBuffer readData(File file, long off, long len) throws StorageContainerException { LOG.info("off={}, len={}", off, len); @@ -79,130 +81,112 @@ static ChunkBuffer readData(File file, long off, long len) } @Test - public void concurrentReadOfSameFile() throws Exception { + void concurrentReadOfSameFile() throws Exception { String s = "Hello World"; byte[] array = s.getBytes(UTF_8); ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array)); - Path tempFile = Files.createTempFile(PREFIX, "concurrent"); - try { - int len = data.limit(); - int offset = 0; - File file = tempFile.toFile(); - ChunkUtils.writeData(file, data, offset, len, null, true); - int threads = 10; - ExecutorService executor = new ThreadPoolExecutor(threads, threads, - 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); - AtomicInteger processed = new AtomicInteger(); - AtomicBoolean failed = new AtomicBoolean(); - for (int i = 0; i < threads; i++) { - final int threadNumber = i; - executor.execute(() -> { - try { - final ChunkBuffer chunk = readData(file, offset, len); - // There should be only one element in readBuffers - final List buffers = chunk.asByteBufferList(); - Assertions.assertEquals(1, buffers.size()); - final ByteBuffer readBuffer = buffers.get(0); - - LOG.info("Read data ({}): {}", threadNumber, - new String(readBuffer.array(), UTF_8)); - if (!Arrays.equals(array, readBuffer.array())) { - failed.set(true); - } - assertEquals(len, readBuffer.remaining()); - } catch (Exception e) { - LOG.error("Failed to read data ({})", threadNumber, e); + Path tempFile = tempDir.resolve("concurrent"); + int len = data.limit(); + int offset = 0; + File file = tempFile.toFile(); + ChunkUtils.writeData(file, data, offset, len, null, true); + int threads = 10; + ExecutorService executor = new ThreadPoolExecutor(threads, threads, + 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); + AtomicInteger processed = new AtomicInteger(); + AtomicBoolean failed = new AtomicBoolean(); + for (int i = 0; i < threads; i++) { + final int threadNumber = i; + executor.execute(() -> { + try { + final ChunkBuffer chunk = readData(file, offset, len); + // There should be only one element in readBuffers + final List buffers = chunk.asByteBufferList(); + Assertions.assertEquals(1, buffers.size()); + final ByteBuffer readBuffer = buffers.get(0); + + LOG.info("Read data ({}): {}", threadNumber, + new String(readBuffer.array(), UTF_8)); + if (!Arrays.equals(array, readBuffer.array())) { failed.set(true); } - processed.incrementAndGet(); - }); - } - try { - GenericTestUtils.waitFor(() -> processed.get() == threads, - 100, (int) TimeUnit.SECONDS.toMillis(5)); - } finally { - executor.shutdownNow(); - } - assertFalse(failed.get()); + assertEquals(len, readBuffer.remaining()); + } catch (Exception e) { + LOG.error("Failed to read data ({})", threadNumber, e); + failed.set(true); + } + processed.incrementAndGet(); + }); + } + try { + GenericTestUtils.waitFor(() -> processed.get() == threads, + 100, (int) TimeUnit.SECONDS.toMillis(5)); } finally { - Files.deleteIfExists(tempFile); + executor.shutdownNow(); } + assertFalse(failed.get()); } 
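/*
 * A minimal sketch of the HDDS-9828 migration pattern applied throughout this
 * file: replace self-managed Files.createTempFile plus try/finally cleanup
 * with a JUnit 5 @TempDir field that the framework creates before each test
 * and deletes afterwards. Standalone example; the class name is hypothetical.
 */
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import static org.junit.jupiter.api.Assertions.assertEquals;

class TempDirFieldExampleTest {
  @TempDir
  private Path tempDir; // injected by JUnit, cleaned up automatically

  @Test
  void writeAndReadBack() throws IOException {
    Path file = tempDir.resolve("data.txt"); // no deleteIfExists needed
    Files.write(file, "hello".getBytes(StandardCharsets.UTF_8));
    assertEquals("hello", new String(Files.readAllBytes(file), StandardCharsets.UTF_8));
  }
}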
@Test - public void concurrentProcessing() throws Exception { + void concurrentProcessing() throws Exception { final int perThreadWait = 1000; final int maxTotalWait = 5000; int threads = 20; - List paths = new LinkedList<>(); + ExecutorService executor = new ThreadPoolExecutor(threads, threads, + 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); + AtomicInteger processed = new AtomicInteger(); + for (int i = 0; i < threads; i++) { + Path path = tempDir.resolve(String.valueOf(i)); + executor.execute(() -> { + try { + ChunkUtils.processFileExclusively(path, () -> { + try { + Thread.sleep(perThreadWait); + } catch (InterruptedException e) { + e.printStackTrace(); + } + processed.incrementAndGet(); + return null; + }); + } catch (InterruptedException e) { + e.printStackTrace(); + } + }); + } try { - ExecutorService executor = new ThreadPoolExecutor(threads, threads, - 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); - AtomicInteger processed = new AtomicInteger(); - for (int i = 0; i < threads; i++) { - Path path = Files.createTempFile(PREFIX, String.valueOf(i)); - paths.add(path); - executor.execute(() -> { - try { - ChunkUtils.processFileExclusively(path, () -> { - try { - Thread.sleep(perThreadWait); - } catch (InterruptedException e) { - e.printStackTrace(); - } - processed.incrementAndGet(); - return null; - }); - } catch (InterruptedException e) { - e.printStackTrace(); - } - }); - } - try { - GenericTestUtils.waitFor(() -> processed.get() == threads, - 100, maxTotalWait); - } finally { - executor.shutdownNow(); - } + GenericTestUtils.waitFor(() -> processed.get() == threads, + 100, maxTotalWait); } finally { - for (Path path : paths) { - FileUtils.deleteQuietly(path.toFile()); - } + executor.shutdownNow(); } } @Test - public void serialRead() throws Exception { + void serialRead() throws IOException { String s = "Hello World"; byte[] array = s.getBytes(UTF_8); ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array)); - Path tempFile = Files.createTempFile(PREFIX, "serial"); - try { - File file = tempFile.toFile(); - int len = data.limit(); - int offset = 0; - ChunkUtils.writeData(file, data, offset, len, null, true); - - final ChunkBuffer chunk = readData(file, offset, len); - // There should be only one element in readBuffers - final List buffers = chunk.asByteBufferList(); - Assertions.assertEquals(1, buffers.size()); - final ByteBuffer readBuffer = buffers.get(0); - - assertArrayEquals(array, readBuffer.array()); - assertEquals(len, readBuffer.remaining()); - } catch (Exception e) { - LOG.error("Failed to read data", e); - } finally { - Files.deleteIfExists(tempFile); - } + Path tempFile = tempDir.resolve("serial"); + File file = tempFile.toFile(); + int len = data.limit(); + int offset = 0; + ChunkUtils.writeData(file, data, offset, len, null, true); + + final ChunkBuffer chunk = readData(file, offset, len); + // There should be only one element in readBuffers + final List buffers = chunk.asByteBufferList(); + Assertions.assertEquals(1, buffers.size()); + final ByteBuffer readBuffer = buffers.get(0); + + assertArrayEquals(array, readBuffer.array()); + assertEquals(len, readBuffer.remaining()); } @Test - public void validateChunkForOverwrite() throws IOException { + void validateChunkForOverwrite() throws IOException { - Path tempFile = Files.createTempFile(PREFIX, "overwrite"); + Path tempFile = tempDir.resolve("overwrite"); FileUtils.write(tempFile.toFile(), "test", UTF_8); Assertions.assertTrue( @@ -226,7 +210,7 @@ public void validateChunkForOverwrite() throws IOException 
{ } @Test - public void readMissingFile() { + void readMissingFile() { // given int len = 123; int offset = 0; @@ -242,7 +226,7 @@ public void readMissingFile() { } @Test - public void testReadData() throws Exception { + void testReadData() throws Exception { final File dir = GenericTestUtils.getTestDir("testReadData"); try { Assertions.assertTrue(dir.mkdirs()); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java index b5c717399d0..393a0c5f011 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableList; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -53,17 +54,20 @@ /** * Test cases for {@link LocalSecretKeyStore}. */ -public class TestLocalKeyStore { +class TestLocalKeyStore { private SecretKeyStore secretKeyStore; private Path testSecretFile; + @TempDir + private Path tempDir; + @BeforeEach - private void setup() throws Exception { - testSecretFile = Files.createTempFile("key-strore-test", ".json"); + void setup() throws IOException { + testSecretFile = Files.createFile(tempDir.resolve("key-store-test.json")); secretKeyStore = new LocalSecretKeyStore(testSecretFile); } - public static Stream saveAndLoadTestCases() throws Exception { + static Stream saveAndLoadTestCases() throws Exception { return Stream.of( // empty Arguments.of(ImmutableList.of()), @@ -81,7 +85,7 @@ public static Stream saveAndLoadTestCases() throws Exception { @ParameterizedTest @MethodSource("saveAndLoadTestCases") - public void testSaveAndLoad(List keys) throws IOException { + void testSaveAndLoad(List keys) throws IOException { secretKeyStore.save(keys); // Ensure the intended file exists and is readable and writeable to @@ -100,7 +104,7 @@ public void testSaveAndLoad(List keys) throws IOException { * Verifies that secret keys are overwritten by subsequent writes. */ @Test - public void testOverwrite() throws Exception { + void testOverwrite() throws Exception { List initialKeys = newArrayList(generateKey("HmacSHA256")); secretKeyStore.save(initialKeys); @@ -123,7 +127,7 @@ public void testOverwrite() throws Exception { * test fails, instead, analyse the backward-compatibility of the change. */ @Test - public void testLoadExistingFile() throws Exception { + void testLoadExistingFile() throws Exception { // copy test file content to the backing file. 
String testJson = "[\n" + " {\n" + diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index 472954f2bd5..24218c5687e 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -20,6 +20,7 @@ import org.apache.ozone.test.tag.Native; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import org.mockito.MockedStatic; @@ -27,7 +28,7 @@ import java.io.ByteArrayInputStream; import java.io.File; -import java.io.IOException; +import java.nio.file.Path; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -42,10 +43,11 @@ */ public class TestNativeLibraryLoader { - private static Stream nativeLibraryDirectoryLocations() - throws IOException { - return Stream.of("", File.createTempFile("prefix", "suffix") - .getParentFile().getAbsolutePath(), null); + @TempDir + private static Path tempDir; + + private static Stream nativeLibraryDirectoryLocations() { + return Stream.of("", tempDir.toAbsolutePath().toString(), null); } @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java index 99d2a6ced59..505d68d9413 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java @@ -28,6 +28,7 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.Named; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -40,6 +41,8 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.Optional; @@ -61,10 +64,12 @@ */ class TestManagedSSTDumpIterator { + @TempDir + private Path tempDir; + private File createSSTFileWithKeys( TreeMap, String> keys) throws Exception { - File file = File.createTempFile("tmp_sst_file", ".sst"); - file.deleteOnExit(); + File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); ManagedOptions managedOptions = new ManagedOptions(); ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter( @@ -252,7 +257,7 @@ public void testInvalidSSTDumpIteratorWithKeyFormat(byte[] inputBytes) ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(inputBytes); ManagedSSTDumpTool tool = Mockito.mock(ManagedSSTDumpTool.class); - File file = File.createTempFile("tmp", ".sst"); + File file = Files.createFile(tempDir.resolve("tmp_file.sst")).toFile(); Future future = Mockito.mock(Future.class); Mockito.when(future.isDone()).thenReturn(false); Mockito.when(future.get()).thenReturn(0); diff 
--git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java index 8c897b01d2e..588e54ad8b3 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java @@ -31,6 +31,7 @@ import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.rocksdb.RocksDBException; @@ -47,6 +48,7 @@ import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -58,6 +60,11 @@ */ class TestManagedSstFileReader { + @TempDir + private File tempDir; + + private final AtomicInteger fileCounter = new AtomicInteger(); + // Key prefix containing all characters, to check if all characters can be // written & read from rocksdb through SSTDumptool private static final String KEY_PREFIX = IntStream.range(0, 256).boxed() @@ -65,9 +72,8 @@ class TestManagedSstFileReader { .collect(Collectors.joining("")); private String createRandomSSTFile(TreeMap keys) - throws IOException, RocksDBException { - File file = File.createTempFile("tmp_sst_file", ".sst"); - file.deleteOnExit(); + throws RocksDBException { + File file = new File(tempDir, "tmp_sst_file" + fileCounter.incrementAndGet() + ".sst"); try (ManagedOptions managedOptions = new ManagedOptions(); ManagedEnvOptions managedEnvOptions = new ManagedEnvOptions(); @@ -84,6 +90,7 @@ private String createRandomSSTFile(TreeMap keys) } sstFileWriter.finish(); } + Assertions.assertTrue(file.exists()); return file.getAbsolutePath(); } @@ -142,7 +149,7 @@ public void testGetKeyStream(int numberOfFiles) new ManagedSstFileReader(files).getKeyStream( lowerBound.orElse(null), upperBound.orElse(null))) { keyStream.forEach(key -> { - Assertions.assertEquals(keysInBoundary.get(key), 1); + Assertions.assertEquals(1, keysInBoundary.get(key)); Assertions.assertNotNull(keysInBoundary.remove(key)); }); keysInBoundary.values() diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java index f966f1b65ba..95b6abc04ac 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java @@ -72,7 +72,7 @@ * * @see HDDS-8901 */ -public class TestInterSCMGrpcProtocolService { +class TestInterSCMGrpcProtocolService { private static final String CP_FILE_NAME = "cpFile"; private static final String CP_CONTENTS = "Hello world!"; @@ -89,7 +89,7 @@ public class TestInterSCMGrpcProtocolService { private Path temp; @Test - public void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception { + void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception { int port = new Random().nextInt(1000) + 
45000; OzoneConfiguration conf = setupConfiguration(port); SCMCertificateClient @@ -100,7 +100,7 @@ public void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception { InterSCMGrpcClient client = new InterSCMGrpcClient("localhost", port, conf, scmCertClient); - Path tempFile = Files.createTempFile(temp, CP_FILE_NAME, ""); + Path tempFile = temp.resolve(CP_FILE_NAME); CompletableFuture res = client.download(tempFile); Path downloaded = res.get(); @@ -182,7 +182,7 @@ private DBStore dbStore() throws IOException { } private DBCheckpoint checkPoint() throws IOException { - Path checkPointLocation = Files.createTempDirectory(temp, "cpDir"); + Path checkPointLocation = Files.createDirectory(temp.resolve("cpDir")); Path cpFile = Paths.get(checkPointLocation.toString(), CP_FILE_NAME); Files.write(cpFile, CP_CONTENTS.getBytes(UTF_8)); DBCheckpoint checkpoint = mock(DBCheckpoint.class); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index 8b1b2adfdf3..90b5daabada 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -43,6 +43,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -60,14 +61,14 @@ * Setting a timeout for every test method to 300 seconds. 
*/ @Timeout(value = 300) -public class TestOzoneFsSnapshot { +class TestOzoneFsSnapshot { private static MiniOzoneCluster cluster; private static final String OM_SERVICE_ID = "om-service-test1"; private static OzoneManager ozoneManager; private static OzoneFsShell shell; private static final String VOLUME = - "vol-" + RandomStringUtils.randomNumeric(5);; + "vol-" + RandomStringUtils.randomNumeric(5); private static final String BUCKET = "buck-" + RandomStringUtils.randomNumeric(5); private static final String KEY = @@ -80,7 +81,7 @@ public class TestOzoneFsSnapshot { BUCKET_PATH + OM_KEY_PREFIX + KEY; @BeforeAll - public static void initClass() throws Exception { + static void initClass() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); @@ -106,7 +107,7 @@ public static void initClass() throws Exception { } @AfterAll - public static void shutdown() throws IOException { + static void shutdown() throws IOException { shell.close(); if (cluster != null) { cluster.shutdown(); @@ -129,7 +130,7 @@ private static void createVolBuckKey() } @Test - public void testCreateSnapshotDuplicateName() throws Exception { + void testCreateSnapshotDuplicateName() throws Exception { String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); int res = ToolRunner.run(shell, @@ -144,7 +145,7 @@ public void testCreateSnapshotDuplicateName() throws Exception { } @Test - public void testCreateSnapshotWithSubDirInput() throws Exception { + void testCreateSnapshotWithSubDirInput() throws Exception { // Test that: // $ ozone fs -createSnapshot ofs://om/vol1/buck2/dir3/ snap1 // @@ -185,7 +186,7 @@ public void testCreateSnapshotWithSubDirInput() throws Exception { @ValueSource(strings = {"snap-1", "snap75795657617173401188448010125899089001363595171500499231286", "sn1"}) - public void testCreateSnapshotSuccess(String snapshotName) + void testCreateSnapshotSuccess(String snapshotName) throws Exception { int res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); @@ -241,7 +242,7 @@ private static Stream createSnapshotFailureScenarios() { @ParameterizedTest(name = "{0}") @MethodSource("createSnapshotFailureScenarios") - public void testCreateSnapshotFailure(String description, + void testCreateSnapshotFailure(String description, String paramBucketPath, String snapshotName, String expectedMessage, @@ -258,12 +259,12 @@ public void testCreateSnapshotFailure(String description, * Test list snapshot and snapshot keys with "ozone fs -ls". */ @Test - public void testFsLsSnapshot() throws Exception { + void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { String newKey = "key-" + RandomStringUtils.randomNumeric(5); String newKeyPath = BUCKET_PATH + OM_KEY_PREFIX + newKey; // Write a non-zero byte key. 
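[editor's note] A recurring change across these patches is replacing File.createTempFile / Files.createTempFile with JUnit 5's @TempDir, which hands the test a directory the framework creates beforehand and deletes afterwards, removing the need for deleteOnExit() and finally-block cleanup. @TempDir can be injected as a field or, as in testFsLsSnapshot below, directly as a method parameter. The following self-contained sketch (class and file names are illustrative, not part of the patch) shows the parameter form:

    import static java.nio.charset.StandardCharsets.UTF_8;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class TempDirParameterExample {
      @Test
      void writesIntoManagedTempDir(@TempDir Path tempDir) throws Exception {
        // JUnit creates a fresh directory for the test and removes it
        // afterwards, so no deleteOnExit() or manual cleanup is required.
        Path data = tempDir.resolve("sample.txt");
        Files.write(data, "random data".getBytes(UTF_8));
        assertTrue(Files.exists(data));
      }
    }

In TestNativeLibraryLoader above, the field form has to be static: the @MethodSource factory that reads it is a static method and runs before any test instance exists, so only a class-level @TempDir is visible to it.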
- Path tempFile = Files.createTempFile("testFsLsSnapshot-", "any-suffix"); + Path tempFile = tempDir.resolve("testFsLsSnapshot-any-suffix"); FileUtils.write(tempFile.toFile(), "random data", UTF_8); execShellCommandAndGetOutput(0, new String[]{"-put", tempFile.toString(), newKeyPath}); @@ -294,7 +295,7 @@ public void testFsLsSnapshot() throws Exception { } @Test - public void testDeleteBucketWithSnapshot() throws Exception { + void testDeleteBucketWithSnapshot() throws Exception { String snapshotName = createSnapshot(); String snapshotPath = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH @@ -326,7 +327,7 @@ public void testDeleteBucketWithSnapshot() throws Exception { } @Test - public void testSnapshotDeleteSuccess() throws Exception { + void testSnapshotDeleteSuccess() throws Exception { String snapshotName = createSnapshot(); // Delete the created snapshot int res = ToolRunner.run(shell, @@ -372,7 +373,7 @@ private static Stream deleteSnapshotFailureScenarios() { @ParameterizedTest(name = "{0}") @MethodSource("deleteSnapshotFailureScenarios") - public void testSnapshotDeleteFailure(String description, + void testSnapshotDeleteFailure(String description, String paramBucketPath, String snapshotName, String expectedMessage, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java index b180b224755..a8a8fba852b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java @@ -25,11 +25,12 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -44,7 +45,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.commons.io.FileUtils; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConsts.MULTIPART_FORM_DATA_BOUNDARY; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; @@ -54,6 +54,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -154,67 +155,66 @@ public void shutdown() { @ParameterizedTest @MethodSource("getHttpMethods") - public void testEndpoint(String httpMethod) + void testEndpoint(String httpMethod, @TempDir Path tempDir) throws ServletException, IOException, InterruptedException { this.method = httpMethod; - File tempFile = null; - try { - List toExcludeList = new ArrayList<>(); - toExcludeList.add("sstFile1.sst"); - toExcludeList.add("sstFile2.sst"); - - setupHttpMethod(toExcludeList); - - doNothing().when(responseMock).setContentType("application/x-tgz"); - doNothing().when(responseMock).setHeader(Mockito.anyString(), - Mockito.anyString()); - - tempFile = 
File.createTempFile("testEndpoint_" + System - .currentTimeMillis(), ".tar"); - - FileOutputStream fileOutputStream = new FileOutputStream(tempFile); - when(responseMock.getOutputStream()).thenReturn( - new ServletOutputStream() { - @Override - public boolean isReady() { - return true; - } - - @Override - public void setWriteListener(WriteListener writeListener) { - } - - @Override - public void write(int b) throws IOException { - fileOutputStream.write(b); - } - }); - - when(scmDbCheckpointServletMock.getBootstrapStateLock()).thenReturn( - new DBCheckpointServlet.Lock()); - scmDbCheckpointServletMock.init(); - long initialCheckpointCount = - scmMetrics.getDBCheckpointMetrics().getNumCheckpoints(); - - doEndpoint(); - - Assertions.assertTrue(tempFile.length() > 0); - Assertions.assertTrue( - scmMetrics.getDBCheckpointMetrics(). - getLastCheckpointCreationTimeTaken() > 0); - Assertions.assertTrue( - scmMetrics.getDBCheckpointMetrics(). - getLastCheckpointStreamingTimeTaken() > 0); - Assertions.assertTrue(scmMetrics.getDBCheckpointMetrics(). - getNumCheckpoints() > initialCheckpointCount); - - Mockito.verify(scmDbCheckpointServletMock).writeDbDataToStream(any(), - any(), any(), eq(toExcludeList), any(), any()); - } finally { - FileUtils.deleteQuietly(tempFile); - } + List toExcludeList = new ArrayList<>(); + toExcludeList.add("sstFile1.sst"); + toExcludeList.add("sstFile2.sst"); + + setupHttpMethod(toExcludeList); + + doNothing().when(responseMock).setContentType("application/x-tgz"); + doNothing().when(responseMock).setHeader(Mockito.anyString(), + Mockito.anyString()); + + final Path outputPath = tempDir.resolve("testEndpoint.tar"); + when(responseMock.getOutputStream()).thenReturn( + new ServletOutputStream() { + private final OutputStream fileOutputStream = Files.newOutputStream(outputPath); + + @Override + public boolean isReady() { + return true; + } + @Override + public void setWriteListener(WriteListener writeListener) { + } + + @Override + public void close() throws IOException { + fileOutputStream.close(); + super.close(); + } + + @Override + public void write(int b) throws IOException { + fileOutputStream.write(b); + } + }); + + when(scmDbCheckpointServletMock.getBootstrapStateLock()).thenReturn( + new DBCheckpointServlet.Lock()); + scmDbCheckpointServletMock.init(); + long initialCheckpointCount = + scmMetrics.getDBCheckpointMetrics().getNumCheckpoints(); + + doEndpoint(); + + Assertions.assertTrue(outputPath.toFile().length() > 0); + Assertions.assertTrue( + scmMetrics.getDBCheckpointMetrics(). + getLastCheckpointCreationTimeTaken() > 0); + Assertions.assertTrue( + scmMetrics.getDBCheckpointMetrics(). + getLastCheckpointStreamingTimeTaken() > 0); + Assertions.assertTrue(scmMetrics.getDBCheckpointMetrics(). 
+ getNumCheckpoints() > initialCheckpointCount); + + Mockito.verify(scmDbCheckpointServletMock).writeDbDataToStream(any(), + any(), any(), eq(toExcludeList), any(), any()); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index d4f1f777877..a835944eefe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -29,6 +29,7 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.io.OutputStreamWriter; import java.nio.charset.StandardCharsets; import java.nio.file.Files; @@ -67,7 +68,6 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.commons.io.FileUtils; import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdds.utils.HddsServerUtil.OZONE_RATIS_SNAPSHOT_COMPLETE_FLAG_NAME; @@ -140,9 +140,10 @@ public class TestOMDbCheckpointServlet { private Path compactionDirPath; private DBCheckpoint dbCheckpoint; private String method; - private File folder; + @TempDir + private Path folder; private static final String FABRICATED_FILE_NAME = "fabricatedFile.sst"; - private FileOutputStream fileOutputStream; + /** * Create a MiniDFSCluster for testing. *

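[editor's note] Both checkpoint-servlet tests (SCM above, OM below) apply the same pattern: the mocked HttpServletResponse returns a ServletOutputStream that forwards bytes to a file under @TempDir and, new in this patch, overrides close() so the underlying file handle is released before JUnit deletes the directory. A sketch of the pattern, not the exact test code, using the javax.servlet API the tests already depend on:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import javax.servlet.ServletOutputStream;
    import javax.servlet.WriteListener;

    final class FileBackedServletOutputStream extends ServletOutputStream {
      private final OutputStream target;

      FileBackedServletOutputStream(Path outputPath) throws IOException {
        target = Files.newOutputStream(outputPath);
      }

      @Override
      public boolean isReady() {
        return true;  // always writable; no async back-pressure in a test
      }

      @Override
      public void setWriteListener(WriteListener writeListener) {
        // non-blocking IO is not exercised by these tests
      }

      @Override
      public void write(int b) throws IOException {
        target.write(b);
      }

      @Override
      public void close() throws IOException {
        // Flush and release the file handle before the temp dir is removed;
        // a still-open handle can make directory deletion fail, notably on
        // Windows, and ensures later assertions see fully flushed data.
        target.close();
        super.close();
      }
    }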
@@ -151,16 +152,15 @@ public class TestOMDbCheckpointServlet { * @throws Exception */ @BeforeEach - public void init(@TempDir File tempDir) throws Exception { - folder = tempDir; + void init() throws Exception { conf = new OzoneConfiguration(); - tempFile = File.createTempFile("temp_" + System - .currentTimeMillis(), ".tar"); - - fileOutputStream = new FileOutputStream(tempFile); + final Path tempPath = folder.resolve("temp.tar"); + tempFile = tempPath.toFile(); servletOutputStream = new ServletOutputStream() { + private final OutputStream fileOutputStream = Files.newOutputStream(tempPath); + @Override public boolean isReady() { return true; @@ -170,6 +170,12 @@ public boolean isReady() { public void setWriteListener(WriteListener writeListener) { } + @Override + public void close() throws IOException { + fileOutputStream.close(); + super.close(); + } + @Override public void write(int b) throws IOException { fileOutputStream.write(b); @@ -185,7 +191,6 @@ public void shutdown() throws InterruptedException { if (cluster != null) { cluster.shutdown(); } - FileUtils.deleteQuietly(tempFile); } private void setupCluster() throws Exception { @@ -458,7 +463,7 @@ public void testWriteDbDataToStream() throws Exception { dbCheckpoint = realCheckpoint.get(); // Untar the file into a temp folder to be examined. - String testDirName = folder.getAbsolutePath(); + String testDirName = folder.resolve("testDir").toString(); int testDirLength = testDirName.length() + 1; String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME; int newDbDirLength = newDbDirName.length() + 1; @@ -556,14 +561,14 @@ public void testWriteDbDataWithoutOmSnapshot() .thenReturn(null); // Get the tarball. - Path tmpdir = Files.createTempDirectory("bootstrapData"); + Path tmpdir = folder.resolve("bootstrapData"); try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) { omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock, fileOutputStream, new ArrayList<>(), new ArrayList<>(), tmpdir); } // Untar the file into a temp folder to be examined. - String testDirName = folder.getAbsolutePath(); + String testDirName = folder.resolve("testDir").toString(); int testDirLength = testDirName.length() + 1; FileUtil.unTar(tempFile, new File(testDirName)); @@ -603,14 +608,14 @@ public void testWriteDbDataWithToExcludeFileList() .thenReturn(null); // Get the tarball. - Path tmpdir = Files.createTempDirectory("bootstrapData"); + Path tmpdir = folder.resolve("bootstrapData"); try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) { omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock, fileOutputStream, toExcludeList, excludedList, tmpdir); } // Untar the file into a temp folder to be examined. 
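[editor's note] The hunks below swap Files.createTempDirectory / File.createTempFile for paths resolved under the class-level @TempDir. Fixed names such as "bootstrapData" or "testDir" are safe here because each test method receives its own fresh directory, so nothing can collide between tests or outlive the run. A minimal illustration (names are illustrative):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class ResolveUnderTempDirExample {
      @TempDir
      Path folder;  // injected anew for every test method

      @Test
      void fixedNamesCannotCollide() throws Exception {
        // Sub-paths are resolved inside the managed directory, so fixed
        // names cause no clashes and leave no leftovers after the run.
        Path bootstrapData = Files.createDirectory(folder.resolve("bootstrapData"));
        Path tarball = folder.resolve("temp.tar");
        Files.write(tarball, new byte[] {1, 2, 3});
        assertTrue(Files.isDirectory(bootstrapData));
        assertTrue(Files.exists(tarball));
      }
    }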
- String testDirName = folder.getAbsolutePath(); + String testDirName = folder.resolve("testDir").toString(); int testDirLength = testDirName.length() + 1; FileUtil.unTar(tempFile, new File(testDirName)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index cd932f6efde..093f1107b5f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -202,7 +202,7 @@ public void shutdown() { @ValueSource(ints = {100}) // tried up to 1000 snapshots and this test works, but some of the // timeouts have to be increased. - public void testInstallSnapshot(int numSnapshotsToCreate) throws Exception { + void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir) throws Exception { // Get the leader OM String leaderOMNodeId = OmFailoverProxyUtil .getFailoverProxyProvider(objectStore.getClientProxy()) @@ -221,7 +221,7 @@ public void testInstallSnapshot(int numSnapshotsToCreate) throws Exception { FaultInjector faultInjector = new SnapshotMaxSizeInjector(leaderOM, followerOM.getOmSnapshotProvider().getSnapshotDir(), - sstSetList); + sstSetList, tempDir); followerOM.getOmSnapshotProvider().setInjector(faultInjector); // Create some snapshots, each with new keys @@ -1186,11 +1186,11 @@ private static class SnapshotMaxSizeInjector extends FaultInjector { private final List> sstSetList; private final Path tempDir; SnapshotMaxSizeInjector(OzoneManager om, File snapshotDir, - List> sstSetList) throws IOException { + List> sstSetList, Path tempDir) { this.om = om; this.snapshotDir = snapshotDir; this.sstSetList = sstSetList; - this.tempDir = Files.createTempDirectory("tmpDirPrefix"); + this.tempDir = tempDir; init(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java index e5d9605711a..d25cdf298ed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java @@ -39,7 +39,6 @@ import org.mockito.Mockito; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -65,7 +64,7 @@ public class TestOzoneManagerRatisRequest { public void testRequestWithNonExistentBucket() throws Exception { ozoneManager = Mockito.mock(OzoneManager.class); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - Files.createTempDirectory(folder, "om").toString()); + folder.resolve("om").toAbsolutePath().toString()); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); @@ -106,7 +105,7 @@ public void testUnknownRequestHandling() ozoneManager = Mockito.mock(OzoneManager.class); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - Files.createTempDirectory(folder, "om").toString()); + folder.resolve("om").toAbsolutePath().toString()); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java index 3b70d8af1a5..08358054fcc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java @@ -59,7 +59,6 @@ import org.slf4j.event.Level; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; @@ -188,7 +187,7 @@ public void setUp() throws IOException { omMetrics = OMMetrics.create(); conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, - Files.createTempDirectory(folder.toAbsolutePath(), "om").toString()); + folder.resolve("om").toAbsolutePath().toString()); // No need to conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, ...) here // as we did the trick earlier with mockito. omMetadataManager = new OmMetadataManagerImpl(conf, ozoneManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 5229ea46fbc..28af68e2539 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -101,7 +101,6 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; -import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -207,6 +206,8 @@ public class TestSnapshotDiffManager { private final OMMetrics omMetrics = OMMetrics.create(); @TempDir private File dbDir; + @TempDir + private File snapDiffDir; @Mock private RocksDBCheckpointDiffer differ; @Mock @@ -431,7 +432,7 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); - String diffDir = Files.createTempDirectory("snapdiff_dir").toString(); + String diffDir = snapDiffDir.getAbsolutePath(); Set randomStrings = IntStream.range(0, numberOfFiles) .mapToObj(i -> RandomStringUtils.randomAlphabetic(10)) .collect(Collectors.toSet()); @@ -526,8 +527,7 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, toSnapshotInfo, false, Collections.emptyMap(), - Files.createTempDirectory("snapdiff_dir").toAbsolutePath() - .toString()); + snapDiffDir.getAbsolutePath()); assertEquals(deltaStrings, deltaFiles); } } @@ -591,8 +591,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) toSnapshotInfo, false, Collections.emptyMap(), - Files.createTempDirectory("snapdiff_dir").toAbsolutePath() - .toString()); + snapDiffDir.getAbsolutePath()); assertEquals(deltaStrings, deltaFiles); rcFromSnapshot.close(); From be2e19948b6c8fb3f3b9a0c69ee6dfd88967f840 Mon Sep 17 00:00:00 2001 From: Zhaohui Wang <32935220+wzhallright@users.noreply.github.com> Date: Wed, 20 Dec 2023 19:03:36 +0800 Subject: [PATCH 15/28] HDDS-9942. 
Move BufferAllocator to test (#5836) --- .../java/org/apache/ozone/erasurecode/BufferAllocator.java | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename hadoop-hdds/erasurecode/src/{main => test}/java/org/apache/ozone/erasurecode/BufferAllocator.java (100%) diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/BufferAllocator.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/BufferAllocator.java similarity index 100% rename from hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/BufferAllocator.java rename to hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/BufferAllocator.java From aa2aa742762d70b45e085b72b96b75cedb8f4f64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Dec 2023 13:38:00 +0100 Subject: [PATCH 16/28] HDDS-9969. Bump maven-compiler-plugin to 3.9.0 (#5774) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 638da75d439..359f86451b4 100644 --- a/pom.xml +++ b/pom.xml @@ -261,7 +261,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${maven-surefire-plugin.version} 3.3.2 - 3.1 + 3.9.0 3.1.1 3.1.0 3.5.1 From 477c8decfd4ca5c2fce69d9a4e89d00a4f0e472c Mon Sep 17 00:00:00 2001 From: Zhaohui Wang <32935220+wzhallright@users.noreply.github.com> Date: Wed, 20 Dec 2023 22:12:35 +0800 Subject: [PATCH 17/28] HDDS-9948. Compose annotation for tests parameterized with ContainerLayoutVersion (#5839) --- .../common/impl/TestContainerDataYaml.java | 34 +--- .../TestContainerDeletionChoosingPolicy.java | 36 ++-- .../common/impl/TestContainerSet.java | 28 +-- .../common/impl/TestHddsDispatcher.java | 12 +- .../TestCloseContainerCommandHandler.java | 33 +--- .../keyvalue/ContainerLayoutTestInfo.java | 20 +- .../TestKeyValueContainerMarkUnhealthy.java | 25 +-- .../keyvalue/TestKeyValueHandler.java | 37 ++-- .../TestReplicationSupervisor.java | 178 ++++++++---------- .../client/rpc/read/TestChunkInputStream.java | 12 +- .../client/rpc/read/TestKeyInputStream.java | 8 +- 11 files changed, 163 insertions(+), 260 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java index 93f2a11a59e..4bd2ece41eb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java @@ -29,11 +29,8 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import java.io.File; import java.io.IOException; @@ -43,6 +40,7 @@ import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static 
org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -69,10 +67,6 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) { this.layoutVersion = layoutVersion; } - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - /** * Creates a .container file. cleanup() should be called at the end of the * test when container file is created. @@ -111,8 +105,7 @@ private void cleanup() { FileUtil.fullyDelete(new File(testRoot)); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testCreateContainerFile(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -179,8 +172,7 @@ public void testCreateContainerFile(ContainerLayoutVersion layout) kvData.getDataScanTimestamp().longValue()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testCreateContainerFileWithoutReplicaIndex( ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -191,14 +183,13 @@ public void testCreateContainerFileWithoutReplicaIndex( final String content = FileUtils.readFileToString(containerFile, Charset.defaultCharset()); - Assertions.assertFalse(content.contains("replicaIndex"), + assertFalse(content.contains("replicaIndex"), "ReplicaIndex shouldn't be persisted if zero"); cleanup(); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testIncorrectContainerFile(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -216,8 +207,7 @@ public void testIncorrectContainerFile(ContainerLayoutVersion layout) } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testCheckBackWardCompatibilityOfContainerFile( ContainerLayoutVersion layout) { setLayoutVersion(layout); @@ -258,8 +248,7 @@ public void testCheckBackWardCompatibilityOfContainerFile( /** * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}. */ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testChecksumInContainerFile(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -278,8 +267,7 @@ public void testChecksumInContainerFile(ContainerLayoutVersion layout) /** * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}. */ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testChecksumInContainerFileWithReplicaIndex( ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -306,8 +294,7 @@ private KeyValueContainerData getKeyValueContainerData() throws IOException { /** * Test to verify incorrect checksum is detected. */ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testIncorrectChecksum(ContainerLayoutVersion layout) { setLayoutVersion(layout); try { @@ -323,8 +310,7 @@ public void testIncorrectChecksum(ContainerLayoutVersion layout) { /** * Test to verify disabled checksum with incorrect checksum. 
*/ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testDisabledChecksum(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index b4eb7064538..972e0efa6ca 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.ozone.container.common.impl; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.IOException; import java.util.HashMap; @@ -41,10 +44,7 @@ import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService.ContainerBlockInfo; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; /** @@ -66,10 +66,6 @@ public void setLayoutVersion(ContainerLayoutVersion layout) { this.layoutVersion = layout; } - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - @BeforeEach public void init() throws Throwable { conf = new OzoneConfiguration(); @@ -77,8 +73,7 @@ public void init() throws Throwable { .getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testRandomChoosingPolicy(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -86,7 +81,7 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout) if (containerDir.exists()) { FileUtils.deleteDirectory(new File(path)); } - Assertions.assertTrue(containerDir.mkdirs()); + assertTrue(containerDir.mkdirs()); conf.set( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, @@ -105,7 +100,7 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout) data.closeContainer(); KeyValueContainer container = new KeyValueContainer(data, conf); containerSet.addContainer(container); - Assertions.assertTrue( + assertTrue( containerSet.getContainerMapCopy() .containsKey(data.getContainerID())); } @@ -121,7 +116,7 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout) for (ContainerBlockInfo pr : result0) { totPendingBlocks += pr.getNumBlocksToDelete(); } - Assertions.assertTrue(totPendingBlocks >= blockLimitPerInterval); + assertTrue(totPendingBlocks >= blockLimitPerInterval); // test random choosing. We choose 100 times the 3 datanodes twice. //We expect different order at least once. 
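[editor's note] Besides the annotation change, HDDS-9948 consistently replaces qualified Assertions.assertX(...) calls with static imports, the prevailing JUnit 5 style, which keeps long assertion lines readable. The series also fixes a few swapped arguments (see assertEquals(1, keysInBoundary.get(key)) in TestManagedSstFileReader earlier): JUnit's signature is assertEquals(expected, actual), and reversing the two produces misleading failure messages. A small example:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.util.Arrays;
    import java.util.List;
    import org.junit.jupiter.api.Test;

    class AssertionStyleExample {
      @Test
      void expectedValueComesFirst() {
        List<String> result = Arrays.asList("a", "b", "c");
        // (expected, actual): swapping them passes or fails identically,
        // but on failure reports the actual value as the "expected" one.
        assertEquals(3, result.size());
        assertTrue(result.contains("b"));
      }
    }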
@@ -138,12 +133,11 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout) } } } - Assertions.fail("Chosen container results were same 100 times"); + fail("Chosen container results were same 100 times"); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -151,7 +145,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) if (containerDir.exists()) { FileUtils.deleteDirectory(new File(path)); } - Assertions.assertTrue(containerDir.mkdirs()); + assertTrue(containerDir.mkdirs()); conf.set( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, @@ -182,7 +176,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) KeyValueContainer container = new KeyValueContainer(data, conf); data.closeContainer(); containerSet.addContainer(container); - Assertions.assertTrue( + assertTrue( containerSet.getContainerMapCopy().containsKey(containerId)); } numberOfBlocks.sort(Collections.reverseOrder()); @@ -196,7 +190,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) for (ContainerBlockInfo pr : result0) { totPendingBlocks += pr.getNumBlocksToDelete(); } - Assertions.assertTrue(totPendingBlocks >= blockLimitPerInterval); + assertTrue(totPendingBlocks >= blockLimitPerInterval); List result1 = blockDeletingService @@ -211,7 +205,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) break; } } - Assertions.assertEquals(containerCount, result1.size()); + assertEquals(containerCount, result1.size()); // verify the order of return list int initialName2CountSize = name2Count.size(); @@ -220,11 +214,11 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) int currentCount = name2Count.remove(data.getContainerData().getContainerID()); // previous count should not smaller than next one - Assertions.assertTrue(currentCount > 0 && currentCount <= lastCount); + assertTrue(currentCount > 0 && currentCount <= lastCount); lastCount = currentCount; } // ensure all the container data are compared - Assertions.assertEquals(result1.size(), + assertEquals(result1.size(), initialName2CountSize - name2Count.size()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index e075cd66ae3..13b8fb6d30a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -30,8 +30,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; import java.io.IOException; @@ -65,12 +63,7 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) { this.layoutVersion = layoutVersion; } - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public 
void testAddGetRemoveContainer(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); @@ -112,8 +105,7 @@ public void testAddGetRemoveContainer(ContainerLayoutVersion layout) assertFalse(containerSet.removeContainer(1000L)); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testIteratorsAndCount(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); @@ -159,8 +151,7 @@ public void testIteratorsAndCount(ContainerLayoutVersion layout) } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testIteratorPerVolume(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); @@ -205,8 +196,7 @@ public void testIteratorPerVolume(ContainerLayoutVersion layout) assertEquals(5, count2); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void iteratorIsOrderedByScanTime(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); @@ -259,8 +249,7 @@ public void iteratorIsOrderedByScanTime(ContainerLayoutVersion layout) assertEquals(containerCount, containersToBeScanned); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testGetContainerReport(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); @@ -273,9 +262,7 @@ public void testGetContainerReport(ContainerLayoutVersion layout) assertEquals(10, containerReportsRequestProto.getReportsList().size()); } - - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testListContainer(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); @@ -289,8 +276,7 @@ public void testListContainer(ContainerLayoutVersion layout) assertContainerIds(startId, count, result); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testListContainerFromFirstKey(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 3215271245e..b547282476c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -63,8 +63,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -103,12 +101,7 @@ public class TestHddsDispatcher { c -> { }; - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testContainerCloseActionWhenFull( ContainerLayoutVersion layout) throws IOException { @@ -166,8 +159,7 @@ public void testContainerCloseActionWhenFull( } } - 
@ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testContainerCloseActionWhenVolumeFull( ContainerLayoutVersion layoutVersion) throws Exception { String testDir = GenericTestUtils.getTempPath( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 343341f26a0..79107ce111e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -35,8 +35,6 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; import java.util.UUID; @@ -77,10 +75,6 @@ public void initLayoutVerison(ContainerLayoutVersion layout) init(); } - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - private void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); context = ContainerTestUtils.getMockContext(randomDatanodeDetails(), conf); @@ -109,8 +103,7 @@ private void init() throws Exception { .thenReturn(false); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void closeContainerWithPipeline(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -125,8 +118,7 @@ public void closeContainerWithPipeline(ContainerLayoutVersion layout) .quasiCloseContainer(eq(container), any()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void closeContainerWithoutPipeline(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -144,8 +136,7 @@ public void closeContainerWithoutPipeline(ContainerLayoutVersion layout) .quasiCloseContainer(eq(container), any()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void closeContainerWithForceFlagSet(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -159,8 +150,7 @@ public void closeContainerWithForceFlagSet(ContainerLayoutVersion layout) verify(containerHandler).closeContainer(container); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void forceCloseQuasiClosedContainer(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -177,8 +167,7 @@ public void forceCloseQuasiClosedContainer(ContainerLayoutVersion layout) .closeContainer(container); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void forceCloseOpenContainer(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -194,8 +183,7 @@ public void forceCloseOpenContainer(ContainerLayoutVersion layout) .closeContainer(container); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest 
public void forceCloseOpenContainerWithPipeline(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -213,8 +201,7 @@ public void forceCloseOpenContainerWithPipeline(ContainerLayoutVersion layout) .closeContainer(container); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void closeAlreadyClosedContainer(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -238,8 +225,7 @@ public void closeAlreadyClosedContainer(ContainerLayoutVersion layout) .submitRequest(any(), any()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void closeNonExistenceContainer(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); @@ -253,8 +239,7 @@ public void closeNonExistenceContainer(ContainerLayoutVersion layout) } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void closeMissingContainer(ContainerLayoutVersion layout) throws Exception { initLayoutVerison(layout); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java index 6773264830b..ab6e2c857c5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java @@ -24,10 +24,15 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import java.io.File; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; -import static java.util.stream.Collectors.toList; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -116,9 +121,14 @@ private static void assertFileCount(File dir, long count) { assertEquals(count, files.length); } - public static Iterable containerLayoutParameters() { - return ContainerLayoutVersion.getAllVersions().stream() - .map(each -> new Object[] {each}) - .collect(toList()); + /** + * Composite annotation for tests parameterized with {@link ContainerLayoutTestInfo}. 
+ */ + @Target(ElementType.METHOD) + @Retention(RetentionPolicy.RUNTIME) + @ParameterizedTest + @MethodSource("org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion#getAllVersions") + public @interface ContainerTest { + // composite annotation } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java index 380d0dd9a4e..ef8148600ea 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java @@ -28,11 +28,8 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,6 +44,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; @@ -78,10 +76,6 @@ private void initTestData(ContainerLayoutVersion layoutVersion) throws Exception setup(); } - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - public void setup() throws Exception { conf = new OzoneConfiguration(); datanodeId = UUID.randomUUID(); @@ -125,8 +119,7 @@ public void teardown() { * * @throws IOException */ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testMarkContainerUnhealthy(ContainerLayoutVersion layoutVersion) throws Exception { initTestData(layoutVersion); assertThat(keyValueContainerData.getState(), is(OPEN)); @@ -146,12 +139,11 @@ public void testMarkContainerUnhealthy(ContainerLayoutVersion layoutVersion) thr * * @throws IOException */ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testCloseUnhealthyContainer(ContainerLayoutVersion layoutVersion) throws Exception { initTestData(layoutVersion); keyValueContainer.markContainerUnhealthy(); - Assertions.assertThrows(StorageContainerException.class, () -> + assertThrows(StorageContainerException.class, () -> keyValueContainer.markContainerForClose()); } @@ -159,8 +151,7 @@ public void testCloseUnhealthyContainer(ContainerLayoutVersion layoutVersion) th /** * Attempting to mark a closed container as unhealthy should succeed. 
*/ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testMarkClosedContainerAsUnhealthy(ContainerLayoutVersion layoutVersion) throws Exception { initTestData(layoutVersion); // We need to create the container so the compact-on-close operation @@ -174,8 +165,7 @@ public void testMarkClosedContainerAsUnhealthy(ContainerLayoutVersion layoutVers /** * Attempting to mark a quasi-closed container as unhealthy should succeed. */ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testMarkQuasiClosedContainerAsUnhealthy(ContainerLayoutVersion layoutVersion) throws Exception { initTestData(layoutVersion); // We need to create the container so the sync-on-quasi-close operation @@ -189,8 +179,7 @@ public void testMarkQuasiClosedContainerAsUnhealthy(ContainerLayoutVersion layou /** * Attempting to mark a closing container as unhealthy should succeed. */ - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testMarkClosingContainerAsUnhealthy(ContainerLayoutVersion layoutVersion) throws Exception { initTestData(layoutVersion); keyValueContainer.markContainerForClose(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 76cc5f25f01..a970013ef8c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -58,17 +58,15 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; - -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.any; import org.mockito.Mockito; @@ -93,10 +91,6 @@ public class TestKeyValueHandler { private HddsDispatcher dispatcher; private KeyValueHandler handler; - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - @BeforeEach public void setup() throws StorageContainerException { // Create mock HddsDispatcher and KeyValueHandler. 
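[editor's note] The @ContainerLayoutTestInfo.ContainerTest annotation introduced above is a JUnit 5 composed annotation: @ParameterizedTest and @MethodSource may themselves be used as meta-annotations, so one custom annotation can bundle them, replacing the repeated @ParameterizedTest/@MethodSource("layoutVersion") pairs and the per-class layoutVersion() factories deleted throughout this commit. A self-contained sketch of the mechanism (VersionedTest and versions are illustrative names):

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.util.stream.Stream;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    class CompositeAnnotationExample {

      // Composed annotation: applying @VersionedTest is equivalent to
      // applying @ParameterizedTest and @MethodSource together.
      @Target(ElementType.METHOD)
      @Retention(RetentionPolicy.RUNTIME)
      @ParameterizedTest
      @MethodSource("versions")  // a "pkg.Class#method" string points elsewhere
      @interface VersionedTest {
      }

      static Stream<String> versions() {
        return Stream.of("v1", "v2");
      }

      @VersionedTest
      void runsOncePerVersion(String version) {
        // invoked once for each value the factory supplies
        System.out.println("running with " + version);
      }
    }

In the patch the value source lives in a different class, hence the fully qualified org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion#getAllVersions reference on the composed annotation.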
@@ -309,9 +303,7 @@ private ContainerCommandRequestProto getDummyCommandRequestProto( .build(); } - - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testCloseInvalidContainer(ContainerLayoutVersion layoutVersion) throws IOException { long containerID = 1234L; @@ -387,17 +379,17 @@ public void testDeleteContainer() throws IOException { createContainerRequest(datanodeId, containerID); kvHandler.handleCreateContainer(createContainer, null); - Assertions.assertEquals(1, icrReceived.get()); - Assertions.assertNotNull(containerSet.getContainer(containerID)); + assertEquals(1, icrReceived.get()); + assertNotNull(containerSet.getContainer(containerID)); kvHandler.deleteContainer(containerSet.getContainer(containerID), true); - Assertions.assertEquals(2, icrReceived.get()); - Assertions.assertNull(containerSet.getContainer(containerID)); + assertEquals(2, icrReceived.get()); + assertNull(containerSet.getContainer(containerID)); File[] deletedContainers = hddsVolume.getDeletedContainerDir().listFiles(); assertNotNull(deletedContainers); - Assertions.assertEquals(0, deletedContainers.length); + assertEquals(0, deletedContainers.length); // Case 2 : failed move of container dir to tmp location should trigger // a volume scan @@ -409,9 +401,9 @@ public void testDeleteContainer() throws IOException { kvHandler.handleCreateContainer(createContainer2, null); - Assertions.assertEquals(3, icrReceived.get()); + assertEquals(3, icrReceived.get()); Container container = containerSet.getContainer(container2ID); - Assertions.assertNotNull(container); + assertNotNull(container); File deletedContainerDir = hddsVolume.getDeletedContainerDir(); // to simulate failed move File dummyDir = new File(DUMMY_PATH); @@ -419,8 +411,7 @@ public void testDeleteContainer() throws IOException { try { kvHandler.deleteContainer(container, true); } catch (StorageContainerException sce) { - Assertions.assertTrue( - sce.getMessage().contains("Failed to move container")); + assertTrue(sce.getMessage().contains("Failed to move container")); } Mockito.verify(volumeSet).checkVolumeAsync(hddsVolume); // cleanup @@ -436,7 +427,7 @@ public void testDeleteContainer() throws IOException { String expectedLog = "Delete container issued on containerID 2 which is " + "in a failed volume"; - Assertions.assertTrue(kvHandlerLogs.getOutput().contains(expectedLog)); + assertTrue(kvHandlerLogs.getOutput().contains(expectedLog)); } finally { FileUtils.deleteDirectory(new File(testDir)); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 1a17e4cea73..fb923ab6a30 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -64,10 +64,7 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.TestClock; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; import javax.annotation.Nonnull; @@ -80,6 +77,8 @@ import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; import static org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status.DONE; import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.fromSources; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; @@ -112,10 +111,6 @@ public class TestReplicationSupervisor { private TestClock clock; private DatanodeDetails datanode; - private static Iterable layoutVersion() { - return ContainerLayoutTestInfo.containerLayoutParameters(); - } - @BeforeEach public void setUp() throws Exception { clock = new TestClock(Instant.now(), ZoneId.systemDefault()); @@ -136,8 +131,7 @@ public void cleanup() { replicatorRef.set(null); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void normal(ContainerLayoutVersion layout) { this.layoutVersion = layout; // GIVEN @@ -152,24 +146,23 @@ public void normal(ContainerLayoutVersion layout) { supervisor.addTask(createTask(2L)); supervisor.addTask(createTask(5L)); - Assertions.assertEquals(3, supervisor.getReplicationRequestCount()); - Assertions.assertEquals(3, supervisor.getReplicationSuccessCount()); - Assertions.assertEquals(0, supervisor.getReplicationFailureCount()); - Assertions.assertEquals(0, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(0, supervisor.getQueueSize()); - Assertions.assertEquals(3, set.containerCount()); + assertEquals(3, supervisor.getReplicationRequestCount()); + assertEquals(3, supervisor.getReplicationSuccessCount()); + assertEquals(0, supervisor.getReplicationFailureCount()); + assertEquals(0, supervisor.getTotalInFlightReplications()); + assertEquals(0, supervisor.getQueueSize()); + assertEquals(3, set.containerCount()); MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl(); metrics.getMetrics(metricsCollector, true); - Assertions.assertEquals(1, metricsCollector.getRecords().size()); + assertEquals(1, metricsCollector.getRecords().size()); } finally { metrics.unRegister(); supervisor.stop(); } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void duplicateMessage(ContainerLayoutVersion layout) { this.layoutVersion = layout; // GIVEN @@ -184,20 +177,19 @@ public void duplicateMessage(ContainerLayoutVersion layout) { supervisor.addTask(createTask(6L)); //THEN - Assertions.assertEquals(4, supervisor.getReplicationRequestCount()); - Assertions.assertEquals(1, supervisor.getReplicationSuccessCount()); - Assertions.assertEquals(0, supervisor.getReplicationFailureCount()); - Assertions.assertEquals(3, supervisor.getReplicationSkippedCount()); - Assertions.assertEquals(0, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(0, supervisor.getQueueSize()); - Assertions.assertEquals(1, set.containerCount()); + assertEquals(4, supervisor.getReplicationRequestCount()); + assertEquals(1, supervisor.getReplicationSuccessCount()); + assertEquals(0, supervisor.getReplicationFailureCount()); + assertEquals(3, supervisor.getReplicationSkippedCount()); + assertEquals(0, supervisor.getTotalInFlightReplications()); 
+ assertEquals(0, supervisor.getQueueSize()); + assertEquals(1, set.containerCount()); } finally { supervisor.stop(); } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void failureHandling(ContainerLayoutVersion layout) { this.layoutVersion = layout; // GIVEN @@ -210,20 +202,19 @@ public void failureHandling(ContainerLayoutVersion layout) { supervisor.addTask(task); //THEN - Assertions.assertEquals(1, supervisor.getReplicationRequestCount()); - Assertions.assertEquals(0, supervisor.getReplicationSuccessCount()); - Assertions.assertEquals(1, supervisor.getReplicationFailureCount()); - Assertions.assertEquals(0, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(0, supervisor.getQueueSize()); - Assertions.assertEquals(0, set.containerCount()); - Assertions.assertEquals(ReplicationTask.Status.FAILED, task.getStatus()); + assertEquals(1, supervisor.getReplicationRequestCount()); + assertEquals(0, supervisor.getReplicationSuccessCount()); + assertEquals(1, supervisor.getReplicationFailureCount()); + assertEquals(0, supervisor.getTotalInFlightReplications()); + assertEquals(0, supervisor.getQueueSize()); + assertEquals(0, set.containerCount()); + assertEquals(ReplicationTask.Status.FAILED, task.getStatus()); } finally { supervisor.stop(); } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void stalledDownload() { // GIVEN ReplicationSupervisor supervisor = supervisorWith(__ -> noopReplicator, @@ -238,23 +229,22 @@ public void stalledDownload() { supervisor.addTask(createECTask(5L)); //THEN - Assertions.assertEquals(0, supervisor.getReplicationRequestCount()); - Assertions.assertEquals(0, supervisor.getReplicationSuccessCount()); - Assertions.assertEquals(0, supervisor.getReplicationFailureCount()); - Assertions.assertEquals(5, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(3, supervisor.getInFlightReplications( + assertEquals(0, supervisor.getReplicationRequestCount()); + assertEquals(0, supervisor.getReplicationSuccessCount()); + assertEquals(0, supervisor.getReplicationFailureCount()); + assertEquals(5, supervisor.getTotalInFlightReplications()); + assertEquals(3, supervisor.getInFlightReplications( ReplicationTask.class)); - Assertions.assertEquals(2, supervisor.getInFlightReplications( + assertEquals(2, supervisor.getInFlightReplications( ECReconstructionCoordinatorTask.class)); - Assertions.assertEquals(0, supervisor.getQueueSize()); - Assertions.assertEquals(0, set.containerCount()); + assertEquals(0, supervisor.getQueueSize()); + assertEquals(0, set.containerCount()); } finally { supervisor.stop(); } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void slowDownload() { // GIVEN ReplicationSupervisor supervisor = supervisorWith(__ -> slowReplicator, @@ -268,22 +258,21 @@ public void slowDownload() { supervisor.addTask(createTask(3L)); //THEN - Assertions.assertEquals(3, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(2, supervisor.getQueueSize()); + assertEquals(3, supervisor.getTotalInFlightReplications()); + assertEquals(2, supervisor.getQueueSize()); // Sleep 4s, wait all tasks processed try { Thread.sleep(4000); } catch (InterruptedException e) { } - Assertions.assertEquals(0, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(0, supervisor.getQueueSize()); + assertEquals(0, supervisor.getTotalInFlightReplications()); + assertEquals(0, 
supervisor.getQueueSize()); } finally { supervisor.stop(); } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testDownloadAndImportReplicatorFailure() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); @@ -322,14 +311,13 @@ public void testDownloadAndImportReplicatorFailure() throws IOException { .captureLogs(DownloadAndImportReplicator.LOG); supervisor.addTask(createTask(1L)); - Assertions.assertEquals(1, supervisor.getReplicationFailureCount()); - Assertions.assertEquals(0, supervisor.getReplicationSuccessCount()); - Assertions.assertTrue(logCapturer.getOutput() + assertEquals(1, supervisor.getReplicationFailureCount()); + assertEquals(0, supervisor.getReplicationSuccessCount()); + assertTrue(logCapturer.getOutput() .contains("Container 1 replication was unsuccessful.")); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testTaskBeyondDeadline(ContainerLayoutVersion layout) { this.layoutVersion = layout; ReplicationSupervisor supervisor = @@ -352,18 +340,17 @@ public void testTaskBeyondDeadline(ContainerLayoutVersion layout) { supervisor.addTask(task2); supervisor.addTask(task3); - Assertions.assertEquals(3, supervisor.getReplicationRequestCount()); - Assertions.assertEquals(2, supervisor.getReplicationSuccessCount()); - Assertions.assertEquals(0, supervisor.getReplicationFailureCount()); - Assertions.assertEquals(0, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(0, supervisor.getQueueSize()); - Assertions.assertEquals(1, supervisor.getReplicationTimeoutCount()); - Assertions.assertEquals(2, set.containerCount()); + assertEquals(3, supervisor.getReplicationRequestCount()); + assertEquals(2, supervisor.getReplicationSuccessCount()); + assertEquals(0, supervisor.getReplicationFailureCount()); + assertEquals(0, supervisor.getTotalInFlightReplications()); + assertEquals(0, supervisor.getQueueSize()); + assertEquals(1, supervisor.getReplicationTimeoutCount()); + assertEquals(2, set.containerCount()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testDatanodeOutOfService(ContainerLayoutVersion layout) { this.layoutVersion = layout; ReplicationSupervisor supervisor = @@ -379,17 +366,16 @@ public void testDatanodeOutOfService(ContainerLayoutVersion layout) { supervisor.addTask(new ReplicationTask(pushCmd, replicatorRef.get())); supervisor.addTask(new ReplicationTask(pullCmd, replicatorRef.get())); - Assertions.assertEquals(2, supervisor.getReplicationRequestCount()); - Assertions.assertEquals(1, supervisor.getReplicationSuccessCount()); - Assertions.assertEquals(0, supervisor.getReplicationFailureCount()); - Assertions.assertEquals(0, supervisor.getTotalInFlightReplications()); - Assertions.assertEquals(0, supervisor.getQueueSize()); - Assertions.assertEquals(0, supervisor.getReplicationTimeoutCount()); - Assertions.assertEquals(1, set.containerCount()); + assertEquals(2, supervisor.getReplicationRequestCount()); + assertEquals(1, supervisor.getReplicationSuccessCount()); + assertEquals(0, supervisor.getReplicationFailureCount()); + assertEquals(0, supervisor.getTotalInFlightReplications()); + assertEquals(0, supervisor.getQueueSize()); + assertEquals(0, supervisor.getReplicationTimeoutCount()); + assertEquals(1, set.containerCount()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void 
taskWithObsoleteTermIsDropped(ContainerLayoutVersion layout) { this.layoutVersion = layout; final long newTerm = 2; @@ -399,12 +385,11 @@ public void taskWithObsoleteTermIsDropped(ContainerLayoutVersion layout) { context.setTermOfLeaderSCM(newTerm); supervisor.addTask(createTask(1L)); - Assertions.assertEquals(1, supervisor.getReplicationRequestCount()); - Assertions.assertEquals(0, supervisor.getReplicationSuccessCount()); + assertEquals(1, supervisor.getReplicationRequestCount()); + assertEquals(0, supervisor.getReplicationSuccessCount()); } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testPriorityOrdering(ContainerLayoutVersion layout) throws InterruptedException { this.layoutVersion = layout; @@ -458,19 +443,19 @@ public void testPriorityOrdering(ContainerLayoutVersion layout) // Before unblocking the queue, check the queue count for the OrderedTask. // We loaded 3 High / normal priority and 2 low. The counter should not // include the low counts. - Assertions.assertEquals(3, + assertEquals(3, supervisor.getInFlightReplications(OrderedTask.class)); - Assertions.assertEquals(1, + assertEquals(1, supervisor.getInFlightReplications(BlockingTask.class)); // Unblock the queue completeRunning.countDown(); // Wait for all tasks to complete tasksCompleteLatch.await(); - Assertions.assertEquals(expectedOrder, completionOrder); - Assertions.assertEquals(0, + assertEquals(expectedOrder, completionOrder); + assertEquals(0, supervisor.getInFlightReplications(OrderedTask.class)); - Assertions.assertEquals(0, + assertEquals(0, supervisor.getInFlightReplications(BlockingTask.class)); } @@ -610,7 +595,7 @@ public void replicate(ReplicationTask task) { } // assumes same-thread execution - Assertions.assertEquals(1, supervisor.getTotalInFlightReplications()); + assertEquals(1, supervisor.getTotalInFlightReplications()); KeyValueContainerData kvcd = new KeyValueContainerData(task.getContainerId(), @@ -623,7 +608,7 @@ public void replicate(ReplicationTask task) { set.addContainer(kvc); task.setStatus(DONE); } catch (Exception e) { - Assertions.fail("Unexpected error: " + e.getMessage()); + fail("Unexpected error: " + e.getMessage()); } } } @@ -665,8 +650,7 @@ public void execute(@Nonnull Runnable command) { } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void poolSizeCanBeIncreased() { datanode.setPersistedOpState(IN_SERVICE); ReplicationSupervisor subject = ReplicationSupervisor.newBuilder() @@ -680,8 +664,7 @@ public void poolSizeCanBeIncreased() { } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void poolSizeCanBeDecreased() { datanode.setPersistedOpState(IN_MAINTENANCE); ReplicationSupervisor subject = ReplicationSupervisor.newBuilder() @@ -695,8 +678,7 @@ public void poolSizeCanBeDecreased() { } } - @ParameterizedTest - @MethodSource("layoutVersion") + @ContainerLayoutTestInfo.ContainerTest public void testMaxQueueSize() { List datanodes = new ArrayList<>(); datanodes.add(MockDatanodeDetails.randomDatanodeDetails()); @@ -724,22 +706,22 @@ public void testMaxQueueSize() { // in progress task will be limited by max. 
queue size, // since all tasks are discarded by the executor, none of them complete - Assertions.assertEquals(maxQueueSize, rs.getTotalInFlightReplications()); + assertEquals(maxQueueSize, rs.getTotalInFlightReplications()); // queue size is doubled rs.nodeStateUpdated(HddsProtos.NodeOperationalState.DECOMMISSIONING); - Assertions.assertEquals(2 * maxQueueSize, rs.getMaxQueueSize()); - Assertions.assertEquals(2 * replicationMaxStreams, threadPoolSize.get()); + assertEquals(2 * maxQueueSize, rs.getMaxQueueSize()); + assertEquals(2 * replicationMaxStreams, threadPoolSize.get()); // can schedule more tasks scheduleTasks(datanodes, rs); - Assertions.assertEquals( + assertEquals( 2 * maxQueueSize, rs.getTotalInFlightReplications()); // queue size is restored rs.nodeStateUpdated(IN_SERVICE); - Assertions.assertEquals(maxQueueSize, rs.getMaxQueueSize()); - Assertions.assertEquals(replicationMaxStreams, threadPoolSize.get()); + assertEquals(maxQueueSize, rs.getMaxQueueSize()); + assertEquals(replicationMaxStreams, threadPoolSize.get()); } //schedule 10 container replication diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java index cc7375c0347..c5301ba4194 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.List; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; @@ -27,10 +26,8 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.KeyInputStream; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.om.TestBucket; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -41,16 +38,11 @@ */ class TestChunkInputStream extends TestInputStreamBase { - private static List layouts() { - return ContainerLayoutVersion.getAllVersions(); - } - /** * Run the tests as a single test method to avoid needing a new mini-cluster * for each test. 
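The single-entry-point pattern this javadoc describes, reduced to a skeleton (the scenario helpers below are placeholders, not the class's real private methods):

    // One mini-cluster per layout parameter, shared by every scenario,
    // instead of one cluster per @Test method.
    @ContainerLayoutTestInfo.ContainerTest
    void testAll(ContainerLayoutVersion layout) throws Exception {
      try (MiniOzoneCluster cluster = newCluster(layout)) {
        cluster.waitForClusterToBeReady();
        runSeekScenario(cluster);   // placeholder
        runReadScenario(cluster);   // placeholder
      }
    }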
*/ - @ParameterizedTest - @MethodSource("layouts") + @ContainerLayoutTestInfo.ContainerTest void testAll(ContainerLayoutVersion layout) throws Exception { try (MiniOzoneCluster cluster = newCluster(layout)) { cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java index bfcc0bdd7b0..3ab8ae31188 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.om.TestBucket; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -64,10 +65,6 @@ */ class TestKeyInputStream extends TestInputStreamBase { - private static List layouts() { - return ContainerLayoutVersion.getAllVersions(); - } - /** * This method does random seeks and reads and validates the reads are * correct or not. @@ -124,8 +121,7 @@ private void validate(TestBucket bucket, KeyInputStream keyInputStream, * This test runs the others as a single test, so to avoid creating a new * mini-cluster for each test. */ - @ParameterizedTest - @MethodSource("layouts") + @ContainerLayoutTestInfo.ContainerTest void testNonReplicationReads(ContainerLayoutVersion layout) throws Exception { try (MiniOzoneCluster cluster = newCluster(layout)) { cluster.waitForClusterToBeReady(); From 726dc5506b8705fa86c13bce2ae79cf48b059eef Mon Sep 17 00:00:00 2001 From: Ivan Andika <36403683+ivandika3@users.noreply.github.com> Date: Wed, 20 Dec 2023 23:51:46 +0800 Subject: [PATCH 18/28] HDDS-9971. 
Fix issues in allocateBlock when clientMachine is null (#5837) --- .../ScmBlockLocationProtocolClientSideTranslatorPB.java | 6 +++++- .../hadoop/hdds/scm/server/SCMBlockProtocolServer.java | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index b072c3690b7..2e724969998 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -162,9 +163,12 @@ public List allocateBlock( .setNumBlocks(num) .setType(replicationConfig.getReplicationType()) .setOwner(owner) - .setClient(clientMachine) .setExcludeList(excludeList.getProtoBuf()); + if (StringUtils.isNotEmpty(clientMachine)) { + requestBuilder.setClient(clientMachine); + } + switch (replicationConfig.getReplicationType()) { case STAND_ALONE: requestBuilder.setFactor( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index c610c95d2b2..41c96d969f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -29,6 +29,7 @@ import java.util.Map; import java.util.concurrent.TimeoutException; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -383,6 +384,9 @@ public List sortDatanodes(List nodes, } private Node getClientNode(String clientMachine) { + if (StringUtils.isEmpty(clientMachine)) { + return null; + } List datanodes = scm.getScmNodeManager() .getNodesByAddress(clientMachine); return !datanodes.isEmpty() ? datanodes.get(0) : From 0e07225dbd20eeaf3a9c67a7facf91baeb105bb7 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Wed, 20 Dec 2023 20:19:56 +0300 Subject: [PATCH 19/28] HDDS-9807. 
Consider volume committed space when checking if datanode can host new container (#5721) --- .../common/impl/StorageLocationReport.java | 56 +++++++++++++- .../StorageLocationReportMXBean.java | 4 + .../common/volume/AvailableSpaceFilter.java | 8 +- .../common/volume/MutableVolumeSet.java | 4 + .../common/volume/VolumeInfoMetrics.java | 5 ++ .../container/common/volume/VolumeUsage.java | 8 ++ .../container/ozoneimpl/OzoneContainer.java | 2 +- .../static/swagger-resources/recon-api.yaml | 6 ++ .../src/main/proto/hdds.proto | 2 + .../ScmServerDatanodeHeartbeatProtocol.proto | 2 + .../hdds/scm/SCMCommonPlacementPolicy.java | 13 +++- .../balancer/ContainerBalancerTask.java | 2 +- .../container/placement/metrics/NodeStat.java | 15 +++- .../placement/metrics/SCMNodeMetric.java | 16 ++-- .../placement/metrics/SCMNodeStat.java | 48 ++++++++++-- .../hdds/scm/node/DatanodeUsageInfo.java | 2 + .../hadoop/hdds/scm/node/SCMNodeManager.java | 14 +++- .../hdds/scm/pipeline/PipelineProvider.java | 8 +- .../scm/pipeline/RatisPipelineProvider.java | 2 +- .../scm/TestSCMCommonPlacementPolicy.java | 73 ++++++++++++++++++- .../hdds/scm/container/MockNodeManager.java | 2 +- .../balancer/TestContainerBalancerTask.java | 3 +- .../balancer/TestFindTargetStrategy.java | 22 +++--- .../TestSCMContainerPlacementCapacity.java | 8 +- .../placement/TestDatanodeMetrics.java | 8 +- .../scm/cli/datanode/UsageInfoSubcommand.java | 24 +++++- .../cli/datanode/TestUsageInfoSubcommand.java | 33 +++++++++ .../ozone/recon/api/ClusterStateEndpoint.java | 3 +- .../hadoop/ozone/recon/api/NodeEndpoint.java | 3 +- .../api/types/DatanodeStorageReport.java | 9 ++- .../webapps/recon/ozone-recon-web/api/db.json | 57 ++++++++++----- .../src/components/storageBar/storageBar.less | 5 ++ .../src/components/storageBar/storageBar.tsx | 5 +- .../src/types/datanode.types.tsx | 1 + .../src/views/datanodes/datanodes.tsx | 4 +- .../api/TestNSSummaryEndpointWithFSO.java | 2 +- .../api/TestNSSummaryEndpointWithLegacy.java | 2 +- 37 files changed, 398 insertions(+), 83 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 0222050da5e..f31d45a7782 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.impl; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto. StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto. 
@@ -27,6 +28,7 @@ StorageContainerDatanodeProtocolProtos.StorageTypeProto; import org.apache.hadoop.ozone.container.common.interfaces .StorageLocationReportMXBean; +import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; import java.io.IOException; @@ -42,17 +44,22 @@ public final class StorageLocationReport implements private final long capacity; private final long scmUsed; private final long remaining; + private final long committed; + private final long freeSpaceToSpare; private final StorageType storageType; private final String storageLocation; + @SuppressWarnings("checkstyle:parameternumber") private StorageLocationReport(String id, boolean failed, long capacity, - long scmUsed, long remaining, StorageType storageType, - String storageLocation) { + long scmUsed, long remaining, long committed, long freeSpaceToSpare, + StorageType storageType, String storageLocation) { this.id = id; this.failed = failed; this.capacity = capacity; this.scmUsed = scmUsed; this.remaining = remaining; + this.committed = committed; + this.freeSpaceToSpare = freeSpaceToSpare; this.storageType = storageType; this.storageLocation = storageLocation; } @@ -82,6 +89,16 @@ public long getRemaining() { return remaining; } + @Override + public long getCommitted() { + return committed; + } + + @Override + public long getFreeSpaceToSpare() { + return freeSpaceToSpare; + } + @Override public String getStorageLocation() { return storageLocation; @@ -157,14 +174,22 @@ private static StorageType getStorageType(StorageTypeProto proto) throws * @throws IOException In case, the storage type specified is invalid. */ public StorageReportProto getProtoBufMessage() throws IOException { + return getProtoBufMessage(null); + } + + public StorageReportProto getProtoBufMessage(ConfigurationSource conf) + throws IOException { StorageReportProto.Builder srb = StorageReportProto.newBuilder(); return srb.setStorageUuid(getId()) .setCapacity(getCapacity()) .setScmUsed(getScmUsed()) .setRemaining(getRemaining()) + .setCommitted(getCommitted()) .setStorageType(getStorageTypeProto()) .setStorageLocation(getStorageLocation()) .setFailed(isFailed()) + .setFreeSpaceToSpare(conf != null ? + VolumeUsage.getMinVolumeFreeSpace(conf, getCapacity()) : 0) .build(); } @@ -266,6 +291,8 @@ public static class Builder { private long capacity; private long scmUsed; private long remaining; + private long committed; + private long freeSpaceToSpare; private StorageType storageType; private String storageLocation; @@ -334,6 +361,29 @@ public Builder setStorageType(StorageType storageTypeValue) { return this; } + /** + * Sets the committed bytes count. + * (bytes for previously created containers) + * @param committed previously created containers size + * @return StorageLocationReport.Builder + */ + public Builder setCommitted(long committed) { + this.committed = committed; + return this; + } + + /** + * Sets the free space available to spare. + * (depends on datanode volume config, + * consider 'hdds.datanode.volume.min.*' configuration properties) + * @param freeSpaceToSpare the size of free volume space available to spare + * @return StorageLocationReport.Builder + */ + public Builder setFreeSpaceToSpare(long freeSpaceToSpare) { + this.freeSpaceToSpare = freeSpaceToSpare; + return this; + } + /** * Sets the storageLocation. 
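With the two new accounting setters in place, a report carrying committed and spare-space figures can be assembled in one chain. A hedged sketch with invented values (the production code may obtain the builder through a factory method rather than the constructor used here):

    StorageLocationReport report = new StorageLocationReport.Builder()
        .setCapacity(200000L)
        .setScmUsed(90000L)
        .setRemaining(101000L)
        .setCommitted(500L)           // space promised to already-created containers
        .setFreeSpaceToSpare(10000L)  // reserve from hdds.datanode.volume.min.*
        .setStorageType(StorageType.DISK)
        .setStorageLocation("/data/hdds")
        .build();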
* @@ -352,7 +402,7 @@ public Builder setStorageLocation(String storageLocationValue) { */ public StorageLocationReport build() { return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, storageType, storageLocation); + remaining, committed, freeSpaceToSpare, storageType, storageLocation); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java index fd063678137..74c4336bc65 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java @@ -33,6 +33,10 @@ public interface StorageLocationReportMXBean { long getRemaining(); + long getCommitted(); + + long getFreeSpaceToSpare(); + String getStorageLocation(); String getStorageTypeName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 13041eb4d66..622c85a52fa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -25,7 +25,7 @@ * Filter for selecting volumes with enough space for a new container. * Keeps track of ineligible volumes for logging/debug purposes. */ -class AvailableSpaceFilter implements Predicate { +public class AvailableSpaceFilter implements Predicate { private final long requiredSpace; private final Map fullVolumes = @@ -42,10 +42,10 @@ public boolean test(HddsVolume vol) { long free = vol.getAvailable(); long committed = vol.getCommittedBytes(); long available = free - committed; - long volumeFreeSpace = + long volumeFreeSpaceToSpare = VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity); - boolean hasEnoughSpace = - available > Math.max(requiredSpace, volumeFreeSpace); + boolean hasEnoughSpace = VolumeUsage.hasVolumeEnoughSpace(free, committed, + requiredSpace, volumeFreeSpaceToSpare); mostAvailableSpace = Math.max(available, mostAvailableSpace); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 985ddea8deb..3c0b6e618ee 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -464,6 +464,7 @@ public StorageLocationReport[] getStorageReport() { long scmUsed = 0; long remaining = 0; long capacity = 0; + long committed = 0; String rootDir = ""; failed = true; if (volumeInfo.isPresent()) { @@ -472,6 +473,8 @@ public StorageLocationReport[] getStorageReport() { scmUsed = volumeInfo.get().getScmUsed(); remaining = volumeInfo.get().getAvailable(); capacity = volumeInfo.get().getCapacity(); + committed = (volume instanceof HddsVolume) ? 
+ ((HddsVolume) volume).getCommittedBytes() : 0; failed = false; } catch (UncheckedIOException ex) { LOG.warn("Failed to get scmUsed and remaining for container " + @@ -491,6 +494,7 @@ public StorageLocationReport[] getStorageReport() { .setCapacity(capacity) .setRemaining(remaining) .setScmUsed(scmUsed) + .setCommitted(committed) .setStorageType(volume.getStorageType()); StorageLocationReport r = builder.build(); reports[counter++] = r; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index c90dcea81ff..18e7354ec1d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -142,4 +142,9 @@ public long getTotalCapacity() { return (getUsed() + getAvailable() + getReserved()); } + @Metric("Returns the Committed bytes of the Volume") + public long getCommitted() { + return volume.getCommittedBytes(); + } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index e7a06abc9e3..57cf0a8b9dd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -162,6 +162,14 @@ public static long getMinVolumeFreeSpace(ConfigurationSource conf, } + public static boolean hasVolumeEnoughSpace(long volumeAvailableSpace, + long volumeCommittedBytesCount, + long requiredSpace, + long volumeFreeSpaceToSpare) { + return (volumeAvailableSpace - volumeCommittedBytesCount) > + Math.max(requiredSpace, volumeFreeSpaceToSpare); + } + /** * Class representing precomputed space values of a volume. * This class is intended to store precomputed values, such as capacity diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 1e34fb10493..277ab4464e3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -524,7 +524,7 @@ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() = StorageContainerDatanodeProtocolProtos. 
NodeReportProto.newBuilder(); for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage()); + nrb.addStorageReport(reports[i].getProtoBufMessage(config)); } StorageLocationReport[] metaReports = metaVolumeSet.getStorageReport(); diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml index 3b41132f5f5..9ff32877665 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml +++ b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml @@ -1433,6 +1433,9 @@ components: remaining: type: number example: 1080410456064 + committed: + type: number + example: 1080410456 containers: type: integer example: 26 @@ -1480,6 +1483,9 @@ components: remaining: type: number example: 270071111680 + committed: + type: number + example: 27007111 pipelines: type: array items: diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 5c20745c061..3f346300b3e 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -187,6 +187,8 @@ message DatanodeUsageInfoProto { optional int64 remaining = 3; optional DatanodeDetailsProto node = 4; optional int64 containerCount = 5; + optional int64 committed = 6; + optional int64 freeSpaceToSpare = 7; } /** diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index de9e39789b5..2994073c024 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -179,6 +179,8 @@ message StorageReportProto { optional uint64 remaining = 5 [default = 0]; optional StorageTypeProto storageType = 6 [default = DISK]; optional bool failed = 7 [default = false]; + optional uint64 committed = 8 [default = 0]; + optional uint64 freeSpaceToSpare = 9 [default = 0]; } message MetadataStorageReportProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 4c96175b6c0..46cb142bb11 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -274,7 +275,7 @@ public List filterNodesWithSpace(List nodes, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { List nodesWithSpace = nodes.stream().filter(d -> - hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired)) + hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired, conf)) .collect(Collectors.toList()); if (nodesWithSpace.size() < nodesRequired) { @@ -298,7 +299,9 @@ public List filterNodesWithSpace(List nodes, * @return true if we have enough space. 
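Concretely, the check that hasEnoughSpace delegates to below performs this arithmetic; the numbers mirror the placement-policy test added later in this patch:

    // (available - committed) must exceed max(requiredSpace, freeSpaceToSpare).
    // available=101000, committed=500, required=4000, spare=100000:
    //   101000 - 500 = 100500 > 100000  -> volume qualifies
    boolean fits = VolumeUsage.hasVolumeEnoughSpace(101000L, 500L, 4000L, 100000L);

    // Raising committed to 1000 flips the outcome:
    //   101000 - 1000 = 100000, and 100000 > 100000 is false -> volume rejected
    boolean stillFits = VolumeUsage.hasVolumeEnoughSpace(101000L, 1000L, 4000L, 100000L);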
*/ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long metadataSizeRequired, long dataSizeRequired) { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) { Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo); boolean enoughForData = false; @@ -308,7 +311,9 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, if (dataSizeRequired > 0) { for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) { - if (reportProto.getRemaining() > dataSizeRequired) { + if (VolumeUsage.hasVolumeEnoughSpace(reportProto.getRemaining(), + reportProto.getCommitted(), dataSizeRequired, + reportProto.getFreeSpaceToSpare())) { enoughForData = true; break; } @@ -494,7 +499,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus = datanodeInfo.getNodeStatus(); if (nodeStatus.isNodeWritable() && (hasEnoughSpace(datanodeInfo, metadataSizeRequired, - dataSizeRequired))) { + dataSizeRequired, conf))) { LOG.debug("Datanode {} is chosen. Required metadata size is {} and " + "required data size is {} and NodeStatus is {}", datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 6541d75d279..abbc50ac86a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -932,7 +932,7 @@ private long ratioToBytes(Long nodeCapacity, double utilizationRatio) { return 0; } SCMNodeStat aggregatedStats = new SCMNodeStat( - 0, 0, 0); + 0, 0, 0, 0, 0); for (DatanodeUsageInfo node : nodes) { aggregatedStats.add(node.getScmNodeStat()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java index d6857d395cf..eedc89dfc58 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java @@ -42,6 +42,18 @@ interface NodeStat { */ LongMetric getRemaining(); + /** + * Get the committed space of the node. + * @return the committed space of the node + */ + LongMetric getCommitted(); + + /** + * Get the minimum free space that must remain available to spare on the node. + * @return the minimum free space available to spare + */ + LongMetric getFreeSpaceToSpare(); + /** * Set the total/used/remaining space. * @param capacity - total space. @@ -49,7 +61,8 @@ interface NodeStat { * @param remain - remaining space. */ @VisibleForTesting - void set(long capacity, long used, long remain); + void set(long capacity, long used, long remain, long committed, + long freeSpaceToSpare); /** * Adding of the stat. 
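The two metric classes that follow implement this extended interface, so every aggregate now carries all five dimensions. An illustration with invented values, using the five-argument constructor introduced below:

    SCMNodeStat a = new SCMNodeStat(100L, 10L, 90L, 5L, 20L);
    SCMNodeStat b = new SCMNodeStat(200L, 50L, 150L, 10L, 40L);
    a.add(b);
    // a now holds capacity=300, scmUsed=60, remaining=240,
    // committed=15, freeSpaceToSpare=60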
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 2f5c6f33f73..330bf67416a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -36,16 +36,19 @@ public SCMNodeMetric(SCMNodeStat stat) { } /** - * Set the capacity, used and remaining space on a datanode. + * Set the capacity, used, remaining, committed and spare space on a datanode. * - * @param capacity in bytes - * @param used in bytes + * @param capacity in bytes + * @param used in bytes * @param remaining in bytes + * @param committed in bytes + * @param freeSpaceToSpare in bytes */ @VisibleForTesting - public SCMNodeMetric(long capacity, long used, long remaining) { + public SCMNodeMetric(long capacity, long used, long remaining, + long committed, long freeSpaceToSpare) { this.stat = new SCMNodeStat(); - this.stat.set(capacity, used, remaining); + this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare); } /** @@ -156,7 +159,8 @@ public SCMNodeStat get() { @Override public void set(SCMNodeStat value) { stat.set(value.getCapacity().get(), value.getScmUsed().get(), - value.getRemaining().get()); + value.getRemaining().get(), value.getCommitted().get(), + value.getFreeSpaceToSpare().get()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 962bbb464ec..2a848a04eff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -28,16 +28,20 @@ public class SCMNodeStat implements NodeStat { private LongMetric capacity; private LongMetric scmUsed; private LongMetric remaining; + private LongMetric committed; + private LongMetric freeSpaceToSpare; public SCMNodeStat() { - this(0L, 0L, 0L); + this(0L, 0L, 0L, 0L, 0L); } public SCMNodeStat(SCMNodeStat other) { - this(other.capacity.get(), other.scmUsed.get(), other.remaining.get()); + this(other.capacity.get(), other.scmUsed.get(), other.remaining.get(), + other.committed.get(), other.freeSpaceToSpare.get()); } - public SCMNodeStat(long capacity, long used, long remaining) { + public SCMNodeStat(long capacity, long used, long remaining, long committed, + long freeSpaceToSpare) { Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(used >= 0, "used space cannot be " + @@ -47,6 +51,8 @@ public SCMNodeStat(long capacity, long used, long remaining) { this.capacity = new LongMetric(capacity); this.scmUsed = new LongMetric(used); this.remaining = new LongMetric(remaining); + this.committed = new LongMetric(committed); + this.freeSpaceToSpare = new LongMetric(freeSpaceToSpare); } /** @@ -73,6 +79,24 @@ public LongMetric getRemaining() { return remaining; } + /** + * Get the space committed for container creation on the node. + * @return the total committed space on the node + */ + @Override + public LongMetric getCommitted() { + return committed; + } + + /** + * Get the min free space available to spare on the node. 
+ * @return a min free space available to spare on the node + */ + @Override + public LongMetric getFreeSpaceToSpare() { + return freeSpaceToSpare; + } + /** * Set the capacity, used and remaining space on a datanode. * @@ -82,7 +106,8 @@ public LongMetric getRemaining() { */ @Override @VisibleForTesting - public void set(long newCapacity, long newUsed, long newRemaining) { + public void set(long newCapacity, long newUsed, long newRemaining, + long newCommitted, long newFreeSpaceToSpare) { Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + @@ -93,6 +118,8 @@ public void set(long newCapacity, long newUsed, long newRemaining) { this.capacity = new LongMetric(newCapacity); this.scmUsed = new LongMetric(newUsed); this.remaining = new LongMetric(newRemaining); + this.committed = new LongMetric(newCommitted); + this.freeSpaceToSpare = new LongMetric(newFreeSpaceToSpare); } /** @@ -106,6 +133,9 @@ public SCMNodeStat add(NodeStat stat) { this.capacity.set(this.getCapacity().get() + stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() + stat.getRemaining().get()); + this.committed.set(this.getCommitted().get() + stat.getCommitted().get()); + this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() + + stat.getFreeSpaceToSpare().get()); return this; } @@ -120,6 +150,9 @@ public SCMNodeStat subtract(NodeStat stat) { this.capacity.set(this.getCapacity().get() - stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() - stat.getRemaining().get()); + this.committed.set(this.getCommitted().get() - stat.getCommitted().get()); + this.freeSpaceToSpare.set(freeSpaceToSpare.get() - + stat.getFreeSpaceToSpare().get()); return this; } @@ -129,13 +162,16 @@ public boolean equals(Object to) { SCMNodeStat tempStat = (SCMNodeStat) to; return capacity.isEqual(tempStat.getCapacity().get()) && scmUsed.isEqual(tempStat.getScmUsed().get()) && - remaining.isEqual(tempStat.getRemaining().get()); + remaining.isEqual(tempStat.getRemaining().get()) && + committed.isEqual(tempStat.getCommitted().get()) && + freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get()); } return false; } @Override public int hashCode() { - return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get()); + return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^ + committed.get() ^ freeSpaceToSpare.get()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java index 14353cfa7e3..4f7df496906 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java @@ -205,6 +205,8 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) { builder.setCapacity(scmNodeStat.getCapacity().get()); builder.setUsed(scmNodeStat.getScmUsed().get()); builder.setRemaining(scmNodeStat.getRemaining().get()); + builder.setCommitted(scmNodeStat.getCommitted().get()); + builder.setFreeSpaceToSpare(scmNodeStat.getFreeSpaceToSpare().get()); } builder.setContainerCount(containerCount); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index b34f5819f6b..e2cce7ac09f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -876,13 +876,18 @@ public SCMNodeStat getStats() { long capacity = 0L; long used = 0L; long remaining = 0L; + long committed = 0L; + long freeSpaceToSpare = 0L; for (SCMNodeStat stat : getNodeStats().values()) { capacity += stat.getCapacity().get(); used += stat.getScmUsed().get(); remaining += stat.getRemaining().get(); + committed += stat.getCommitted().get(); + freeSpaceToSpare += stat.getFreeSpaceToSpare().get(); } - return new SCMNodeStat(capacity, used, remaining); + return new SCMNodeStat(capacity, used, remaining, committed, + freeSpaceToSpare); } /** @@ -987,6 +992,8 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { long capacity = 0L; long used = 0L; long remaining = 0L; + long committed = 0L; + long freeSpaceToSpare = 0L; final DatanodeInfo datanodeInfo = nodeStateManager .getNode(datanodeDetails); @@ -996,8 +1003,11 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { capacity += reportProto.getCapacity(); used += reportProto.getScmUsed(); remaining += reportProto.getRemaining(); + committed += reportProto.getCommitted(); + freeSpaceToSpare += reportProto.getFreeSpaceToSpare(); } - return new SCMNodeStat(capacity, used, remaining); + return new SCMNodeStat(capacity, used, remaining, committed, + freeSpaceToSpare); } catch (NodeNotFoundException e) { LOG.warn("Cannot generate NodeStat, datanode {} not found.", datanodeDetails.getUuidString()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index 4adcd53eb38..0ec74d2405c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -85,12 +86,15 @@ protected abstract Pipeline createForRead( protected abstract void shutdown(); List pickNodesNotUsed(REPLICATION_CONFIG replicationConfig, - long metadataSizeRequired, long dataSizeRequired) throws SCMException { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) + throws SCMException { int nodesRequired = replicationConfig.getRequiredNodes(); List healthyDNs = pickAllNodesNotUsed(replicationConfig); List healthyDNsWithSpace = healthyDNs.stream() .filter(dn -> SCMCommonPlacementPolicy - .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired)) + .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired, conf)) .limit(nodesRequired) .collect(Collectors.toList()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 1b62120c1ee..8336bce5eae 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -163,7 +163,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, switch (factor) { case ONE: dns = pickNodesNotUsed(replicationConfig, minRatisVolumeSizeBytes, - containerSizeBytes); + containerSizeBytes, conf); break; case THREE: List excludeDueToEngagement = filterPipelineEngagement(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index ffefc7c5f5d..87497a9f070 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -24,30 +24,40 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; - import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.util.function.Function; import java.util.stream.Stream; @@ -448,11 +458,66 @@ protected List chooseDatanodesInternal( } }; dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1); - Assertions.assertFalse(usedNodesIdentity.get()); + assertFalse(usedNodesIdentity.get()); dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1); Assertions.assertTrue(usedNodesIdentity.get()); } + @Test + public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() { + NodeManager nodeMngr = mock(NodeManager.class); + UUID datanodeUuid = UUID.randomUUID(); + DummyPlacementPolicy placementPolicy = + new DummyPlacementPolicy(nodeMngr, conf, 1); + DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); + when(datanodeDetails.getUuid()).thenReturn(datanodeUuid); + + DatanodeInfo 
datanodeInfo = mock(DatanodeInfo.class); + NodeStatus nodeStatus = mock(NodeStatus.class); + when(nodeStatus.isNodeWritable()).thenReturn(true); + when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus); + when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo); + + // capacity = 200000, used = 90000, remaining = 101000, committed = 500 + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 = + HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", + 200000, 90000, 101000, DISK).toBuilder() + .setCommitted(500) + .setFreeSpaceToSpare(10000) + .build(); + // capacity = 200000, used = 90000, remaining = 101000, committed = 1000 + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 = + HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", + 200000, 90000, 101000, DISK).toBuilder() + .setCommitted(1000) + .setFreeSpaceToSpare(100000) + .build(); + StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto + metaReport = HddsTestUtils.createMetadataStorageReport("/data/metadata", + 200); + when(datanodeInfo.getStorageReports()) + .thenReturn(Collections.singletonList(storageReport1)) + .thenReturn(Collections.singletonList(storageReport2)); + when(datanodeInfo.getMetadataStorageReports()) + .thenReturn(Collections.singletonList(metaReport)); + + + // 500 committed bytes: + // + // 101000 500 + // | | + // (remaining - committed) > Math.max(4000, freeSpaceToSpare) + // | + // 100000 + // + // Summary: 101000 - 500 > 100000 == true + assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + + // 1000 committed bytes: + // Summary: 101000 - 1000 > 100000 == false + assertFalse(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + } + private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { private Map rackMap; private List racks; @@ -485,7 +550,7 @@ private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { super(nodeManager, conf); this.rackCnt = rackCnt; this.racks = IntStream.range(0, rackCnt) - .mapToObj(i -> Mockito.mock(Node.class)).collect(Collectors.toList()); + .mapToObj(i -> mock(Node.class)).collect(Collectors.toList()); List datanodeDetails = nodeManager.getAllNodes(); rackMap = datanodeRackMap.entrySet().stream() .collect(Collectors.toMap( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 98638ebe009..794dedceef0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -227,7 +227,7 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) { NODES[x % NODES.length].capacity - NODES[x % NODES.length].used; newStat.set( (NODES[x % NODES.length].capacity), - (NODES[x % NODES.length].used), remaining); + (NODES[x % NODES.length].used), remaining, 0, 100000); this.nodeMetricMap.put(datanodeDetails, newStat); aggregateStat.add(newStat); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 4bc3cf43cf6..56d02dabb5f 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -1207,7 +1207,8 @@ private double createCluster() { datanodeCapacity = (long) (datanodeUsedSpace / nodeUtilizations.get(i)); } SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace, - datanodeCapacity - datanodeUsedSpace); + datanodeCapacity - datanodeUsedSpace, 0, + datanodeCapacity - datanodeUsedSpace - 1); nodesInCluster.get(i).setScmNodeStat(stat); clusterUsedSpace += datanodeUsedSpace; clusterCapacity += datanodeCapacity; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java index 7e734042d88..bb6f17bcc10 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java @@ -56,11 +56,11 @@ public void testFindTargetGreedyByUsage() { //create three datanodes with different usageinfo DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30)); //insert in ascending order overUtilizedDatanodes.add(dui1); @@ -98,11 +98,11 @@ public void testFindTargetGreedyByUsage() { public void testResetPotentialTargets() { // create three datanodes with different usage infos DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70)); + .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80)); + .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90)); + .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70)); List potentialTargets = new ArrayList<>(); potentialTargets.add(dui1); @@ -179,18 +179,18 @@ public void testFindTargetGreedyByNetworkTopology() { List overUtilizedDatanodes = new ArrayList<>(); //set the farthest target with the lowest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90))); + new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80))); //set the tree targets, which have the same network topology distance //to source , with different usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20))); + new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40))); + new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30))); overUtilizedDatanodes.add( - new 
DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60))); + new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50))); //set the nearest target with the highest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10))); + new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5))); FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index 910fe75ede6..e51f9731ad4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -103,13 +103,13 @@ public void chooseDatanodes() throws SCMException { .thenReturn(new ArrayList<>(datanodes)); when(mockNodeManager.getNodeStat(any())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L)); + .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90)); when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); + .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9)); when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 80L, 20L)); + .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19)); when(mockNodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(100L, 70L, 30L)); + .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20)); when(mockNodeManager.getNodeByUuid(any(UUID.class))).thenAnswer( invocation -> datanodes.stream() .filter(dn -> dn.getUuid().equals(invocation.getArgument(0))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java index 6ba2fc440a4..9c9bfad582f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java @@ -31,13 +31,13 @@ public class TestDatanodeMetrics { @Test public void testSCMNodeMetric() { - SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L); + SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80); assertEquals((long) stat.getCapacity().get(), 100L); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); SCMNodeMetric metric = new SCMNodeMetric(stat); - SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L); + SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80); assertEquals(100L, (long) stat.getCapacity().get()); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); @@ -53,8 +53,8 @@ public void testSCMNodeMetric() { assertTrue(metric.isGreater(zeroMetric.get())); // Another case when nodes have similar weight - SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L); - SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L); + SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000); + SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000); assertTrue(new 
SCMNodeMetric(stat2).isGreater(stat1)); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index d46513b24bb..b967fa0658c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,8 +155,16 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); - System.out.printf("%-13s: %d %n%n", "Container(s)", + System.out.printf("%-13s: %d %n", "Container(s)", info.getContainerCount()); + System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated", + info.getCommitted() + " B", StringUtils.byteDesc(info.getCommitted())); + System.out.printf("%-24s: %s (%s) %n", "Remaining Allocatable", + (info.getRemaining() - info.getCommitted()) + " B", + StringUtils.byteDesc((info.getRemaining() - info.getCommitted()))); + System.out.printf("%-24s: %s (%s) %n%n", "Free Space To Spare", + info.getFreeSpaceToSpare() + " B", + StringUtils.byteDesc(info.getFreeSpaceToSpare())); } /** @@ -181,6 +189,8 @@ private static class DatanodeUsage { private long capacity = 0; private long used = 0; private long remaining = 0; + private long committed = 0; + private long freeSpaceToSpare = 0; private long containerCount = 0; DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) { @@ -196,9 +206,15 @@ private static class DatanodeUsage { if (proto.hasRemaining()) { remaining = proto.getRemaining(); } + if (proto.hasCommitted()) { + committed = proto.getCommitted(); + } if (proto.hasContainerCount()) { containerCount = proto.getContainerCount(); } + if (proto.hasFreeSpaceToSpare()) { + freeSpaceToSpare = proto.getFreeSpaceToSpare(); + } } public DatanodeDetails getDatanodeDetails() { @@ -220,6 +236,12 @@ public long getOzoneUsed() { public long getRemaining() { return remaining; } + public long getCommitted() { + return committed; + } + public long getFreeSpaceToSpare() { + return freeSpaceToSpare; + } public long getContainerCount() { return containerCount; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java index 0cc8ed9be63..a52a0a7ed8f 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java @@ -19,6 +19,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.codec.CharEncoding; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -97,6 +98,38 @@ public void testCorrectJsonValuesInReport() throws IOException { json.get(0).get("containerCount").longValue()); } + @Test + public void testOutputDataFieldsAligning() throws IOException { + // given + ScmClient scmClient = mock(ScmClient.class); + Mockito.when(scmClient.getDatanodeUsageInfo( + Mockito.anyBoolean(), Mockito.anyInt())) + .thenAnswer(invocation -> getUsageProto()); + + CommandLine c = new 
CommandLine(cmd); + c.parseArgs("-m"); + + // when + cmd.execute(scmClient); + + // then + String output = outContent.toString(CharEncoding.UTF_8); + Assertions.assertTrue(output.contains("UUID :")); + Assertions.assertTrue(output.contains("IP Address :")); + Assertions.assertTrue(output.contains("Hostname :")); + Assertions.assertTrue(output.contains("Capacity :")); + Assertions.assertTrue(output.contains("Total Used :")); + Assertions.assertTrue(output.contains("Total Used % :")); + Assertions.assertTrue(output.contains("Ozone Used :")); + Assertions.assertTrue(output.contains("Ozone Used % :")); + Assertions.assertTrue(output.contains("Remaining :")); + Assertions.assertTrue(output.contains("Remaining % :")); + Assertions.assertTrue(output.contains("Container(s) :")); + Assertions.assertTrue(output.contains("Container Pre-allocated :")); + Assertions.assertTrue(output.contains("Remaining Allocatable :")); + Assertions.assertTrue(output.contains("Free Space To Spare :")); + } + private List getUsageProto() { List result = new ArrayList<>(); result.add(HddsProtos.DatanodeUsageInfoProto.newBuilder() diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java index bc87c402eb2..b074e5ba56a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java @@ -120,7 +120,8 @@ public Response getClusterState() { SCMNodeStat stats = nodeManager.getStats(); DatanodeStorageReport storageReport = new DatanodeStorageReport(stats.getCapacity().get(), - stats.getScmUsed().get(), stats.getRemaining().get()); + stats.getScmUsed().get(), stats.getRemaining().get(), + stats.getCommitted().get()); ClusterStateResponse.Builder builder = ClusterStateResponse.newBuilder(); GlobalStats volumeRecord = globalStatsDao.findById( diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 33df0ca1bd5..968bfbc4634 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -168,6 +168,7 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { long capacity = nodeStat.getCapacity().get(); long used = nodeStat.getScmUsed().get(); long remaining = nodeStat.getRemaining().get(); - return new DatanodeStorageReport(capacity, used, remaining); + long committed = nodeStat.getCommitted().get(); + return new DatanodeStorageReport(capacity, used, remaining, committed); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java index d3fbb598c1b..43a20317a29 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java @@ -24,11 +24,14 @@ public class DatanodeStorageReport { private long capacity; private long used; private long remaining; + private long committed; - public DatanodeStorageReport(long capacity, long used, long remaining) { + public 
DatanodeStorageReport(long capacity, long used, long remaining, + long committed) { this.capacity = capacity; this.used = used; this.remaining = remaining; + this.committed = committed; } public long getCapacity() { @@ -42,4 +45,8 @@ public long getUsed() { public long getRemaining() { return remaining; } + + public long getCommitted() { + return committed; + } } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 60362299fa5..204609f66fe 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -29,7 +29,8 @@ "storageReport": { "capacity": 62725623808, "used": 488288256, - "remaining": 21005319168 + "remaining": 21005319168, + "committed": 10240000 }, "pipelines": [ { @@ -62,7 +63,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 34563456 }, "pipelines": [ { @@ -95,7 +97,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 34562 }, "pipelines": [ { @@ -128,7 +131,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 4576435 }, "pipelines": [ { @@ -161,7 +165,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 3453121 }, "pipelines": [ { @@ -194,7 +199,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 3457623435 }, "pipelines": [ { @@ -233,7 +239,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 345624 }, "pipelines": [ { @@ -272,7 +279,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 123464574 }, "pipelines": [ { @@ -311,7 +319,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 556721345 }, "pipelines": [ { @@ -350,7 +359,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 45671235234 }, "pipelines": [ { @@ -389,7 +399,8 @@ "storageReport": { "capacity": 140737488355328, "used": 0, - "remaining": 110737488355328 + "remaining": 110737488355328, + "committed": 0 }, "pipelines": [], "containers": 0, @@ -409,7 +420,8 @@ "storageReport": { "capacity": 805306368000, "used": 644245094400, - "remaining": 121061273600 + "remaining": 121061273600, + "committed": 4572345234 }, "pipelines": [ { @@ -442,7 +454,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 92757023244288 + "remaining": 92757023244288, + "committed": 34563453 }, "pipelines": [ { @@ -475,7 +488,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 94784247808 + "remaining": 94784247808, + "committed": 7234234 }, "pipelines": [ { @@ -514,7 +528,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 92757023244288 + 
"remaining": 92757023244288, + "committed": 34562346 }, "pipelines": [ { @@ -547,7 +562,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 76757023244288 + "remaining": 76757023244288, + "committed": 834324523 }, "pipelines": [ { @@ -580,7 +596,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 66757023244288 + "remaining": 66757023244288, + "committed": 346467345 }, "pipelines": [ { @@ -619,7 +636,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 96157023244288 + "remaining": 96157023244288, + "committed": 45245456 }, "pipelines": [ { @@ -652,7 +670,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 94757023244288 + "remaining": 94757023244288, + "committed": 45673234 }, "pipelines": [ { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less index b2dddbcaa37..ecba534cc08 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less @@ -19,6 +19,7 @@ @progress-gray: #d0d0d0; @progress-blue: #1890ff; @progress-green: #52c41a; +@progress-dark-grey: #424242; .storage-cell-container { position: relative; @@ -45,3 +46,7 @@ .remaining-bg { color: @progress-gray; } + +.committed-bg { + color: @progress-dark-grey; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx index 10decce103f..9263c6817be 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx @@ -32,6 +32,7 @@ interface IStorageBarProps extends RouteComponentProps { total: number; used: number; remaining: number; + committed: number; showMeta?: boolean; } @@ -39,6 +40,7 @@ const defaultProps = { total: 0, used: 0, remaining: 0, + committed: 0, showMeta: true }; @@ -46,7 +48,7 @@ class StorageBar extends React.Component { static defaultProps = defaultProps; render() { - const {total, used, remaining, showMeta} = this.props; + const {total, used, remaining, committed, showMeta} = this.props; const nonOzoneUsed = total - remaining - used; const totalUsed = total - remaining; const tooltip = ( @@ -54,6 +56,7 @@ class StorageBar extends React.Component {
Ozone Used ({size(used)})
Non Ozone Used ({size(nonOzoneUsed)})
Remaining ({size(remaining)})
+
Container Pre-allocated ({size(committed)})
); const metaElement = showMeta ?
{size(used)} + {size(nonOzoneUsed)} / {size(total)}
: null; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx index 8f92742916f..d69466ac0fe 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx @@ -30,4 +30,5 @@ export interface IStorageReport { capacity: number; used: number; remaining: number; + committed: number; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index 6a6118494fa..19f306ae4f6 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -69,6 +69,7 @@ interface IDatanode { storageUsed: number; storageTotal: number; storageRemaining: number; + storageCommitted: number; pipelines: IPipeline[]; containers: number; openContainers: number; @@ -173,7 +174,7 @@ const COLUMNS = [ render: (text: string, record: IDatanode) => ( + remaining={record.storageRemaining} committed={record.storageCommitted}/> )}, { title: 'Last Heartbeat', @@ -378,6 +379,7 @@ export class Datanodes extends React.Component, IDatanode storageUsed: datanode.storageReport.used, storageTotal: datanode.storageReport.capacity, storageRemaining: datanode.storageReport.remaining, + storageCommitted: datanode.storageReport.committed, pipelines: datanode.pipelines, containers: datanode.containers, openContainers: datanode.openContainers, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index d3bee19ba6e..cbe850b918f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -1248,6 +1248,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index b324bd6b427..ba00f843f44 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -1286,6 +1286,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } } From 24f6ea4a4247b6fe9bc96edd20f2b89f749a05c4 Mon Sep 17 00:00:00 2001 From: Raju Balpande <146973984+raju-balpande@users.noreply.github.com> Date: Thu, 21 Dec 2023 00:13:30 +0530 Subject: [PATCH 20/28] HDDS-9833. 
Migrate simple shell integration tests to JUnit5 (#5840) --- .../hadoop/ozone/StandardOutputTestBase.java | 8 +-- .../apache/hadoop/ozone/om/TestListKeys.java | 18 ++--- .../ozone/shell/TestNSSummaryAdmin.java | 52 +++++--------- .../ozone/shell/TestOzoneDatanodeShell.java | 37 ++++------ .../ozone/shell/TestOzoneTenantShell.java | 68 +++++++++---------- .../hadoop/ozone/shell/TestReconfigShell.java | 38 ++++------- .../hadoop/ozone/shell/TestScmAdminHA.java | 10 +-- 7 files changed, 89 insertions(+), 142 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/StandardOutputTestBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/StandardOutputTestBase.java index 40a54474c10..33e081c522a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/StandardOutputTestBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/StandardOutputTestBase.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.ozone; -import org.junit.After; -import org.junit.Before; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import java.io.ByteArrayOutputStream; import java.io.PrintStream; @@ -42,7 +42,7 @@ public class StandardOutputTestBase { * * @throws UnsupportedEncodingException */ - @Before + @BeforeEach public void setUpStreams() throws UnsupportedEncodingException { System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); @@ -51,7 +51,7 @@ public void setUpStreams() throws UnsupportedEncodingException { /** * Restore original error and output streams after test. */ - @After + @AfterEach public void restoreStreams() { System.setOut(originalOut); System.setErr(originalErr); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java index b2007c7e027..d373eeae71a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -27,16 +27,12 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.Assert; -import org.junit.Rule; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -54,11 +50,13 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.junit.jupiter.params.provider.Arguments.of; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test covers listKeys(keyPrefix, startKey, shallow) combinations * in a legacy/OBS bucket layout type. 
*/ +@Timeout(1200) public class TestListKeys { private static MiniOzoneCluster cluster = null; @@ -71,9 +69,6 @@ public class TestListKeys { private static OzoneBucket legacyOzoneBucket; private static OzoneClient client; - @Rule - public TestRule timeout = new JUnit5AwareTimeout(new Timeout(1200000)); - /** * Create a MiniDFSCluster for testing. *

@@ -297,7 +292,7 @@ private void checkKeyShallowList(String keyPrefix, String startKey, List keyLists = new ArrayList<>(); while (ozoneKeyIterator.hasNext()) { OzoneKey ozoneKey = ozoneKeyIterator.next(); - Assert.assertEquals(expectedReplication, ozoneKey.getReplicationConfig()); + assertEquals(expectedReplication, ozoneKey.getReplicationConfig()); keyLists.add(ozoneKey.getName()); } LinkedList outputKeysList = new LinkedList(keyLists); @@ -308,7 +303,7 @@ private void checkKeyShallowList(String keyPrefix, String startKey, } System.out.println("END:::keyPrefix---> " + keyPrefix + ":::---> " + startKey); - Assert.assertEquals(keys, outputKeysList); + assertEquals(keys, outputKeysList); } private static void createKeys(OzoneBucket ozoneBucket, List keys) @@ -337,7 +332,6 @@ private static void createKey(OzoneBucket ozoneBucket, String key, int length, ozoneInputStream.read(read, 0, length); ozoneInputStream.close(); - Assert.assertEquals(new String(input, StandardCharsets.UTF_8), - new String(read, StandardCharsets.UTF_8)); + assertEquals(new String(input, StandardCharsets.UTF_8), new String(read, StandardCharsets.UTF_8)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java index 1d4100203c0..c07a48b1058 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java @@ -29,23 +29,22 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.ozone.test.JUnit5AwareTimeout; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.UnsupportedEncodingException; import java.util.UUID; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; /** * Test for Namespace CLI. */ +@Timeout(60) public class TestNSSummaryAdmin extends StandardOutputTestBase { private static ObjectStore store; @@ -58,10 +57,7 @@ public class TestNSSummaryAdmin extends StandardOutputTestBase { private static String bucketFSO; private static OzoneClient client; - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(60)); - - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); @@ -81,7 +77,7 @@ public static void init() throws Exception { createVolumeAndBuckets(); } - @AfterClass + @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -120,13 +116,9 @@ public void testNSSummaryCLIRoot() throws UnsupportedEncodingException { String path = "/"; executeAdminCommands(path); // Should throw warning - only buckets can have bucket layout. 
- Assert.assertTrue( - getOutContentString().contains( - "[Warning] Namespace CLI is not designed for OBS bucket layout.")); - Assert.assertTrue(getOutContentString() - .contains("Put more files into it to visualize DU")); - Assert.assertTrue(getOutContentString().contains( - "Put more files into it to visualize file size distribution")); + assertTrue(getOutContentString().contains("[Warning] Namespace CLI is not designed for OBS bucket layout.")); + assertTrue(getOutContentString().contains("Put more files into it to visualize DU")); + assertTrue(getOutContentString().contains("Put more files into it to visualize file size distribution")); } /** @@ -138,13 +130,9 @@ public void testNSSummaryCLIFSO() throws UnsupportedEncodingException { String path = "/" + volumeName + "/" + bucketFSO; executeAdminCommands(path); // Should not throw warning, since bucket is in FSO bucket layout. - Assert.assertFalse( - getOutContentString().contains( - "[Warning] Namespace CLI is not designed for OBS bucket layout.")); - Assert.assertTrue(getOutContentString() - .contains("Put more files into it to visualize DU")); - Assert.assertTrue(getOutContentString().contains( - "Put more files into it to visualize file size distribution")); + assertFalse(getOutContentString().contains("[Warning] Namespace CLI is not designed for OBS bucket layout.")); + assertTrue(getOutContentString().contains("Put more files into it to visualize DU")); + assertTrue(getOutContentString().contains("Put more files into it to visualize file size distribution")); } /** @@ -156,13 +144,9 @@ public void testNSSummaryCLIOBS() throws UnsupportedEncodingException { String path = "/" + volumeName + "/" + bucketOBS; executeAdminCommands(path); // Should throw warning, since bucket is in OBS bucket layout. 
- Assert.assertTrue( - getOutContentString().contains( - "[Warning] Namespace CLI is not designed for OBS bucket layout.")); - Assert.assertTrue(getOutContentString() - .contains("Put more files into it to visualize DU")); - Assert.assertTrue(getOutContentString().contains( - "Put more files into it to visualize file size distribution")); + assertTrue(getOutContentString().contains("[Warning] Namespace CLI is not designed for OBS bucket layout.")); + assertTrue(getOutContentString().contains("Put more files into it to visualize DU")); + assertTrue(getOutContentString().contains("Put more files into it to visualize file size distribution")); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java index 2fac6d575fc..df668bf44c7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java @@ -17,24 +17,17 @@ */ package org.apache.hadoop.ozone.shell; -import static org.junit.Assert.fail; - import java.util.Arrays; import java.util.List; import org.apache.hadoop.ozone.HddsDatanodeService; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Strings; - -import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; import picocli.CommandLine; import picocli.CommandLine.ExecutionException; import picocli.CommandLine.IExceptionHandler2; @@ -42,17 +35,15 @@ import picocli.CommandLine.ParseResult; import picocli.CommandLine.RunLast; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + /** * This test class specified for testing Ozone datanode shell command. */ +@Timeout(300) public class TestOzoneDatanodeShell { - /** - * Set a timeout for each test. 
- */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private static final Logger LOG = LoggerFactory.getLogger(TestOzoneDatanodeShell.class); @@ -63,7 +54,7 @@ public class TestOzoneDatanodeShell { * * @throws Exception */ - @BeforeClass + @BeforeAll public static void init() { datanode = new TestHddsDatanodeService(new String[] {}); } @@ -101,20 +92,16 @@ private void executeDatanodeWithError(HddsDatanodeService hdds, String[] args, } else { try { executeDatanode(hdds, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); + fail("Exception is expected from command execution " + Arrays.asList(args)); } catch (Exception ex) { if (!Strings.isNullOrEmpty(expectedError)) { Throwable exceptionToCheck = ex; if (exceptionToCheck.getCause() != null) { exceptionToCheck = exceptionToCheck.getCause(); } - Assert.assertTrue( - String.format( - "Error of shell code doesn't contain the " + - "exception [%s] in [%s]", - expectedError, exceptionToCheck.getMessage()), - exceptionToCheck.getMessage().contains(expectedError)); + assertTrue(exceptionToCheck.getMessage().contains(expectedError), + String.format("Error of shell code doesn't contain the " + "exception [%s] in [%s]", expectedError, + exceptionToCheck.getMessage())); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 3c719299b5f..e542e9494d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -38,16 +38,12 @@ import org.apache.hadoop.ozone.shell.tenant.TenantShell; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.AfterEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -69,7 +65,10 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMMultiTenantManagerImpl.OZONE_OM_TENANT_DEV_SKIP_RANGER; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Integration test for Ozone tenant shell command. HA enabled. @@ -77,6 +76,7 @@ * TODO: HDDS-6338. Add a Kerberized version of this * TODO: HDDS-6336. Add a mock Ranger server to test Ranger HTTP endpoint calls */ +@Timeout(300) public class TestOzoneTenantShell { private static final Logger LOG = @@ -91,8 +91,6 @@ public class TestOzoneTenantShell { /** * Set the timeout for every test. 
*/ - @Rule - public TestRule testTimeout = new JUnit5AwareTimeout(Timeout.seconds(300)); private static File baseDir; private static File testFile; @@ -122,7 +120,7 @@ public class TestOzoneTenantShell { * * @throws Exception */ - @BeforeClass + @BeforeAll public static void init() throws Exception { // Remove audit log output if it exists if (AUDIT_LOG_FILE.exists()) { @@ -174,7 +172,7 @@ public static void init() throws Exception { /** * shutdown MiniOzoneCluster. */ - @AfterClass + @AfterAll public static void shutdown() { if (cluster != null) { cluster.shutdown(); @@ -189,7 +187,7 @@ public static void shutdown() { } } - @Before + @BeforeEach public void setup() throws UnsupportedEncodingException { System.setOut(new PrintStream(out, false, UTF_8.name())); System.setErr(new PrintStream(err, false, UTF_8.name())); @@ -205,7 +203,7 @@ public void setup() throws UnsupportedEncodingException { GenericTestUtils.setLogLevel(OMRangerBGSyncService.LOG, Level.DEBUG); } - @After + @AfterEach public void reset() { // reset stream after each unit test out.reset(); @@ -258,20 +256,16 @@ private void executeWithError(OzoneShell shell, String[] args, } else { try { execute(shell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); + fail("Exception is expected from command execution " + Arrays.asList(args)); } catch (Exception ex) { if (!Strings.isNullOrEmpty(expectedError)) { Throwable exceptionToCheck = ex; if (exceptionToCheck.getCause() != null) { exceptionToCheck = exceptionToCheck.getCause(); } - Assert.assertTrue( - String.format( - "Error of OzoneShell code doesn't contain the " + - "exception [%s] in [%s]", - expectedError, exceptionToCheck.getMessage()), - exceptionToCheck.getMessage().contains(expectedError)); + assertTrue(exceptionToCheck.getMessage().contains(expectedError), + String.format("Error of OzoneShell code doesn't contain the exception [%s] in [%s]", expectedError, + exceptionToCheck.getMessage())); } } } @@ -362,9 +356,9 @@ private void checkOutput(ByteArrayOutputStream stream, String stringToMatch, private void checkOutput(String str, String stringToMatch, boolean exactMatch) { if (exactMatch) { - Assert.assertEquals(stringToMatch, str); + assertEquals(stringToMatch, str); } else { - Assert.assertTrue(str, str.contains(stringToMatch)); + assertTrue(str.contains(stringToMatch), str); } } @@ -373,7 +367,7 @@ private void deleteVolume(String volumeName) throws IOException { checkOutput(out, "Volume " + volumeName + " is deleted\n", true); checkOutput(err, "", true); // Exit code should be 0 - Assert.assertEquals(0, exitC); + assertEquals(0, exitC); } @Test @@ -436,7 +430,7 @@ public void testAssignAdmin() throws IOException { public void testOzoneTenantBasicOperations() throws IOException { List lines = FileUtils.readLines(AUDIT_LOG_FILE, (String)null); - Assert.assertEquals(0, lines.size()); + assertEquals(0, lines.size()); executeHA(tenantShell, new String[] {"list"}); checkOutput(out, "", true); @@ -453,12 +447,12 @@ public void testOzoneTenantBasicOperations() throws IOException { checkOutput(err, "", true); lines = FileUtils.readLines(AUDIT_LOG_FILE, (String)null); - Assert.assertTrue(lines.size() > 0); + assertTrue(lines.size() > 0); checkOutput(lines.get(lines.size() - 1), "ret=SUCCESS", false); // Check volume creation OmVolumeArgs volArgs = cluster.getOzoneManager().getVolumeInfo("finance"); - Assert.assertEquals("finance", volArgs.getVolume()); + assertEquals("finance", volArgs.getVolume()); // Creating the tenant with the same name 
again should fail executeHA(tenantShell, new String[] {"create", "finance"}); @@ -647,7 +641,7 @@ public void testOzoneTenantBasicOperations() throws IOException { // Attempt to delete tenant with accessIds still assigned to it, should fail int exitCode = executeHA(tenantShell, new String[] {"delete", "dev"}); - Assert.assertTrue("Tenant delete should fail!", exitCode != 0); + assertNotEquals(0, exitCode, "Tenant delete should fail!"); checkOutput(out, "", true); checkOutput(err, "Tenant 'dev' is not empty. All accessIds associated " + "to this tenant must be revoked before the tenant can be deleted. " @@ -665,7 +659,7 @@ public void testOzoneTenantBasicOperations() throws IOException { // Delete dev volume should fail because the volume reference count > 0L exitCode = execute(ozoneSh, new String[] {"volume", "delete", "dev"}); - Assert.assertTrue("Volume delete should fail!", exitCode != 0); + assertNotEquals(0, exitCode, "Volume delete should fail!"); checkOutput(out, "", true); checkOutput(err, "Volume reference count is not zero (1). " + "Ozone features are enabled on this volume. " @@ -743,7 +737,7 @@ public void testListTenantUsers() throws IOException { int exitCode = executeHA(tenantShell, new String[] { "user", "list", "unknown"}); - Assert.assertTrue("Expected non-zero exit code", exitCode != 0); + assertNotEquals(0, exitCode, "Expected non-zero exit code"); checkOutput(out, "", true); checkOutput(err, "Tenant 'unknown' doesn't exist.\n", true); @@ -804,7 +798,7 @@ public void testTenantSetSecret() throws IOException, InterruptedException { int exitCode = executeHA(tenantShell, new String[] { "user", "setsecret", tenantName + "$alice", "--secret=short"}); - Assert.assertTrue("Expected non-zero exit code", exitCode != 0); + assertNotEquals(0, exitCode, "Expected non-zero exit code"); checkOutput(out, "", true); checkOutput(err, "Secret key length should be at least 8 characters\n", true); @@ -843,7 +837,7 @@ public void testTenantSetSecret() throws IOException, InterruptedException { int exitC = executeHA(tenantShell, new String[] { "user", "setsecret", tenantName + "$alice", "--secret=somesecret2"}); - Assert.assertTrue("Should return non-zero exit code!", exitC != 0); + assertNotEquals(0, exitC, "Should return non-zero exit code!"); checkOutput(out, "", true); checkOutput(err, "Requested accessId 'tenant-test-set-secret$alice'" + " doesn't belong to current user 'bob', nor does current user" @@ -1083,7 +1077,7 @@ public void testCreateTenantOnExistingVolume() throws IOException { final String testVolume = "existing-volume-1"; int exitC = execute(ozoneSh, new String[] {"volume", "create", testVolume}); // Volume create should succeed - Assert.assertEquals(0, exitC); + assertEquals(0, exitC); checkOutput(out, "", true); checkOutput(err, "", true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index b578995a4ca..50742791287 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -35,31 +35,23 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.ozone.test.GenericTestUtils.SystemOutCapturer; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; 
-import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.apache.ozone.test.JUnit5AwareTimeout; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * * Integration test for {@code ozone admin reconfig} command. HA enabled. */ +@Timeout(300) public class TestReconfigShell { private static final int DATANODE_COUNT = 3; - - /** - * Set a timeout for each test. - */ - @Rule - public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300)); - private static MiniOzoneCluster cluster; private static List datanodeServices; private static OzoneAdmin ozoneAdmin; @@ -71,7 +63,7 @@ public class TestReconfigShell { /** * Create a Mini Cluster for testing. */ - @BeforeClass + @BeforeAll public static void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String omServiceId = UUID.randomUUID().toString(); @@ -91,7 +83,7 @@ public static void setup() throws Exception { nm = storageContainerManager.getScmNodeManager(); } - @AfterClass + @AfterAll public static void shutdown() { if (cluster != null) { cluster.shutdown(); @@ -146,9 +138,7 @@ private void assertReconfigurablePropertiesOutput( List outs = Arrays.asList(output.split(System.getProperty("line.separator"))); for (String property : except) { - Assert.assertTrue( - String.format("Not found %s in output: %s", property, output), - outs.contains(property)); + assertTrue(outs.contains(property), String.format("Not found %s in output: %s", property, output)); } } @@ -156,7 +146,7 @@ private void assertReconfigurablePropertiesOutput( public void testDatanodeBulkReconfig() throws Exception { // All Dn are normal, So All the Dn will be reconfig List dns = cluster.getHddsDatanodes(); - Assert.assertEquals(DATANODE_COUNT, dns.size()); + assertEquals(DATANODE_COUNT, dns.size()); executeAndAssertBulkReconfigCount(DATANODE_COUNT); // Shutdown a Dn, it will not be reconfig, @@ -186,10 +176,8 @@ private void executeAndAssertBulkReconfigCount(int except) "reconfig", "--in-service-datanodes", "properties"}); String output = capture.getOutput(); - Assert.assertTrue(String.format( - "Excepted successfully %d. output: %s%n", except, output), - capture.getOutput().contains( - String.format("successfully %d", except))); + assertTrue(capture.getOutput().contains(String.format("successfully %d", except)), + String.format("Expected successfully %d. 
output: %s%n", except, output)); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 16c664c8111..3754bfceccd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -24,9 +24,9 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * This class tests ozone admin scm commands. @@ -40,7 +40,7 @@ public class TestScmAdminHA { private static String scmId; private static MiniOzoneCluster cluster; - @BeforeClass + @BeforeAll public static void init() throws Exception { ozoneAdmin = new OzoneAdmin(); conf = new OzoneConfiguration(); @@ -62,7 +62,7 @@ public static void init() throws Exception { cluster.waitForClusterToBeReady(); } - @AfterClass + @AfterAll public static void shutdown() { if (cluster != null) { cluster.shutdown(); From dc0a10403a069892aec8152a65d646eeccd43a1d Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Wed, 20 Dec 2023 20:05:47 +0000 Subject: [PATCH 21/28] HDDS-9322. Remove duplicate containers when loading volumes on a datanode (#5324) --- .../container/ozoneimpl/ContainerReader.java | 85 ++++++++++- .../ozoneimpl/TestContainerReader.java | 133 ++++++++++++++++-- 2 files changed, 202 insertions(+), 16 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 5f300a446d6..edbff14aca8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.DELETED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerDataProto.State.RECOVERING; @@ -225,7 +226,15 @@ public void verifyAndFixupContainerData(ContainerData containerData) cleanupContainer(hddsVolume, kvContainer); return; } - containerSet.addContainer(kvContainer); + try { + containerSet.addContainer(kvContainer); + } catch (StorageContainerException e) { + if (e.getResult() != ContainerProtos.Result.CONTAINER_EXISTS) { + throw e; + } + resolveDuplicate((KeyValueContainer) containerSet.getContainer( + kvContainer.getContainerData().getContainerID()), kvContainer); + } } else { throw new StorageContainerException("Container File is corrupted. 
" + "ContainerType is KeyValueContainer but cast to " + @@ -240,6 +249,80 @@ public void verifyAndFixupContainerData(ContainerData containerData) } } + private void resolveDuplicate(KeyValueContainer existing, + KeyValueContainer toAdd) throws IOException { + if (existing.getContainerData().getReplicaIndex() != 0 || + toAdd.getContainerData().getReplicaIndex() != 0) { + // This is an EC container. As EC Containers don't have a BSCID, we can't + // know which one has the most recent data. Additionally, it is possible + // for both copies to have a different replica index for the same + // container. Therefore we just let whatever one is loaded first win AND + // leave the other one on disk. + LOG.warn("Container {} is present at {} and at {}. Both are EC " + + "containers. Leaving both containers on disk.", + existing.getContainerData().getContainerID(), + existing.getContainerData().getContainerPath(), + toAdd.getContainerData().getContainerPath()); + return; + } + + long existingBCSID = existing.getBlockCommitSequenceId(); + ContainerProtos.ContainerDataProto.State existingState + = existing.getContainerState(); + long toAddBCSID = toAdd.getBlockCommitSequenceId(); + ContainerProtos.ContainerDataProto.State toAddState + = toAdd.getContainerState(); + + if (existingState != toAddState) { + if (existingState == CLOSED) { + // If we have mis-matched states, always pick a closed one + LOG.warn("Container {} is present at {} with state CLOSED and at " + + "{} with state {}. Removing the latter container.", + existing.getContainerData().getContainerID(), + existing.getContainerData().getContainerPath(), + toAdd.getContainerData().getContainerPath(), toAddState); + KeyValueContainerUtil.removeContainer(toAdd.getContainerData(), + hddsVolume.getConf()); + return; + } else if (toAddState == CLOSED) { + LOG.warn("Container {} is present at {} with state CLOSED and at " + + "{} with state {}. Removing the latter container.", + toAdd.getContainerData().getContainerID(), + toAdd.getContainerData().getContainerPath(), + existing.getContainerData().getContainerPath(), existingState); + swapAndRemoveContainer(existing, toAdd); + return; + } + } + + if (existingBCSID >= toAddBCSID) { + // existing is newer or equal, so remove the one we have yet to load. + LOG.warn("Container {} is present at {} with a newer or equal BCSID " + + "than at {}. Removing the latter container.", + existing.getContainerData().getContainerID(), + existing.getContainerData().getContainerPath(), + toAdd.getContainerData().getContainerPath()); + KeyValueContainerUtil.removeContainer(toAdd.getContainerData(), + hddsVolume.getConf()); + } else { + LOG.warn("Container {} is present at {} with a lesser BCSID " + + "than at {}. 
Removing the former container.", + existing.getContainerData().getContainerID(), + existing.getContainerData().getContainerPath(), + toAdd.getContainerData().getContainerPath()); + swapAndRemoveContainer(existing, toAdd); + } + } + + private void swapAndRemoveContainer(KeyValueContainer existing, + KeyValueContainer toAdd) throws IOException { + containerSet.removeContainer( + existing.getContainerData().getContainerID()); + containerSet.addContainer(toAdd); + KeyValueContainerUtil.removeContainer(existing.getContainerData(), + hddsVolume.getConf()); + } + private void cleanupContainer( HddsVolume volume, KeyValueContainer kvContainer) { try { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 3e947e135b9..5248caaf65b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -32,12 +32,14 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -49,8 +51,10 @@ import org.mockito.Mockito; import java.io.File; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -321,6 +325,10 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) MutableVolumeSet volumeSets = new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + for (StorageVolume v : volumeSets.getVolumesList()) { + StorageVolumeUtil.checkVolume(v, clusterId, clusterId, conf, + null, null); + } createDbInstancesForTestIfNeeded(volumeSets, clusterId, clusterId, conf); ContainerCache cache = ContainerCache.getInstance(conf); cache.shutdownCache(); @@ -330,24 +338,42 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) final int containerCount = 100; blockCount = containerCount; - for (int i = 0; i < containerCount; i++) { - KeyValueContainerData keyValueContainerData = - new KeyValueContainerData(i, layout, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - KeyValueContainer keyValueContainer = - new KeyValueContainer(keyValueContainerData, - conf); - keyValueContainer.create(volumeSets, policy, clusterId); + KeyValueContainer 
conflict01 = null; + KeyValueContainer conflict02 = null; + KeyValueContainer conflict11 = null; + KeyValueContainer conflict12 = null; + KeyValueContainer conflict21 = null; + KeyValueContainer conflict22 = null; + KeyValueContainer ec1 = null; + KeyValueContainer ec2 = null; + long baseBCSID = 10L; - List blkNames; - if (i % 2 == 0) { - blkNames = addBlocks(keyValueContainer, true); - markBlocksForDelete(keyValueContainer, true, blkNames, i); + for (int i = 0; i < containerCount; i++) { + if (i == 0) { + // Create a duplicate container with ID 0. Both have the same BCSID + conflict01 = + createContainerWithId(0, volumeSets, policy, baseBCSID, 0); + conflict02 = + createContainerWithId(0, volumeSets, policy, baseBCSID, 0); + } else if (i == 1) { + // Create a duplicate container with ID 1 so that one has a + // larger BCSID + conflict11 = + createContainerWithId(1, volumeSets, policy, baseBCSID, 0); + conflict12 = createContainerWithId( + 1, volumeSets, policy, baseBCSID - 1, 0); + } else if (i == 2) { + conflict21 = + createContainerWithId(i, volumeSets, policy, baseBCSID, 0); + conflict22 = + createContainerWithId(i, volumeSets, policy, baseBCSID, 0); + conflict22.close(); + } else if (i == 3) { + ec1 = createContainerWithId(i, volumeSets, policy, baseBCSID, 1); + ec2 = createContainerWithId(i, volumeSets, policy, baseBCSID, 1); } else { - blkNames = addBlocks(keyValueContainer, false); - markBlocksForDelete(keyValueContainer, false, blkNames, i); + createContainerWithId(i, volumeSets, policy, baseBCSID, 0); } } // Close the RocksDB instance for this container and remove from the cache @@ -374,11 +400,88 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s"); Assertions.assertEquals(containerCount, containerSet.getContainerMap().entrySet().size()); + Assertions.assertEquals(volumeSet.getFailedVolumesList().size(), 0); + + // One of conflict01 or conflict02 should have had its container path + // removed. + List paths = new ArrayList<>(); + paths.add(Paths.get(conflict01.getContainerData().getContainerPath())); + paths.add(Paths.get(conflict02.getContainerData().getContainerPath())); + int exist = 0; + for (Path p : paths) { + if (Files.exists(p)) { + exist++; + } + } + Assertions.assertEquals(1, exist); + Assertions.assertTrue(paths.contains(Paths.get( + containerSet.getContainer(0).getContainerData().getContainerPath()))); + + // For conflict1, the one with the larger BCSID should win, which is + // conflict11. + Assertions.assertFalse(Files.exists(Paths.get( + conflict12.getContainerData().getContainerPath()))); + Assertions.assertEquals(conflict11.getContainerData().getContainerPath(), + containerSet.getContainer(1).getContainerData().getContainerPath()); + Assertions.assertEquals(baseBCSID, containerSet.getContainer(1) + .getContainerData().getBlockCommitSequenceId()); + + // For conflict2, the closed one (conflict22) should win. 
+ Assertions.assertFalse(Files.exists(Paths.get( + conflict21.getContainerData().getContainerPath()))); + Assertions.assertEquals(conflict22.getContainerData().getContainerPath(), + containerSet.getContainer(2).getContainerData().getContainerPath()); + Assertions.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, + containerSet.getContainer(2).getContainerData().getState()); + + // For the EC conflict, both containers should be left on disk + Assertions.assertTrue(Files.exists(Paths.get( + ec1.getContainerData().getContainerPath()))); + Assertions.assertTrue(Files.exists(Paths.get( + ec2.getContainerData().getContainerPath()))); + Assertions.assertNotNull(containerSet.getContainer(3)); + // There should be no open containers cached by the ContainerReader as it // opens and closes them, avoiding the cache. Assertions.assertEquals(0, cache.size()); } + private KeyValueContainer createContainerWithId(int id, VolumeSet volSet, + VolumeChoosingPolicy policy, long bcsid, int replicaIndex) + throws Exception { + KeyValueContainerData keyValueContainerData = + new KeyValueContainerData(id, layout, + (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), + datanodeId.toString()); + keyValueContainerData.setReplicaIndex(replicaIndex); + + KeyValueContainer keyValueContainer = + new KeyValueContainer(keyValueContainerData, + conf); + keyValueContainer.create(volSet, policy, clusterId); + + List blkNames; + if (id % 2 == 0) { + blkNames = addBlocks(keyValueContainer, true); + markBlocksForDelete(keyValueContainer, true, blkNames, id); + } else { + blkNames = addBlocks(keyValueContainer, false); + markBlocksForDelete(keyValueContainer, false, blkNames, id); + } + setBlockCommitSequence(keyValueContainerData, bcsid); + return keyValueContainer; + } + + private void setBlockCommitSequence(KeyValueContainerData cData, long val) + throws IOException { + try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) { + metadataStore.getStore().getMetadataTable() + .put(cData.getBcsIdKey(), val); + metadataStore.getStore().flushDB(); + } + cData.updateBlockCommitSequenceId(val); + } + @ContainerTestVersionInfo.ContainerTest public void testMarkedDeletedContainerCleared( ContainerTestVersionInfo versionInfo) throws Exception { From 077e09b89e4d325243f0d29117a082d9da9a342f Mon Sep 17 00:00:00 2001 From: Christos Bisias Date: Wed, 20 Dec 2023 23:20:08 +0200 Subject: [PATCH 22/28] HDDS-9950. 'ozone fs -ls' on volume shows the volume owner as the bucket owner (#5816) --- .../fs/ozone/TestRootedOzoneFileSystem.java | 33 +++++++++++++++++++ .../BasicRootedOzoneClientAdapterImpl.java | 14 +++----- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index 272a12a492b..c7d19678c91 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -1012,6 +1012,39 @@ private void teardownVolumeBucketWithDir(Path bucketPath1) objectStore.deleteVolume(ofsPath.getVolumeName()); } + /** + * Create a bucket with a different owner than the volume owner + * and test the owner on listStatus. 
+ */ + @Test + public void testListStatusWithDifferentBucketOwner() throws IOException { + String volName = getRandomNonExistVolumeName(); + objectStore.createVolume(volName); + OzoneVolume ozoneVolume = objectStore.getVolume(volName); + + String buckName = "bucket-" + RandomStringUtils.randomNumeric(5); + UserGroupInformation currUgi = UserGroupInformation.getCurrentUser(); + String bucketOwner = currUgi.getUserName() + RandomStringUtils.randomNumeric(5); + BucketArgs bucketArgs = BucketArgs.newBuilder() + .setOwner(bucketOwner) + .build(); + ozoneVolume.createBucket(buckName, bucketArgs); + + Path volPath = new Path(OZONE_URI_DELIMITER + volName); + + OzoneBucket ozoneBucket = ozoneVolume.getBucket(buckName); + + FileStatus[] fileStatusVolume = ofs.listStatus(volPath); + assertEquals(1, fileStatusVolume.length); + // FileStatus owner is different from the volume owner. + // Owner is the same as the bucket owner returned by the ObjectStore. + assertNotEquals(ozoneVolume.getOwner(), fileStatusVolume[0].getOwner()); + assertEquals(ozoneBucket.getOwner(), fileStatusVolume[0].getOwner()); + + ozoneVolume.deleteBucket(buckName); + objectStore.deleteVolume(volName); + } + /** * OFS: Test non-recursive listStatus on root and volume. */ diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 193e080f0e0..8e9a6f4b9fb 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -790,16 +790,12 @@ private List listStatusVolume(String volumeStr, OFSPath ofsStartPath = new OFSPath(startPath, config); // list buckets in the volume OzoneVolume volume = objectStore.getVolume(volumeStr); - UserGroupInformation ugi = - UserGroupInformation.createRemoteUser(volume.getOwner()); - String owner = ugi.getShortUserName(); - String group = getGroupName(ugi); Iterator iter = volume.listBuckets(null, ofsStartPath.getBucketName()); List res = new ArrayList<>(); while (iter.hasNext() && res.size() < numEntries) { OzoneBucket bucket = iter.next(); - res.add(getFileStatusAdapterForBucket(bucket, uri, owner, group)); + res.add(getFileStatusAdapterForBucket(bucket, uri)); if (recursive) { String pathStrNext = volumeStr + OZONE_URI_DELIMITER + bucket.getName(); res.addAll(listStatus(pathStrNext, recursive, startPath, @@ -1112,12 +1108,9 @@ private static FileStatusAdapter getFileStatusAdapterForVolume( * Generate a FileStatusAdapter for a bucket. * @param ozoneBucket OzoneBucket object. * @param uri Full URI to OFS root. - * @param owner Owner of the parent volume of the bucket. - * @param group Group of the parent volume of the bucket. * @return FileStatusAdapter for a bucket. 
*/ - private static FileStatusAdapter getFileStatusAdapterForBucket( - OzoneBucket ozoneBucket, URI uri, String owner, String group) { + private static FileStatusAdapter getFileStatusAdapterForBucket(OzoneBucket ozoneBucket, URI uri) { String pathStr = uri.toString() + OZONE_URI_DELIMITER + ozoneBucket.getVolumeName() + OZONE_URI_DELIMITER + ozoneBucket.getName(); @@ -1127,6 +1120,9 @@ private static FileStatusAdapter getFileStatusAdapterForBucket( ozoneBucket.getName(), pathStr); } Path path = new Path(pathStr); + UserGroupInformation ugi = UserGroupInformation.createRemoteUser(ozoneBucket.getOwner()); + String owner = ugi.getShortUserName(); + String group = getGroupName(ugi); return new FileStatusAdapter(0L, 0L, path, true, (short)0, 0L, ozoneBucket.getCreationTime().getEpochSecond() * 1000, 0L, FsPermission.getDirDefault().toShort(), From a2f3927c45028f4cb13bf828dd78c8f2ab18585d Mon Sep 17 00:00:00 2001 From: hmohamedansari <8217721+hmohamedansari@users.noreply.github.com> Date: Thu, 21 Dec 2023 14:09:49 +0530 Subject: [PATCH 23/28] HDDS-9368. Fix syntax error in run.sh for bash older than 4.2 (#5379) --- hadoop-ozone/dist/src/main/compose/ozone/run.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/ozone/run.sh b/hadoop-ozone/dist/src/main/compose/ozone/run.sh index 5bdb2996cb3..671d2cda6c9 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/run.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/run.sh @@ -21,7 +21,7 @@ ORIG_DATANODES="${OZONE_DATANODES:-}" ORIG_REPLICATION_FACTOR="${OZONE_REPLICATION_FACTOR:-}" # only support replication factor of 1 or 3 -if [[ -v OZONE_REPLICATION_FACTOR ]] && [[ ${OZONE_REPLICATION_FACTOR} -ne 1 ]] && [[ ${OZONE_REPLICATION_FACTOR} -ne 3 ]]; then +if [[ -n ${OZONE_REPLICATION_FACTOR} ]] && [[ ${OZONE_REPLICATION_FACTOR} -ne 1 ]] && [[ ${OZONE_REPLICATION_FACTOR} -ne 3 ]]; then # assume invalid replication factor was intended as "number of datanodes" if [[ -z ${ORIG_DATANODES} ]]; then OZONE_DATANODES=${OZONE_REPLICATION_FACTOR} @@ -30,22 +30,22 @@ if [[ -v OZONE_REPLICATION_FACTOR ]] && [[ ${OZONE_REPLICATION_FACTOR} -ne 1 ]] fi # at least 1 datanode -if [[ -v OZONE_DATANODES ]] && [[ ${OZONE_DATANODES} -lt 1 ]]; then +if [[ -n ${OZONE_DATANODES} ]] && [[ ${OZONE_DATANODES} -lt 1 ]]; then unset OZONE_DATANODES fi -if [[ -v OZONE_DATANODES ]] && [[ -v OZONE_REPLICATION_FACTOR ]]; then +if [[ -n ${OZONE_DATANODES} ]] && [[ -n ${OZONE_REPLICATION_FACTOR} ]]; then # ensure enough datanodes for replication factor if [[ ${OZONE_DATANODES} -lt ${OZONE_REPLICATION_FACTOR} ]]; then OZONE_DATANODES=${OZONE_REPLICATION_FACTOR} fi -elif [[ -v OZONE_DATANODES ]]; then +elif [[ -n ${OZONE_DATANODES} ]]; then if [[ ${OZONE_DATANODES} -ge 3 ]]; then OZONE_REPLICATION_FACTOR=3 else OZONE_REPLICATION_FACTOR=1 fi -elif [[ -v OZONE_REPLICATION_FACTOR ]]; then +elif [[ -n ${OZONE_REPLICATION_FACTOR} ]]; then OZONE_DATANODES=${OZONE_REPLICATION_FACTOR} else OZONE_DATANODES=1 From 7ce75794f476fa20cf78e7fba06a1642829d683e Mon Sep 17 00:00:00 2001 From: Duong Nguyen Date: Thu, 21 Dec 2023 01:57:19 -0800 Subject: [PATCH 24/28] HDDS-9582. 
OM transport factory configuration mismatch (#5834) --- .../om/protocolPB/OmTransportFactory.java | 33 +++--- .../om/protocolPB/TestOmTransportFactory.java | 101 ++++++++++++++++++ ...oop.ozone.om.protocolPB.OmTransportFactory | 15 --- ...oop.ozone.om.protocolPB.OmTransportFactory | 15 --- 4 files changed, 117 insertions(+), 47 deletions(-) create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestOmTransportFactory.java delete mode 100644 hadoop-ozone/s3gateway/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory delete mode 100644 hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java index 2ba8536e18b..a4fac2be50d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS_DEFAULT; @@ -31,6 +33,7 @@ * Factory pattern to create object for RPC communication with OM. */ public interface OmTransportFactory { + Logger LOG = LoggerFactory.getLogger(OmTransportFactory.class); OmTransport createOmTransport(ConfigurationSource source, UserGroupInformation ugi, String omServiceId) throws IOException; @@ -45,28 +48,24 @@ static OmTransport create(ConfigurationSource conf, static OmTransportFactory createFactory(ConfigurationSource conf) throws IOException { try { - // if configured transport class is different than the default - // OmTransportFactory (Hadoop3OmTransportFactory), then - // check service loader for transport class and instantiate it - if (conf - .get(OZONE_OM_TRANSPORT_CLASS, - OZONE_OM_TRANSPORT_CLASS_DEFAULT) != - OZONE_OM_TRANSPORT_CLASS_DEFAULT) { - ServiceLoader transportFactoryServiceLoader = - ServiceLoader.load(OmTransportFactory.class); - Iterator iterator = - transportFactoryServiceLoader.iterator(); - if (iterator.hasNext()) { - return iterator.next(); - } + // if a transport implementation is found via ServiceLoader, use it. + ServiceLoader transportFactoryServiceLoader = ServiceLoader.load(OmTransportFactory.class); + Iterator iterator = transportFactoryServiceLoader.iterator(); + if (iterator.hasNext()) { + OmTransportFactory next = iterator.next(); + LOG.info("Found OM transport implementation {} from service loader.", next.getClass().getName()); + return next; } + + // Otherwise, load the transport implementation specified by configuration. 
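+ // The configured class can be any OmTransportFactory implementation; when + // nothing is configured, OZONE_OM_TRANSPORT_CLASS_DEFAULT + // (Hadoop3OmTransportFactory) is used.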
+ String transportClassName = conf.get(OZONE_OM_TRANSPORT_CLASS, OZONE_OM_TRANSPORT_CLASS_DEFAULT); + LOG.info("Loading OM transport implementation {} as specified by configuration.", transportClassName); return OmTransportFactory.class.getClassLoader() - .loadClass(OZONE_OM_TRANSPORT_CLASS_DEFAULT) + .loadClass(transportClassName) .asSubclass(OmTransportFactory.class) .newInstance(); } catch (Exception ex) { - throw new IOException( - "Can't create the default OmTransport implementation", ex); + throw new IOException("Can't create OmTransport implementation", ex); } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestOmTransportFactory.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestOmTransportFactory.java new file mode 100644 index 00000000000..fdc1239585b --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestOmTransportFactory.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.protocolPB; + + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; +
+import java.io.IOException; +import java.util.Collections; +import java.util.ServiceLoader; +
+import static java.util.Collections.singletonList; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS_DEFAULT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.when; + +/** + * Test OmTransportFactory. 
+ */ +public class TestOmTransportFactory { + private OzoneConfiguration conf; + + @BeforeEach + void setUp() { + conf = new OzoneConfiguration(); + } + + @Test + public void testCreateFactoryFromServiceLoader() throws IOException { + OmTransportFactory dummyImpl = mock(OmTransportFactory.class); + ServiceLoader serviceLoader = mock(ServiceLoader.class); + when(serviceLoader.iterator()).thenReturn(singletonList(dummyImpl).iterator()); + + try (MockedStatic mocked = mockStatic(ServiceLoader.class)) { + mocked.when(() -> ServiceLoader.load(OmTransportFactory.class)).thenReturn(serviceLoader); + + OmTransportFactory factory = OmTransportFactory.createFactory(conf); + assertEquals(dummyImpl, factory); + } + } + + @Test + public void testCreateFactoryFromConfig() throws IOException { + ServiceLoader emptyLoader = mock(ServiceLoader.class); + when(emptyLoader.iterator()).thenReturn(Collections.emptyIterator()); + + try (MockedStatic mocked = mockStatic(ServiceLoader.class)) { + mocked.when(() -> ServiceLoader.load(OmTransportFactory.class)) + .thenReturn(emptyLoader); + + // Without anything in config, the default transport is returned. + OmTransportFactory factory = OmTransportFactory.createFactory(conf); + assertEquals(OZONE_OM_TRANSPORT_CLASS_DEFAULT, factory.getClass().getName()); + + // With concrete class name indicated in config. + conf.set(OZONE_OM_TRANSPORT_CLASS, MyDummyTransport.class.getName()); + OmTransportFactory factory2 = OmTransportFactory.createFactory(conf); + assertEquals(MyDummyTransport.class, factory2.getClass()); + + // With non-existing class name in the config, exception is expected. + conf.set(OZONE_OM_TRANSPORT_CLASS, "com.MyMadeUpClass"); + assertThrows(IOException.class, () -> { + OmTransportFactory.createFactory(conf); + }); + } + } + + static class MyDummyTransport implements OmTransportFactory { + + @Override + public OmTransport createOmTransport(ConfigurationSource source, + UserGroupInformation ugi, String omServiceId) throws IOException { + return null; + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory b/hadoop-ozone/s3gateway/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory deleted file mode 100644 index 254933bc827..00000000000 --- a/hadoop-ozone/s3gateway/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory diff --git a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory deleted file mode 100644 index 21669f5982a..00000000000 --- a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -org.apache.hadoop.ozone.om.protocolPB.Hadoop3OmTransportFactory From 8aac3e89f3a6517a9f599cdbeb7fdcaa22262495 Mon Sep 17 00:00:00 2001 From: Zhaohui Wang <32935220+wzhallright@users.noreply.github.com> Date: Thu, 21 Dec 2023 19:41:48 +0800 Subject: [PATCH 25/28] HDDS-9955. Simplify assertions in integration tests (#5846) --- .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 6 +++--- .../fs/ozone/TestRootedOzoneFileSystemWithFSO.java | 2 +- .../hdds/scm/pipeline/TestLeaderChoosePolicy.java | 4 ++-- .../hadoop/hdds/scm/pipeline/TestPipelineClose.java | 5 +++-- .../ozone/client/rpc/TestOzoneRpcClientAbstract.java | 12 ++++++------ .../client/rpc/TestOzoneRpcClientForAclAuditLog.java | 3 ++- .../hadoop/ozone/om/TestOmBlockVersioning.java | 3 +-- .../ozone/om/TestOzoneManagerListVolumesSecure.java | 2 +- .../org/apache/hadoop/ozone/om/TestScmSafeMode.java | 2 +- .../hadoop/ozone/om/TestSnapshotDeletingService.java | 2 +- .../ozone/scm/TestContainerReportWithKeys.java | 5 ++--- .../hadoop/ozone/scm/TestFailoverWithSCMHA.java | 4 ++-- .../org/apache/hadoop/ozone/scm/TestSCMMXBean.java | 6 +++--- .../ozone/scm/TestStorageContainerManager.java | 2 +- .../hadoop/ozone/scm/TestXceiverClientManager.java | 6 +++--- .../ozone/scm/pipeline/TestSCMPipelineMetrics.java | 2 +- 16 files changed, 33 insertions(+), 33 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index c7d19678c91..73d1301f0f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -1919,9 +1919,9 @@ public void testFileDelete() throws Exception { ContractTestUtils.touch(fs, childFolderFile); } - assertTrue(fs.listStatus(grandparent).length == 1); - assertTrue(fs.listStatus(parent).length == 9); - assertTrue(fs.listStatus(childFolder).length == 8); + assertEquals(1, fs.listStatus(grandparent).length); + assertEquals(9, fs.listStatus(parent).length); + assertEquals(8, fs.listStatus(childFolder).length); Boolean 
successResult = fs.delete(grandparent, true); assertTrue(successResult); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java index 49da3e72188..73060701b75 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java @@ -201,7 +201,7 @@ public void testDeleteVolumeAndBucket() throws IOException { assertTrue(getFs().delete(bucketPath2, true)); assertTrue(getFs().delete(volumePath1, false)); long deletes = getOMMetrics().getNumKeyDeletes(); - assertTrue(deletes == prevDeletes + 1); + assertEquals(prevDeletes + 1, deletes); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index a695038d444..725b17ee9d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -94,9 +94,9 @@ private void checkLeaderBalance(int dnNum, int leaderNumOfEachDn) leaderCount.put(leader, leaderCount.get(leader) + 1); } - assertTrue(leaderCount.size() == dnNum); + assertEquals(dnNum, leaderCount.size()); for (Map.Entry entry: leaderCount.entrySet()) { - assertTrue(leaderCount.get(entry.getKey()) == leaderNumOfEachDn); + assertEquals(leaderNumOfEachDn, leaderCount.get(entry.getKey())); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index 99dd1d1768d..6d523b21df0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -63,6 +63,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Tests for Pipeline Closing. 
@@ -229,7 +230,7 @@ public void testPipelineCloseWithLogFailure() try { pipelineManager.getPipeline(openPipeline.getId()); } catch (PipelineNotFoundException e) { - assertTrue(false, "pipeline should exist"); + fail("pipeline should exist"); } DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0); @@ -275,6 +276,6 @@ private boolean verifyCloseForPipeline(Pipeline pipeline, } assertTrue(found, "SCM did not receive a Close action for the Pipeline"); - return found; + return true; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 1e5dc26d841..6d19c1ad381 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -634,7 +634,7 @@ public void testCreateBucketWithVersioning() volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); assertEquals(bucketName, bucket.getName()); - assertEquals(true, bucket.getVersioning()); + assertTrue(bucket.getVersioning()); } @Test @@ -708,7 +708,7 @@ public void testCreateBucketWithAllArgument() volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); assertEquals(bucketName, bucket.getName()); - assertEquals(true, bucket.getVersioning()); + assertTrue(bucket.getVersioning()); assertEquals(StorageType.SSD, bucket.getStorageType()); assertTrue(bucket.getAcls().contains(userAcl)); assertEquals(repConfig, bucket.getReplicationConfig()); @@ -812,7 +812,7 @@ public void testSetBucketVersioning() bucket.setVersioning(true); OzoneBucket newBucket = volume.getBucket(bucketName); assertEquals(bucketName, newBucket.getName()); - assertEquals(true, newBucket.getVersioning()); + assertTrue(newBucket.getVersioning()); } @Test @@ -830,7 +830,7 @@ public void testAclsAfterCallingSetBucketProperty() throws Exception { OzoneBucket newBucket = volume.getBucket(bucketName); assertEquals(bucketName, newBucket.getName()); - assertEquals(true, newBucket.getVersioning()); + assertTrue(newBucket.getVersioning()); List aclsAfterSet = newBucket.getAcls(); assertEquals(currentAcls, aclsAfterSet); @@ -3796,7 +3796,7 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, assertTrue(latestVersionLocations.isMultipartKey()); latestVersionLocations.getBlocksLatestVersionOnly() .forEach(omKeyLocationInfo -> - assertTrue(omKeyLocationInfo.getPartNumber() != -1)); + assertNotEquals(-1, omKeyLocationInfo.getPartNumber())); } private String initiateMultipartUpload(OzoneBucket bucket, String keyName, @@ -3996,7 +3996,7 @@ public void testDeletedKeyForGDPR() throws Exception { assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG)); assertEquals("AES", key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM)); - assertTrue(key.getMetadata().get(OzoneConsts.GDPR_SECRET) != null); + assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET)); try (OzoneInputStream is = bucket.readKey(keyName)) { assertInputStreamContent(text, is); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index 
3f7c590bf6e..d2ace27dc34 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -64,6 +64,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -287,7 +288,7 @@ private void verifyLog(String... expected) throws Exception { try { // When log entry is expected, the log file will contain one line and // that must be equal to the expected string - assertTrue(lines.size() != 0); + assertNotEquals(0, lines.size()); for (String exp: expected) { assertTrue(lines.get(0).contains(exp)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 428bfa73059..7b8d6653d9b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -39,7 +39,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -144,7 +143,7 @@ public void testAllocateCommit() throws Exception { List locationInfoList = openKey.getKeyInfo().getLatestVersionLocations() .getBlocksLatestVersionOnly(); - assertTrue(locationInfoList.size() == 1); + assertEquals(1, locationInfoList.size()); locationInfoList.add(locationInfo); keyArgs.setLocationInfoList(locationInfoList); writeClient.commitKey(keyArgs, openKey.getId()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 977bb0d4e9b..1c751bc99a5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -294,7 +294,7 @@ private static void doAs(UserGroupInformation ugi, Callable callable) { // Some thread (eg: HeartbeatEndpointTask) will use the login ugi, // so we could not use loginUserFromKeytabAndReturnUGI to switch user. 
- assertEquals(true, ugi.doAs((PrivilegedAction) () -> { + assertTrue(ugi.doAs((PrivilegedAction) () -> { try { return callable.call(); } catch (Throwable ex) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 1d3aaf351fc..d682c7f8f31 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -272,7 +272,7 @@ public void testSCMSafeMode() throws Exception { scm = cluster.getStorageContainerManager(); assertTrue(scm.isInSafeMode()); assertFalse(logCapturer.getOutput().contains("SCM exiting safe mode.")); - assertTrue(scm.getCurrentContainerThreshold() == 0); + assertEquals(0, scm.getCurrentContainerThreshold()); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { dn.start(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index 98c23b8076f..a14255d3c15 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -407,7 +407,7 @@ public void testSnapshotWithFSO() throws Exception { RepeatedOmKeyInfo activeDBDeleted = next.getValue(); OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager(); - assertEquals(activeDBDeleted.getOmKeyInfoList().size(), 1); + assertEquals(1, activeDBDeleted.getOmKeyInfoList().size()); OmKeyInfo activeDbDeletedKeyInfo = activeDBDeleted.getOmKeyInfoList().get(0); long volumeId = metadataManager diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java index bdbe4107fbe..a30e3db2218 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java @@ -140,9 +140,8 @@ public void testContainerReportKeyWrite() throws Exception { Set replicas = scm.getContainerManager().getContainerReplicas( ContainerID.valueOf(keyInfo.getContainerID())); - Assert.assertTrue(replicas.size() == 1); - replicas.stream().forEach(rp -> - Assert.assertTrue(rp.getDatanodeDetails().getParent() != null)); + Assert.assertEquals(1, replicas.size()); + replicas.stream().forEach(rp -> Assert.assertNotNull(rp.getDatanodeDetails().getParent())); LOG.info("SCM Container Info keyCount: {} usedBytes: {}", cinfo.getNumberOfKeys(), cinfo.getUsedBytes()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java index e1d1ba31d74..43a2e2603e9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java @@ -115,7 +115,7 @@ public void testFailover() throws Exception { scmClientConfig.setRetryCount(1); 
scmClientConfig.setRetryInterval(100); scmClientConfig.setMaxRetryTimeout(1500); - assertEquals(scmClientConfig.getRetryCount(), 15); + assertEquals(15, scmClientConfig.getRetryCount()); conf.setFromObject(scmClientConfig); StorageContainerManager scm = getLeader(cluster); assertNotNull(scm); @@ -161,7 +161,7 @@ public void testMoveFailover() throws Exception { scmClientConfig.setRetryCount(1); scmClientConfig.setRetryInterval(100); scmClientConfig.setMaxRetryTimeout(1500); - assertEquals(scmClientConfig.getRetryCount(), 15); + assertEquals(15, scmClientConfig.getRetryCount()); conf.setFromObject(scmClientConfig); StorageContainerManager scm = getLeader(cluster); assertNotNull(scm); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index a3314f59e7d..94019ed1d62 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -159,12 +159,12 @@ public void testSCMContainerStateCount() throws Exception { containerStateCount.forEach((k, v) -> { if (k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) { - assertEquals((int)v, 5); + assertEquals(5, (int)v); } else if (k.equals(HddsProtos.LifeCycleState.CLOSED.toString())) { - assertEquals((int)v, 5); + assertEquals(5, (int)v); } else { // Remaining all container state count should be zero. - assertEquals((int)v, 0); + assertEquals(0, (int)v); } }); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java index 6916e8cfb84..9286cfe521d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java @@ -1073,7 +1073,7 @@ public void testIncrementalContainerReportQueue() throws Exception { eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); - Assert.assertTrue(containerReportExecutors.droppedEvents() == 0); + Assert.assertEquals(0, containerReportExecutors.droppedEvents()); Thread.currentThread().sleep(3000); Assert.assertEquals(containerReportExecutors.scheduledEvents(), containerReportExecutors.queuedEvents()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java index 9130a87b1a5..ca85c5cf9e3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java @@ -117,7 +117,7 @@ public void testCaching(boolean securityEnabled) throws IOException { clientManager.releaseClient(client1, true); clientManager.releaseClient(client2, true); clientManager.releaseClient(client3, true); - Assertions.assertTrue(clientManager.getClientCache().size() == 0); + Assertions.assertEquals(0, clientManager.getClientCache().size()); } } @@ -159,7 +159,7 @@ public void 
testFreeByReference() throws IOException { XceiverClientSpi nonExistent1 = cache.getIfPresent( container1.getContainerInfo().getPipelineID().getId().toString() + container1.getContainerInfo().getReplicationType()); - Assertions.assertEquals(null, nonExistent1); + Assertions.assertNull(nonExistent1); // However container call should succeed because of refcount on the client ContainerProtocolCalls.createContainer(client1, container1.getContainerInfo().getContainerID(), null); @@ -218,7 +218,7 @@ public void testFreeByEviction() throws IOException { XceiverClientSpi nonExistent = cache.getIfPresent( container1.getContainerInfo().getPipelineID().getId().toString() + container1.getContainerInfo().getReplicationType()); - Assertions.assertEquals(null, nonExistent); + Assertions.assertNull(nonExistent); // Any container operation should now fail Throwable t = Assertions.assertThrows(IOException.class, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java index 53f4ce5e16a..568d9679d17 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java @@ -109,7 +109,7 @@ public void testNumBlocksAllocated() throws IOException, TimeoutException { Pipeline pipeline = block.getPipeline(); long numBlocksAllocated = getLongCounter( SCMPipelineMetrics.getBlockAllocationMetricName(pipeline), metrics); - Assertions.assertEquals(numBlocksAllocated, 1); + Assertions.assertEquals(1, numBlocksAllocated); // destroy the pipeline Assertions.assertDoesNotThrow(() -> From 5e32b70ce7ad187307ad040d25079cab318d849d Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 21 Dec 2023 15:29:25 +0100 Subject: [PATCH 26/28] HDDS-9974. Add static import for assertions and mocks in hdds-client (#5843) --- .../hdds/scm/TestContainerClientMetrics.java | 4 +- .../scm/storage/TestBlockInputStream.java | 6 +- .../TestBlockOutputStreamCorrectness.java | 8 +- .../io/TestBlockInputStreamFactoryImpl.java | 16 +- .../client/io/TestECBlockInputStream.java | 109 ++++++------ .../io/TestECBlockInputStreamProxy.java | 68 ++++---- .../TestECBlockReconstructedInputStream.java | 32 ++-- ...ECBlockReconstructedStripeInputStream.java | 160 ++++++++---------- 8 files changed, 195 insertions(+), 208 deletions(-) diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestContainerClientMetrics.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestContainerClientMetrics.java index a5a313eab02..5831314e54a 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestContainerClientMetrics.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestContainerClientMetrics.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import java.util.Collections; import java.util.UUID; @@ -31,6 +30,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; /** * Test ContainerClientMetrics. 
@@ -108,7 +108,7 @@ public void testAcquireAndRelease() { private Pipeline createPipeline(PipelineID piplineId, UUID leaderId) { return Pipeline.newBuilder() .setId(piplineId) - .setReplicationConfig(Mockito.mock(ReplicationConfig.class)) + .setReplicationConfig(mock(ReplicationConfig.class)) .setState(Pipeline.PipelineState.OPEN) .setNodes(Collections.emptyList()) .setLeaderId(leaderId) diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 2e95de1ecad..3dc5a82b335 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -35,13 +35,11 @@ import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.ratis.thirdparty.io.grpc.Status; import org.apache.ratis.thirdparty.io.grpc.StatusException; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.Mockito; import org.mockito.stubbing.OngoingStubbing; import java.io.EOFException; @@ -92,7 +90,7 @@ public class TestBlockInputStream { @BeforeEach @SuppressWarnings("unchecked") public void setup() throws Exception { - refreshFunction = Mockito.mock(Function.class); + refreshFunction = mock(Function.class); BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE); createChunkList(5); @@ -376,7 +374,7 @@ public void testReadNotRetriedOnOtherException(IOException ex) subject.initialize(); // WHEN - Assertions.assertThrows(ex.getClass(), + assertThrows(ex.getClass(), () -> subject.read(new byte[len], 0, len)); // THEN diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java index 29c0798df77..3d2ff00d64f 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java @@ -45,9 +45,11 @@ import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * UNIT test for BlockOutputStream. 
@@ -93,8 +95,8 @@ private BlockOutputStream createBlockOutputStream(BufferPool bufferPool) final Pipeline pipeline = MockPipeline.createRatisPipeline(); - final XceiverClientManager xcm = Mockito.mock(XceiverClientManager.class); - Mockito.when(xcm.acquireClient(Mockito.any())) + final XceiverClientManager xcm = mock(XceiverClientManager.class); + when(xcm.acquireClient(any())) .thenReturn(new MockXceiverClientSpi(pipeline)); OzoneClientConfig config = new OzoneClientConfig(); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java index abd69e5118c..cf3f4f13ef9 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java @@ -30,12 +30,14 @@ import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Assertions; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; + /** * Tests for BlockInputStreamFactoryImpl. */ @@ -53,9 +55,9 @@ public void testNonECGivesBlockInputStream() { BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), blockInfo.getToken(), true, null, null); - Assertions.assertTrue(stream instanceof BlockInputStream); - Assertions.assertEquals(stream.getBlockID(), blockInfo.getBlockID()); - Assertions.assertEquals(stream.getLength(), blockInfo.getLength()); + assertInstanceOf(BlockInputStream.class, stream); + assertEquals(stream.getBlockID(), blockInfo.getBlockID()); + assertEquals(stream.getLength(), blockInfo.getLength()); } @Test @@ -70,9 +72,9 @@ public void testECGivesECBlockInputStream() { BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), blockInfo.getToken(), true, null, null); - Assertions.assertTrue(stream instanceof ECBlockInputStreamProxy); - Assertions.assertEquals(stream.getBlockID(), blockInfo.getBlockID()); - Assertions.assertEquals(stream.getLength(), blockInfo.getLength()); + assertInstanceOf(ECBlockInputStreamProxy.class, stream); + assertEquals(stream.getBlockID(), blockInfo.getBlockID()); + assertEquals(stream.getLength(), blockInfo.getLength()); } private BlockLocationInfo createKeyLocationInfo(ReplicationConfig repConf, diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java index caa071b1b9c..bd34e7546c1 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.scm.storage.ByteReaderStrategy; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.security.token.Token; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -45,7 +44,11 @@ import java.util.Map; import java.util.function.Function; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests for ECBlockInputStream. @@ -71,14 +74,14 @@ public void testSufficientLocations() { .createKeyInfo(repConfig, 5, 5 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, keyInfo, true, null, null, new TestBlockInputStreamFactory())) { - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); } // EC-3-2, very large block, so all 3 data locations are needed keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5000 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, keyInfo, true, null, null, new TestBlockInputStreamFactory())) { - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); } Map dnMap = new HashMap<>(); @@ -88,7 +91,7 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB - 1, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, keyInfo, true, null, null, new TestBlockInputStreamFactory())) { - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); } // EC-3-2, 5MB blocks, only 2 locations passed so we do not have sufficient @@ -98,7 +101,7 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, keyInfo, true, null, null, new TestBlockInputStreamFactory())) { - Assertions.assertFalse(ecb.hasSufficientLocations()); + assertFalse(ecb.hasSufficientLocations()); } // EC-3-2, 5MB blocks, only 1 data and 2 parity locations present. For now @@ -110,7 +113,7 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, keyInfo, true, null, null, new TestBlockInputStreamFactory())) { - Assertions.assertFalse(ecb.hasSufficientLocations()); + assertFalse(ecb.hasSufficientLocations()); } } @@ -127,7 +130,7 @@ public void testCorrectBlockSizePassedToBlockStreamLessThanCell() // We expect only 1 block stream and it should have a length passed of // ONEMB - 100. 
List streams = streamFactory.getBlockStreams(); - Assertions.assertEquals(ONEMB - 100, streams.get(0).getLength()); + assertEquals(ONEMB - 100, streams.get(0).getLength()); } } @@ -142,8 +145,8 @@ public void testCorrectBlockSizePassedToBlockStreamTwoCells() keyInfo, true, null, null, streamFactory)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); - Assertions.assertEquals(ONEMB, streams.get(0).getLength()); - Assertions.assertEquals(100, streams.get(1).getLength()); + assertEquals(ONEMB, streams.get(0).getLength()); + assertEquals(100, streams.get(1).getLength()); } } @@ -158,9 +161,9 @@ public void testCorrectBlockSizePassedToBlockStreamThreeCells() keyInfo, true, null, null, streamFactory)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); - Assertions.assertEquals(ONEMB, streams.get(0).getLength()); - Assertions.assertEquals(ONEMB, streams.get(1).getLength()); - Assertions.assertEquals(100, streams.get(2).getLength()); + assertEquals(ONEMB, streams.get(0).getLength()); + assertEquals(ONEMB, streams.get(1).getLength()); + assertEquals(100, streams.get(2).getLength()); } } @@ -175,9 +178,9 @@ public void testCorrectBlockSizePassedToBlockStreamThreeFullAndPartialStripe() keyInfo, true, null, null, streamFactory)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); - Assertions.assertEquals(4 * ONEMB, streams.get(0).getLength()); - Assertions.assertEquals(3 * ONEMB + 100, streams.get(1).getLength()); - Assertions.assertEquals(3 * ONEMB, streams.get(2).getLength()); + assertEquals(4 * ONEMB, streams.get(0).getLength()); + assertEquals(3 * ONEMB + 100, streams.get(1).getLength()); + assertEquals(3 * ONEMB, streams.get(2).getLength()); } } @@ -192,7 +195,7 @@ public void testCorrectBlockSizePassedToBlockStreamSingleFullCell() keyInfo, true, null, null, streamFactory)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); - Assertions.assertEquals(ONEMB, streams.get(0).getLength()); + assertEquals(ONEMB, streams.get(0).getLength()); } } @@ -207,9 +210,9 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() keyInfo, true, null, null, streamFactory)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); - Assertions.assertEquals(3 * ONEMB, streams.get(0).getLength()); - Assertions.assertEquals(3 * ONEMB, streams.get(1).getLength()); - Assertions.assertEquals(3 * ONEMB, streams.get(2).getLength()); + assertEquals(3 * ONEMB, streams.get(0).getLength()); + assertEquals(3 * ONEMB, streams.get(1).getLength()); + assertEquals(3 * ONEMB, streams.get(2).getLength()); } } @@ -223,12 +226,12 @@ public void testSimpleRead() throws IOException { ByteBuffer buf = ByteBuffer.allocate(100); int read = ecb.read(buf); - Assertions.assertEquals(100, read); + assertEquals(100, read); validateBufferContents(buf, 0, 100, (byte) 0); - Assertions.assertEquals(100, ecb.getPos()); + assertEquals(100, ecb.getPos()); } for (TestBlockInputStream s : streamFactory.getBlockStreams()) { - Assertions.assertTrue(s.isClosed()); + assertTrue(s.isClosed()); } } @@ -246,12 +249,12 @@ public void testSimpleReadUnderOneChunk() throws IOException { ByteBuffer buf = ByteBuffer.allocate(100); int read = ecb.read(buf); - Assertions.assertEquals(100, read); + assertEquals(100, read); validateBufferContents(buf, 0, 100, (byte) 0); - Assertions.assertEquals(100, ecb.getPos()); + assertEquals(100, ecb.getPos()); } for (TestBlockInputStream s : streamFactory.getBlockStreams()) { - Assertions.assertTrue(s.isClosed()); + assertTrue(s.isClosed()); } } @@ -265,9 
+268,9 @@ public void testReadPastEOF() throws IOException { ByteBuffer buf = ByteBuffer.allocate(100); int read = ecb.read(buf); - Assertions.assertEquals(50, read); + assertEquals(50, read); read = ecb.read(buf); - Assertions.assertEquals(read, -1); + assertEquals(read, -1); } } @@ -285,7 +288,7 @@ public void testReadCrossingMultipleECChunkBounds() throws IOException { // so 350 ByteBuffer buf = ByteBuffer.allocate(350); int read = ecb.read(buf); - Assertions.assertEquals(350, read); + assertEquals(350, read); validateBufferContents(buf, 0, 100, (byte) 0); validateBufferContents(buf, 100, 200, (byte) 1); @@ -294,7 +297,7 @@ public void testReadCrossingMultipleECChunkBounds() throws IOException { buf.clear(); read = ecb.read(buf); - Assertions.assertEquals(350, read); + assertEquals(350, read); validateBufferContents(buf, 0, 50, (byte) 0); validateBufferContents(buf, 50, 150, (byte) 1); @@ -303,7 +306,7 @@ public void testReadCrossingMultipleECChunkBounds() throws IOException { } for (TestBlockInputStream s : streamFactory.getBlockStreams()) { - Assertions.assertTrue(s.isClosed()); + assertTrue(s.isClosed()); } } @@ -341,8 +344,8 @@ public void testSeekToLengthZeroLengthBlock() throws IOException { try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, keyInfo, true, null, null, streamFactory)) { ecb.seek(0); - Assertions.assertEquals(0, ecb.getPos()); - Assertions.assertEquals(0, ecb.getRemaining()); + assertEquals(0, ecb.getPos()); + assertEquals(0, ecb.getRemaining()); } } @@ -355,23 +358,23 @@ public void testSeekToValidPosition() throws IOException { try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, keyInfo, true, null, null, streamFactory)) { ecb.seek(ONEMB - 1); - Assertions.assertEquals(ONEMB - 1, ecb.getPos()); - Assertions.assertEquals(ONEMB * 4 + 1, ecb.getRemaining()); + assertEquals(ONEMB - 1, ecb.getPos()); + assertEquals(ONEMB * 4 + 1, ecb.getRemaining()); // First read should read the last byte of the first chunk - Assertions.assertEquals(0, ecb.read()); - Assertions.assertEquals(ONEMB, + assertEquals(0, ecb.read()); + assertEquals(ONEMB, streamFactory.getBlockStreams().get(0).position); // Second read should be the first byte of the second chunk. 
- Assertions.assertEquals(1, ecb.read()); + assertEquals(1, ecb.read()); // Seek to the end of the file minus one byte ecb.seek(ONEMB * 5 - 1); - Assertions.assertEquals(1, ecb.read()); - Assertions.assertEquals(ONEMB * 2, + assertEquals(1, ecb.read()); + assertEquals(ONEMB * 2, streamFactory.getBlockStreams().get(1).position); // Second read should be EOF as there is no data left - Assertions.assertEquals(-1, ecb.read()); - Assertions.assertEquals(0, ecb.getRemaining()); + assertEquals(-1, ecb.read()); + assertEquals(0, ecb.getRemaining()); } } @@ -387,14 +390,14 @@ public void testErrorReadingBlockReportsBadLocation() throws IOException { // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); int read = ecb.read(buf); - Assertions.assertEquals(3 * ONEMB, read); + assertEquals(3 * ONEMB, read); // Now make replication index 2 error on the next read streamFactory.getBlockStreams().get(1).setThrowException(true); buf.clear(); BadDataLocationException e = assertThrows(BadDataLocationException.class, () -> ecb.read(buf)); - Assertions.assertEquals(1, e.getFailedLocations().size()); - Assertions.assertEquals(2, + assertEquals(1, e.getFailedLocations().size()); + assertEquals(2, keyInfo.getPipeline().getReplicaIndex(e.getFailedLocations().get(0))); } } @@ -418,13 +421,13 @@ public void testNoErrorIfSpareLocationToRead() throws IOException { // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); int read = ecb.read(buf); - Assertions.assertEquals(3 * ONEMB, read); + assertEquals(3 * ONEMB, read); // Now make replication index 1 error on the next read but as there is a // spare it should read from it with no errors streamFactory.getBlockStreams().get(0).setThrowException(true); buf.clear(); read = ecb.read(buf); - Assertions.assertEquals(3 * ONEMB, read); + assertEquals(3 * ONEMB, read); // Now make the spare one error on the next read, and we should get an // error with two failed locations. As each stream is created, a new @@ -439,11 +442,11 @@ public void testNoErrorIfSpareLocationToRead() throws IOException { assertThrows(BadDataLocationException.class, () -> ecb.read(buf)); List failed = e.getFailedLocations(); // Expect 2 different DNs reported as failure - Assertions.assertEquals(2, failed.size()); - Assertions.assertNotEquals(failed.get(0), failed.get(1)); + assertEquals(2, failed.size()); + assertNotEquals(failed.get(0), failed.get(1)); // Both failures should map to index = 1. for (DatanodeDetails dn : failed) { - Assertions.assertEquals(1, datanodes.get(dn)); + assertEquals(1, datanodes.get(dn)); } } } @@ -484,17 +487,17 @@ public void testEcPipelineRefreshFunction() { .getPipeline(); // Check the pipeline is built with the correct Datanode // with right replicaIndex. 
- Assertions.assertEquals(HddsProtos.ReplicationType.STAND_ALONE, + assertEquals(HddsProtos.ReplicationType.STAND_ALONE, pipeline.getReplicationConfig().getReplicationType()); - Assertions.assertEquals(1, pipeline.getNodes().size()); - Assertions.assertEquals(3, dnMap.get(pipeline.getNodes().get(0))); + assertEquals(1, pipeline.getNodes().size()); + assertEquals(3, dnMap.get(pipeline.getNodes().get(0))); } } private void validateBufferContents(ByteBuffer buf, int from, int to, byte val) { for (int i = from; i < to; i++) { - Assertions.assertEquals(val, buf.get(i)); + assertEquals(val, buf.get(i)); } } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java index 929fa13042e..e8ada43b08a 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java @@ -24,11 +24,8 @@ import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,6 +36,8 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.function.Function; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; /** @@ -46,9 +45,6 @@ */ public class TestECBlockInputStreamProxy { - private static final Logger LOG = - LoggerFactory.getLogger(TestECBlockInputStreamProxy.class); - private static final int ONEMB = 1024 * 1024; private ECReplicationConfig repConfig; private TestECBlockInputStreamFactory streamFactory; @@ -67,23 +63,23 @@ public void setup() { @Test public void testExpectedDataLocations() { - Assertions.assertEquals(1, + assertEquals(1, ECBlockInputStreamProxy.expectedDataLocations(repConfig, 1)); - Assertions.assertEquals(2, + assertEquals(2, ECBlockInputStreamProxy.expectedDataLocations(repConfig, ONEMB + 1)); - Assertions.assertEquals(3, + assertEquals(3, ECBlockInputStreamProxy.expectedDataLocations(repConfig, 3 * ONEMB)); - Assertions.assertEquals(3, + assertEquals(3, ECBlockInputStreamProxy.expectedDataLocations(repConfig, 10 * ONEMB)); repConfig = new ECReplicationConfig(6, 3); - Assertions.assertEquals(1, + assertEquals(1, ECBlockInputStreamProxy.expectedDataLocations(repConfig, 1)); - Assertions.assertEquals(2, + assertEquals(2, ECBlockInputStreamProxy.expectedDataLocations(repConfig, ONEMB + 1)); - Assertions.assertEquals(3, + assertEquals(3, ECBlockInputStreamProxy.expectedDataLocations(repConfig, 3 * ONEMB)); - Assertions.assertEquals(6, + assertEquals(6, ECBlockInputStreamProxy.expectedDataLocations(repConfig, 10 * ONEMB)); } @@ -93,21 +89,21 @@ public void testAvailableDataLocations() { ECStreamTestUtil.createIndexMap(1, 2, 3, 4, 5); BlockLocationInfo blockInfo = ECStreamTestUtil.createKeyInfo(repConfig, 1024, dnMap); - Assertions.assertEquals(1, ECBlockInputStreamProxy.availableDataLocations( + assertEquals(1, ECBlockInputStreamProxy.availableDataLocations( blockInfo.getPipeline(), 1)); - 
Assertions.assertEquals(2, ECBlockInputStreamProxy.availableDataLocations( + assertEquals(2, ECBlockInputStreamProxy.availableDataLocations( blockInfo.getPipeline(), 2)); - Assertions.assertEquals(3, ECBlockInputStreamProxy.availableDataLocations( + assertEquals(3, ECBlockInputStreamProxy.availableDataLocations( blockInfo.getPipeline(), 3)); dnMap = ECStreamTestUtil.createIndexMap(1, 4, 5); blockInfo = ECStreamTestUtil.createKeyInfo(repConfig, 1024, dnMap); - Assertions.assertEquals(1, ECBlockInputStreamProxy.availableDataLocations( + assertEquals(1, ECBlockInputStreamProxy.availableDataLocations( blockInfo.getPipeline(), 3)); dnMap = ECStreamTestUtil.createIndexMap(2, 3, 4, 5); blockInfo = ECStreamTestUtil.createKeyInfo(repConfig, 1024, dnMap); - Assertions.assertEquals(0, ECBlockInputStreamProxy.availableDataLocations( + assertEquals(0, ECBlockInputStreamProxy.availableDataLocations( blockInfo.getPipeline(), 1)); } @@ -122,7 +118,7 @@ public void testBlockIDCanBeRetrieved() throws IOException { ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { - Assertions.assertEquals(blockInfo.getBlockID(), bis.getBlockID()); + assertEquals(blockInfo.getBlockID(), bis.getBlockID()); } } @@ -137,7 +133,7 @@ public void testBlockLengthCanBeRetrieved() throws IOException { ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { - Assertions.assertEquals(1234, bis.getLength()); + assertEquals(1234, bis.getLength()); } } @@ -154,11 +150,11 @@ public void testBlockRemainingCanBeRetrieved() throws IOException { dataGenerator = new SplittableRandom(randomSeed); ByteBuffer readBuffer = ByteBuffer.allocate(100); try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { - Assertions.assertEquals(12345, bis.getRemaining()); - Assertions.assertEquals(0, bis.getPos()); + assertEquals(12345, bis.getRemaining()); + assertEquals(0, bis.getPos()); bis.read(readBuffer); - Assertions.assertEquals(12345 - 100, bis.getRemaining()); - Assertions.assertEquals(100, bis.getPos()); + assertEquals(12345 - 100, bis.getRemaining()); + assertEquals(100, bis.getPos()); } } @@ -176,8 +172,8 @@ public void testCorrectStreamCreatedDependingOnDataLocations() try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { // All locations present, so we expect only the "missing=false" stream // to be present. - Assertions.assertTrue(streamFactory.getStreams().containsKey(false)); - Assertions.assertFalse(streamFactory.getStreams().containsKey(true)); + assertThat(streamFactory.getStreams()).containsKey(false); + assertThat(streamFactory.getStreams()).doesNotContainKey(true); } streamFactory = new TestECBlockInputStreamFactory(); @@ -188,8 +184,8 @@ public void testCorrectStreamCreatedDependingOnDataLocations() try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { // Not all locations present, so we expect only the "missing=true" stream // to be present.
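The switch to AssertJ's containsKey/doesNotContainKey above buys self-describing failure output: on a mismatch the error shows the actual map contents instead of a bare expected true/false. A small sketch of the idiom over a hypothetical map:

import static org.assertj.core.api.Assertions.assertThat;

import java.util.HashMap;
import java.util.Map;
import org.junit.jupiter.api.Test;

class MapAssertionSketchTest {
  @Test
  void mapKeysAreAssertedDirectly() {
    Map<Boolean, String> streams = new HashMap<>();
    streams.put(false, "non-reconstruction reader");
    // On failure these print the map, unlike assertTrue(streams.containsKey(...)).
    assertThat(streams).containsKey(false);
    assertThat(streams).doesNotContainKey(true);
  }
}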
- Assertions.assertFalse(streamFactory.getStreams().containsKey(false)); - Assertions.assertTrue(streamFactory.getStreams().containsKey(true)); + assertThat(streamFactory.getStreams()).doesNotContainKey(false); + assertThat(streamFactory.getStreams()).containsKey(true); } } @@ -217,7 +213,7 @@ public void testCanReadNonReconstructionToEOF() } readBuffer.clear(); int read = bis.read(readBuffer); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } @@ -245,7 +241,7 @@ public void testCanReadReconstructionToEOF() } readBuffer.clear(); int read = bis.read(readBuffer); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } @@ -267,7 +263,7 @@ public void testCanHandleErrorAndFailOverToReconstruction() try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { // Perform one read to get the stream created int read = bis.read(readBuffer); - Assertions.assertEquals(100, read); + assertEquals(100, read); ECStreamTestUtil.assertBufferMatches(readBuffer, dataGenerator); // Setup an error to be thrown part through a read, so the dataBuffer // will have been advanced by 50 bytes before the error. This tests it @@ -285,10 +281,10 @@ public void testCanHandleErrorAndFailOverToReconstruction() } readBuffer.clear(); read = bis.read(readBuffer); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); // Ensure the bad location was passed into the factory to create the // reconstruction reader - Assertions.assertEquals(badDN, streamFactory.getFailedLocations().get(0)); + assertEquals(badDN, streamFactory.getFailedLocations().get(0)); } } @@ -307,14 +303,14 @@ public void testCanSeekToNewPosition() throws IOException { try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { // Perform one read to get the stream created int read = bis.read(readBuffer); - Assertions.assertEquals(100, read); + assertEquals(100, read); bis.seek(1024); readBuffer.clear(); resetAndAdvanceDataGenerator(1024); bis.read(readBuffer); ECStreamTestUtil.assertBufferMatches(readBuffer, dataGenerator); - Assertions.assertEquals(1124, bis.getPos()); + assertEquals(1124, bis.getPos()); // Set the non-reconstruction reader to throw an exception on seek streamFactory.getStreams().get(false).setShouldErrorOnSeek(true); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java index e39acaf9d23..0425f6943a4 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -39,6 +38,7 @@ import java.util.concurrent.ThreadLocalRandom; import static org.apache.hadoop.ozone.client.io.ECStreamTestUtil.generateParity; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; /** @@ -87,7 +87,7 @@ public void testBlockLengthReturned() throws IOException { try (ECBlockReconstructedInputStream stream = new ECBlockReconstructedInputStream(repConfig, bufferPool, stripeStream)) { - Assertions.assertEquals(12345L, stream.getLength()); + assertEquals(12345L, stream.getLength()); } } } @@ -101,7 +101,7 @@ public void testBlockIDReturned() throws IOException { try (ECBlockReconstructedInputStream stream = new ECBlockReconstructedInputStream(repConfig, bufferPool, stripeStream)) { - Assertions.assertEquals(new BlockID(1, 1), stream.getBlockID()); + assertEquals(new BlockID(1, 1), stream.getBlockID()); } } } @@ -133,19 +133,19 @@ public void testReadDataByteBufferMultipleStripes() throws IOException { int expectedRead = Math.min(blockLength - totalRead, readBufferSize); long read = stream.read(b); totalRead += read; - Assertions.assertEquals(expectedRead, read); + assertEquals(expectedRead, read); ECStreamTestUtil.assertBufferMatches(b, dataGenerator); b.clear(); } // Next read should be EOF b.clear(); long read = stream.read(b); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); // Seek back to zero and read again to ensure the buffers are // re-allocated after being freed at the end of block. 
stream.seek(0); read = stream.read(b); - Assertions.assertEquals(readBufferSize, read); + assertEquals(readBufferSize, read); dataGenerator = new SplittableRandom(randomSeed); ECStreamTestUtil.assertBufferMatches(b, dataGenerator); } @@ -181,7 +181,7 @@ public void testReadDataWithUnbuffer() throws IOException { int expectedRead = Math.min(blockLength - totalRead, readBufferSize); long read = stream.read(b); totalRead += read; - Assertions.assertEquals(expectedRead, read); + assertEquals(expectedRead, read); ECStreamTestUtil.assertBufferMatches(b, dataGenerator); b.clear(); stream.unbuffer(); @@ -189,7 +189,7 @@ public void testReadDataWithUnbuffer() throws IOException { // Next read should be EOF b.clear(); long read = stream.read(b); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } } @@ -216,12 +216,12 @@ public void testReadDataByteBufferUnderBufferSize() throws IOException { ByteBuffer b = ByteBuffer.allocate(readBufferSize); dataGenerator = new SplittableRandom(randomSeed); long read = stream.read(b); - Assertions.assertEquals(blockLength, read); + assertEquals(blockLength, read); ECStreamTestUtil.assertBufferMatches(b, dataGenerator); b.clear(); // Next read should be EOF read = stream.read(b); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } } @@ -253,10 +253,10 @@ public void testReadByteAtATime() throws IOException { if (val == -1) { break; } - Assertions.assertEquals(dataGenerator.nextInt(255), val); + assertEquals(dataGenerator.nextInt(255), val); totalRead += 1; } - Assertions.assertEquals(blockLength, totalRead); + assertEquals(blockLength, totalRead); } } } @@ -287,13 +287,13 @@ public void testReadByteBuffer() throws IOException { int expectedRead = Math.min(blockLength - totalRead, 1024); long read = stream.read(buf, 0, buf.length); totalRead += read; - Assertions.assertEquals(expectedRead, read); + assertEquals(expectedRead, read); ECStreamTestUtil.assertBufferMatches( ByteBuffer.wrap(buf, 0, (int)read), dataGenerator); } // Next read should be EOF long read = stream.read(buf, 0, buf.length); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } } @@ -325,7 +325,7 @@ public void testSeek() throws IOException { resetAndAdvanceDataGenerator(seekPosition); long expectedRead = Math.min(stream.getRemaining(), readBufferSize); long read = stream.read(b); - Assertions.assertEquals(expectedRead, read); + assertEquals(expectedRead, read); ECStreamTestUtil.assertBufferMatches(b, dataGenerator); seekPosition = random.nextInt(blockLength); stream.seek(seekPosition); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index 62d8c2d7602..c708fc28ddb 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -28,7 +28,6 @@ import org.apache.hadoop.ozone.client.io.ECStreamTestUtil.TestBlockInputStream; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -53,7 +52,10 @@ import static java.util.Collections.singleton; import static java.util.stream.Collectors.toSet; import static 
org.apache.hadoop.ozone.client.io.ECStreamTestUtil.generateParity; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test for the ECBlockReconstructedStripeInputStream. @@ -126,7 +128,7 @@ public void testSufficientLocations() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil .createKeyInfo(repConfig, 1, ONEMB); try (ECBlockInputStream ecb = createInputStream(keyInfo)) { - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); } // Two Chunks, but missing data block 2. Map dnMap @@ -134,16 +136,16 @@ public void testSufficientLocations() throws IOException { keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB * 2, dnMap); try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); Collection idxs = dnMap.values(); for (int i : idxs) { ecb.setRecoveryIndexes(singleton(i - 1)); - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); } // trying to recover all ecb.setRecoveryIndexes(toBufferIndexes(idxs)); - Assertions.assertFalse(ecb.hasSufficientLocations()); + assertFalse(ecb.hasSufficientLocations()); } // Three Chunks, but missing data block 2 and 3. @@ -151,19 +153,19 @@ public void testSufficientLocations() throws IOException { keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB * 3, dnMap); try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); // Set a failed location List failed = new ArrayList<>(); failed.add(keyInfo.getPipeline().getFirstNode()); ecb.addFailedDatanodes(failed); - Assertions.assertFalse(ecb.hasSufficientLocations()); + assertFalse(ecb.hasSufficientLocations()); } // Three Chunks, but missing data block 2 and 3 and parity 1. 
dnMap = ECStreamTestUtil.createIndexMap(1, 4); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB * 3, dnMap); try (ECBlockInputStream ecb = createInputStream(keyInfo)) { - Assertions.assertFalse(ecb.hasSufficientLocations()); + assertFalse(ecb.hasSufficientLocations()); } // Three Chunks, all available but fail 3 @@ -171,7 +173,7 @@ public void testSufficientLocations() throws IOException { keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB * 3, dnMap); try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { - Assertions.assertTrue(ecb.hasSufficientLocations()); + assertTrue(ecb.hasSufficientLocations()); // Set a failed location List failed = new ArrayList<>(); for (Map.Entry entry : dnMap.entrySet()) { @@ -179,7 +181,7 @@ public void testSufficientLocations() throws IOException { boolean expected = failed.size() < 3; ecb.addFailedDatanodes(singleton(entry.getKey())); - Assertions.assertEquals(expected, ecb.hasSufficientLocations()); + assertEquals(expected, ecb.hasSufficientLocations()); } } @@ -190,7 +192,7 @@ public void testSufficientLocations() throws IOException { recover.add(i); ecb.setRecoveryIndexes(recover); boolean expected = recover.size() < 3; - Assertions.assertEquals(expected, ecb.hasSufficientLocations()); + assertEquals(expected, ecb.hasSufficientLocations()); } } @@ -200,7 +202,7 @@ public void testSufficientLocations() throws IOException { dnMap = ECStreamTestUtil.createIndexMap(2, 3); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB, dnMap); try (ECBlockInputStream ecb = createInputStream(keyInfo)) { - Assertions.assertFalse(ecb.hasSufficientLocations()); + assertFalse(ecb.hasSufficientLocations()); } } @@ -241,7 +243,7 @@ void testReadFullStripesWithPartial(Set recoveryIndexes) // Read 3 full stripes for (int i = 0; i < 3; i++) { int read = ecb.read(bufs); - Assertions.assertEquals(stripeSize(), read); + assertEquals(stripeSize(), read); int output = 0; for (int j = 0; j < repConfig.getRequiredNodes(); j++) { @@ -252,15 +254,15 @@ void testReadFullStripesWithPartial(Set recoveryIndexes) // Check the underlying streams have read 1 chunk per read: for (TestBlockInputStream bis : streamFactory.getBlockStreams()) { - Assertions.assertEquals(chunkSize * (i + 1), + assertEquals(chunkSize * (i + 1), bis.getPos()); } - Assertions.assertEquals(stripeSize() * (i + 1), ecb.getPos()); + assertEquals(stripeSize() * (i + 1), ecb.getPos()); clearBuffers(bufs); } // The next read is a partial stripe int read = ecb.read(bufs); - Assertions.assertEquals(partialStripeSize, read); + assertEquals(partialStripeSize, read); int output = 0; for (int j = 0; j < 2; j++) { if (outputIndexes.contains(j)) { @@ -268,14 +270,14 @@ void testReadFullStripesWithPartial(Set recoveryIndexes) } } if (outputIndexes.contains(2)) { - Assertions.assertEquals(0, bufs[output].remaining()); - Assertions.assertEquals(0, bufs[output].position()); + assertEquals(0, bufs[output].remaining()); + assertEquals(0, bufs[output].position()); } // A further read should give EOF clearBuffers(bufs); read = ecb.read(bufs); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } @@ -301,21 +303,21 @@ public void testReadPartialStripe() throws IOException { try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { int read = ecb.read(bufs); - Assertions.assertEquals(blockLength, read); + assertEquals(blockLength, read); ECStreamTestUtil.assertBufferMatches(bufs[0], dataGen); - Assertions.assertEquals(0, bufs[1].remaining()); - 
Assertions.assertEquals(0, bufs[1].position()); - Assertions.assertEquals(0, bufs[2].remaining()); - Assertions.assertEquals(0, bufs[2].position()); + assertEquals(0, bufs[1].remaining()); + assertEquals(0, bufs[1].position()); + assertEquals(0, bufs[2].remaining()); + assertEquals(0, bufs[2].position()); // Check the underlying streams have been advanced by 1 blockLength: for (TestBlockInputStream bis : streamFactory.getBlockStreams()) { - Assertions.assertEquals(blockLength, bis.getPos()); + assertEquals(blockLength, bis.getPos()); } - Assertions.assertEquals(ecb.getPos(), blockLength); + assertEquals(blockLength, ecb.getPos()); clearBuffers(bufs); // A further read should give EOF read = ecb.read(bufs); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } @@ -344,18 +346,18 @@ void recoverPartialStripe() throws IOException { ecb.setRecoveryIndexes(Arrays.asList(3, 4)); int read = ecb.read(bufs); - Assertions.assertEquals(blockLength, read); + assertEquals(blockLength, read); ECStreamTestUtil.assertBufferMatches(bufs[0], dataGen); ECStreamTestUtil.assertBufferMatches(bufs[1], dataGen); // Check the underlying streams have been advanced by 1 blockLength: for (TestBlockInputStream bis : streamFactory.getBlockStreams()) { - Assertions.assertEquals(blockLength, bis.getPos()); + assertEquals(blockLength, bis.getPos()); } - Assertions.assertEquals(ecb.getPos(), blockLength); + assertEquals(blockLength, ecb.getPos()); clearBuffers(bufs); // A further read should give EOF read = ecb.read(bufs); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } @@ -383,20 +385,20 @@ public void testReadPartialStripeTwoChunks() throws IOException { try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { int read = ecb.read(bufs); - Assertions.assertEquals(blockLength, read); + assertEquals(blockLength, read); ECStreamTestUtil.assertBufferMatches(bufs[0], dataGen); ECStreamTestUtil.assertBufferMatches(bufs[1], dataGen); - Assertions.assertEquals(0, bufs[2].remaining()); - Assertions.assertEquals(0, bufs[2].position()); + assertEquals(0, bufs[2].remaining()); + assertEquals(0, bufs[2].position()); // Check the underlying streams have been advanced by 1 chunk: for (TestBlockInputStream bis : streamFactory.getBlockStreams()) { - Assertions.assertEquals(chunkSize, bis.getPos()); + assertEquals(chunkSize, bis.getPos()); } - Assertions.assertEquals(ecb.getPos(), blockLength); + assertEquals(blockLength, ecb.getPos()); clearBuffers(bufs); // A further read should give EOF read = ecb.read(bufs); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } @@ -440,19 +442,19 @@ public void testReadPartialStripeThreeChunks() throws IOException { try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { int read = ecb.read(bufs); - Assertions.assertEquals(blockLength, read); + assertEquals(blockLength, read); ECStreamTestUtil.assertBufferMatches(bufs[0], dataGen); ECStreamTestUtil.assertBufferMatches(bufs[1], dataGen); ECStreamTestUtil.assertBufferMatches(bufs[2], dataGen); // Check the underlying streams have been advanced by 1 chunk: for (TestBlockInputStream bis : streamFactory.getBlockStreams()) { - Assertions.assertEquals(0, bis.getRemaining()); + assertEquals(0, bis.getRemaining()); } - Assertions.assertEquals(ecb.getPos(), blockLength); + assertEquals(blockLength, ecb.getPos()); clearBuffers(bufs); // A further read should give EOF read = ecb.read(bufs); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } } @@ -479,14 +481,8
@@ public void testErrorThrownIfBlockNotLongEnough() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); streamFactory.setCurrentPipeline(keyInfo.getPipeline()); - try (ECBlockReconstructedStripeInputStream ecb = - createInputStream(keyInfo)) { - try { - ecb.read(bufs); - Assertions.fail("Read should have thrown an exception"); - } catch (InsufficientLocationsException e) { - // expected - } + try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { + assertThrows(InsufficientLocationsException.class, () -> ecb.read(bufs)); } } @@ -538,19 +534,19 @@ void testNoErrorIfSpareLocationToRead() throws IOException { try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { int read = ecb.read(bufs); - Assertions.assertEquals(blockLength, read); + assertEquals(blockLength, read); ECStreamTestUtil.assertBufferMatches(bufs[0], dataGen); ECStreamTestUtil.assertBufferMatches(bufs[1], dataGen); ECStreamTestUtil.assertBufferMatches(bufs[2], dataGen); // Check the underlying streams have been advanced by 1 chunk: for (TestBlockInputStream bis : streamFactory.getBlockStreams()) { - Assertions.assertEquals(0, bis.getRemaining()); + assertEquals(0, bis.getRemaining()); } - Assertions.assertEquals(ecb.getPos(), blockLength); + assertEquals(blockLength, ecb.getPos()); clearBuffers(bufs); // A further read should give EOF read = ecb.read(bufs); - Assertions.assertEquals(-1, read); + assertEquals(-1, read); } } } @@ -596,8 +592,8 @@ public void testSeek() throws IOException { for (int j = 0; j < bufs.length; j++) { validateContents(dataBufs[j], bufs[j], 0, chunkSize); } - Assertions.assertEquals(stripeSize(), read); - Assertions.assertEquals(dataLength - stripeSize(), ecb.getRemaining()); + assertEquals(stripeSize(), read); + assertEquals(dataLength - stripeSize(), ecb.getRemaining()); // Seek to 0 and read again clearBuffers(bufs); @@ -606,8 +602,8 @@ public void testSeek() throws IOException { for (int j = 0; j < bufs.length; j++) { validateContents(dataBufs[j], bufs[j], 0, chunkSize); } - Assertions.assertEquals(stripeSize(), read); - Assertions.assertEquals(dataLength - stripeSize(), ecb.getRemaining()); + assertEquals(stripeSize(), read); + assertEquals(dataLength - stripeSize(), ecb.getRemaining()); // Seek to the last stripe @@ -616,9 +612,9 @@ public void testSeek() throws IOException { read = ecb.read(bufs); validateContents(dataBufs[0], bufs[0], 3 * chunkSize, chunkSize); validateContents(dataBufs[1], bufs[1], 3 * chunkSize, chunkSize - 1); - Assertions.assertEquals(0, bufs[2].remaining()); - Assertions.assertEquals(partialStripeSize, read); - Assertions.assertEquals(0, ecb.getRemaining()); + assertEquals(0, bufs[2].remaining()); + assertEquals(partialStripeSize, read); + assertEquals(0, ecb.getRemaining()); // seek to the start of stripe 3 clearBuffers(bufs); @@ -627,8 +623,8 @@ public void testSeek() throws IOException { for (int j = 0; j < bufs.length; j++) { validateContents(dataBufs[j], bufs[j], 2 * chunkSize, chunkSize); } - Assertions.assertEquals(stripeSize(), read); - Assertions.assertEquals(partialStripeSize, ecb.getRemaining()); + assertEquals(stripeSize(), read); + assertEquals(partialStripeSize, ecb.getRemaining()); } } } @@ -641,15 +637,10 @@ public void testSeekToPartialOffsetFails() { stripeSize() * 3, dnMap); streamFactory.setCurrentPipeline(keyInfo.getPipeline()); - try (ECBlockReconstructedStripeInputStream ecb = - createInputStream(keyInfo)) { - try { -
ecb.seek(10); - Assertions.fail("Seek should have thrown an exception"); - } catch (IOException e) { - Assertions.assertEquals("Requested position 10 does not align " + - "with a stripe offset", e.getMessage()); - } + try (ECBlockReconstructedStripeInputStream ecb = createInputStream(keyInfo)) { + IOException e = assertThrows(IOException.class, () -> ecb.seek(10)); + assertEquals("Requested position 10 does not align " + + "with a stripe offset", e.getMessage()); } } @@ -692,8 +683,8 @@ public void testErrorReadingBlockContinuesReading() throws IOException { for (int j = 0; j < bufs.length; j++) { validateContents(dataBufs[j], bufs[j], i * chunkSize, chunkSize); } - Assertions.assertEquals(stripeSize() * (i + 1), ecb.getPos()); - Assertions.assertEquals(stripeSize(), read); + assertEquals(stripeSize() * (i + 1), ecb.getPos()); + assertEquals(stripeSize(), read); clearBuffers(bufs); if (i == 0) { Integer failStream = @@ -705,11 +696,11 @@ public void testErrorReadingBlockContinuesReading() throws IOException { } // The next read is a partial stripe int read = ecb.read(bufs); - Assertions.assertEquals(partialStripeSize, read); + assertEquals(partialStripeSize, read); validateContents(dataBufs[0], bufs[0], 3 * chunkSize, chunkSize); validateContents(dataBufs[1], bufs[1], 3 * chunkSize, chunkSize - 1); - Assertions.assertEquals(0, bufs[2].remaining()); - Assertions.assertEquals(0, bufs[2].position()); + assertEquals(0, bufs[2].remaining()); + assertEquals(0, bufs[2].position()); // seek back to zero and read a stripe to re-open the streams ecb.seek(0); @@ -723,13 +714,8 @@ public void testErrorReadingBlockContinuesReading() throws IOException { Integer failStream = getRandomStreamIndex(currentStreams); streamFactory.getBlockStream(failStream) .setShouldError(true); - try { - clearBuffers(bufs); - ecb.read(bufs); - Assertions.fail("InsufficientLocationsException expected"); - } catch (InsufficientLocationsException e) { - // expected - } + clearBuffers(bufs); + assertThrows(InsufficientLocationsException.class, () -> ecb.read(bufs)); } } } @@ -808,13 +794,13 @@ public void testFailedLocationsAreNotRead() throws IOException { for (int j = 0; j < bufs.length; j++) { ECStreamTestUtil.assertBufferMatches(bufs[j], dataGen); } - Assertions.assertEquals(stripeSize(), read); + assertEquals(stripeSize(), read); // Now ensure that streams with repIndexes 1, 2 and 3 have not been // created in the stream factory, indicating we did not read them. List streams = streamFactory.getBlockStreams(); for (TestBlockInputStream stream : streams) { - Assertions.assertTrue(stream.getEcReplicaIndex() > 2); + assertTrue(stream.getEcReplicaIndex() > 2); } } } @@ -847,9 +833,9 @@ private void addDataStreamsToFactory(ByteBuffer[] data, ByteBuffer[] parity) { private void validateContents(ByteBuffer src, ByteBuffer data, int offset, int count) { byte[] srcArray = src.array(); - Assertions.assertEquals(count, data.remaining()); + assertEquals(count, data.remaining()); for (int i = offset; i < offset + count; i++) { - Assertions.assertEquals(srcArray[i], data.get(), "Element " + i); + assertEquals(srcArray[i], data.get(), "Element " + i); } data.flip(); } From 0008d9ab086428ecb00bf3b080835f8f8a9af9f4 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 21 Dec 2023 22:07:30 -0800 Subject: [PATCH 27/28] HDDS-9170. 
Replaced GenericTestUtils#assertExceptionContains with AssertJ#assertThat (#5844) --- .../scm/storage/TestChunkInputStream.java | 13 +- .../helpers/TestDatanodeVersionFile.java | 38 ++--- .../common/impl/TestContainerDataYaml.java | 49 +++--- .../common/impl/TestContainerSet.java | 16 +- .../TestCloseContainerCommandHandler.java | 44 +++-- .../keyvalue/TestKeyValueBlockIterator.java | 34 ++-- .../keyvalue/TestKeyValueContainer.java | 59 +++---- .../keyvalue/TestKeyValueHandler.java | 19 +-- .../impl/CommonChunkManagerTestCases.java | 32 ++-- .../TestDataNodeStartupSlvLessThanMlv.java | 18 +- .../hdds/scm/node/TestSCMNodeManager.java | 15 +- .../scm/safemode/TestSCMSafeModeManager.java | 102 +++--------- .../apache/ozone/test/GenericTestUtils.java | 41 ----- .../fs/ozone/AbstractOzoneFileSystemTest.java | 26 ++- .../fs/ozone/TestRootedOzoneFileSystem.java | 13 +- .../apache/hadoop/ozone/TestBlockTokens.java | 12 +- .../hadoop/ozone/TestSecureOzoneCluster.java | 23 +-- ...TestOzoneClientMultipartUploadWithFSO.java | 31 ++-- .../rpc/TestOzoneRpcClientAbstract.java | 155 +++++++----------- .../om/TestOzoneManagerConfiguration.java | 19 +-- .../hadoop/ozone/om/TestOzoneManagerHA.java | 15 +- .../om/TestOzoneManagerHAWithAllRunning.java | 9 +- .../ozone/om/TestOzoneManagerRestart.java | 22 +-- hadoop-ozone/interface-storage/pom.xml | 6 +- .../helpers/TestOmMultipartKeyInfoCodec.java | 6 +- .../om/helpers/TestTransactionInfoCodec.java | 16 +- .../ozone/om/failover/TestOMFailovers.java | 32 ++-- .../volume/TestOMVolumeCreateRequest.java | 14 +- .../ozone/s3/TestVirtualHostStyleFilter.java | 50 ++---- 29 files changed, 322 insertions(+), 607 deletions(-) diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java index f45529412fe..a5de86a84f6 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java @@ -38,16 +38,16 @@ import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -160,14 +160,9 @@ public void testPartialChunkRead() throws Exception { @Test public void testSeek() throws Exception { seekAndVerify(0); + EOFException eofException = assertThrows(EOFException.class, () -> seekAndVerify(CHUNK_SIZE + 1)); + assertThat(eofException).hasMessage("EOF encountered at pos: " + (CHUNK_SIZE + 1) + " for chunk: " + CHUNK_NAME); - try { - seekAndVerify(CHUNK_SIZE + 1); - fail("Seeking to more than the length of Chunk should fail."); - } catch (EOFException e) { - 
GenericTestUtils.assertExceptionContains("EOF encountered at pos: " - + (CHUNK_SIZE + 1) + " for chunk: " + CHUNK_NAME, e); - } // Seek before read should update the ChunkInputStream#chunkPosition seekAndVerify(25); assertEquals(25, chunkStream.getChunkPosition()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java index d9dc7de6d98..90cd925611d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java @@ -20,7 +20,6 @@ import org.apache.hadoop.ozone.common.InconsistentStorageStateException; import org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -33,9 +32,10 @@ import java.util.Properties; import java.util.UUID; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests {@link DatanodeVersionFile}. @@ -92,15 +92,11 @@ public void testCreateAndReadVersionFile() throws IOException { } @Test - public void testIncorrectClusterId() throws IOException { - try { - String randomClusterID = UUID.randomUUID().toString(); - StorageVolumeUtil.getClusterID(properties, versionFile, - randomClusterID); - fail("Test failure in testIncorrectClusterId"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex); - } + public void testIncorrectClusterId() { + String randomClusterID = UUID.randomUUID().toString(); + InconsistentStorageStateException exception = assertThrows(InconsistentStorageStateException.class, + () -> StorageVolumeUtil.getClusterID(properties, versionFile, randomClusterID)); + assertThat(exception).hasMessageContaining("Mismatched ClusterIDs"); } @Test @@ -111,13 +107,9 @@ public void testVerifyCTime() throws IOException { dnVersionFile.createVersionFile(versionFile); properties = dnVersionFile.readFrom(versionFile); - try { - StorageVolumeUtil.getCreationTime(properties, versionFile); - fail("Test failure in testVerifyCTime"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Invalid Creation time in " + - "Version File : " + versionFile, ex); - } + InconsistentStorageStateException exception = assertThrows(InconsistentStorageStateException.class, + () -> StorageVolumeUtil.getCreationTime(properties, versionFile)); + assertThat(exception).hasMessageContaining("Invalid Creation time in Version File : " + versionFile); } @Test @@ -127,12 +119,8 @@ public void testVerifyLayOut() throws IOException { storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion); dnVersionFile.createVersionFile(versionFile); Properties props = dnVersionFile.readFrom(versionFile); - - try { - StorageVolumeUtil.getLayOutVersion(props, versionFile); - fail("Test failure in testVerifyLayOut"); 
- } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex); - } + InconsistentStorageStateException exception = assertThrows(InconsistentStorageStateException.class, + () -> StorageVolumeUtil.getLayOutVersion(props, versionFile)); + assertThat(exception).hasMessageContaining("Invalid layOutVersion."); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java index 4bd2ece41eb..786c793b340 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.ozone.OzoneConsts; @@ -30,7 +31,6 @@ import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; -import org.apache.ozone.test.GenericTestUtils; import java.io.File; import java.io.IOException; @@ -39,12 +39,13 @@ import java.util.UUID; import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; - /** * This class tests create/read .container files. 
*/ @@ -188,22 +189,18 @@ public void testCreateContainerFileWithoutReplicaIndex( cleanup(); } - @ContainerLayoutTestInfo.ContainerTest - public void testIncorrectContainerFile(ContainerLayoutVersion layout) - throws IOException { + public void testIncorrectContainerFile(ContainerLayoutVersion layout) { setLayoutVersion(layout); - try { - String containerFile = "incorrect.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - fail("testIncorrectContainerFile failed"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("No enum constant", ex); - } + String containerFile = "incorrect.container"; + + // Get file from resource folder + ClassLoader classLoader = getClass().getClassLoader(); + File file = new File(classLoader.getResource(containerFile).getFile()); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, + () -> ContainerDataYaml.readContainerFile(file)); + + assertThat(exception).hasMessageContaining("No enum constant"); } @@ -246,26 +243,24 @@ public void testCheckBackWardCompatibilityOfContainerFile( } /** - * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}. + * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData,ConfigurationSource)}. */ @ContainerLayoutTestInfo.ContainerTest - public void testChecksumInContainerFile(ContainerLayoutVersion layout) - throws IOException { + public void testChecksumInContainerFile(ContainerLayoutVersion layout) throws IOException { setLayoutVersion(layout); long containerID = testContainerID++; File containerFile = createContainerFile(containerID, 0); // Read from .container file, and verify data. - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); + KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml.readContainerFile(containerFile); ContainerUtils.verifyChecksum(kvData, conf); cleanup(); } /** - * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}. + * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData,ConfigurationSource)}. 
*/ @ContainerLayoutTestInfo.ContainerTest public void testChecksumInContainerFileWithReplicaIndex( @@ -297,14 +292,12 @@ private KeyValueContainerData getKeyValueContainerData() throws IOException { @ContainerLayoutTestInfo.ContainerTest public void testIncorrectChecksum(ContainerLayoutVersion layout) { setLayoutVersion(layout); - try { + Exception ex = assertThrows(Exception.class, () -> { KeyValueContainerData kvData = getKeyValueContainerData(); ContainerUtils.verifyChecksum(kvData, conf); - fail("testIncorrectChecksum failed"); - } catch (Exception ex) { - GenericTestUtils.assertExceptionContains("Container checksum error for " + - "ContainerID:", ex); - } + }); + + assertThat(ex).hasMessageStartingWith("Container checksum error for ContainerID:"); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index 13b8fb6d30a..d0d3576c480 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -20,8 +20,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -29,7 +28,6 @@ import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.ozone.test.GenericTestUtils; import org.mockito.Mockito; import java.io.IOException; @@ -43,10 +41,12 @@ import java.util.UUID; import java.util.stream.LongStream; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -83,13 +83,9 @@ public void testAddGetRemoveContainer(ContainerLayoutVersion layout) //addContainer boolean result = containerSet.addContainer(keyValueContainer); assertTrue(result); - try { - containerSet.addContainer(keyValueContainer); - fail("Adding same container ID twice should fail."); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Container already exists with" + - " container Id " + containerId, ex); - } + StorageContainerException exception = assertThrows(StorageContainerException.class, + () -> containerSet.addContainer(keyValueContainer)); + assertThat(exception).hasMessage("Container already exists with container Id " + containerId); //getContainer KeyValueContainer container = (KeyValueContainer) containerSet diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 79107ce111e..0c526a2f204 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -35,12 +35,14 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.Assertions; import java.io.IOException; import java.util.UUID; import static java.util.Collections.singletonMap; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; @@ -69,7 +71,7 @@ public class TestCloseContainerCommandHandler { private ContainerLayoutVersion layoutVersion; - public void initLayoutVerison(ContainerLayoutVersion layout) + public void initLayoutVersion(ContainerLayoutVersion layout) throws Exception { this.layoutVersion = layout; init(); @@ -106,7 +108,7 @@ private void init() throws Exception { @ContainerLayoutTestInfo.ContainerTest public void closeContainerWithPipeline(ContainerLayoutVersion layout) throws Exception { - initLayoutVerison(layout); + initLayoutVersion(layout); // close a container that's associated with an existing pipeline subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null); waitTillFinishExecution(subject); @@ -121,7 +123,7 @@ public void closeContainerWithPipeline(ContainerLayoutVersion layout) @ContainerLayoutTestInfo.ContainerTest public void closeContainerWithoutPipeline(ContainerLayoutVersion layout) throws Exception { - initLayoutVerison(layout); + initLayoutVersion(layout); // close a container that's NOT associated with an open pipeline subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null); waitTillFinishExecution(subject); @@ -139,7 +141,7 @@ public void closeContainerWithoutPipeline(ContainerLayoutVersion layout) @ContainerLayoutTestInfo.ContainerTest public void closeContainerWithForceFlagSet(ContainerLayoutVersion layout) throws Exception { - initLayoutVerison(layout); + initLayoutVersion(layout); // close a container that's associated with an existing pipeline subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null); waitTillFinishExecution(subject); @@ -153,7 +155,7 @@ public void closeContainerWithForceFlagSet(ContainerLayoutVersion layout) @ContainerLayoutTestInfo.ContainerTest public void forceCloseQuasiClosedContainer(ContainerLayoutVersion layout) throws Exception { - initLayoutVerison(layout); + initLayoutVersion(layout); // force-close a container that's already quasi closed container.getContainerData() .setState(ContainerProtos.ContainerDataProto.State.QUASI_CLOSED); @@ -170,7 +172,7 @@ public void forceCloseQuasiClosedContainer(ContainerLayoutVersion layout) @ContainerLayoutTestInfo.ContainerTest public void forceCloseOpenContainer(ContainerLayoutVersion layout) throws Exception { - 
initLayoutVerison(layout); + initLayoutVersion(layout); // force-close a container that's NOT associated with an open pipeline subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null); waitTillFinishExecution(subject); @@ -186,7 +188,7 @@ public void forceCloseOpenContainer(ContainerLayoutVersion layout) @ContainerLayoutTestInfo.ContainerTest public void forceCloseOpenContainerWithPipeline(ContainerLayoutVersion layout) throws Exception { - initLayoutVerison(layout); + initLayoutVersion(layout); // force-close a container that's associated with an existing pipeline subject.handle(forceCloseWithPipeline(), ozoneContainer, context, null); waitTillFinishExecution(subject); @@ -204,7 +206,7 @@ public void forceCloseOpenContainerWithPipeline(ContainerLayoutVersion layout) @ContainerLayoutTestInfo.ContainerTest public void closeAlreadyClosedContainer(ContainerLayoutVersion layout) throws Exception { - initLayoutVerison(layout); + initLayoutVersion(layout); container.getContainerData() .setState(ContainerProtos.ContainerDataProto.State.CLOSED); @@ -226,32 +228,24 @@ public void closeAlreadyClosedContainer(ContainerLayoutVersion layout) } @ContainerLayoutTestInfo.ContainerTest - public void closeNonExistenceContainer(ContainerLayoutVersion layout) - throws Exception { - initLayoutVerison(layout); + public void closeNonExistenceContainer(ContainerLayoutVersion layout) throws Exception { + initLayoutVersion(layout); long containerID = 1L; - try { - controller.markContainerForClose(containerID); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("The Container " + - "is not found. ContainerID: " + containerID, e); - } + IOException ioe = Assertions.assertThrows(IOException.class, () -> controller.markContainerForClose(containerID)); + assertThat(ioe).hasMessage("The Container is not found. ContainerID: " + containerID); } @ContainerLayoutTestInfo.ContainerTest public void closeMissingContainer(ContainerLayoutVersion layout) throws Exception { - initLayoutVerison(layout); + initLayoutVersion(layout); long containerID = 2L; containerSet.getMissingContainerSet().add(containerID); - try { - controller.markContainerForClose(containerID); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("The Container is in " + - "the MissingContainerSet hence we can't close it. " + - "ContainerID: " + containerID, e); - } + + IOException ioe = Assertions.assertThrows(IOException.class, () -> controller.markContainerForClose(containerID)); + assertThat(ioe) + .hasMessage("The Container is in the MissingContainerSet hence we can't close it. 
ContainerID: " + containerID); } private CloseContainerCommand closeWithKnownPipeline() { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 6e8ad7196d8..52316c43264 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -54,8 +54,10 @@ import org.junit.jupiter.api.AfterEach; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.params.ParameterizedTest; @@ -169,12 +171,8 @@ public void testKeyValueBlockIteratorWithMixedBlocks( assertFalse(keyValueBlockIterator.hasNext()); assertFalse(blockIDIter.hasNext()); - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + CONTAINER_ID, ex); - } + NoSuchElementException exception = assertThrows(NoSuchElementException.class, keyValueBlockIterator::nextBlock); + assertThat(exception).hasMessage("Block Iterator reached end for ContainerID " + CONTAINER_ID); } } @@ -192,12 +190,8 @@ public void testKeyValueBlockIteratorWithNextBlock( assertEquals((long) blockIDs.get(1), keyValueBlockIterator.nextBlock().getLocalID()); - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + CONTAINER_ID, ex); - } + NoSuchElementException exception = assertThrows(NoSuchElementException.class, keyValueBlockIterator::nextBlock); + assertThat(exception).hasMessage("Block Iterator reached end for ContainerID " + CONTAINER_ID); } } @@ -208,8 +202,7 @@ public void testKeyValueBlockIteratorWithHasNext( throws Exception { initTest(versionInfo, keySeparator); List blockIDs = createContainerWithBlocks(CONTAINER_ID, 2); - try (BlockIterator blockIter = - db.getStore().getBlockIterator(CONTAINER_ID)) { + try (BlockIterator blockIter = db.getStore().getBlockIterator(CONTAINER_ID)) { // Even calling multiple times hasNext() should not move entry forward. 
assertTrue(blockIter.hasNext()); @@ -217,8 +210,7 @@ public void testKeyValueBlockIteratorWithHasNext( assertTrue(blockIter.hasNext()); assertTrue(blockIter.hasNext()); assertTrue(blockIter.hasNext()); - assertEquals((long) blockIDs.get(0), - blockIter.nextBlock().getLocalID()); + assertEquals((long) blockIDs.get(0), blockIter.nextBlock().getLocalID()); assertTrue(blockIter.hasNext()); assertTrue(blockIter.hasNext()); @@ -229,14 +221,10 @@ public void testKeyValueBlockIteratorWithHasNext( blockIter.seekToFirst(); assertEquals((long) blockIDs.get(0), blockIter.nextBlock().getLocalID()); - assertEquals((long)blockIDs.get(1), blockIter.nextBlock().getLocalID()); + assertEquals((long) blockIDs.get(1), blockIter.nextBlock().getLocalID()); - try { - blockIter.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + CONTAINER_ID, ex); - } + NoSuchElementException exception = assertThrows(NoSuchElementException.class, blockIter::nextBlock); + assertThat(exception).hasMessage("Block Iterator reached end for ContainerID " + CONTAINER_ID); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 9e2e2de6dbe..a841c0f38f0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -25,8 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.RDBStore; @@ -43,8 +42,7 @@ import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume - .RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -92,9 +90,11 @@ import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.apache.ratis.util.Preconditions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.mockito.ArgumentMatchers.anyList; @@ -540,20 +540,14 @@ public void 
concurrentExport(ContainerTestVersionInfo versionInfo) } @ContainerTestVersionInfo.ContainerTest - public void testDuplicateContainer(ContainerTestVersionInfo versionInfo) - throws Exception { + public void testDuplicateContainer(ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); - try { - // Create Container. - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - fail("testDuplicateContainer failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("ContainerFile already " + - "exists", ex); - assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, ex - .getResult()); - } + + keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); + StorageContainerException exception = assertThrows(StorageContainerException.class, () -> + keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId)); + assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, exception.getResult()); + assertThat(exception).hasMessage("Container creation failed because ContainerFile already exists"); } @ContainerTestVersionInfo.ContainerTest @@ -563,14 +557,11 @@ public void testDiskFullExceptionCreateContainer( Mockito.reset(volumeChoosingPolicy); Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) .thenThrow(DiskChecker.DiskOutOfSpaceException.class); - try { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - fail("testDiskFullExceptionCreateContainer failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("disk out of space", - ex); - assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, ex.getResult()); - } + + StorageContainerException exception = assertThrows(StorageContainerException.class, () -> + keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId)); + assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, exception.getResult()); + assertThat(exception).hasMessage("Container creation failed, due to disk out of space"); } @ContainerTestVersionInfo.ContainerTest @@ -669,20 +660,20 @@ public void testUpdateContainer(ContainerTestVersionInfo versionInfo) public void testUpdateContainerUnsupportedRequest( ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); - try { - closeContainer(); + + closeContainer(); + + StorageContainerException exception = assertThrows(StorageContainerException.class, () -> { keyValueContainer = new KeyValueContainer(keyValueContainerData, CONF); keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); Map metadata = new HashMap<>(); metadata.put(OzoneConsts.VOLUME, OzoneConsts.OZONE); keyValueContainer.update(metadata, false); - fail("testUpdateContainerUnsupportedRequest failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Updating a closed container " + - "without force option is not allowed", ex); - assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex - .getResult()); - } + }); + + assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, exception.getResult()); + assertThat(exception) + .hasMessageStartingWith("Updating a closed container without force option is not allowed. 
ContainerID: "); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index a970013ef8c..c17ce8c7c97 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -57,9 +57,11 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -277,16 +279,13 @@ public void testVolumeSetInKeyValueHandler() throws Exception { //Set a class which is not of sub class of VolumeChoosingPolicy conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); - try { - new KeyValueHandler(conf, - context.getParent().getDatanodeDetails().getUuidString(), - cset, volumeSet, metrics, c -> { }); - } catch (RuntimeException ex) { - GenericTestUtils.assertExceptionContains("class org.apache.hadoop" + - ".ozone.container.common.impl.HddsDispatcher not org.apache" + - ".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy", - ex); - } + RuntimeException exception = assertThrows(RuntimeException.class, + () -> new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, + metrics, c -> { })); + + assertThat(exception).hasMessageEndingWith( + "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher " + + "not org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy"); } finally { volumeSet.shutdown(); FileUtil.fullyDelete(datanodeDir); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index ad859704946..6ad6936bccc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; import java.io.File; @@ -37,6 +36,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMBINED_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; +import static org.assertj.core.api.Assertions.assertThat; 
import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -45,32 +45,20 @@ /** * Common test cases for ChunkManager implementation tests. */ -public abstract class CommonChunkManagerTestCases - extends AbstractTestChunkManager { +public abstract class CommonChunkManagerTestCases extends AbstractTestChunkManager { @Test public void testWriteChunkIncorrectLength() { - // GIVEN ChunkManager chunkManager = createTestSubject(); - try { - long randomLength = 200L; - BlockID blockID = getBlockID(); - ChunkInfo chunkInfo = new ChunkInfo( - String.format("%d.data.%d", blockID.getLocalID(), 0), - 0, randomLength); - - chunkManager.writeChunk(getKeyValueContainer(), blockID, chunkInfo, - getData(), - WRITE_STAGE); + long randomLength = 200L; + BlockID blockID = getBlockID(); + ChunkInfo chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, randomLength); - // THEN - fail("testWriteChunkIncorrectLength failed"); - } catch (StorageContainerException ex) { - // As we got an exception, writeBytes should be 0. - checkWriteIOStats(0, 0); - GenericTestUtils.assertExceptionContains("Unexpected buffer size", ex); - assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, ex.getResult()); - } + StorageContainerException exception = assertThrows(StorageContainerException.class, + () -> chunkManager.writeChunk(getKeyValueContainer(), blockID, chunkInfo, getData(), WRITE_STAGE)); + checkWriteIOStats(0, 0); + assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, exception.getResult()); + assertThat(exception).hasMessageStartingWith("Unexpected buffer size"); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java index e7d20028a6d..e9fef6ecfd6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java @@ -19,6 +19,8 @@ import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion; import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_LAYOUT_VERSION_DIR; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; import java.io.File; import java.io.IOException; @@ -32,8 +34,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; -import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -65,18 +65,12 @@ public void testStartupSlvLessThanMlv() throws Exception { UpgradeTestUtils.createVersionFile(datanodeSubdir, HddsProtos.NodeType.DATANODE, mlv); - try { - new DatanodeStateMachine(getNewDatanodeDetails(), conf); - Assertions.fail("Expected IOException due to incorrect MLV on DataNode " + - "creation."); - } catch (IOException e) { - String expectedMessage = String.format("Metadata layout version (%s) > " + - "software layout version (%s)", mlv, largestSlv); - 
GenericTestUtils.assertExceptionContains(expectedMessage, e); - } + IOException ioException = assertThrows(IOException.class, + () -> new DatanodeStateMachine(getNewDatanodeDetails(), conf)); + assertThat(ioException).hasMessageEndingWith( + String.format("Metadata layout version (%s) > software layout version (%s)", mlv, largestSlv)); } - private DatanodeDetails getNewDatanodeDetails() { DatanodeDetails.Port containerPort = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, 0); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 333830a7d73..2f7663dcd40 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -42,8 +42,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.LayoutVersionProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; @@ -111,9 +110,11 @@ import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND_COUNT_UPDATED; import static org.apache.hadoop.hdds.scm.events.SCMEvents.NEW_NODE; import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; @@ -1123,13 +1124,11 @@ public void testCommandCount() * @throws IOException */ @Test - public void testScmCheckForErrorOnNullDatanodeDetails() - throws IOException, AuthenticationException { + public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException, AuthenticationException { try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - nodeManager.processHeartbeat(null, null); - } catch (NullPointerException npe) { - GenericTestUtils.assertExceptionContains("Heartbeat is missing " + - "DatanodeDetails.", npe); + NullPointerException npe = assertThrows(NullPointerException.class, + () -> nodeManager.processHeartbeat(null, null)); + assertThat(npe).hasMessage("Heartbeat is missing DatanodeDetails."); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index ad59d323eb2..dea95cb8266 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -62,12 +62,15 @@ 
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import org.mockito.Mockito; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** Test class for SCMSafeModeManager. */ @@ -238,87 +241,30 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck5() testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0.5); } - @Test - public void testFailWithIncorrectValueForHealthyPipelinePercent() - throws Exception { - try { - OzoneConfiguration conf = createConf(100, - 0.9); - MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = - PipelineManagerImpl.newPipelineManager( - conf, - SCMHAManagerStub.getInstance(true), - mockNodeManager, - scmMetadataStore.getPipelineTable(), - queue, - scmContext, - serviceManager, - Clock.system(ZoneOffset.UTC)); - scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, pipelineManager, queue, serviceManager, - scmContext); - fail("testFailWithIncorrectValueForHealthyPipelinePercent"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" + - " 1.0", ex); - } - } - - @Test - public void testFailWithIncorrectValueForOneReplicaPipelinePercent() - throws Exception { - try { - OzoneConfiguration conf = createConf(0.9, - 200); - MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = - PipelineManagerImpl.newPipelineManager( - conf, - SCMHAManagerStub.getInstance(true), - mockNodeManager, - scmMetadataStore.getPipelineTable(), - queue, - scmContext, - serviceManager, - Clock.system(ZoneOffset.UTC)); - scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, pipelineManager, queue, serviceManager, - scmContext); - fail("testFailWithIncorrectValueForOneReplicaPipelinePercent"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" + - " 1.0", ex); - } - } - - @Test - public void testFailWithIncorrectValueForSafeModePercent() throws Exception { - try { - OzoneConfiguration conf = createConf(0.9, 0.1); + @ParameterizedTest + @CsvSource(value = {"100,0.9,false", "0.9,200,false", "0.9,0.1,true"}) + public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, + double oneReplicaPercent, + boolean overrideScmSafeModeThresholdPct) throws Exception { + OzoneConfiguration conf = createConf(healthyPercent, oneReplicaPercent); + if (overrideScmSafeModeThresholdPct) { conf.setDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, -1.0); - MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = - PipelineManagerImpl.newPipelineManager( - conf, - SCMHAManagerStub.getInstance(true), - mockNodeManager, - scmMetadataStore.getPipelineTable(), - queue, - scmContext, - serviceManager, - Clock.system(ZoneOffset.UTC)); - scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, pipelineManager, queue, serviceManager, - scmContext); - fail("testFailWithIncorrectValueForSafeModePercent"); - } 
catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" + - " 1.0", ex); } + MockNodeManager mockNodeManager = new MockNodeManager(true, 10); + PipelineManager pipelineManager = PipelineManagerImpl.newPipelineManager( + conf, + SCMHAManagerStub.getInstance(true), + mockNodeManager, + scmMetadataStore.getPipelineTable(), + queue, + scmContext, + serviceManager, + Clock.system(ZoneOffset.UTC)); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, + () -> new SCMSafeModeManager(conf, containers, null, pipelineManager, queue, serviceManager, scmContext)); + assertThat(exception).hasMessageEndingWith("value should be >= 0.0 and <= 1.0"); } - public void testSafeModeExitRuleWithPipelineAvailabilityCheck( int containerCount, int nodeCount, int pipelineCount, double healthyPipelinePercent, double oneReplicaPercent) diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index 856ac30a328..406a58768a8 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -22,7 +22,6 @@ import java.io.File; import java.io.OutputStream; import java.io.PrintStream; -import java.io.PrintWriter; import java.io.StringWriter; import java.io.UnsupportedEncodingException; import java.util.List; @@ -31,7 +30,6 @@ import com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.log4j.Layout; import org.apache.log4j.Level; @@ -146,13 +144,6 @@ public static String getRandomizedTempPath() { + "-" + randomAlphanumeric(10)); } - /** - * Assert that a given file exists. - */ - public static void assertExists(File f) { - Assertions.assertTrue(f.exists(), "File " + f + " should exist"); - } - /** * Assert that a given dir can be created or it already exists. */ @@ -161,38 +152,6 @@ public static void assertDirCreation(File f) { "Could not create dir " + f + ", nor does it exist"); } - public static void assertExceptionContains(String expectedText, Throwable t) { - assertExceptionContains(expectedText, t, ""); - } - - public static void assertExceptionContains(String expectedText, Throwable t, - String message) { - Assertions.assertNotNull(t, "Null Throwable"); - String msg = t.toString(); - if (msg == null) { - throw new AssertionError("Null Throwable.toString() value", t); - } else if (expectedText != null && !msg.contains(expectedText)) { - String prefix = StringUtils.isEmpty(message) ? "" : message + ": "; - throw new AssertionError(String - .format("%s Expected to find '%s' %s: %s", prefix, expectedText, - "but got unexpected exception", - stringifyException(t)), t); - } - } - - /** - * Make a string representation of the exception. - * @param e The exception to stringify - * @return A string with exception name and call stack. - */ - public static String stringifyException(Throwable e) { - StringWriter stm = new StringWriter(); - PrintWriter wrt = new PrintWriter(stm); - e.printStackTrace(wrt); - wrt.close(); - return stm.toString(); - } - /** * Wait for the specified test to return true. 
The test will be performed * initially and then every {@code checkEveryMillis} until at least diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index ba55b2afcf7..38b0272ab30 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -101,6 +101,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -392,8 +393,8 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { // Creating a child should not add parent keys to the bucket try { getKey(parent, true); - } catch (IOException ex) { - assertKeyNotFoundException(ex); + } catch (OMException ome) { + assertEquals(KEY_NOT_FOUND, ome.getResult()); } // List status on the parent should show the child file @@ -412,8 +413,8 @@ public void testDeleteCreatesFakeParentDir() throws Exception { // Creating a child should not add parent keys to the bucket try { getKey(parent, true); - } catch (IOException ex) { - assertKeyNotFoundException(ex); + } catch (OMException ome) { + assertEquals(KEY_NOT_FOUND, ome.getResult()); } // Delete the child key @@ -1355,10 +1356,6 @@ private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory) .getBucket(bucketName).getKey(key); } - private void assertKeyNotFoundException(IOException ex) { - GenericTestUtils.assertExceptionContains("KEY_NOT_FOUND", ex); - } - @Test public void testGetDirectoryModificationTime() throws IOException, InterruptedException { @@ -1585,14 +1582,11 @@ public void testListStatusOnLargeDirectoryForACLCheck() throws Exception { paths.add(keyName + OM_KEY_PREFIX + p.getName()); } - // unknown keyname - try { - new OzonePrefixPathImpl(getVolumeName(), getBucketName(), "invalidKey", - cluster.getOzoneManager().getKeyManager()); - fail("Non-existent key name!"); - } catch (OMException ome) { - assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult()); - } + // unknown keyName + OMException ome = assertThrows(OMException.class, + () -> new OzonePrefixPathImpl(getVolumeName(), getBucketName(), "invalidKey", + cluster.getOzoneManager().getKeyManager())); + assertEquals(KEY_NOT_FOUND, ome.getResult()); OzonePrefixPathImpl ozonePrefixPath = new OzonePrefixPathImpl(getVolumeName(), getBucketName(), keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index 73d1301f0f6..c82c521a14c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -350,8 +350,8 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { // Creating a child 
should not add parent keys to the bucket try { getKey(parent, true); - } catch (IOException ex) { - assertKeyNotFoundException(ex); + } catch (OMException ome) { + assertEquals(KEY_NOT_FOUND, ome.getResult()); } // List status on the parent should show the child file @@ -421,10 +421,11 @@ public void testDeleteCreatesFakeParentDir() throws Exception { // Creating a child should not add parent keys to the bucket try { getKey(parent, true); - } catch (IOException ex) { - assertKeyNotFoundException(ex); + } catch (OMException ome) { + assertEquals(KEY_NOT_FOUND, ome.getResult()); } + // Delete the child key assertTrue(fs.delete(child, false)); @@ -971,10 +972,6 @@ private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory) .getBucket(bucketName).getKey(keyInBucket); } - private void assertKeyNotFoundException(IOException ex) { - GenericTestUtils.assertExceptionContains("KEY_NOT_FOUND", ex); - } - /** * Helper function for testListStatusRootAndVolume*. * Each call creates one volume, one bucket under that volume, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index f410c50c466..e028e6741e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -90,8 +90,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; -import static org.apache.ozone.test.GenericTestUtils.assertExceptionContains; import static org.apache.ozone.test.GenericTestUtils.waitFor; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; @@ -101,8 +101,7 @@ */ @InterfaceAudience.Private public final class TestBlockTokens { - private static final Logger LOG = LoggerFactory - .getLogger(TestBlockTokens.class); + private static final Logger LOG = LoggerFactory.getLogger(TestBlockTokens.class); private static final String TEST_VOLUME = "testvolume"; private static final String TEST_BUCKET = "testbucket"; private static final String TEST_FILE = "testfile"; @@ -207,8 +206,7 @@ public void blockTokenFailsOnExpiredSecretKey() throws Exception { StorageContainerException ex = assertThrows(StorageContainerException.class, () -> readDataWithoutRetry(keyInfo)); assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult()); - assertExceptionContains( - "Token can't be verified due to expired secret key", ex); + assertThat(ex).hasMessageContaining("Token can't be verified due to expired secret key"); } @Test @@ -254,7 +252,7 @@ public void blockTokenFailsOnWrongSecretKeyId() throws Exception { assertThrows(StorageContainerException.class, () -> readDataWithoutRetry(keyInfo)); assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult()); - assertExceptionContains("Can't find the signing secret key", ex); + assertThat(ex).hasMessageContaining("Can't find the signing secret key"); } @Test @@ -277,7 +275,7 @@ public void blockTokenFailsOnWrongPassword() throws Exception { assertThrows(StorageContainerException.class, () -> readDataWithoutRetry(keyInfo)); assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, 
ex.getResult()); - assertExceptionContains("Invalid token for user", ex); + assertThat(ex).hasMessageContaining("Invalid token for user"); } private UUID extractSecretKeyId(OmKeyInfo keyInfo) throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 717a2fafa19..05b70154235 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -149,6 +149,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_MISMATCH; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; import org.apache.ozone.test.LambdaTestUtils; @@ -176,6 +177,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -838,21 +840,12 @@ public void testGetSetRevokeS3Secret() throws Exception { OmTransportFactory.create(conf, ugiNonAdmin, null), RandomStringUtils.randomAscii(5)); - try { - omClientNonAdmin.getS3Secret("HADOOP/JOHN"); - // Expected to fail because current ugi isn't an admin - fail("non-admin getS3Secret didn't fail as intended"); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex); - } - - try { - omClientNonAdmin.revokeS3Secret("HADOOP/DOE"); - // Expected to fail because current ugi isn't an admin - fail("non-admin revokeS3Secret didn't fail as intended"); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex); - } + OMException omException = assertThrows(OMException.class, + () -> omClientNonAdmin.getS3Secret("HADOOP/JOHN")); + assertSame(USER_MISMATCH, omException.getResult()); + omException = assertThrows(OMException.class, + () -> omClientNonAdmin.revokeS3Secret("HADOOP/DOE")); + assertSame(USER_MISMATCH, omException.getResult()); } finally { if (scm != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 6eaf051ba45..015c57a0251 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -60,13 +60,14 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import 
org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import java.io.IOException; import java.nio.file.Path; @@ -83,7 +84,9 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -732,26 +735,14 @@ public void testListMultipartUploadPartsWithContinuation() } - @Test - public void testListPartsInvalidPartMarker() throws Exception { - try { - bucket.listParts(keyName, "random", -1, 2); - Assertions.fail("Should throw exception as partNumber is an invalid number!"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Should be greater than or " - + "equal to zero", ex); - } - } + @ParameterizedTest + @CsvSource(value = {"-1,2,Should be greater than or equal to zero", + "1,-1,Max Parts Should be greater than zero"}) + public void testListPartsWithInvalidInputs(int partNumberMarker, int maxParts, String expectedErrorMessage) { + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, + () -> bucket.listParts(keyName, "random", partNumberMarker, maxParts)); - @Test - public void testListPartsInvalidMaxParts() throws Exception { - try { - bucket.listParts(keyName, "random", 1, -1); - Assertions.fail("Should throw exception as max parts is an invalid number!"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Max Parts Should be greater " - + "than zero", ex); - } + assertThat(exception).hasMessageContaining(expectedErrorMessage); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 6d19c1ad381..1321fd0e826 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -145,12 +145,14 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -615,11 
+617,9 @@ public void testDeleteS3Bucket() @Test public void testDeleteS3NonExistingBucket() { - try { - store.deleteS3Bucket(UUID.randomUUID().toString()); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("NOT_FOUND", ex); - } + OMException omException = assertThrows(OMException.class, () -> store.deleteS3Bucket(UUID.randomUUID().toString())); + assertSame(ResultCodes.BUCKET_NOT_FOUND, omException.getResult()); + assertThat(omException).hasMessage("Bucket not found"); } @Test @@ -900,8 +900,7 @@ public void testDeleteBucket() } @Test - public void testDeleteLinkedBucket() - throws Exception { + public void testDeleteLinkedBucket() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String linkedBucketName = UUID.randomUUID().toString(); @@ -1040,65 +1039,51 @@ public void testCheckUsedBytesQuota() throws IOException { int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); int valueLength = value.getBytes(UTF_8).length; - int countException = 0; store.createVolume(volumeName); volume = store.getVolume(volumeName); volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - bucket.setQuota(OzoneQuota.parseQuota("1 B", "100")); - - // Test bucket quota. - bucketName = UUID.randomUUID().toString(); - volume.createBucket(bucketName); - bucket = volume.getBucket(bucketName); + final OzoneBucket bucket = volume.getBucket(bucketName); bucket.setQuota(OzoneQuota.parseQuota("1 B", "100")); store.getVolume(volumeName).setQuota( OzoneQuota.parseQuota(Long.MAX_VALUE + " B", "100")); // Test bucket quota: write key. - // The remaining quota does not satisfy a block size, so the write fails. - try { - writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } + // The remaining quota does not satisfy a block size, so the writing fails. + + OMException omException = assertThrows(OMException.class, + () -> writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength)); + assertSame(ResultCodes.QUOTA_EXCEEDED, omException.getResult()); // Write failed, bucket usedBytes should be 0 assertEquals(0L, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); // Test bucket quota: write file. // The remaining quota does not satisfy a block size, so the write fails. - try { - writeFile(bucket, UUID.randomUUID().toString(), ONE, value, 0); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } + + omException = assertThrows(OMException.class, + () -> writeFile(bucket, UUID.randomUUID().toString(), ONE, value, 0)); + assertSame(ResultCodes.QUOTA_EXCEEDED, omException.getResult()); // Write failed, bucket usedBytes should be 0 - assertEquals(0L, - store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); + assertEquals(0L, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); // Test bucket quota: write large key(with five blocks), the first four // blocks will succeed,while the later block will fail. 
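The quota assertions in this hunk also stop grepping the message for "QUOTA_EXCEEDED" and instead compare the typed result code from OMException#getResult() with assertEquals or assertSame. Matching the enum survives any rewording of the message text; matching the code's name as a substring does not. A hedged sketch of the idea, where AppException and ErrorCode are hypothetical stand-ins for OMException and its ResultCodes:

import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

class ResultCodeAssertionSketch {

  enum ErrorCode { QUOTA_EXCEEDED, BUCKET_NOT_FOUND }

  // Stand-in for OMException: a message plus a machine-readable result code.
  static final class AppException extends RuntimeException {
    private final ErrorCode result;
    AppException(String message, ErrorCode result) {
      super(message);
      this.result = result;
    }
    ErrorCode getResult() { return result; }
  }

  @Test
  void assertsOnTypedCodeNotMessageText() {
    AppException ex = assertThrows(AppException.class, () -> {
      throw new AppException("The DiskSpace quota of bucket is exceeded", ErrorCode.QUOTA_EXCEEDED);
    });
    // Enum constants are singletons, so assertSame works and reads as
    // "this exact code", independent of how the message is phrased.
    assertSame(ErrorCode.QUOTA_EXCEEDED, ex.getResult());
  }
}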
bucket.setQuota(OzoneQuota.parseQuota( 4 * blockSize + " B", "100")); - try { + + IOException ioException = assertThrows(IOException.class, () -> { String keyName = UUID.randomUUID().toString(); - OzoneOutputStream out = bucket.createKey(keyName, - valueLength, RATIS, ONE, new HashMap<>()); - for (int i = 0; i <= (4 * blockSize) / value.length(); i++) { - out.write(value.getBytes(UTF_8)); + try (OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS, ONE, new HashMap<>())) { + for (int i = 0; i <= (4 * blockSize) / value.length(); i++) { + out.write(value.getBytes(UTF_8)); + } } - out.close(); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } + }); + assertThat(ioException).hasCauseInstanceOf(OMException.class).hasMessageContaining("QUOTA_EXCEEDED"); + // AllocateBlock failed, bucket usedBytes should not update. - assertEquals(0L, - store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); + assertEquals(0L, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); // Reset bucket quota, the original usedBytes needs to remain the same bucket.setQuota(OzoneQuota.parseQuota( @@ -1106,8 +1091,6 @@ public void testCheckUsedBytesQuota() throws IOException { assertEquals(0, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); - assertEquals(3, countException); - // key with 0 bytes, usedBytes should not increase. bucket.setQuota(OzoneQuota.parseQuota( 5 * blockSize + " B", "100")); @@ -1385,11 +1368,9 @@ public void testVolumeUsedNamespace() throws IOException { volume = store.getVolume(volumeName); assertEquals(1L, volume.getUsedNamespace()); - try { - volume.createBucket(bucketName2); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } + OzoneVolume finalVolume = volume; + OMException omException = assertThrows(OMException.class, () -> finalVolume.createBucket(bucketName2)); + assertEquals(ResultCodes.QUOTA_EXCEEDED, omException.getResult()); // test linked bucket String targetVolName = UUID.randomUUID().toString(); @@ -1913,14 +1894,13 @@ public void testReadKeyWithCorruptedData() throws IOException { // Try reading the key. Since the chunk file is corrupted, it should // throw a checksum mismatch exception. - try { + IOException ioException = assertThrows(IOException.class, () -> { try (OzoneInputStream is = bucket.readKey(keyName)) { is.read(new byte[100]); } - fail("Reading corrupted data should fail."); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("Checksum mismatch", e); - } + }); + + assertThat(ioException).hasMessageContaining("Checksum mismatch"); } // Make this executed at last, for it has some side effect to other UTs @@ -2090,14 +2070,15 @@ public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { fail("Reading corrupted data should not fail."); } corruptData(containerList.get(2), key); - // Try reading the key. Read will fail here as all the replica are corrupt - try (OzoneInputStream is = bucket.readKey(keyName)) { - byte[] b = new byte[data.length]; - is.read(b); - fail("Reading corrupted data should fail."); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("Checksum mismatch", e); - } + // Try reading the key. 
Read will fail here as all the replicas are corrupt + + IOException ioException = assertThrows(IOException.class, () -> { + try (OzoneInputStream is = bucket.readKey(keyName)) { + byte[] b = new byte[data.length]; + is.read(b); + } + }); + assertThat(ioException).hasMessageContaining("Checksum mismatch"); } private void corruptData(Container container, OzoneKey key) @@ -3305,46 +3286,22 @@ void testListMultipartUploadPartsWithContinuation( } - @Test - public void testListPartsInvalidPartMarker() throws Exception { - try { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, "random", -1, 2); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Should be greater than or " + - "equal to zero", ex); - } - } - - @Test - public void testListPartsInvalidMaxParts() throws Exception { - try { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); + @ParameterizedTest + @CsvSource(value = {"-1,1,Should be greater than or equal to zero", "2,-1,Max Parts Should be greater than zero"}) + public void testListPartsInvalidInput(int partNumberMarker, int maxParts, String expectedMessage) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, + () -> bucket.listParts(keyName, "random", partNumberMarker, maxParts)); - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, "random", 1, -1); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Max Parts Should be greater " + - "than zero", ex); - } + assertThat(exception).hasMessageContaining(expectedMessage); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 8f75e568057..89acf321e39 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -44,8 +44,11 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static
org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -335,10 +338,9 @@ public void testOMHAWithUnresolvedAddresses() throws Exception { /** * Test a wrong configuration for OM HA. A configuration with none of the * OM addresses matching the local address should throw an error. - * @throws Exception */ @Test - public void testWrongConfiguration() throws Exception { + public void testWrongConfiguration() { String omServiceId = "om-service-test1"; String omNode1Id = "omNode1"; @@ -360,14 +362,9 @@ public void testWrongConfiguration() throws Exception { conf.set(omNode2RpcAddrKey, "125.0.0.2:9862"); conf.set(omNode3RpcAddrKey, "124.0.0.124:9862"); - try { - startCluster(); - fail("Wrong Configuration. OM initialization should have failed."); - } catch (OzoneIllegalArgumentException e) { - GenericTestUtils.assertExceptionContains("Configuration has no " + - OMConfigKeys.OZONE_OM_ADDRESS_KEY + " address that matches local " + - "node's address.", e); - } + OzoneIllegalArgumentException exception = assertThrows(OzoneIllegalArgumentException.class, this::startCluster); + assertThat(exception).hasMessage( + "Configuration has no " + OZONE_OM_ADDRESS_KEY + " address that matches local node's address."); } /** @@ -479,7 +476,7 @@ public void testMultipleOMServiceIds() throws Exception { } private String getOMAddrKeyWithSuffix(String serviceId, String nodeId) { - return ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, + return ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, serviceId, nodeId); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 7830d699653..c18d1f8b17a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -66,6 +66,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -336,12 +337,11 @@ protected void createVolumeTest(boolean checkSuccess) throws Exception { // ConnectException. Otherwise, we would get a RemoteException from the // last running OM as it would fail to get a quorum. if (e instanceof RemoteException) { - GenericTestUtils.assertExceptionContains("OMNotLeaderException", e); + assertThat(e).hasMessageContaining("is not the leader"); } else if (e instanceof ConnectException) { - GenericTestUtils.assertExceptionContains("Connection refused", e); + assertThat(e).hasMessageContaining("Connection refused"); } else { - GenericTestUtils.assertExceptionContains( - "Could not determine or connect to OM Leader", e); + assertThat(e).hasMessageContaining("Could not determine or connect to OM Leader"); } } else { throw e; @@ -445,12 +445,11 @@ protected void createKeyTest(boolean checkSuccess) throws Exception { // ConnectException. Otherwise, we would get a RemoteException from the // last running OM as it would fail to get a quorum. 
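When the interesting failure arrives wrapped, as with the RemoteException and ServiceException cases in these HA tests, the rewritten assertions inspect the wrapper and its cause in one fluent AssertJ chain instead of a separate instanceof check plus a substring match. A small illustrative sketch; the RuntimeException wrapper and IOException cause below are stand-ins, not Hadoop's actual wrapper types:

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;
import org.junit.jupiter.api.Test;

class CauseChainAssertionSketch {
  @Test
  void assertsOnWrapperAndCauseTogether() {
    RuntimeException wrapper = assertThrows(RuntimeException.class, () -> {
      throw new RuntimeException("om3: request failed",
          new IOException("is not the leader"));
    });
    // hasCauseInstanceOf checks the cause; the message matchers in the same
    // chain still apply to the outer wrapper exception.
    assertThat(wrapper)
        .hasCauseInstanceOf(IOException.class)
        .hasMessageContaining("request failed");
  }
}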
if (e instanceof RemoteException) { - GenericTestUtils.assertExceptionContains("OMNotLeaderException", e); + assertThat(e).hasMessageContaining("is not the leader"); } else if (e instanceof ConnectException) { - GenericTestUtils.assertExceptionContains("Connection refused", e); + assertThat(e).hasMessageContaining("Connection refused"); } else { - GenericTestUtils.assertExceptionContains( - "Could not determine or connect to OM Leader", e); + assertThat(e).hasMessageContaining("Could not determine or connect to OM Leader"); } } else { throw e; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java index 8d933912c55..0a8c5a67860 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import com.google.protobuf.ServiceException; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.ozone.ClientVersion; @@ -28,6 +29,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider; import org.apache.hadoop.ozone.om.ha.OMProxyInfo; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; @@ -73,6 +75,7 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.apache.ratis.metrics.RatisMetrics.RATIS_APPLICATION_NAME_METRICS; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -385,10 +388,10 @@ public void testFailoverWithSuggestedLeader() throws Exception { OzoneManagerProtocolServerSideTranslatorPB omServerProtocol = followerOM.getOmServerProtocol(); - Exception ex = assertThrows(Exception.class, + ServiceException ex = assertThrows(ServiceException.class, () -> omServerProtocol.submitRequest(null, writeRequest)); - GenericTestUtils.assertExceptionContains("Suggested leader is OM:" + - leaderOMNodeId + "[" + leaderOMAddress + "]", ex); + assertThat(ex).hasCauseInstanceOf(OMNotLeaderException.class) + .hasMessageEndingWith("Suggested leader is OM:" + leaderOMNodeId + "[" + leaderOMAddress + "]."); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java index c95bb1e35fa..feccd5b30eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import 
org.apache.ozone.test.GenericTestUtils; import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.AfterAll; @@ -49,11 +48,13 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Test some client operations after cluster starts. And perform restart and */ @@ -122,12 +123,8 @@ public void testRestartOMWithVolumeOperation() throws Exception { cluster.restartStorageContainerManager(true); // After restart, try to create same volume again, it should fail. - try { - objectStore.createVolume(volumeName); - fail("testRestartOM failed"); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("VOLUME_ALREADY_EXISTS", ex); - } + OMException ome = assertThrows(OMException.class, () -> objectStore.createVolume(volumeName)); + assertEquals(VOLUME_ALREADY_EXISTS, ome.getResult()); // Get Volume. ozoneVolume = objectStore.getVolume(volumeName); @@ -157,12 +154,8 @@ public void testRestartOMWithBucketOperation() throws Exception { cluster.restartStorageContainerManager(true); // After restart, try to create same bucket again, it should fail. - try { - ozoneVolume.createBucket(bucketName); - fail("testRestartOMWithBucketOperation failed"); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("BUCKET_ALREADY_EXISTS", ex); - } + OMException ome = assertThrows(OMException.class, () -> ozoneVolume.createBucket(bucketName)); + assertEquals(BUCKET_ALREADY_EXISTS, ome.getResult()); // Get bucket.
ozoneBucket = ozoneVolume.getBucket(bucketName); diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index dc6a53c1ad7..e3271730c27 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -69,7 +69,11 @@ hdds-test-utils test - + + org.apache.ozone + hdds-hadoop-dependency-test + test + diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java index 68a49b0ce50..31846c44a7f 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -30,6 +29,7 @@ import java.util.UUID; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; /** * Test {@link OmMultipartKeyInfo#getCodec()}. @@ -72,11 +72,9 @@ public void testOmMultipartKeyInfoCodec() { try { codec.fromPersistedFormat("random".getBytes(UTF_8)); } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Can't encode the the raw " + - "data from the byte array", ex); + assertThat(ex).hasMessage("Can't encode the the raw data from the byte array"); } catch (java.io.IOException e) { e.printStackTrace(); } - } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java index 717758c0ac4..6f9520de765 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java @@ -20,13 +20,13 @@ import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Test {@link TransactionInfo#getCodec()}. 
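The other recurring cleanup in this patch, used for the safe-mode percentage checks and both listParts validation tests, is collapsing near-identical @Test methods that differ only in inputs and expected message into a single @ParameterizedTest driven by a @CsvSource. A self-contained sketch of that consolidation; listParts here is a hypothetical validator standing in for OzoneBucket#listParts:

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;

class CsvSourceConsolidationSketch {

  // Each CSV record becomes one invocation: two ints and the expected text.
  @ParameterizedTest
  @CsvSource({
      "-1, 2, Should be greater than or equal to zero",
      "1, -1, Max Parts Should be greater than zero"
  })
  void invalidInputsAreRejected(int partNumberMarker, int maxParts, String expectedMessage) {
    IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
        () -> listParts(partNumberMarker, maxParts));
    assertThat(ex).hasMessageContaining(expectedMessage);
  }

  // Hypothetical argument validation standing in for the real API.
  private static void listParts(int partNumberMarker, int maxParts) {
    if (partNumberMarker < 0) {
      throw new IllegalArgumentException("Should be greater than or equal to zero");
    }
    if (maxParts <= 0) {
      throw new IllegalArgumentException("Max Parts Should be greater than zero");
    }
  }
}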
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java
index 717758c0ac4..6f9520de765 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java
@@ -20,13 +20,13 @@
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.Codec;
 import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.Test;
 
 import java.nio.charset.StandardCharsets;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 /**
  * Test {@link TransactionInfo#getCodec()}.
@@ -52,13 +52,9 @@ public void toAndFromPersistedFormat() throws Exception {
   }
 
   @Test
-  public void testInvalidProtocolBuffer() throws Exception {
-    try {
-      getCodec().fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8));
-      fail("testInvalidProtocolBuffer failed");
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains(
-          "Incorrect TransactionInfo value", e);
-    }
+  public void testInvalidProtocolBuffer() {
+    IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
+        () -> getCodec().fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8)));
+    assertThat(ex).hasMessage("Incorrect TransactionInfo value");
   }
 }
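Editor's note: the rewritten testInvalidProtocolBuffer combines the two libraries, with JUnit5 capturing the exception and AssertJ asserting on it. A self-contained sketch, using a hypothetical decode helper in place of Codec#fromPersistedFormat:

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.nio.charset.StandardCharsets;
import org.junit.jupiter.api.Test;

class InvalidInputSketchTest {

  // Hypothetical decoder that rejects anything that is not a valid
  // serialized value, mimicking Codec#fromPersistedFormat's behavior.
  private static Object decode(byte[] raw) {
    throw new IllegalArgumentException("Incorrect TransactionInfo value");
  }

  @Test
  void invalidBytesAreRejected() {
    IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
        () -> decode("random".getBytes(StandardCharsets.UTF_8)));
    // The test method can also drop 'throws Exception' from its
    // signature, since the lambda now holds the only code that throws.
    assertThat(ex).hasMessage("Incorrect TransactionInfo value");
  }
}
```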
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
index 27bed51a148..75c27f70d52 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
@@ -42,6 +42,8 @@
 import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  * Tests OM failover protocols using a Mock Failover provider and a Mock OM
  * Protocol.
@@ -68,26 +70,16 @@ public void testAccessContorlExceptionFailovers() throws Exception {
         failoverProxyProvider.getRetryPolicy(
             OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT));
 
-    try {
-      proxy.submitRequest(null, null);
-      Assertions.fail("Request should fail with AccessControlException");
-    } catch (Exception ex) {
-      Assertions.assertTrue(ex instanceof ServiceException);
-
-      // Request should try all OMs one be one and fail when the last OM also
-      // throws AccessControlException.
-      GenericTestUtils.assertExceptionContains("ServiceException of " +
-          "type class org.apache.hadoop.security.AccessControlException for " +
-          "om3", ex);
-      Assertions.assertTrue(ex.getCause() instanceof AccessControlException);
-
-      Assertions.assertTrue(
-          logCapturer.getOutput().contains(getRetryProxyDebugMsg("om1")));
-      Assertions.assertTrue(
-          logCapturer.getOutput().contains(getRetryProxyDebugMsg("om2")));
-      Assertions.assertTrue(
-          logCapturer.getOutput().contains(getRetryProxyDebugMsg("om3")));
-    }
+    ServiceException serviceException = Assertions.assertThrows(ServiceException.class,
+        () -> proxy.submitRequest(null, null));
+
+    // Request should try all OMs one by one and fail when the last OM also
+    // throws AccessControlException.
+    assertThat(serviceException).hasCauseInstanceOf(AccessControlException.class)
+        .hasMessage("ServiceException of type class org.apache.hadoop.security.AccessControlException for om3");
+    Assertions.assertTrue(logCapturer.getOutput().contains(getRetryProxyDebugMsg("om1")));
+    Assertions.assertTrue(logCapturer.getOutput().contains(getRetryProxyDebugMsg("om2")));
+    Assertions.assertTrue(logCapturer.getOutput().contains(getRetryProxyDebugMsg("om3")));
   }
 
   private String getRetryProxyDebugMsg(String omNodeId) {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
index 2687ade3d39..708565b2486 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
@@ -23,7 +23,6 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
 import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
@@ -34,6 +33,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.mockito.Mockito.when;
@@ -77,15 +77,11 @@ public void testValidateAndUpdateCacheWithZeroMaxUserVolumeCount()
       OMClientResponse omClientResponse =
           omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, txLogIndex);
       Assertions.assertTrue(omClientResponse instanceof OMVolumeCreateResponse);
-      OMVolumeCreateResponse respone =
-          (OMVolumeCreateResponse) omClientResponse;
-      Assertions.assertEquals(expectedObjId, respone.getOmVolumeArgs()
-          .getObjectID());
-      Assertions.assertEquals(txLogIndex,
-          respone.getOmVolumeArgs().getUpdateID());
+      OMVolumeCreateResponse response = (OMVolumeCreateResponse) omClientResponse;
+      Assertions.assertEquals(expectedObjId, response.getOmVolumeArgs().getObjectID());
+      Assertions.assertEquals(txLogIndex, response.getOmVolumeArgs().getUpdateID());
     } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("should be greater than zero",
-          ex);
+      assertThat(ex).hasMessage("should be greater than zero");
     }
   }
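Editor's note: the TestOMFailovers hunk relies on AssertJ's chainable throwable API to verify the cause type and the outer message in a single statement. A runnable sketch with placeholder exceptions, where SecurityException stands in for AccessControlException:

```java
import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;

class CauseChainSketchTest {

  @Test
  void outerMessageAndCauseType() {
    // Placeholder for a ServiceException wrapping an
    // AccessControlException, as in the failover test.
    Exception wrapped = new Exception(
        "ServiceException of type class java.lang.SecurityException for om3",
        new SecurityException("access denied"));

    // One chained statement replaces an instanceof check on getCause()
    // plus a separate substring match on the message.
    assertThat(wrapped)
        .hasCauseInstanceOf(SecurityException.class)
        .hasMessageContaining("for om3");
  }
}
```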
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index 81f492c3e47..be9c1f7fd39 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -19,20 +19,22 @@
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.ozone.test.GenericTestUtils;
 import org.glassfish.jersey.internal.PropertiesDelegate;
 import org.glassfish.jersey.server.ContainerRequest;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
 import org.mockito.Mockito;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.SecurityContext;
 import java.net.URI;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * This class test virtual host style mapping conversion to path style.
@@ -199,41 +201,15 @@ public void testVirtualHostStyleWithQueryParams() throws Exception {
 
   }
 
-  @Test
-  public void testVirtualHostStyleWithNoMatchingDomain() throws Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        ".myhost:9999", null, null, true);
-    try {
-      virtualHostStyleFilter.filter(containerRequest);
-      fail("testVirtualHostStyleWithNoMatchingDomain");
-    } catch (InvalidRequestException ex) {
-      GenericTestUtils.assertExceptionContains("No matching domain", ex);
-    }
-
-  }
-
-  @Test
-  public void testIncorrectVirtualHostStyle() throws
-      Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
+  @ParameterizedTest
+  @CsvSource(value = {"mybucket.myhost:9999,No matching domain", "mybucketlocalhost:9878,invalid format"})
+  public void testVirtualHostStyleWithInvalidInputs(String hostAddress,
+      String expectedErrorMessage) throws Exception {
+    VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter();
     virtualHostStyleFilter.setConfiguration(conf);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        "localhost:9878", null, null, true);
-    try {
-      virtualHostStyleFilter.filter(containerRequest);
-      fail("testIncorrectVirtualHostStyle failed");
-    } catch (InvalidRequestException ex) {
-      GenericTestUtils.assertExceptionContains("invalid format", ex);
-    }
-
+    ContainerRequest containerRequest = createContainerRequest(hostAddress, null, null, true);
+    InvalidRequestException exception = assertThrows(InvalidRequestException.class,
+        () -> virtualHostStyleFilter.filter(containerRequest));
+    assertThat(exception).hasMessageContaining(expectedErrorMessage);
   }
-
 }
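Editor's note: the s3gateway hunk folds two near-identical @Test methods into one @ParameterizedTest driven by @CsvSource. The sketch below shows the mechanics; the host strings mirror the patch, but the toy validate method is invented so the test can run standalone in place of VirtualHostStyleFilter#filter.

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;

class CsvSourceSketchTest {

  // Toy validator: hosts without a '.' are malformed, every other host
  // fails domain lookup. Only here to make the test self-contained.
  private static void validate(String host) {
    if (!host.contains(".")) {
      throw new IllegalArgumentException("invalid format: " + host);
    }
    throw new IllegalArgumentException("No matching domain: " + host);
  }

  // Each @CsvSource row becomes one test invocation; columns bind to the
  // method parameters in declaration order (values are trimmed).
  @ParameterizedTest
  @CsvSource({
      "mybucket.myhost:9999, No matching domain",
      "mybucketlocalhost:9878, invalid format"
  })
  void rejectsBadHosts(String hostAddress, String expectedErrorMessage) {
    IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
        () -> validate(hostAddress));
    assertThat(ex).hasMessageContaining(expectedErrorMessage);
  }
}
```

Merging the tests this way keeps one code path under test and makes adding further invalid-host cases a one-line change.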
From 4cda788c7f311e8cc90283a02c96f0734af79742 Mon Sep 17 00:00:00 2001
From: Hemant Kumar
Date: Thu, 21 Dec 2023 22:08:38 -0800
Subject: [PATCH 28/28] HDDS-9986. Log if there is a failure in closing RocksDB

---
 .../main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java  | 4 ++--
 .../org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 65e891f9820..71cd3716e56 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -226,12 +226,12 @@ public void close() throws IOException {
     }
 
     RDBMetrics.unRegister();
-    IOUtils.closeQuietly(checkPointManager);
+    IOUtils.close(LOG, checkPointManager);
     if (rocksDBCheckpointDiffer != null) {
       RocksDBCheckpointDifferHolder
           .invalidateCacheEntry(rocksDBCheckpointDiffer.getMetadataDir());
     }
-    IOUtils.closeQuietly(db);
+    IOUtils.close(LOG, db);
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
index 97a7a0608d4..d8932c0e7e0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
@@ -169,8 +169,8 @@ public ReferenceCounted get(String key,
     // does not exist, and increment the reference count on the instance.
     ReferenceCounted rcOmSnapshot =
         dbMap.compute(key, (k, v) -> {
-          LOG.info("Loading snapshot. Table key: {}", k);
           if (v == null) {
+            LOG.info("Loading snapshot. Table key: {}", k);
             try {
               v = new ReferenceCounted<>(cacheLoader.load(k), false, this);
             } catch (OMException omEx) {
@@ -317,7 +317,7 @@ private void cleanupInternal() {
       Preconditions.checkState(rcOmSnapshot == result,
           "Cache map entry removal failure. The cache is in an inconsistent " +
               "state. Expected OmSnapshot instance: " + rcOmSnapshot
-              + ", actual: " + result);
+              + ", actual: " + result + " for key: " + key);
 
       pendingEvictionList.remove(result);
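Editor's note: patch 28 switches from IOUtils.closeQuietly to a close variant that takes a logger, so a failed RocksDB close is recorded instead of silently swallowed. The helper below is an illustrative stand-in for that pattern, not the actual org.apache.hadoop.hdds.utils.IOUtils implementation.

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class CloseWithLogging {

  private static final Logger LOG =
      LoggerFactory.getLogger(CloseWithLogging.class);

  private CloseWithLogging() {
  }

  // Close each resource, logging (instead of discarding) any failure.
  // This preserves closeQuietly's "never throw" contract while leaving
  // a trace when a native handle such as RocksDB fails to close.
  static void close(Logger log, AutoCloseable... closeables) {
    for (AutoCloseable c : closeables) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (Exception e) {
        log.error("Failed to close {}", c, e);
      }
    }
  }

  public static void main(String[] args) {
    // Usage example with a resource that fails on close.
    AutoCloseable failing = () -> {
      throw new IllegalStateException("simulated close failure");
    };
    close(LOG, failing, null); // logs the failure, never throws
  }
}
```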