Skip to content

Commit

Permalink
HDDS-12081. TestKeyInputStream repeats tests with default container layout (#7704)
Browse files Browse the repository at this point in the history
  • Loading branch information
adoroszlai authored Jan 24, 2025
1 parent b6cc4af commit 1bd721b
Show file tree
Hide file tree
Showing 5 changed files with 88 additions and 94 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ private static void assertFileCount(File dir, long count) {
}

/**
* Composite annotation for tests parameterized with {@link ContainerLayoutTestInfo}.
* Composite annotation for tests parameterized with {@link ContainerLayoutVersion}.
*/
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,13 @@

import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.io.KeyInputStream;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
import org.apache.hadoop.ozone.om.TestBucket;
import org.junit.jupiter.api.TestInstance;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
Expand All @@ -36,6 +37,7 @@
/**
* Tests {@link ChunkInputStream}.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class TestChunkInputStream extends TestInputStreamBase {

/**
Expand All @@ -44,16 +46,14 @@ class TestChunkInputStream extends TestInputStreamBase {
*/
@ContainerLayoutTestInfo.ContainerTest
void testAll(ContainerLayoutVersion layout) throws Exception {
try (MiniOzoneCluster cluster = newCluster(layout)) {
cluster.waitForClusterToBeReady();
try (OzoneClient client = getCluster().newClient()) {
updateConfig(layout);

try (OzoneClient client = cluster.newClient()) {
TestBucket bucket = TestBucket.newBuilder(client).build();
TestBucket bucket = TestBucket.newBuilder(client).build();

testChunkReadBuffers(bucket);
testBufferRelease(bucket);
testCloseReleasesBuffers(bucket);
}
testChunkReadBuffers(bucket);
testBufferRelease(bucket);
testCloseReleasesBuffers(bucket);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,15 +25,22 @@
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;

// TODO remove this class, set config as default in integration tests
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
abstract class TestInputStreamBase {

static final int CHUNK_SIZE = 1024 * 1024; // 1MB
Expand All @@ -42,8 +49,7 @@ abstract class TestInputStreamBase {
static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE; // 8MB
static final int BYTES_PER_CHECKSUM = 256 * 1024; // 256KB

protected static MiniOzoneCluster newCluster(
ContainerLayoutVersion containerLayout) throws Exception {
protected static MiniOzoneCluster newCluster() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();

OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
Expand All @@ -57,8 +63,6 @@ protected static MiniOzoneCluster newCluster(
conf.setQuietMode(false);
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64,
StorageUnit.MB);
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY,
containerLayout.toString());

ReplicationManagerConfiguration repConf =
conf.getObject(ReplicationManagerConfiguration.class);
Expand All @@ -81,4 +85,38 @@ static String getNewKeyName() {
return UUID.randomUUID().toString();
}

/**
 * Applies the given container layout version to the configuration of every
 * datanode in the shared cluster, then waits for all currently open
 * containers to close (presumably so that containers created by subsequent
 * writes pick up the new layout — datanode config is read at container
 * creation; confirm against container-creation code path).
 *
 * @param layout the {@code ContainerLayoutVersion} to set on each datanode
 */
protected void updateConfig(ContainerLayoutVersion layout) {
cluster.getHddsDatanodes().forEach(dn -> dn.getConf().setEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, layout));
closeContainers();
}

// Shared cluster instance, created once per test class in setup() and
// shut down in cleanup() (class uses TestInstance.Lifecycle.PER_CLASS).
private MiniOzoneCluster cluster;

/**
 * @return the shared {@link MiniOzoneCluster} started in {@link #setup()}
 */
protected MiniOzoneCluster getCluster() {
return cluster;
}

/**
 * Starts the shared cluster once for the whole test class and blocks until
 * it is ready. Runs once because the class is annotated with
 * {@code TestInstance.Lifecycle.PER_CLASS}.
 *
 * @throws Exception if the cluster fails to start or become ready
 */
@BeforeAll
void setup() throws Exception {
cluster = newCluster();
cluster.waitForClusterToBeReady();
}

/**
 * Shuts down the shared cluster after all tests in the class have run.
 * closeQuietly tolerates a null cluster (e.g. if setup failed).
 */
@AfterAll
void cleanup() {
IOUtils.closeQuietly(cluster);
}

/**
 * Waits for every container that SCM currently reports as open to be
 * closed. Checked exceptions from the wait are wrapped in
 * {@link RuntimeException} so the lambda stays simple; any failure aborts
 * the calling test.
 */
private void closeContainers() {
StorageContainerManager scm = cluster.getStorageContainerManager();
scm.getContainerManager().getContainers().forEach(container -> {
if (container.isOpen()) {
try {
// Use the same field-based access as above (was getCluster()) for
// consistency within this class.
TestHelper.waitForContainerClose(cluster, container.getContainerID());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
});
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@
import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream;
import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.io.KeyInputStream;
import org.apache.hadoop.ozone.common.utils.BufferUtils;
Expand All @@ -46,15 +45,16 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;

import static org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec.RS;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.ozone.container.TestHelper.countReplicas;
import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
Expand All @@ -63,6 +63,8 @@
/**
* Tests {@link KeyInputStream}.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
class TestKeyInputStream extends TestInputStreamBase {

/**
Expand Down Expand Up @@ -123,20 +125,18 @@ private void validate(TestBucket bucket, KeyInputStream keyInputStream,
*/
@ContainerLayoutTestInfo.ContainerTest
void testNonReplicationReads(ContainerLayoutVersion layout) throws Exception {
try (MiniOzoneCluster cluster = newCluster(layout)) {
cluster.waitForClusterToBeReady();

try (OzoneClient client = cluster.newClient()) {
TestBucket bucket = TestBucket.newBuilder(client).build();

testInputStreams(bucket);
testSeekRandomly(bucket);
testSeek(bucket);
testReadChunkWithByteArray(bucket);
testReadChunkWithByteBuffer(bucket);
testSkip(bucket);
testECSeek(bucket);
}
try (OzoneClient client = getCluster().newClient()) {
updateConfig(layout);

TestBucket bucket = TestBucket.newBuilder(client).build();

testInputStreams(bucket);
testSeekRandomly(bucket);
testSeek(bucket);
testReadChunkWithByteArray(bucket);
testReadChunkWithByteBuffer(bucket);
testSkip(bucket);
testECSeek(bucket);
}
}

Expand Down Expand Up @@ -379,32 +379,18 @@ private void testSkip(TestBucket bucket) throws Exception {
}
}

private static List<Arguments> readAfterReplicationArgs() {
return Arrays.asList(
Arguments.arguments(FILE_PER_BLOCK, false),
Arguments.arguments(FILE_PER_BLOCK, true),
Arguments.arguments(FILE_PER_CHUNK, false),
Arguments.arguments(FILE_PER_CHUNK, true)
);
}

@ParameterizedTest
@MethodSource("readAfterReplicationArgs")
void readAfterReplication(ContainerLayoutVersion layout,
boolean doUnbuffer) throws Exception {
try (MiniOzoneCluster cluster = newCluster(layout)) {
cluster.waitForClusterToBeReady();

try (OzoneClient client = cluster.newClient()) {
TestBucket bucket = TestBucket.newBuilder(client).build();
@ValueSource(booleans = {false, true})
@Order(Integer.MAX_VALUE) // shuts down datanodes
void readAfterReplication(boolean doUnbuffer) throws Exception {
try (OzoneClient client = getCluster().newClient()) {
TestBucket bucket = TestBucket.newBuilder(client).build();

testReadAfterReplication(cluster, bucket, doUnbuffer);
}
testReadAfterReplication(bucket, doUnbuffer);
}
}

private void testReadAfterReplication(MiniOzoneCluster cluster,
TestBucket bucket, boolean doUnbuffer) throws Exception {
private void testReadAfterReplication(TestBucket bucket, boolean doUnbuffer) throws Exception {
int dataLength = 2 * CHUNK_SIZE;
String keyName = getNewKeyName();
byte[] data = bucket.writeRandomBytes(keyName, dataLength);
Expand All @@ -415,7 +401,7 @@ private void testReadAfterReplication(MiniOzoneCluster cluster,
.setKeyName(keyName)
.setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
.build();
OmKeyInfo keyInfo = cluster.getOzoneManager()
OmKeyInfo keyInfo = getCluster().getOzoneManager()
.getKeyInfo(keyArgs, false)
.getKeyInfo();

Expand All @@ -425,32 +411,20 @@ private void testReadAfterReplication(MiniOzoneCluster cluster,
assertEquals(1, locationInfoList.size());
OmKeyLocationInfo loc = locationInfoList.get(0);
long containerID = loc.getContainerID();
assertEquals(3, countReplicas(containerID, cluster));
assertEquals(3, countReplicas(containerID, getCluster()));

TestHelper.waitForContainerClose(cluster, containerID);
TestHelper.waitForContainerClose(getCluster(), containerID);

List<DatanodeDetails> pipelineNodes = loc.getPipeline().getNodes();

// read chunk data
try (KeyInputStream keyInputStream = bucket.getKeyInputStream(keyName)) {
int b = keyInputStream.read();
assertNotEquals(-1, b);
if (doUnbuffer) {
keyInputStream.unbuffer();
}
cluster.shutdownHddsDatanode(pipelineNodes.get(0));
// check that we can still read it
assertReadFully(data, keyInputStream, dataLength - 1, 1);
}

// read chunk data with ByteBuffer
try (KeyInputStream keyInputStream = bucket.getKeyInputStream(keyName)) {
int b = keyInputStream.read();
assertNotEquals(-1, b);
if (doUnbuffer) {
keyInputStream.unbuffer();
}
cluster.shutdownHddsDatanode(pipelineNodes.get(0));
getCluster().shutdownHddsDatanode(pipelineNodes.get(0));
// check that we can still read it
assertReadFullyUsingByteBuffer(data, keyInputStream, dataLength - 1, 1);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.ContainerID;
Expand All @@ -41,7 +40,6 @@
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
Expand All @@ -51,8 +49,7 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;

import java.io.IOException;
import java.time.Duration;
Expand All @@ -61,7 +58,6 @@
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Stream;

import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.SECONDS;
Expand All @@ -74,12 +70,9 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.params.provider.Arguments.arguments;

/**
* Tests FinalizeBlock.
Expand All @@ -94,16 +87,7 @@ public class TestFinalizeBlock {
private static String volumeName = UUID.randomUUID().toString();
private static String bucketName = UUID.randomUUID().toString();

public static Stream<Arguments> dnLayoutParams() {
return Stream.of(
arguments(false, FILE_PER_CHUNK),
arguments(true, FILE_PER_CHUNK),
arguments(false, FILE_PER_BLOCK),
arguments(true, FILE_PER_BLOCK)
);
}

private void setup(boolean enableSchemaV3, ContainerLayoutVersion version) throws Exception {
private void setup(boolean enableSchemaV3) throws Exception {
conf = new OzoneConfiguration();
conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
Expand All @@ -116,7 +100,6 @@ private void setup(boolean enableSchemaV3, ContainerLayoutVersion version) throw
conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
conf.setBoolean(CONTAINER_SCHEMA_V3_ENABLED, enableSchemaV3);
conf.setEnum(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, version);

DatanodeConfiguration datanodeConfiguration = conf.getObject(
DatanodeConfiguration.class);
Expand Down Expand Up @@ -150,10 +133,9 @@ public void shutdown() {
}

@ParameterizedTest
@MethodSource("dnLayoutParams")
public void testFinalizeBlock(boolean enableSchemaV3, ContainerLayoutVersion version)
throws Exception {
setup(enableSchemaV3, version);
@ValueSource(booleans = {false, true})
public void testFinalizeBlock(boolean enableSchemaV3) throws Exception {
setup(enableSchemaV3);
String keyName = UUID.randomUUID().toString();
// create key
createKey(keyName);
Expand Down

0 comments on commit 1bd721b

Please sign in to comment.