diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4c6723daff8..7d0f911ed3e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -30,7 +30,7 @@ env:
   FAIL_FAST: ${{ github.event_name == 'pull_request' }}
   # Minimum required Java version for running Ozone is defined in pom.xml (javac.version).
   TEST_JAVA_VERSION: 21 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image
-  MAVEN_ARGS: --batch-mode --settings ${{ github.workspace }}/dev-support/ci/maven-settings.xml --show-version
+  MAVEN_ARGS: --batch-mode --settings ${{ github.workspace }}/dev-support/ci/maven-settings.xml
   MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
   HADOOP_IMAGE: ghcr.io/apache/hadoop
   OZONE_IMAGE: ghcr.io/apache/ozone
@@ -146,7 +146,7 @@ jobs:
       - name: Run a full build
         run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }}
         env:
-          DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+          DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
       - name: Store binaries for tests
         uses: actions/upload-artifact@v4
         with:
@@ -226,7 +226,7 @@ jobs:
         run: hadoop-ozone/dev-support/checks/build.sh -Pdist -DskipRecon -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }}
         env:
           OZONE_WITH_COVERAGE: false
-          DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+          DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
   basic:
     needs:
       - build-info
@@ -274,7 +274,7 @@ jobs:
       - name: Execute tests
        run: hadoop-ozone/dev-support/checks/${{ matrix.check }}.sh ${{ inputs.ratis_args }}
         env:
-          DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+          DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
       - name: Summary of failures
         run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt
         if: ${{ failure() }}
@@ -321,7 +321,7 @@ jobs:
      - name: Execute tests
         run: hadoop-ozone/dev-support/checks/${{ github.job }}.sh ${{ inputs.ratis_args }}
         env:
-          DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+          DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
       - name: Summary of failures
         run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt
         if: ${{ failure() }}
@@ -641,7 +641,7 @@ jobs:
           hadoop-ozone/dev-support/checks/integration.sh -P${{ matrix.profile }} ${args}
         env:
-          DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+          DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
       - name: Summary of failures
         run: |
           if [[ -s "target/${{ github.job }}/summary.md" ]]; then
@@ -701,7 +701,7 @@ jobs:
         env:
           SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+          DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
       - name: Archive build results
         uses: actions/upload-artifact@v4
         with:
diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml
index cb765f36217..4154d1a9ac3 100644
--- a/.github/workflows/intermittent-test-check.yml
+++ b/.github/workflows/intermittent-test-check.yml
@@ -203,7 +203,7 @@ jobs:
           fi
         continue-on-error: true
         env:
-          DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+          DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
       - name: Summary of failures
         run: hadoop-ozone/dev-support/checks/_summary.sh target/unit/summary.txt
if: ${{ !cancelled() }} diff --git a/.github/workflows/repeat-acceptance.yml b/.github/workflows/repeat-acceptance.yml index 1c6fc3797ed..c36a841817e 100644 --- a/.github/workflows/repeat-acceptance.yml +++ b/.github/workflows/repeat-acceptance.yml @@ -110,7 +110,7 @@ jobs: - name: Run a full build run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true env: - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Store binaries for tests uses: actions/upload-artifact@v4 with: diff --git a/.mvn/develocity.xml b/.mvn/develocity.xml index 30295e9d204..5aa72e6acbb 100644 --- a/.mvn/develocity.xml +++ b/.mvn/develocity.xml @@ -22,8 +22,9 @@ + ozone - https://ge.apache.org + https://develocity.apache.org false diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 549a1cddcd3..8ceede33b9c 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,7 +24,7 @@ com.gradle develocity-maven-extension - 1.23 + 1.22.2 com.gradle diff --git a/hadoop-hdds/annotations/pom.xml b/hadoop-hdds/annotations/pom.xml index 0a961087040..84696c60945 100644 --- a/hadoop-hdds/annotations/pom.xml +++ b/hadoop-hdds/annotations/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -25,14 +22,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-annotation-processing 2.0.0-SNAPSHOT - Apache Ozone annotation processing tools for validating custom - annotations at compile time. - - Apache Ozone Annotation Processing jar + Apache Ozone Annotation Processing + Apache Ozone annotation processing tools for validating custom + annotations at compile time. - true + + true diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index 333b960fc24..d1ce5d53019 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -25,14 +22,31 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-client 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Client Library - Apache Ozone HDDS Client jar - - - + Apache Ozone HDDS Client + Apache Ozone Distributed Data Store Client Library + + com.google.guava + guava + + + io.opentracing + opentracing-api + + + io.opentracing + opentracing-util + + + jakarta.annotation + jakarta.annotation-api + + + org.apache.commons + commons-lang3 + org.apache.ozone hdds-common @@ -49,11 +63,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-interface-client - - - org.apache.commons - commons-lang3 - org.apache.ratis ratis-client @@ -74,31 +83,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-thirdparty-misc - org.slf4j slf4j-api - - com.google.guava - guava - - - - io.opentracing - opentracing-api - - - io.opentracing - opentracing-util - - - - jakarta.annotation - jakarta.annotation-api - - org.apache.ozone @@ -148,7 +137,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index a4b53a80a1e..e31a2942cb9 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -485,14 +485,16 @@ public ChecksumCombineMode getChecksumCombineMode() { try { return ChecksumCombineMode.valueOf(checksumCombineMode); } catch (IllegalArgumentException iae) { - LOG.warn("Bad checksum combine mode: {}. Using default {}", - checksumCombineMode, - ChecksumCombineMode.COMPOSITE_CRC.name()); - return ChecksumCombineMode.valueOf( - ChecksumCombineMode.COMPOSITE_CRC.name()); + LOG.warn("Bad checksum combine mode: {}.", + checksumCombineMode); + return null; } } + public void setChecksumCombineMode(String checksumCombineMode) { + this.checksumCombineMode = checksumCombineMode; + } + public void setEcReconstructStripeReadPoolLimit(int poolLimit) { this.ecReconstructStripeReadPoolLimit = poolLimit; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index 48c77f2c863..ac2a47ba972 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -253,7 +253,7 @@ public void write(ByteBuffer b, int off, int len) throws IOException { } while (len > 0) { allocateNewBufferIfNeeded(); - int writeLen = Math.min(len, currentBuffer.length()); + int writeLen = Math.min(len, currentBuffer.remaining()); final StreamBuffer buf = new StreamBuffer(b, off, writeLen); currentBuffer.put(buf); writeChunkIfNeeded(); @@ -265,7 +265,7 @@ public void write(ByteBuffer b, int off, int len) throws IOException { } private void writeChunkIfNeeded() throws IOException { - if (currentBuffer.length() == 0) { + if (currentBuffer.remaining() == 0) { writeChunk(currentBuffer); currentBuffer = null; } @@ -410,6 +410,10 @@ public void executePutBlock(boolean close, waitFuturesComplete(); final BlockData blockData = containerBlockData.build(); if (close) { + // HDDS-12007 changed datanodes to ignore the 
following PutBlock request. + // However, clients still have to send it for maintaining compatibility. + // Otherwise, new clients won't send a PutBlock. + // Then, old datanodes will fail since they expect a PutBlock. final ContainerCommandRequestProto putBlockRequest = ContainerProtocolCalls.getPutBlockRequest( xceiverClient.getPipeline(), blockData, true, tokenString); @@ -507,6 +511,22 @@ public void flush() throws IOException { } } + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override + public void hsync() throws IOException { + try { + if (!isClosed()) { + handleFlush(false); + } + } catch (Exception e) { + + } + } + public void waitFuturesComplete() throws IOException { try { CompletableFuture.allOf(futures.toArray(EMPTY_FUTURE_ARRAY)).get(); @@ -672,6 +692,7 @@ private void writeChunkToContainer(ByteBuffer buf) out.writeAsync(buf, StandardWriteOption.SYNC) : out.writeAsync(buf)) .whenCompleteAsync((r, e) -> { + metrics.decrPendingContainerOpsMetrics(ContainerProtos.Type.WriteChunk); if (e != null || !r.isSuccess()) { if (e == null) { e = new IOException("result is not success"); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java index b213bb1f4c6..baaff09e6cf 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdds.scm.storage; +import org.apache.hadoop.fs.Syncable; + import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; @@ -26,7 +28,7 @@ * This interface is similar to {@link java.io.OutputStream} * except that this class support {@link ByteBuffer} instead of byte[]. */ -public interface ByteBufferStreamOutput extends Closeable { +public interface ByteBufferStreamOutput extends Closeable, Syncable { /** * Similar to {@link java.io.OutputStream#write(byte[])}, * except that the parameter of this method is a {@link ByteBuffer}. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java index b889aa35e26..5bf6dcee826 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java @@ -39,8 +39,8 @@ public ByteBuffer duplicate() { return buffer.duplicate(); } - public int length() { - return buffer.limit() - buffer.position(); + public int remaining() { + return buffer.remaining(); } public int position() { diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index f2576f7cf08..c1a2749fde5 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,45 +21,45 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-common 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Common - Apache Ozone HDDS Common jar - - - + Apache Ozone HDDS Common + Apache Ozone Distributed Data Store Common - org.apache.ozone - hdds-hadoop-dependency-client + com.fasterxml.jackson.core + jackson-annotations - info.picocli - picocli + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 com.github.stephenc.jcip jcip-annotations - com.google.protobuf - protobuf-java - compile + com.google.errorprone + error_prone_annotations + true com.google.guava guava compile - - - org.glassfish.jaxb - jaxb-runtime - provided - - org.apache.commons - commons-lang3 + com.google.protobuf + protobuf-java + compile commons-collections @@ -73,33 +70,45 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io - com.fasterxml.jackson.core - jackson-annotations + commons-validator + commons-validator - com.fasterxml.jackson.core - jackson-core + info.picocli + picocli + - com.fasterxml.jackson.core - jackson-databind + io.dropwizard.metrics + metrics-core - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 + io.grpc + grpc-api + ${io.grpc.version} + compile + + + com.google.code.findbugs + jsr305 + + - org.apache.ozone - hdds-annotation-processing + io.jaegertracing + jaeger-client - org.apache.ozone - hdds-config + io.jaegertracing + jaeger-core - - javax.annotation - javax.annotation-api + io.opentracing + opentracing-api + + + io.opentracing + opentracing-util jakarta.annotation @@ -111,17 +120,49 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - io.dropwizard.metrics - metrics-core + javax.annotation + javax.annotation-api + + + org.apache.commons + commons-lang3 + + + org.apache.ozone + hdds-annotation-processing + + + org.apache.ozone + hdds-config + + + org.apache.ozone + hdds-hadoop-dependency-client + + + org.apache.ozone + hdds-interface-admin + + + org.apache.ozone + hdds-interface-client + + + org.apache.ratis + ratis-client - ratis-server-api org.apache.ratis + ratis-common + + + org.apache.ratis + ratis-grpc - ratis-metrics-dropwizard3 org.apache.ratis + ratis-metrics-dropwizard3 io.dropwizard.metrics @@ -129,37 +170,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - org.apache.ratis - ratis-common - - ratis-netty - org.apache.ratis - - - ratis-grpc - org.apache.ratis org.apache.ratis ratis-proto + org.apache.ratis - ratis-client + ratis-server-api org.apache.ratis ratis-thirdparty-misc - - com.google.errorprone - error_prone_annotations - true - - org.bouncycastle @@ -174,34 +201,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle bcutil-jdk18on - - commons-validator - commons-validator - - - io.jaegertracing - jaeger-client - - - io.jaegertracing - jaeger-core - org.jetbrains.kotlin kotlin-stdlib - - io.opentracing - opentracing-api - - - io.opentracing - opentracing-util - - - org.yaml - snakeyaml - org.reflections reflections @@ -211,32 +214,22 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> slf4j-api - org.apache.ozone - hdds-interface-client - - - org.apache.ozone - hdds-interface-admin + org.yaml + snakeyaml + - io.grpc - grpc-api - ${io.grpc.version} - compile - - - com.google.code.findbugs - jsr305 - - + org.glassfish.jaxb + jaxb-runtime + provided + com.codahale.metrics metrics-core test - org.apache.ozone @@ -258,27 +251,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + 
false ${basedir}/src/main/resources hdds-version-info.properties - false + true ${basedir}/src/main/resources hdds-version-info.properties - true - - - kr.motd.maven - os-maven-plugin - ${os-maven-plugin.version} - - org.apache.hadoop @@ -286,10 +272,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> version-info - generate-resources version-info + generate-resources ${basedir}/../ @@ -330,7 +316,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -347,5 +334,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + kr.motd.maven + os-maven-plugin + ${os-maven-plugin.version} + + diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java index 9545869e163..7f65010e2c0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hdds; +import org.apache.hadoop.ozone.Versioned; + /** * Base type for component version enums. */ -public interface ComponentVersion { +public interface ComponentVersion extends Versioned { /** * Returns the description of the version enum value. @@ -34,4 +36,9 @@ public interface ComponentVersion { * @return the version associated with the enum value. */ int toProtoValue(); + + @Override + default int version() { + return toProtoValue(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java index d4fde1b75cb..8f73787e82a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java @@ -20,6 +20,8 @@ import picocli.CommandLine; import java.util.ServiceLoader; +import java.util.SortedMap; +import java.util.TreeMap; /** * Interface for parent commands that accept subcommands to be dynamically registered. 
@@ -40,11 +42,13 @@ static void addSubcommands(CommandLine cli) { if (command instanceof ExtensibleParentCommand) { ExtensibleParentCommand parentCommand = (ExtensibleParentCommand) command; ServiceLoader subcommands = ServiceLoader.load(parentCommand.subcommandType()); + SortedMap sorted = new TreeMap<>(); for (Object subcommand : subcommands) { final CommandLine.Command commandAnnotation = subcommand.getClass().getAnnotation(CommandLine.Command.class); CommandLine subcommandCommandLine = new CommandLine(subcommand, cli.getFactory()); - cli.addSubcommand(commandAnnotation.name(), subcommandCommandLine); + sorted.put(commandAnnotation.name(), subcommandCommandLine); } + sorted.forEach(cli::addSubcommand); } // process subcommands recursively diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index 14d454431f9..3ec9048dfcf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -17,11 +17,10 @@ package org.apache.hadoop.hdds.cli; import java.io.IOException; -import java.util.HashMap; +import java.io.PrintWriter; import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.Callable; +import com.google.common.base.Strings; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -29,29 +28,34 @@ import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine; import picocli.CommandLine.ExitCode; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Option; /** * This is a generic parent class for all the ozone related cli tools. */ -public class GenericCli implements Callable, GenericParentCommand { +@CommandLine.Command +public abstract class GenericCli implements GenericParentCommand { public static final int EXECUTION_ERROR_EXIT_CODE = -1; + private final OzoneConfiguration config = new OzoneConfiguration(); + private final CommandLine cmd; + + private UserGroupInformation user; + @Option(names = {"--verbose"}, description = "More verbose output. Show the stack trace of the errors.") private boolean verbose; @Option(names = {"-D", "--set"}) - private Map configurationOverrides = new HashMap<>(); + public void setConfigurationOverrides(Map configOverrides) { + configOverrides.forEach(config::set); + } @Option(names = {"-conf"}) - private String configurationPath; - - private final CommandLine cmd; - private OzoneConfiguration conf; - private UserGroupInformation user; + public void setConfigurationPath(String configPath) { + config.addResource(new Path(configPath)); + } public GenericCli() { this(CommandLine.defaultFactory()); @@ -67,15 +71,6 @@ public GenericCli(CommandLine.IFactory factory) { ExtensibleParentCommand.addSubcommands(cmd); } - /** - * Handle the error when subcommand is required but not set. - */ - public static void missingSubcommand(CommandSpec spec) { - System.err.println("Incomplete command"); - spec.commandLine().usage(System.err); - System.exit(EXECUTION_ERROR_EXIT_CODE); - } - public void run(String[] argv) { int exitCode = execute(argv); @@ -92,38 +87,16 @@ public int execute(String[] argv) { protected void printError(Throwable error) { //message could be null in case of NPE. This is unexpected so we can //print out the stack trace. 
- if (verbose || error.getMessage() == null - || error.getMessage().length() == 0) { - error.printStackTrace(System.err); + if (verbose || Strings.isNullOrEmpty(error.getMessage())) { + error.printStackTrace(cmd.getErr()); } else { - System.err.println(error.getMessage().split("\n")[0]); + cmd.getErr().println(error.getMessage().split("\n")[0]); } } @Override - public Void call() throws Exception { - throw new MissingSubcommandException(cmd); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - OzoneConfiguration ozoneConf = new OzoneConfiguration(); - if (configurationPath != null) { - ozoneConf.addResource(new Path(configurationPath)); - } - if (configurationOverrides != null) { - for (Entry entry : configurationOverrides.entrySet()) { - ozoneConf.set(entry.getKey(), entry.getValue()); - } - } - return ozoneConf; - } - public OzoneConfiguration getOzoneConf() { - if (conf == null) { - conf = createOzoneConfiguration(); - } - return conf; + return config; } public UserGroupInformation getUser() throws IOException { @@ -134,7 +107,7 @@ public UserGroupInformation getUser() throws IOException { } @VisibleForTesting - public picocli.CommandLine getCmd() { + public CommandLine getCmd() { return cmd; } @@ -142,4 +115,12 @@ public picocli.CommandLine getCmd() { public boolean isVerbose() { return verbose; } + + protected PrintWriter out() { + return cmd.getOut(); + } + + protected PrintWriter err() { + return cmd.getErr(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java index 6abad3e32b8..e4dcd8d4ab5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java @@ -25,5 +25,6 @@ public interface GenericParentCommand { boolean isVerbose(); - OzoneConfiguration createOzoneConfiguration(); + /** Returns a cached configuration, i.e. it is created only once, subsequent calls return the same instance. */ + OzoneConfiguration getOzoneConf(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index c4b42acec43..85c82af942f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -153,6 +153,9 @@ public final class ScmConfigKeys { "ozone.chunk.read.mapped.buffer.max.count"; // this max_count could not be greater than Linux platform max_map_count which by default is 65530. 
public static final int OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT = 0; + public static final String OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY = + "ozone.chunk.read.netty.ChunkedNioFile"; + public static final boolean OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_DEFAULT = false; public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY = "ozone.scm.container.layout"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index d3f39c023b7..31615cf509b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -304,7 +304,7 @@ public static ContainerCommandResponseProto getReadContainerResponse( } public static ContainerCommandResponseProto getReadChunkResponse( - ContainerCommandRequestProto request, ChunkBuffer data, + ContainerCommandRequestProto request, ChunkBufferToByteString data, Function byteBufferToByteString) { boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java index cc6695dc7d6..08e29356343 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.ComponentVersion; import java.util.Arrays; +import java.util.Comparator; import java.util.Map; import static java.util.function.Function.identity; @@ -75,8 +76,8 @@ public static ClientVersion fromProtoValue(int value) { } private static ClientVersion latest() { - ClientVersion[] versions = ClientVersion.values(); - return versions[versions.length - 2]; + return Arrays.stream(ClientVersion.values()) + .max(Comparator.comparingInt(ComponentVersion::toProtoValue)).orElse(null); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 2d0b2bb56fd..d46cdeaf1fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -50,6 +50,9 @@ public enum OzoneManagerVersion implements ComponentVersion { S3_OBJECT_TAGGING_API(9, "OzoneManager version that supports S3 object tagging APIs, such as " + "PutObjectTagging, GetObjectTagging, and DeleteObjectTagging"), + S3_PART_AWARE_GET(10, "OzoneManager version that supports S3 get for a specific multipart " + + "upload part number"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown 
server version has arrived to the client."); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/Versioned.java similarity index 76% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/Versioned.java index d616d08e328..7f89b403b34 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSOAndOMRatis.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/Versioned.java @@ -15,13 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.fs.ozone; +package org.apache.hadoop.ozone; -import org.junit.jupiter.api.TestInstance; -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestO3FSWithFSOAndOMRatis extends AbstractOzoneFileSystemTestWithFSO { - TestO3FSWithFSOAndOMRatis() { - super(true); - } +/** + * Base class defining the version in the entire system. + */ +public interface Versioned { + int version(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java index 0f6482919a3..fffcf9c09e2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java @@ -46,7 +46,7 @@ public class ChecksumCache { private static final int BLOCK_CHUNK_SIZE = 4 * 1024 * 1024; // 4 MB public ChecksumCache(int bytesPerChecksum) { - LOG.info("Initializing ChecksumCache with bytesPerChecksum = {}", bytesPerChecksum); + LOG.debug("Initializing ChecksumCache with bytesPerChecksum = {}", bytesPerChecksum); this.prevChunkLength = 0; this.bytesPerChecksum = bytesPerChecksum; // Set initialCapacity to avoid costly resizes diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index a24d39e5dac..56541c57eff 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -22,17 +22,13 @@ import java.nio.channels.GatheringByteChannel; import java.util.List; import java.util.Objects; -import java.util.function.Function; -import java.util.function.Supplier; - -import org.apache.hadoop.hdds.scm.ByteStringConversion; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.util.UncheckedAutoCloseable; /** Buffer for a block chunk. */ -public interface ChunkBuffer extends UncheckedAutoCloseable { +public interface ChunkBuffer extends ChunkBufferToByteString, UncheckedAutoCloseable { /** Similar to {@link ByteBuffer#allocate(int)}. */ static ChunkBuffer allocate(int capacity) { @@ -142,53 +138,4 @@ default ChunkBuffer put(ByteString b) { * @return The number of bytes written, possibly zero */ long writeTo(GatheringByteChannel channel) throws IOException; - - /** - * Convert this buffer to a {@link ByteString}. - * The position and limit of this {@link ChunkBuffer} remains unchanged. 
- * The given function must preserve the position and limit - * of the input {@link ByteBuffer}. - */ - default ByteString toByteString(Function function) { - return toByteStringImpl(b -> applyAndAssertFunction(b, function, this)); - } - - /** - * Convert this buffer(s) to a list of {@link ByteString}. - * The position and limit of this {@link ChunkBuffer} remains unchanged. - * The given function must preserve the position and limit - * of the input {@link ByteBuffer}. - */ - default List toByteStringList( - Function function) { - return toByteStringListImpl(b -> applyAndAssertFunction(b, function, this)); - } - - // for testing - default ByteString toByteString() { - return toByteString(ByteStringConversion::safeWrap); - } - - ByteString toByteStringImpl(Function function); - - List toByteStringListImpl( - Function function); - - static void assertInt(int expected, int computed, Supplier prefix) { - if (expected != computed) { - throw new IllegalStateException(prefix.get() - + ": expected = " + expected + " but computed = " + computed); - } - } - - /** Apply the function and assert if it preserves position and limit. */ - static ByteString applyAndAssertFunction(ByteBuffer buffer, - Function function, Object name) { - final int pos = buffer.position(); - final int lim = buffer.limit(); - final ByteString bytes = function.apply(buffer); - assertInt(pos, buffer.position(), () -> name + ": Unexpected position"); - assertInt(lim, buffer.limit(), () -> name + ": Unexpected limit"); - return bytes; - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteString.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteString.java new file mode 100644 index 00000000000..384b661f6e0 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteString.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.hadoop.hdds.scm.ByteStringConversion; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +/** For converting to {@link ByteString}s. */ +public interface ChunkBufferToByteString { + /** + * Wrap the given list of {@link ByteBuf}s + * as a {@link ChunkBufferToByteString}. + */ + static ChunkBufferToByteString wrap(List buffers) { + return new ChunkBufferToByteStringByByteBufs(buffers); + } + + /** Release the underlying resource. */ + default void release() { + } + + /** + * Convert this buffer to a {@link ByteString}. 
+ * The position and limit of this {@link ChunkBufferToByteString} + * remains unchanged. + * The given function must preserve the position and limit + * of the input {@link ByteBuffer}. + */ + default ByteString toByteString(Function function) { + return toByteStringImpl(b -> applyAndAssertFunction(b, function, this)); + } + + /** + * Convert this buffer(s) to a list of {@link ByteString}. + * The position and limit of this {@link ChunkBufferToByteString} + * remains unchanged. + * The given function must preserve the position and limit + * of the input {@link ByteBuffer}. + */ + default List toByteStringList( + Function function) { + return toByteStringListImpl(b -> applyAndAssertFunction(b, function, this)); + } + + // for testing + default ByteString toByteString() { + return toByteString(ByteStringConversion::safeWrap); + } + + ByteString toByteStringImpl(Function function); + + List toByteStringListImpl( + Function function); + + static void assertInt(int expected, int computed, Supplier prefix) { + if (expected != computed) { + throw new IllegalStateException(prefix.get() + + ": expected = " + expected + " but computed = " + computed); + } + } + + /** Apply the function and assert if it preserves position and limit. */ + static ByteString applyAndAssertFunction(ByteBuffer buffer, + Function function, Object name) { + final int pos = buffer.position(); + final int lim = buffer.limit(); + final ByteString bytes = function.apply(buffer); + assertInt(pos, buffer.position(), () -> name + ": Unexpected position"); + assertInt(lim, buffer.limit(), () -> name + ": Unexpected limit"); + return bytes; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteStringByByteBufs.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteStringByByteBufs.java new file mode 100644 index 00000000000..167191f5a44 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteStringByByteBufs.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0
+ *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +/** + * A {@link ChunkBufferToByteString} implementation + * using a list of {@link ByteBuf}s. + */ +class ChunkBufferToByteStringByByteBufs implements ChunkBufferToByteString { + private final List buffers; + + private volatile List byteStrings; + private volatile ByteString concatenated; + + ChunkBufferToByteStringByByteBufs(List buffers) { + this.buffers = buffers == null || buffers.isEmpty() ? + Collections.emptyList() : Collections.unmodifiableList(buffers); + } + + @Override + public void release() { + for (ByteBuf buf : buffers) { + buf.release(); + } + } + + @Override + public ByteString toByteStringImpl(Function f) { + if (concatenated != null) { + return concatenated; + } + initByteStrings(f); + return Objects.requireNonNull(concatenated, "concatenated == null"); + } + + @Override + public List toByteStringListImpl(Function f) { + if (byteStrings != null) { + return byteStrings; + } + return initByteStrings(f); + } + + private synchronized List initByteStrings(Function f) { + if (byteStrings != null) { + return byteStrings; + } + if (buffers.isEmpty()) { + byteStrings = Collections.emptyList(); + concatenated = ByteString.EMPTY; + return byteStrings; + } + + final List array = new ArrayList<>(); + concatenated = convert(buffers, array, f); + byteStrings = Collections.unmodifiableList(array); + return byteStrings; + } + + static ByteString convert(List bufs, List byteStrings, Function f) { + ByteString concatenated = ByteString.EMPTY; + for (ByteBuf buf : bufs) { + for (ByteBuffer b : buf.nioBuffers()) { + final ByteString s = f.apply(b); + byteStrings.add(s); + concatenated = concatenated.concat(s); + } + } + return concatenated; + } + + @Override + public String toString() { + return getClass().getSimpleName() + ":n=" + buffers.size(); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index 732af4b6850..500acf74c98 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -82,7 +82,7 @@ private int getBufferCapacityAtIndex(int i) { } private void assertInt(int expected, int computed, String name, int i) { - ChunkBuffer.assertInt(expected, computed, + ChunkBufferToByteString.assertInt(expected, computed, () -> this + ": Unexpected " + name + " at index " + i); } @@ -182,7 +182,7 @@ private boolean assertRemainingList(ByteBuffer ith, int i) { } } final int j = i; - ChunkBuffer.assertInt(buffers.size(), i, + ChunkBufferToByteString.assertInt(buffers.size(), i, () -> "i = " + j + " != buffers.size() = " + buffers.size()); return true; } diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java index 92dd706f4bb..9ec9b4cb589 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.upgrade; +import org.apache.hadoop.ozone.Versioned; + import java.util.Optional; /** * Generic Layout feature interface for Ozone. */ -public interface LayoutFeature { +public interface LayoutFeature extends Versioned { String name(); int layoutVersion(); @@ -48,6 +50,11 @@ default String name() { void execute(T arg) throws Exception; } + @Override + default int version() { + return this.layoutVersion(); + } + /** * Phase of execution for this action. */ diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index fdeb5c1c043..dfd058f5d70 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1892,6 +1892,48 @@ interfaces by setting it to 0.0.0.0. + + ozone.s3g.webadmin.http.enabled + true + OZONE, S3GATEWAY + This option can be used to disable the web server which serves additional content in Ozone S3 Gateway. + + + + ozone.s3g.webadmin.https-address + + OZONE, S3GATEWAY + Ozone S3Gateway content server's HTTPS address and port. + + + + ozone.s3g.webadmin.https-bind-host + + OZONE, S3GATEWAY + The actual address the HTTPS server will bind to. If this optional address + is set, it overrides only the hostname portion of ozone.s3g.webadmin.https-address. + This is useful for making the Ozone S3Gateway HTTPS server listen on all + interfaces by setting it to 0.0.0.0. + + + + ozone.s3g.webadmin.http-address + 0.0.0.0:19878 + OZONE, S3GATEWAY + The address and port where Ozone S3Gateway serves + web content. + + + + ozone.s3g.webadmin.http-bind-host + 0.0.0.0 + OZONE, S3GATEWAY + The actual address the HTTP server will bind to. If this optional address + is set, it overrides only the hostname portion of ozone.s3g.webadmin.http-address. + This is useful for making the Ozone S3Gateway HTTP server listen on all + interfaces by setting it to 0.0.0.0. + + ozone.s3g.http.auth.kerberos.principal HTTP/_HOST@REALM @@ -2045,15 +2087,6 @@ - - ozone.om.ratis.enable - true - OZONE, OM, RATIS, MANAGEMENT - Property to enable or disable Ratis server on OM. - Please note - this is a temporary property to disable OM Ratis server. - - - ozone.om.ratis.port 9872 @@ -3323,7 +3356,7 @@ ozone.recon.om.snapshot.task.interval.delay - 10m + 5s OZONE, RECON, OM Interval in MINUTES by Recon to request OM DB Snapshot. @@ -3339,7 +3372,7 @@ recon.om.delta.update.limit - 2000 + 50000 OZONE, RECON Recon each time get a limited delta updates from OM. @@ -3360,7 +3393,7 @@ recon.om.delta.update.loop.limit - 10 + 50 OZONE, RECON The sync between Recon and OM consists of several small diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 60c63475ae3..5809828eccf 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,12 +21,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-config 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Config Tools - Apache Ozone HDDS Config jar - - - + Apache Ozone HDDS Config + Apache Ozone Distributed Data Store Config Tools diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index c21ca8203b5..1ee4017fedf 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,86 +21,79 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-container-service 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Container Service - Apache Ozone HDDS Container Service jar - - + Apache Ozone HDDS Container Service + Apache Ozone Distributed Data Store Container Service + - org.apache.ozone - hdds-common + com.fasterxml.jackson.core + jackson-annotations - org.apache.ozone - hdds-config + com.fasterxml.jackson.core + jackson-databind - org.apache.ozone - hdds-interface-client + com.github.luben + zstd-jni - org.apache.ozone - hdds-interface-server + com.google.guava + guava - org.apache.ozone - hdds-managed-rocksdb + com.google.protobuf + protobuf-java - - org.apache.commons - commons-compress + commons-codec + commons-codec - org.apache.logging.log4j - log4j-api + commons-collections + commons-collections commons-io commons-io - com.github.luben - zstd-jni + info.picocli + picocli - org.apache.ozone - hdds-server-framework + io.dropwizard.metrics + metrics-core - org.apache.ozone - hdds-client + io.netty + netty-buffer - commons-codec - commons-codec + io.netty + netty-codec - commons-collections - commons-collections + io.netty + netty-common - io.dropwizard.metrics - metrics-core + io.netty + netty-handler - - org.yaml - snakeyaml + io.netty + netty-transport - - org.apache.ozone - hdds-docs - provided + io.opentracing + opentracing-api - - org.apache.ratis - ratis-server + io.opentracing + opentracing-util - jakarta.annotation jakarta.annotation-api @@ -113,48 +103,48 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jakarta.xml.bind-api - org.glassfish.jaxb - jaxb-runtime + org.apache.commons + commons-compress - info.picocli - picocli + org.apache.commons + commons-lang3 - io.netty - netty-transport + org.apache.commons + commons-text - io.netty - netty-buffer + org.apache.logging.log4j + log4j-api - io.netty - netty-common + org.apache.ozone + hdds-client - io.netty - netty-codec + org.apache.ozone + hdds-common - io.netty - netty-handler + org.apache.ozone + hdds-config - io.opentracing - opentracing-api + org.apache.ozone + hdds-interface-client - io.opentracing - opentracing-util + org.apache.ozone + hdds-interface-server - org.apache.commons - commons-lang3 + org.apache.ozone + hdds-managed-rocksdb - org.apache.commons - commons-text + org.apache.ozone + hdds-server-framework org.apache.ratis @@ -176,6 +166,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-proto + + org.apache.ratis + ratis-server + org.apache.ratis ratis-server-api @@ -184,7 +178,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-thirdparty-misc - + + org.glassfish.jaxb + jaxb-runtime + org.rocksdb rocksdbjni @@ -193,22 +190,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j slf4j-api - - com.fasterxml.jackson.core - jackson-annotations - - - 
com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava + org.yaml + snakeyaml - com.google.protobuf - protobuf-java + org.apache.ozone + hdds-docs + provided @@ -260,7 +249,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -282,17 +272,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> copy-common-html - prepare-package unpack + prepare-package org.apache.ozone hdds-server-framework - ${project.build.outputDirectory} - + ${project.build.outputDirectory} webapps/static/**/*.* diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index de21e37503a..a6980e232b1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -97,11 +98,14 @@ hidden = true, description = "Start the datanode for ozone", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class HddsDatanodeService extends GenericCli implements ServicePlugin { +public class HddsDatanodeService extends GenericCli implements Callable, ServicePlugin { private static final Logger LOG = LoggerFactory.getLogger( HddsDatanodeService.class); + public static final String TESTING_DATANODE_VERSION_INITIAL = "testing.hdds.datanode.version.initial"; + public static final String TESTING_DATANODE_VERSION_CURRENT = "testing.hdds.datanode.version.current"; + private OzoneConfiguration conf; private SecurityConfig secConf; private DatanodeDetails datanodeDetails; @@ -169,7 +173,7 @@ public static Logger getLogger() { @Override public Void call() throws Exception { - OzoneConfiguration configuration = createOzoneConfiguration(); + OzoneConfiguration configuration = getOzoneConf(); if (printBanner) { HddsServerUtil.startupShutdownMessage(HddsVersionInfo.HDDS_VERSION_INFO, HddsDatanodeService.class, args, LOG, configuration); @@ -432,15 +436,14 @@ private DatanodeDetails initializeDatanodeDetails() DatanodeDetails details; if (idFile.exists()) { details = ContainerUtils.readDatanodeDetailsFrom(idFile); - // Current version is always overridden to the latest - details.setCurrentVersion(getDefaultCurrentVersion()); } else { // There is no datanode.id file, this might be the first time datanode // is started. details = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build(); - details.setInitialVersion(getDefaultInitialVersion()); - details.setCurrentVersion(getDefaultCurrentVersion()); + details.setInitialVersion(getInitialVersion()); } + // Current version is always overridden to the latest + details.setCurrentVersion(getCurrentVersion()); return details; } @@ -680,16 +683,14 @@ private String reconfigReplicationStreamsLimit(String value) { /** * Returns the initial version of the datanode. */ - @VisibleForTesting - public static int getDefaultInitialVersion() { - return DatanodeVersion.CURRENT_VERSION; + private int getInitialVersion() { + return conf.getInt(TESTING_DATANODE_VERSION_INITIAL, DatanodeVersion.CURRENT_VERSION); } /** * Returns the current version of the datanode. 
*/ - @VisibleForTesting - public static int getDefaultCurrentVersion() { - return DatanodeVersion.CURRENT_VERSION; + private int getCurrentVersion() { + return conf.getInt(TESTING_DATANODE_VERSION_CURRENT, DatanodeVersion.CURRENT_VERSION); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java index 210c538f274..99f56baa799 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java @@ -34,6 +34,7 @@ */ public enum ContainerLayoutVersion { + @Deprecated /* Use FILE_PER_BLOCK instead */ FILE_PER_CHUNK(1, "One file per chunk") { @Override public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { @@ -47,7 +48,7 @@ public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { } }; - private static final ContainerLayoutVersion + public static final ContainerLayoutVersion DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK; private static final List CONTAINER_LAYOUT_VERSIONS = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java index 5f1914402d0..20cbdf8f02f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java @@ -19,8 +19,10 @@ package org.apache.hadoop.ozone.container.common.transport.server; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; +import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; @@ -97,9 +99,13 @@ public StreamObserver send( @Override public void onNext(ContainerCommandRequestProto request) { + final DispatcherContext context = request.getCmdType() != Type.ReadChunk ? 
null + : DispatcherContext.newBuilder(DispatcherContext.Op.HANDLE_READ_CHUNK) + .setReleaseSupported(true) + .build(); + try { - ContainerCommandResponseProto resp = - dispatcher.dispatch(request, null); + final ContainerCommandResponseProto resp = dispatcher.dispatch(request, context); responseObserver.onNext(resp); } catch (Throwable e) { LOG.error("Got exception when processing" @@ -108,6 +114,9 @@ public void onNext(ContainerCommandRequestProto request) { responseObserver.onError(e); } finally { zeroCopyMessageMarshaller.release(request); + if (context != null) { + context.release(); + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 23be4138b60..a0325311621 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -524,21 +524,6 @@ private ContainerCommandResponseProto dispatchCommand( return response; } - private CompletableFuture link( - ContainerCommandRequestProto requestProto, LogEntryProto entry) { - return CompletableFuture.supplyAsync(() -> { - final DispatcherContext context = DispatcherContext - .newBuilder(DispatcherContext.Op.STREAM_LINK) - .setTerm(entry.getTerm()) - .setLogIndex(entry.getIndex()) - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA) - .setContainer2BCSIDMap(container2BCSIDMap) - .build(); - - return dispatchCommand(requestProto, context); - }, executor); - } - private CompletableFuture writeStateMachineData( ContainerCommandRequestProto requestProto, long entryIndex, long term, long startTime) { @@ -689,29 +674,8 @@ public CompletableFuture link(DataStream stream, LogEntryProto entry) { final KeyValueStreamDataChannel kvStreamDataChannel = (KeyValueStreamDataChannel) dataChannel; - - final ContainerCommandRequestProto request = - kvStreamDataChannel.getPutBlockRequest(); - - return link(request, entry).whenComplete((response, e) -> { - if (e != null) { - LOG.warn("Failed to link logEntry {} for request {}", - TermIndex.valueOf(entry), request, e); - } - if (response != null) { - final ContainerProtos.Result result = response.getResult(); - if (LOG.isDebugEnabled()) { - LOG.debug("{} to link logEntry {} for request {}, response: {}", - result, TermIndex.valueOf(entry), request, response); - } - if (result == ContainerProtos.Result.SUCCESS) { - kvStreamDataChannel.setLinked(); - return; - } - } - // failed to link, cleanup - kvStreamDataChannel.cleanUp(); - }); + kvStreamDataChannel.setLinked(); + return CompletableFuture.completedFuture(null); } private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java index 15af2645352..45bfbbc12a1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.util.Time; import org.apache.ratis.server.protocol.TermIndex; +import org.apache.ratis.util.Preconditions; import java.util.Map; import java.util.Objects; @@ -119,7 +120,10 @@ public static Op op(DispatcherContext context) { private final Map container2BCSIDMap; - private long startTime; + private final boolean releaseSupported; + private volatile Runnable releaseMethod; + + private final long startTime = Time.monotonicNowNanos(); private DispatcherContext(Builder b) { this.op = Objects.requireNonNull(b.op, "op == null"); @@ -127,7 +131,7 @@ private DispatcherContext(Builder b) { this.logIndex = b.logIndex; this.stage = b.stage; this.container2BCSIDMap = b.container2BCSIDMap; - this.startTime = Time.monotonicNowNanos(); + this.releaseSupported = b.releaseSupported; } /** Use {@link DispatcherContext#op(DispatcherContext)} for handling null. */ @@ -155,6 +159,21 @@ public long getStartTime() { return startTime; } + public boolean isReleaseSupported() { + return releaseSupported; + } + + public void setReleaseMethod(Runnable releaseMethod) { + Preconditions.assertTrue(releaseSupported, "Unsupported release method"); + this.releaseMethod = releaseMethod; + } + + public void release() { + if (releaseMethod != null) { + releaseMethod.run(); + } + } + @Override public String toString() { return op + "-" + stage + TermIndex.valueOf(term, logIndex); @@ -173,6 +192,7 @@ public static final class Builder { private long term; private long logIndex; private Map container2BCSIDMap; + private boolean releaseSupported; private Builder(Op op) { this.op = op; @@ -221,6 +241,12 @@ public Builder setContainer2BCSIDMap(Map map) { this.container2BCSIDMap = map; return this; } + + public Builder setReleaseSupported(boolean releaseSupported) { + this.releaseSupported = releaseSupported; + return this; + } + /** * Builds and returns DispatcherContext instance. 
* diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 06a4543bd79..0ef8d5e68a0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; @@ -59,6 +60,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.common.helpers.BlockData; @@ -123,6 +125,7 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.DEFAULT_LAYOUT; import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult; import org.apache.hadoop.util.Time; @@ -190,6 +193,14 @@ public KeyValueHandler(ConfigurationSource config, byteBufferToByteString = ByteStringConversion .createByteBufferConversion(isUnsafeByteBufferConversionEnabled); + + if (ContainerLayoutVersion.getConfiguredVersion(conf) == + ContainerLayoutVersion.FILE_PER_CHUNK) { + LOG.warn("FILE_PER_CHUNK layout is not supported. Falling back to default : {}.", + DEFAULT_LAYOUT.name()); + OzoneConfiguration.of(conf).set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, + DEFAULT_LAYOUT.name()); + } } @VisibleForTesting @@ -799,7 +810,8 @@ ContainerCommandResponseProto handleReadChunk( } return malformedRequest(request); } - ChunkBuffer data; + + final ChunkBufferToByteString data; try { BlockID blockID = BlockID.getFromProtobuf( request.getReadChunk().getBlockID()); @@ -861,11 +873,16 @@ ContainerCommandResponseProto handleDeleteChunk( "using BlockDeletingService"); } - private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) + private void validateChunkChecksumData(ChunkBufferToByteString data, ChunkInfo info) throws StorageContainerException { if (validateChunkChecksumData) { try { - Checksum.verifyChecksum(data.duplicate(data.position(), data.limit()), info.getChecksumData(), 0); + if (data instanceof ChunkBuffer) { + final ChunkBuffer b = (ChunkBuffer)data; + Checksum.verifyChecksum(b.duplicate(b.position(), b.limit()), info.getChecksumData(), 0); + } else { + Checksum.verifyChecksum(data.toByteString(byteBufferToByteString), info.getChecksumData(), 0); + } } catch (OzoneChecksumException ex) { throw ChunkUtils.wrapInStorageContainerException(ex); } @@ -1058,8 +1075,8 @@ ContainerCommandResponseProto handleGetSmallFile( // of ByteStrings. 
chunkInfo.setReadDataIntoSingleBuffer(true); } - ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID, - chunkInfo, dispatcherContext); + final ChunkBufferToByteString data = chunkManager.readChunk( + kvContainer, blockID, chunkInfo, dispatcherContext); dataBuffers.addAll(data.toByteStringList(byteBufferToByteString)); chunkInfoProto = chunk; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java index dc048ac16aa..8ada6b10bcf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java @@ -49,7 +49,9 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.common.utils.BufferUtils; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; +import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager; import org.apache.hadoop.util.Time; @@ -68,6 +70,9 @@ import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; import org.apache.ratis.util.AutoCloseableLock; +import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.ratis.thirdparty.io.netty.buffer.PooledByteBufAllocator; +import org.apache.ratis.thirdparty.io.netty.handler.stream.ChunkedNioFile; import org.apache.ratis.util.function.CheckedFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -220,7 +225,7 @@ public static ChunkBuffer readData(long len, int bufferCapacity, } private static void readData(File file, long offset, long len, - CheckedFunction readMethod, + CheckedFunction readMethod, HddsVolume volume) throws StorageContainerException { final Path path = file.toPath(); @@ -230,7 +235,7 @@ private static void readData(File file, long offset, long len, try (AutoCloseableLock ignoredLock = getFileReadLock(path); FileChannel channel = open(path, READ_OPTIONS, NO_ATTRIBUTES)) { bytesRead = readMethod.apply(channel); - } catch (IOException e) { + } catch (Exception e) { onFailure(volume); throw wrapInStorageContainerException(e); } @@ -307,6 +312,37 @@ private static ChunkBuffer readData(File file, int chunkSize, } } + public static ChunkBufferToByteString readData(File file, long chunkSize, + long offset, long length, HddsVolume volume, DispatcherContext context) + throws StorageContainerException { + final List buffers = readDataNettyChunkedNioFile( + file, Math.toIntExact(chunkSize), offset, length, volume); + final ChunkBufferToByteString b = ChunkBufferToByteString.wrap(buffers); + context.setReleaseMethod(b::release); + return b; + } + + /** + * Read data from the given file using {@link ChunkedNioFile}. + * + * @return a list of {@link ByteBuf} containing the data. 
+ */ + private static List readDataNettyChunkedNioFile( + File file, int chunkSize, long offset, long length, HddsVolume volume) throws StorageContainerException { + final List buffers = new ArrayList<>(Math.toIntExact((length - 1) / chunkSize) + 1); + readData(file, offset, length, channel -> { + final ChunkedNioFile f = new ChunkedNioFile(channel, offset, length, chunkSize); + long readLen = 0; + while (readLen < length) { + final ByteBuf buf = f.readChunk(PooledByteBufAllocator.DEFAULT); + readLen += buf.readableBytes(); + buffers.add(buf); + } + return readLen; + }, volume); + return buffers; + } + /** * Validates chunk data and returns a file object to Chunk File that we are * expected to write data to. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 6232b843567..a9dfcdc57a0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -65,6 +65,7 @@ public class BlockManagerImpl implements BlockManager { private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; private final int readMappedBufferMaxCount; + private final boolean readNettyChunkedNioFile; /** * Constructs a Block Manager. @@ -83,6 +84,9 @@ public BlockManagerImpl(ConfigurationSource conf) { this.readMappedBufferMaxCount = config.getInt( ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_KEY, ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT); + this.readNettyChunkedNioFile = config.getBoolean( + ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY, + ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_DEFAULT); } @Override @@ -304,15 +308,21 @@ public int getDefaultReadBufferCapacity() { return defaultReadBufferCapacity; } + @Override public int getReadMappedBufferThreshold() { return readMappedBufferThreshold; } - /** @return the max count of memory mapped buffers for read. */ + @Override public int getReadMappedBufferMaxCount() { return readMappedBufferMaxCount; } + @Override + public boolean isReadNettyChunkedNioFile() { + return readNettyChunkedNioFile; + } + /** * Deletes an existing block. 
* As Deletion is handled by BlockDeletingService, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java index 6a1d5533cf2..89854169388 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -104,15 +105,15 @@ public void finalizeWriteChunk(KeyValueContainer kvContainer, } @Override - public ChunkBuffer readChunk(Container container, BlockID blockID, + public ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException { - ChunkBuffer data = selectHandler(container) + final ChunkBufferToByteString data = selectHandler(container) .readChunk(container, blockID, info, dispatcherContext); Preconditions.checkState(data != null); - container.getContainerData().updateReadStats(data.remaining()); + container.getContainerData().updateReadStats(info.getLen()); return data; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 4ca578d7717..26ccc5379b6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -77,6 +78,8 @@ public class FilePerBlockStrategy implements ChunkManager { private final int readMappedBufferThreshold; private final int readMappedBufferMaxCount; private final MappedBufferManager mappedBufferManager; + + private final boolean readNettyChunkedNioFile; private final VolumeSet volumeSet; public FilePerBlockStrategy(boolean sync, BlockManager manager, @@ -95,6 +98,8 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager, } else { mappedBufferManager = null; } + + this.readNettyChunkedNioFile = manager != null && manager.isReadNettyChunkedNioFile(); } private static void checkLayoutVersion(Container container) { @@ -180,7 +185,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, } @Override - public 
ChunkBuffer readChunk(Container container, BlockID blockID, + public ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException { @@ -204,6 +209,10 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, long offset = info.getOffset(); int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, defaultReadBufferCapacity); + + if (readNettyChunkedNioFile && dispatcherContext != null && dispatcherContext.isReleaseSupported()) { + return ChunkUtils.readData(chunkFile, bufferCapacity, offset, len, volume, dispatcherContext); + } return ChunkUtils.readData(len, bufferCapacity, chunkFile, offset, volume, readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index 6ac88cad7f5..344cd0a9f0c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; @@ -69,6 +70,8 @@ public class FilePerChunkStrategy implements ChunkManager { private final int readMappedBufferThreshold; private final int readMappedBufferMaxCount; private final MappedBufferManager mappedBufferManager; + + private final boolean readNettyChunkedNioFile; private final VolumeSet volumeSet; public FilePerChunkStrategy(boolean sync, BlockManager manager, @@ -88,6 +91,8 @@ public FilePerChunkStrategy(boolean sync, BlockManager manager, } else { mappedBufferManager = null; } + + this.readNettyChunkedNioFile = manager != null && manager.isReadNettyChunkedNioFile(); } private static void checkLayoutVersion(Container container) { @@ -214,7 +219,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, * TODO: Explore if we need to do that for ozone. 
*/ @Override - public ChunkBuffer readChunk(Container container, BlockID blockID, + public ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException { @@ -274,6 +279,9 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, if (file.exists()) { long offset = info.getOffset() - chunkFileOffset; Preconditions.checkState(offset >= 0); + if (readNettyChunkedNioFile && dispatcherContext != null && dispatcherContext.isReleaseSupported()) { + return ChunkUtils.readData(file, bufferCapacity, offset, len, volume, dispatcherContext); + } return ChunkUtils.readData(len, bufferCapacity, file, offset, volume, readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java index 7500860229d..52838aff2e2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java @@ -20,14 +20,11 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; import org.apache.ratis.util.ReferenceCountedObject; import org.slf4j.Logger; @@ -36,9 +33,7 @@ import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; /** * This class is used to get the DataChannel for streaming. 
@@ -53,8 +48,6 @@ interface WriteMethod { private final Buffers buffers = new Buffers( BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX); - private final AtomicReference putBlockRequest - = new AtomicReference<>(); private final AtomicBoolean closed = new AtomicBoolean(); KeyValueStreamDataChannel(File file, ContainerData containerData, @@ -90,7 +83,7 @@ static int writeBuffers(ReferenceCountedObject src, return src.get().remaining(); } - private static void writeFully(ByteBuffer b, WriteMethod writeMethod) + static void writeFully(ByteBuffer b, WriteMethod writeMethod) throws IOException { while (b.remaining() > 0) { final int written = writeMethod.applyAsInt(b); @@ -100,11 +93,6 @@ private static void writeFully(ByteBuffer b, WriteMethod writeMethod) } } - public ContainerCommandRequestProto getPutBlockRequest() { - return Objects.requireNonNull(putBlockRequest.get(), - () -> "putBlockRequest == null, " + this); - } - void assertOpen() throws IOException { if (closed.get()) { throw new IOException("Already closed: " + this); @@ -115,7 +103,7 @@ void assertOpen() throws IOException { public void close() throws IOException { if (closed.compareAndSet(false, true)) { try { - putBlockRequest.set(closeBuffers(buffers, super::writeFileChannel)); + writeBuffers(); } finally { super.close(); } @@ -130,22 +118,23 @@ protected void cleanupInternal() throws IOException { } } - static ContainerCommandRequestProto closeBuffers( - Buffers buffers, WriteMethod writeMethod) throws IOException { + /** + * Write the data in {@link #buffers} to the channel. + * Note that the PutBlock proto at the end is ignored; see HDDS-12007. + */ + private void writeBuffers() throws IOException { final ReferenceCountedObject ref = buffers.pollAll(); final ByteBuf buf = ref.retain(); - final ContainerCommandRequestProto putBlockRequest; try { - putBlockRequest = readPutBlockRequest(buf); + setEndIndex(buf); // write the remaining data - writeFully(buf.nioBuffer(), writeMethod); + writeFully(buf.nioBuffer(), super::writeFileChannel); } finally { ref.release(); } - return putBlockRequest; } - private static int readProtoLength(ByteBuf b, int lengthIndex) { + static int readProtoLength(ByteBuf b, int lengthIndex) { final int readerIndex = b.readerIndex(); LOG.debug("{}, lengthIndex = {}, readerIndex = {}", b, lengthIndex, readerIndex); @@ -158,8 +147,8 @@ private static int readProtoLength(ByteBuf b, int lengthIndex) { return b.nioBuffer().getInt(); } - static ContainerCommandRequestProto readPutBlockRequest(ByteBuf b) - throws IOException { + /** Set end index to the proto index in order to ignore the proto. 
*/ + static void setEndIndex(ByteBuf b) { // readerIndex protoIndex lengthIndex readerIndex+readableBytes // V V V V // format: |--- data ---|--- proto ---|--- proto length (4 bytes) ---| @@ -168,37 +157,7 @@ static ContainerCommandRequestProto readPutBlockRequest(ByteBuf b) final int protoLength = readProtoLength(b.duplicate(), lengthIndex); final int protoIndex = lengthIndex - protoLength; - final ContainerCommandRequestProto proto; - try { - proto = readPutBlockRequest(b.slice(protoIndex, protoLength).nioBuffer()); - } catch (Throwable t) { - RatisHelper.debug(b, "catch", LOG); - throw new IOException("Failed to readPutBlockRequest from " + b - + ": readerIndex=" + readerIndex - + ", protoIndex=" + protoIndex - + ", protoLength=" + protoLength - + ", lengthIndex=" + lengthIndex, t); - } - // set index for reading data b.writerIndex(protoIndex); - - return proto; - } - - private static ContainerCommandRequestProto readPutBlockRequest(ByteBuffer b) - throws IOException { - RatisHelper.debug(b, "readPutBlockRequest", LOG); - final ByteString byteString = ByteString.copyFrom(b); - - final ContainerCommandRequestProto request = - ContainerCommandRequestMessage.toProto(byteString, null); - - if (!request.hasPutBlock()) { - throw new StorageContainerException( - "Malformed PutBlock request. trace ID: " + request.getTraceID(), - ContainerProtos.Result.MALFORMED_REQUEST); - } - return request; } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java index 256d357a31d..53f5f154cce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java @@ -102,6 +102,9 @@ void finalizeBlock(Container container, BlockID blockId) /** @return the max count of memory mapped buffers to read. */ int getReadMappedBufferMaxCount(); + /** @return true iff Netty ChunkedNioFile read is enabled. */ + boolean isReadNettyChunkedNioFile(); + /** * Shutdown ContainerManager. */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java index 7751dba429d..6e5d064b7da 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -75,7 +76,7 @@ default void writeChunk(Container container, BlockID blockID, ChunkInfo info, * TODO: Right now we do not support partial reads and writes of chunks. * TODO: Explore if we need to do that for ozone. 
*/ - ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, + ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException; /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 3ff8f9e625d..073cbfa6edd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -25,6 +25,7 @@ import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -48,6 +49,7 @@ import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; @@ -668,9 +670,9 @@ public void testWritReadManyChunks(ContainerTestVersionInfo versionInfo) // Read chunk via ReadChunk call. for (int x = 0; x < chunkCount; x++) { ChunkInfo info = chunks.get(x); - final ChunkBuffer data = chunkManager.readChunk(container, blockID, info, + final ChunkBufferToByteString data = chunkManager.readChunk(container, blockID, info, DispatcherContext.getHandleReadChunk()); - ChecksumData checksumData = checksum.computeChecksum(data); + ChecksumData checksumData = checksum.computeChecksum(Collections.singletonList(data.toByteString())); assertEquals(info.getChecksumData(), checksumData); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 655ecbb48b4..d02910358de 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -292,6 +293,13 @@ public void testVolumeSetInKeyValueHandler() throws Exception { keyValueHandler.getVolumeChoosingPolicyForTesting() .getClass().getName()); + // Ensures that KeyValueHandler falls back to FILE_PER_BLOCK. 
+ conf.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, "FILE_PER_CHUNK"); + new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, + metrics, c -> { }); + assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK, + conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK)); + //Set a class which is not of sub class of VolumeChoosingPolicy conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java index 714426108bc..b249aa4fcfb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.keyvalue.impl; -import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.jupiter.api.Test; @@ -50,7 +50,7 @@ public void dummyManagerDoesNotWriteToFile() throws Exception { public void dummyManagerReadsAnyChunk() throws Exception { ChunkManager dummy = createTestSubject(); - ChunkBuffer dataRead = dummy.readChunk(getKeyValueContainer(), + final ChunkBufferToByteString dataRead = dummy.readChunk(getKeyValueContainer(), getBlockID(), getChunkInfo(), null); assertNotNull(dataRead); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java index 36d71655192..d2fd394271c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; @@ -92,7 +93,7 @@ public void testMultipleWriteSingleRead() throws Exception { // Request to read the whole data in a single go. 
ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, datalen * chunkCount); - ChunkBuffer chunk = + final ChunkBufferToByteString chunk = subject.readChunk(container, blockID, largeChunk, null); ByteBuffer newdata = chunk.toByteString().asReadOnlyByteBuffer(); @@ -119,18 +120,16 @@ public void testPartialRead() throws Exception { ChunkManager subject = createTestSubject(); subject.writeChunk(container, blockID, info, data, WRITE_STAGE); - ChunkBuffer readData = subject.readChunk(container, blockID, info, null); + final ChunkBufferToByteString readData = subject.readChunk(container, blockID, info, null); // data will be ChunkBufferImplWithByteBuffer and readData will return // ChunkBufferImplWithByteBufferList. Hence, convert both ByteStrings // before comparing. - assertEquals(data.rewind().toByteString(), - readData.rewind().toByteString()); + assertEquals(data.rewind().toByteString(), readData.toByteString()); ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length); - ChunkBuffer readData2 = subject.readChunk(container, blockID, info2, null); + final ChunkBufferToByteString readData2 = subject.readChunk(container, blockID, info2, null); assertEquals(length, info2.getLen()); - assertEquals(data.rewind().toByteString().substring(start, start + length), - readData2.rewind().toByteString()); + assertEquals(data.rewind().toByteString().substring(start, start + length), readData2.toByteString()); } @Override diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java index e6067e5c560..99793a0201f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java @@ -22,10 +22,14 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; +import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.WriteMethod; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.client.api.DataStreamOutput; import org.apache.ratis.io.FilePositionCount; import org.apache.ratis.io.StandardWriteOption; @@ -58,9 +62,8 @@ import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX; import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.executePutBlockClose; import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.getProtoLength; -import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.closeBuffers; -import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.readPutBlockRequest; import 
static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.writeBuffers; +import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.writeFully; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -106,6 +109,49 @@ public void testSerialization() throws Exception { assertEquals(PUT_BLOCK_PROTO, proto); } + static ContainerCommandRequestProto readPutBlockRequest(ByteBuf b) throws IOException { + // readerIndex protoIndex lengthIndex readerIndex+readableBytes + // V V V V + // format: |--- data ---|--- proto ---|--- proto length (4 bytes) ---| + final int readerIndex = b.readerIndex(); + final int lengthIndex = readerIndex + b.readableBytes() - 4; + final int protoLength = KeyValueStreamDataChannel.readProtoLength(b.duplicate(), lengthIndex); + final int protoIndex = lengthIndex - protoLength; + + final ContainerCommandRequestProto proto; + try { + proto = readPutBlockRequest(b.slice(protoIndex, protoLength).nioBuffer()); + } catch (Throwable t) { + RatisHelper.debug(b, "catch", LOG); + throw new IOException("Failed to readPutBlockRequest from " + b + + ": readerIndex=" + readerIndex + + ", protoIndex=" + protoIndex + + ", protoLength=" + protoLength + + ", lengthIndex=" + lengthIndex, t); + } + + // set index for reading data + b.writerIndex(protoIndex); + + return proto; + } + + private static ContainerCommandRequestProto readPutBlockRequest(ByteBuffer b) + throws IOException { + RatisHelper.debug(b, "readPutBlockRequest", LOG); + final ByteString byteString = ByteString.copyFrom(b); + + final ContainerCommandRequestProto request = + ContainerCommandRequestMessage.toProto(byteString, null); + + if (!request.hasPutBlock()) { + throw new StorageContainerException( + "Malformed PutBlock request. trace ID: " + request.getTraceID(), + Result.MALFORMED_REQUEST); + } + return request; + } + @Test public void testBuffers() throws Exception { final ExecutorService executor = Executors.newFixedThreadPool(32); @@ -230,6 +276,21 @@ public CompletableFuture closeAsync() { new Reply(true, 0, putBlockRequest)); } + static ContainerCommandRequestProto closeBuffers( + Buffers buffers, WriteMethod writeMethod) throws IOException { + final ReferenceCountedObject ref = buffers.pollAll(); + final ByteBuf buf = ref.retain(); + final ContainerCommandRequestProto putBlockRequest; + try { + putBlockRequest = readPutBlockRequest(buf); + // write the remaining data + writeFully(buf.nioBuffer(), writeMethod); + } finally { + ref.release(); + } + return putBlockRequest; + } + @Override public CompletableFuture writeAsync( FilePositionCount filePositionCount, WriteOption... writeOptions) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java deleted file mode 100644 index d4a27e74cda..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ /dev/null @@ -1,604 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.upgrade; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.container.common.ScmTestMock; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.replication.ContainerImporter; -import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; -import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource; -import org.apache.ozone.test.LambdaTestUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.File; -import java.io.FileOutputStream; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Tests upgrading a single datanode from pre-SCM HA volume format that used - * SCM ID to the post-SCM HA volume format using cluster ID. If SCM HA was - * already being used before the upgrade, there should be no changes. 
- */ -public class TestDatanodeUpgradeToScmHA { - @TempDir - private Path tempFolder; - - private DatanodeStateMachine dsm; - private ContainerDispatcher dispatcher; - private OzoneConfiguration conf; - private static final String CLUSTER_ID = "clusterID"; - private boolean scmHAAlreadyEnabled; - - private RPC.Server scmRpcServer; - private InetSocketAddress address; - private ScmTestMock scmServerImpl; - - private void setScmHAEnabled(boolean enableSCMHA) - throws Exception { - this.scmHAAlreadyEnabled = enableSCMHA; - conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, scmHAAlreadyEnabled); - setup(); - } - - private void setup() throws Exception { - address = SCMTestUtils.getReuseableAddress(); - conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address); - } - - @AfterEach - public void teardown() throws Exception { - if (scmRpcServer != null) { - scmRpcServer.stop(); - } - - if (dsm != null) { - dsm.close(); - } - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testReadsDuringFinalization(boolean enableSCMHA) - throws Exception { - setScmHAEnabled(enableSCMHA); - // start DN and SCM - startScmServer(); - UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - // Add data to read. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline); - - // Create thread to keep reading during finalization. - ExecutorService executor = Executors.newFixedThreadPool(1); - Future readFuture = executor.submit(() -> { - // Layout version check should be thread safe. - while (!dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)) { - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - } - // Make sure we can read after finalizing too. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - return null; - }); - - dsm.finalizeUpgrade(); - // If there was a failure reading during the upgrade, the exception will - // be thrown here. - readFuture.get(); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testImportContainer(boolean enableSCMHA) throws Exception { - setScmHAEnabled(enableSCMHA); - // start DN and SCM - startScmServer(); - UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - // Pre-export a container to continuously import and delete. 
- final long exportContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto exportWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, exportContainerID, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, exportContainerID, pipeline); - File exportedContainerFile = exportContainer(exportContainerID); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline); - - // Export another container to import while pre-finalized and read - // finalized. - final long exportContainerID2 = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto exportWriteChunk2 = - UpgradeTestHelper.putBlock(dispatcher, exportContainerID2, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, exportContainerID2, pipeline); - File exportedContainerFile2 = exportContainer(exportContainerID2); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID2, pipeline); - - // Make sure we can import and read a container pre-finalized. - importContainer(exportContainerID2, exportedContainerFile2); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - - // Now SCM and enough other DNs finalize to enable SCM HA. This DN is - // restarted with SCM HA config and gets a different SCM ID. - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - changeScmID(); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true); - dispatcher = dsm.getContainer().getDispatcher(); - - // Make sure the existing container can be read. - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - - // Create thread to keep importing containers during the upgrade. - // Since the datanode's MLV is behind SCM's, container creation is not - // allowed. We will keep importing and deleting the same container since - // we cannot create new ones to import here. - ExecutorService executor = Executors.newFixedThreadPool(1); - Future importFuture = executor.submit(() -> { - // Layout version check should be thread safe. - while (!dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)) { - importContainer(exportContainerID, exportedContainerFile); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline); - } - // Make sure we can import after finalizing too. - importContainer(exportContainerID, exportedContainerFile); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline); - return null; - }); - - dsm.finalizeUpgrade(); - // If there was a failure importing during the upgrade, the exception will - // be thrown here. - importFuture.get(); - - // Make sure we can read the container that was imported while - // pre-finalized after finalizing. 
- UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testFailedVolumeDuringFinalization(boolean enableSCMHA) - throws Exception { - setScmHAEnabled(enableSCMHA); - /// SETUP /// - - startScmServer(); - String originalScmID = scmServerImpl.getScmId(); - File volume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - /// PRE-FINALIZED: Write and Read from formatted volume /// - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Add container with data, make sure it can be read and written. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // FINALIZE: With failed volume /// - - failVolume(volume); - // Since volume is failed, container should be marked unhealthy. - // Finalization should proceed anyways. - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline, - ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR); - State containerState = dsm.getContainer().getContainerSet() - .getContainer(containerID).getContainerState(); - assertEquals(State.UNHEALTHY, containerState); - dsm.finalizeUpgrade(); - LambdaTestUtils.await(2000, 500, - () -> dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)); - - /// FINALIZED: Volume marked failed but gets restored on disk /// - - // Check that volume is marked failed during finalization. - assertEquals(0, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(1, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Since the volume was out during the upgrade, it should maintain its - // original format. - checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // Now that we are done finalizing, restore the volume. - restoreVolume(volume); - // After restoring the failed volume, its containers are readable again. - // However, since it is marked as failed no containers can be created or - // imported to it. - // This should log a warning about reading from an unhealthy container - // but otherwise proceed successfully. 
- UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - /// FINALIZED: Restart datanode to upgrade the failed volume /// - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.SCM_HA.layoutVersion(), false); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - checkFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // Read container from before upgrade. The upgrade required it to be closed. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - // Write and read container after upgrade. - long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto newWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline); - // The new container should use cluster ID in its path. - // The volume it is placed on is up to the implementation. - checkContainerPathID(newContainerID, CLUSTER_ID); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testFormattingNewVolumes(boolean enableSCMHA) throws Exception { - setScmHAEnabled(enableSCMHA); - /// SETUP /// - - startScmServer(); - String originalScmID = scmServerImpl.getScmId(); - File preFinVolume1 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - /// PRE-FINALIZED: Write and Read from formatted volume /// - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Add container with data, make sure it can be read and written. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - /// PRE-FINALIZED: Restart with SCM HA enabled and new SCM ID /// - - // Now SCM and enough other DNs finalize to enable SCM HA. This DN is - // restarted with SCM HA config and gets a different SCM ID. - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - changeScmID(); - // A new volume is added that must be formatted. - File preFinVolume2 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(2, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Because DN mlv would be behind SCM mlv, only reads are allowed. 
- UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - // On restart, there should have been no changes to the paths already used. - checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - // No new containers can be created on this volume since SCM MLV is ahead - // of DN MLV at this point. - // cluster ID should always be used for the new volume since SCM HA is now - // enabled. - checkVolumePathID(preFinVolume2, CLUSTER_ID); - - /// FINALIZE /// - - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline); - dsm.finalizeUpgrade(); - LambdaTestUtils.await(2000, 500, - () -> dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)); - - /// FINALIZED: Add a new volume and check its formatting /// - - // Add a new volume that should be formatted with cluster ID only, since - // DN has finalized. - File finVolume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - // Yet another SCM ID is received this time, but it should not matter. - changeScmID(); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.SCM_HA.layoutVersion(), false); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(3, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - checkFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkVolumePathID(preFinVolume2, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - // New volume should have been formatted with cluster ID only, since the - // datanode is finalized. - checkVolumePathID(finVolume, CLUSTER_ID); - - /// FINALIZED: Read old data and write + read new data /// - - // Read container from before upgrade. The upgrade required it to be closed. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - // Write and read container after upgrade. - long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto newWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline); - // The new container should use cluster ID in its path. - // The volume it is placed on is up to the implementation. - checkContainerPathID(newContainerID, CLUSTER_ID); - } - - /// CHECKS FOR TESTING /// - - public void checkContainerPathID(long containerID, String scmID, - String clusterID) { - if (scmHAAlreadyEnabled) { - checkContainerPathID(containerID, clusterID); - } else { - checkContainerPathID(containerID, scmID); - } - } - - public void checkContainerPathID(long containerID, String expectedID) { - KeyValueContainerData data = - (KeyValueContainerData) dsm.getContainer().getContainerSet() - .getContainer(containerID).getContainerData(); - assertThat(data.getChunksPath()).contains(expectedID); - assertThat(data.getMetadataPath()).contains(expectedID); - } - - public void checkFinalizedVolumePathID(File volume, String scmID, - String clusterID) throws Exception { - - if (scmHAAlreadyEnabled) { - checkVolumePathID(volume, clusterID); - } else { - List subdirs = getHddsSubdirs(volume); - File hddsRoot = getHddsRoot(volume); - - // Volume should have SCM ID and cluster ID directory, where cluster ID - // is a symlink to SCM ID. 
- assertEquals(2, subdirs.size()); - - File scmIDDir = new File(hddsRoot, scmID); - assertThat(subdirs).contains(scmIDDir); - - File clusterIDDir = new File(hddsRoot, CLUSTER_ID); - assertThat(subdirs).contains(clusterIDDir); - assertTrue(Files.isSymbolicLink(clusterIDDir.toPath())); - Path symlinkTarget = Files.readSymbolicLink(clusterIDDir.toPath()); - assertEquals(scmID, symlinkTarget.toString()); - } - } - - public void checkPreFinalizedVolumePathID(File volume, String scmID, - String clusterID) { - - if (scmHAAlreadyEnabled) { - checkVolumePathID(volume, clusterID); - } else { - checkVolumePathID(volume, scmID); - } - - } - - public void checkVolumePathID(File volume, String expectedID) { - List subdirs; - File hddsRoot; - if (dnThinksVolumeFailed(volume)) { - // If the volume is failed, read from the failed location it was - // moved to. - subdirs = getHddsSubdirs(getFailedVolume(volume)); - hddsRoot = getHddsRoot(getFailedVolume(volume)); - } else { - subdirs = getHddsSubdirs(volume); - hddsRoot = getHddsRoot(volume); - } - - // Volume should only have the specified ID directory. - assertEquals(1, subdirs.size()); - File idDir = new File(hddsRoot, expectedID); - assertThat(subdirs).contains(idDir); - } - - public List getHddsSubdirs(File volume) { - File[] subdirsArray = getHddsRoot(volume).listFiles(File::isDirectory); - assertNotNull(subdirsArray); - return Arrays.asList(subdirsArray); - } - - public File getHddsRoot(File volume) { - return new File(HddsVolumeUtil.getHddsRoot(volume.getAbsolutePath())); - } - - /// CLUSTER OPERATIONS /// - - private void startScmServer() throws Exception { - scmServerImpl = new ScmTestMock(CLUSTER_ID); - scmRpcServer = SCMTestUtils.startScmRpcServer(conf, - scmServerImpl, address, 10); - } - - /** - * Updates the SCM ID on the SCM server. Datanode will not be aware of this - * until {@link UpgradeTestHelper#callVersionEndpointTask} is called. - * @return the new scm ID. - */ - private String changeScmID() { - String scmID = UUID.randomUUID().toString(); - scmServerImpl.setScmId(scmID); - return scmID; - } - - /// CONTAINER OPERATIONS /// - - /** - * Exports the specified container to a temporary file and returns the file. - */ - private File exportContainer(long containerId) throws Exception { - final ContainerReplicationSource replicationSource = - new OnDemandContainerReplicationSource( - dsm.getContainer().getController()); - - replicationSource.prepare(containerId); - - File destination = - Files.createFile(tempFolder.resolve("destFile" + containerId)).toFile(); - try (FileOutputStream fos = new FileOutputStream(destination)) { - replicationSource.copyData(containerId, fos, NO_COMPRESSION); - } - return destination; - } - - /** - * Imports the container found in {@code source} to the datanode with the ID - * {@code containerID}. - */ - private void importContainer(long containerID, File source) throws Exception { - ContainerImporter replicator = - new ContainerImporter(dsm.getConf(), - dsm.getContainer().getContainerSet(), - dsm.getContainer().getController(), - dsm.getContainer().getVolumeSet()); - - File tempFile = Files.createFile( - tempFolder.resolve(ContainerUtils.getContainerTarName(containerID))) - .toFile(); - Files.copy(source.toPath(), tempFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); - replicator.importContainer(containerID, tempFile.toPath(), null, - NO_COMPRESSION); - } - - /// VOLUME OPERATIONS /// - - /** - * Renames the specified volume directory so it will appear as failed to - * the datanode. 
- */ - public void failVolume(File volume) { - File failedVolume = getFailedVolume(volume); - assertTrue(volume.renameTo(failedVolume)); - } - - /** - * Convert the specified volume from its failed name back to its original - * name. The File passed should be the original volume path, not the one it - * was renamed to to fail it. - */ - public void restoreVolume(File volume) { - File failedVolume = getFailedVolume(volume); - assertTrue(failedVolume.renameTo(volume)); - } - - /** - * @return The file name that will be used to rename a volume to fail it. - */ - public File getFailedVolume(File volume) { - return new File(volume.getParent(), volume.getName() + "-failed"); - } - - /** - * Checks whether the datanode thinks the volume has failed. - * This could be outdated information if the volume was restored already - * and the datanode has not been restarted since then. - */ - public boolean dnThinksVolumeFailed(File volume) { - return dsm.getContainer().getVolumeSet().getFailedVolumesList().stream() - .anyMatch(v -> - getHddsRoot(v.getStorageDir()).equals(getHddsRoot(volume))); - } -} diff --git a/hadoop-hdds/crypto-api/pom.xml b/hadoop-hdds/crypto-api/pom.xml index ca54b3de9f2..9524b2df2db 100644 --- a/hadoop-hdds/crypto-api/pom.xml +++ b/hadoop-hdds/crypto-api/pom.xml @@ -12,26 +12,25 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - - 4.0.0 - - org.apache.ozone - hdds - 2.0.0-SNAPSHOT - - - hdds-crypto-api + + 4.0.0 + + org.apache.ozone + hdds 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store cryptographic functions - Apache Ozone HDDS Crypto + + + hdds-crypto-api + 2.0.0-SNAPSHOT + Apache Ozone HDDS Crypto + Apache Ozone Distributed Data Store cryptographic functions - - true - + + + true + - + - + diff --git a/hadoop-hdds/crypto-default/pom.xml b/hadoop-hdds/crypto-default/pom.xml index 6024c3e2ddf..5200521d249 100644 --- a/hadoop-hdds/crypto-default/pom.xml +++ b/hadoop-hdds/crypto-default/pom.xml @@ -12,26 +12,25 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - - 4.0.0 - - org.apache.ozone - hdds - 2.0.0-SNAPSHOT - - - hdds-crypto-default + + 4.0.0 + + org.apache.ozone + hdds 2.0.0-SNAPSHOT - Default implementation of Apache Ozone Distributed Data Store's cryptographic functions - Apache Ozone HDDS Crypto - Default + + + hdds-crypto-default + 2.0.0-SNAPSHOT + Apache Ozone HDDS Crypto - Default + Default implementation of Apache Ozone Distributed Data Store's cryptographic functions - - true - + + + true + - + - + diff --git a/hadoop-hdds/docs/content/feature/OM-HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md index 3872c387335..cf8ca4351f3 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.md @@ -41,14 +41,6 @@ Client connects to the Leader Ozone Manager which process the request and schedu ## Configuration -HA mode of Ozone Manager can be enabled with the following settings in `ozone-site.xml`: - -```XML - - ozone.om.ratis.enable - true - -``` One Ozone configuration (`ozone-site.xml`) can support multiple Ozone HA cluster. To select between the available HA clusters a logical name is required for each of the clusters which can be resolved to the IP addresses (and domain names) of the Ozone Managers. 
This logical name is called `serviceId` and can be configured in the `ozone-site.xml` diff --git a/hadoop-hdds/docs/content/feature/OM-HA.zh.md b/hadoop-hdds/docs/content/feature/OM-HA.zh.md index 2ce92087a0c..fae76ef03b4 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.zh.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.zh.md @@ -42,15 +42,6 @@ Ozone Manager 和 Storage Container Manager 都支持 HA。在这种模式下, ## 配置 -可以在 `ozone-site.xml` 中配置以下设置来启用 Ozone Manager 的高可用模式: - -```XML - - ozone.om.ratis.enable - true - -``` - 一个 Ozone 的配置(`ozone-site.xml`)支持多个 Ozone 高可用集群。为了支持在多个高可用集群之间进行选择,每个集群都需要一个逻辑名称,该逻辑名称可以解析为 Ozone Manager 的 IP 地址(和域名)。 该逻辑名称叫做 `serviceId`,可以在 `ozone-site.xml` 中进行配置: diff --git a/hadoop-hdds/docs/content/integration/Hive.md b/hadoop-hdds/docs/content/integration/Hive.md new file mode 100644 index 00000000000..8b43236d567 --- /dev/null +++ b/hadoop-hdds/docs/content/integration/Hive.md @@ -0,0 +1,169 @@ +--- +title: Hive +weight: 4 +menu: + main: + parent: "Application Integrations" +--- + + +Apache Hive has supported Apache Ozone since Hive 4.0. To enable Hive to work with Ozone paths, ensure that the `ozone-filesystem-hadoop3` JAR is added to the Hive classpath. + +## Supported Access Protocols + +Hive supports the following protocols for accessing Ozone data: + +* ofs +* o3fs +* s3a + +## Supported Replication Types + +Hive is compatible with Ozone buckets configured with either: + +* RATIS (Replication) +* Erasure Coding + +## Accessing Ozone Data in Hive + +Hive provides two methods to interact with data in Ozone: + +* Managed Tables +* External Tables + +### Managed Tables +#### Configuring the Hive Warehouse Directory in Ozone +To store managed tables in Ozone, update the following properties in the `hive-site.xml` configuration file: + +```xml + + hive.metastore.warehouse.dir + ofs://ozone1/vol1/bucket1/warehouse/ + +``` + +#### Creating a Managed Table +You can create a managed table with a standard `CREATE TABLE` statement: + +```sql +CREATE TABLE myTable ( + id INT, + name STRING +); +``` + +#### Loading Data into a Managed Table +Data can be loaded into a Hive table from an Ozone location: + +```sql +LOAD DATA INPATH 'ofs://ozone1/vol1/bucket1/table.csv' INTO TABLE myTable; +``` + +#### Specifying a Custom Ozone Path +You can define a custom Ozone path for a database using the `MANAGEDLOCATION` clause: + +```sql +CREATE DATABASE d1 MANAGEDLOCATION 'ofs://ozone1/vol1/bucket1/data'; +``` + +Tables created in the database d1 will be stored under the specified path: +`ofs://ozone1/vol1/bucket1/data` + +#### Verifying the Ozone Path +You can confirm that Hive references the correct Ozone path using: + +```sql +SHOW CREATE DATABASE d1; +``` + +Output Example: + +```text ++----------------------------------------------------+ +| createdb_stmt | ++----------------------------------------------------+ +| CREATE DATABASE `d1` | +| LOCATION | +| 'ofs://ozone1/vol1/bucket1/external/d1.db' | +| MANAGEDLOCATION | +| 'ofs://ozone1/vol1/bucket1/data' | ++----------------------------------------------------+ +``` + +### External Tables + +Hive allows the creation of external tables to query existing data stored in Ozone. + +#### Creating an External Table +```sql +CREATE EXTERNAL TABLE external_table ( + id INT, + name STRING +) +LOCATION 'ofs://ozone1/vol1/bucket1/table1'; +``` + +* With external tables, the data is expected to be created and managed by another tool. +* Hive queries the data as-is. +* Note: Dropping an external table in Hive does not delete the associated data. 
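+For illustration (a hedged sketch assuming the same `external_table` name and Ozone path used above), dropping and re-creating the external table leaves the underlying data in Ozone intact:
+
+```sql
+-- Example only: the files under ofs://ozone1/vol1/bucket1/table1 survive the DROP.
+DROP TABLE external_table;
+
+-- Re-creating the table over the same Ozone location makes the existing data queryable again.
+CREATE EXTERNAL TABLE external_table (
+  id INT,
+  name STRING
+)
+LOCATION 'ofs://ozone1/vol1/bucket1/table1';
+
+SELECT COUNT(*) FROM external_table;
+```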
+ +To set a default path for external tables, configure the following property in the `hive-site.xml` file: +```xml + + hive.metastore.warehouse.external.dir + ofs://ozone1/vol1/bucket1/external/ + +``` +This property specifies the base directory for external tables when no explicit `LOCATION` is provided. + +#### Verifying the External Table Path +To confirm the table's metadata and location, use: + +```sql +SHOW CREATE TABLE external_table; +``` +Output Example: + +```text ++----------------------------------------------------+ +| createtab_stmt | ++----------------------------------------------------+ +| CREATE EXTERNAL TABLE `external_table`( | +| `id` int, | +| `name` string) | +| ROW FORMAT SERDE | +| 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' | +| STORED AS INPUTFORMAT | +| 'org.apache.hadoop.mapred.TextInputFormat' | +| OUTPUTFORMAT | +| 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' | +| LOCATION | +| 'ofs://ozone1/vol1/bucket1/table1' | +| TBLPROPERTIES ( | +| 'bucketing_version'='2', | +| 'transient_lastDdlTime'='1734725573') | ++----------------------------------------------------+ +``` + +## Using the S3A Protocol +In addition to ofs, Hive can access Ozone using the S3 Gateway via the S3A file system. + +For more information, consult: + +* The [S3 Protocol]({{< ref "interface/S3.md">}}) +* The [Hadoop S3A](https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html) documentation. diff --git a/hadoop-hdds/docs/content/integration/Impala.md b/hadoop-hdds/docs/content/integration/Impala.md new file mode 100644 index 00000000000..3c786d5e15a --- /dev/null +++ b/hadoop-hdds/docs/content/integration/Impala.md @@ -0,0 +1,101 @@ +--- +title: Impala +weight: 4 +menu: + main: + parent: "Application Integrations" +--- + + +Starting with version 4.2.0, Apache Impala provides full support for querying data stored in Apache Ozone. To utilize this functionality, ensure that your Ozone version is 1.4.0 or later. + +## Supported Access Protocols + +Impala supports the following protocols for accessing Ozone data: + +* ofs +* s3a + +Note: The o3fs protocol is **NOT** supported by Impala. + +## Supported Replication Types + +Impala is compatible with Ozone buckets configured with either: + +* RATIS (Replication) +* Erasure Coding + +## Querying Ozone Data with Impala + +Impala provides two approaches to interact with Ozone: + +* Managed Tables +* External Tables + +### Managed Tables + +If the Hive Warehouse Directory is located in Ozone, you can execute Impala queries without any changes, treating the Ozone file system like HDFS. For example: + +```sql +CREATE DATABASE d1; +``` + +```sql +CREATE TABLE t1 (x INT, s STRING); +``` + +The data will be stored under the Hive Warehouse Directory path in Ozone. + +#### Specifying a Custom Ozone Path + +You can create managed databases, tables, or partitions at a specific Ozone path using the `LOCATION` clause. Example: + +```sql +CREATE DATABASE d1 LOCATION 'ofs://ozone1/vol1/bucket1/d1.db'; +``` + +```sql +CREATE TABLE t1 LOCATION 'ofs://ozone1/vol1/bucket1/table1'; +``` + +### External Tables + +You can create an external table in Impala to query Ozone data. For example: + +```sql +CREATE EXTERNAL TABLE external_table ( + id INT, + name STRING +) +LOCATION 'ofs://ozone1/vol1/bucket1/table1'; +``` + +* With external tables, the data is expected to be created and managed by another tool. +* Impala queries the data as-is. +* The metadata is stored under the external warehouse directory. 
+* Note: Dropping an external table in Impala does not delete the associated data. + + +## Using the S3A Protocol + +In addition to ofs, Impala can access Ozone via the S3 Gateway using the S3A file system. For more details, refer to +* The [S3 Protocol]({{< ref "interface/S3.md">}}) +* The [Hadoop S3A](https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html) documentation. + +For additional information, consult the Apache Impala User Documentation +[Using Impala with Apache Ozone Storage](https://impala.apache.org/docs/build/html/topics/impala_ozone.html). diff --git a/hadoop-hdds/docs/content/integration/_index.md b/hadoop-hdds/docs/content/integration/_index.md new file mode 100644 index 00000000000..87f6a4825b6 --- /dev/null +++ b/hadoop-hdds/docs/content/integration/_index.md @@ -0,0 +1,26 @@ +--- +title: "Application Integrations" +menu: + main: + weight: 5 +--- + + +{{}} +Many applications can be integrated with Ozone through the Hadoop-compatible ofs interface or the S3 interface. +{{}} diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index 7f4ffbb8a70..a67c80e9f37 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,12 +21,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-docs 2.0.0-SNAPSHOT - Apache Ozone/HDDS Documentation - Apache Ozone/HDDS Documentation jar + Apache Ozone/HDDS Documentation + Apache Ozone/HDDS Documentation - true + + true false diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index b540d1c68ea..b84b6e087c3 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,44 +21,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-erasurecode 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Earsurecode utils - - Apache Ozone HDDS Erasurecode jar - - - + Apache Ozone HDDS Erasurecode + Apache Ozone Distributed Data Store Earsurecode utils + + com.google.guava + guava + org.apache.ozone hdds-common - org.slf4j slf4j-api - - com.google.guava - guava - - org.apache.ozone - hdds-hadoop-dependency-test + hdds-config test org.apache.ozone - hdds-test-utils + hdds-hadoop-dependency-test test org.apache.ozone - hdds-config + hdds-test-utils test diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 37d41cde390..5108af9172d 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,66 +21,46 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-server-framework 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Server Framework - - Apache Ozone HDDS Server Framework jar - - - + Apache Ozone HDDS Server Framework + Apache Ozone Distributed Data Store Server Framework - org.apache.ozone - hdds-config - - - org.apache.ozone - hdds-interface-client - - - org.apache.ozone - hdds-interface-server - - - org.apache.ozone - hdds-interface-admin - - - org.apache.ozone - hdds-common + ch.qos.reload4j + reload4j - org.apache.ozone - hdds-managed-rocksdb + com.fasterxml.jackson.core + jackson-annotations - org.apache.ozone - hdds-hadoop-dependency-server + com.fasterxml.jackson.core + jackson-databind - ch.qos.reload4j - reload4j + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 - org.slf4j - slf4j-api + com.github.jnr + jnr-constants - org.slf4j - slf4j-reload4j + com.github.jnr + jnr-posix - org.apache.commons - commons-compress + com.google.code.gson + gson - org.apache.commons - commons-configuration2 + com.google.guava + guava - org.apache.commons - commons-lang3 + com.google.protobuf + protobuf-java commons-codec @@ -106,76 +83,101 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-validator commons-validator - - - org.apache.logging.log4j - log4j-api - - org.apache.logging.log4j - log4j-core + io.dropwizard.metrics + metrics-core - - com.lmax - disruptor - runtime + io.opentracing + opentracing-api - org.bouncycastle - bcpkix-jdk18on + io.prometheus + simpleclient - org.bouncycastle - bcprov-jdk18on + io.prometheus + simpleclient_common - org.eclipse.jetty - jetty-http + io.prometheus + simpleclient_dropwizard - org.eclipse.jetty - jetty-util + jakarta.annotation + jakarta.annotation-api - org.eclipse.jetty - jetty-server + jakarta.ws.rs + jakarta.ws.rs-api - org.eclipse.jetty - jetty-servlet + javax.servlet + javax.servlet-api - org.eclipse.jetty - jetty-webapp + org.apache.commons + commons-compress - org.glassfish.jersey.core - jersey-server + org.apache.commons + commons-configuration2 - org.glassfish.jersey.containers - jersey-container-servlet-core + org.apache.commons + commons-lang3 - org.rocksdb - rocksdbjni - - - - ratis-server - org.apache.ratis + org.apache.hadoop + hadoop-hdfs-client - org.slf4j - slf4j-reload4j - - - org.bouncycastle - bcprov-jdk18on + com.squareup.okhttp + okhttp + + org.apache.logging.log4j + log4j-api + + + org.apache.logging.log4j + log4j-core + + + org.apache.ozone + hdds-common + + + org.apache.ozone + hdds-config + + + org.apache.ozone + hdds-hadoop-dependency-server + + + org.apache.ozone + hdds-interface-admin + + + org.apache.ozone + hdds-interface-client + + + org.apache.ozone + hdds-interface-server + + + org.apache.ozone + hdds-managed-rocksdb + + + org.apache.ozone + rocksdb-checkpoint-differ + ${hdds.version} + org.apache.ratis ratis-common @@ -186,11 +188,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis - ratis-server-api - - ratis-metrics-dropwizard3 - org.apache.ratis io.dropwizard.metrics @@ -200,90 +198,79 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis - ratis-thirdparty-misc - - - - io.dropwizard.metrics - metrics-core - - - io.opentracing - opentracing-api - - - io.prometheus - simpleclient + ratis-server + + + org.bouncycastle + bcprov-jdk18on + + + org.slf4j + slf4j-reload4j + + - io.prometheus - simpleclient_dropwizard + org.apache.ratis + ratis-server-api - io.prometheus - simpleclient_common + org.apache.ratis + 
ratis-thirdparty-misc - com.fasterxml.jackson.core - jackson-annotations + org.bouncycastle + bcpkix-jdk18on - com.fasterxml.jackson.core - jackson-databind + org.bouncycastle + bcprov-jdk18on - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 + org.eclipse.jetty + jetty-http - com.github.jnr - jnr-constants + org.eclipse.jetty + jetty-server - com.github.jnr - jnr-posix + org.eclipse.jetty + jetty-servlet - com.google.code.gson - gson + org.eclipse.jetty + jetty-util - com.google.guava - guava + org.eclipse.jetty + jetty-webapp - com.google.protobuf - protobuf-java + org.glassfish.jersey.containers + jersey-container-servlet-core - - org.apache.hadoop - hadoop-hdfs-client - - - com.squareup.okhttp - okhttp - - + org.glassfish.jersey.core + jersey-server - - org.apache.ozone - rocksdb-checkpoint-differ - ${hdds.version} + org.rocksdb + rocksdbjni - - jakarta.annotation - jakarta.annotation-api + org.slf4j + slf4j-api - jakarta.ws.rs - jakarta.ws.rs-api + org.slf4j + slf4j-reload4j - javax.servlet - javax.servlet-api + + com.lmax + disruptor + runtime @@ -310,7 +297,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - @@ -347,7 +333,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java index 44c18231549..ffa91404688 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java @@ -139,14 +139,23 @@ public BaseHttpServer(MutableConfigurationSource conf, String name) builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue); + boolean addDefaultApps = shouldAddDefaultApps(); + if (!addDefaultApps) { + builder.withoutDefaultApps(); + } + httpServer = builder.build(); - httpServer.addServlet("conf", "/conf", HddsConfServlet.class); - httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class); - prometheusSupport = + // TODO move these to HttpServer2.addDefaultApps + if (addDefaultApps) { + httpServer.addServlet("conf", "/conf", HddsConfServlet.class); + httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class); + } + + prometheusSupport = addDefaultApps && conf.getBoolean(HddsConfigKeys.HDDS_PROMETHEUS_ENABLED, true); - profilerSupport = + profilerSupport = addDefaultApps && conf.getBoolean(HddsConfigKeys.HDDS_PROFILER_ENABLED, false); if (prometheusSupport) { @@ -477,4 +486,9 @@ public boolean isSecurityEnabled() { protected abstract String getHttpAuthConfigPrefix(); + /** Override to disable the default servlets. 
*/ + protected boolean shouldAddDefaultApps() { + return true; + } + } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index 9d037fed6bc..691f5374e6f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -250,6 +250,7 @@ public static class Builder { private boolean xFrameEnabled; private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN; + private boolean skipDefaultApps; public Builder setName(String serverName) { this.name = serverName; @@ -446,6 +447,11 @@ private void loadSSLConfiguration() throws IOException { excludeCiphers = sslConf.get(SSLFactory.SSL_SERVER_EXCLUDE_CIPHER_LIST); } + public Builder withoutDefaultApps() { + this.skipDefaultApps = true; + return this; + } + public HttpServer2 build() throws IOException { Preconditions.checkNotNull(name, "name is not set"); Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified"); @@ -592,18 +598,13 @@ private HttpServer2(final Builder b) throws IOException { this.findPort = b.findPort; this.portRanges = b.portRanges; - initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs, - b.authFilterConfigurationPrefix, b.securityEnabled); + initializeWebServer(b); } - private void initializeWebServer(String name, String hostName, - MutableConfigurationSource conf, String[] pathSpecs, - String authFilterConfigPrefix, - boolean securityEnabled) throws IOException { - + private void initializeWebServer(Builder builder) throws IOException { Preconditions.checkNotNull(webAppContext); - int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY, -1); + int maxThreads = builder.conf.getInt(HTTP_MAX_THREADS_KEY, -1); // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the // default value (currently 250). 
@@ -613,13 +614,13 @@ private void initializeWebServer(String name, String hostName, threadPool.setMaxThreads(maxThreads); } - metrics = HttpServer2Metrics.create(threadPool, name); + metrics = HttpServer2Metrics.create(threadPool, builder.name); SessionHandler handler = webAppContext.getSessionHandler(); handler.setHttpOnly(true); handler.getSessionCookieConfig().setSecure(true); ContextHandlerCollection contexts = new ContextHandlerCollection(); - RequestLog requestLog = HttpRequestLog.getRequestLog(name); + RequestLog requestLog = HttpRequestLog.getRequestLog(builder.name); handlers.addHandler(contexts); if (requestLog != null) { @@ -628,20 +629,22 @@ private void initializeWebServer(String name, String hostName, handlers.addHandler(requestLogHandler); } handlers.addHandler(webAppContext); - final String appDir = getWebAppsPath(name); - addDefaultApps(contexts, appDir, conf); + final String appDir = getWebAppsPath(builder.name); + if (!builder.skipDefaultApps) { + addDefaultApps(contexts, appDir, builder.conf); + } webServer.setHandler(handlers); - Map config = generateFilterConfiguration(conf); + Map config = generateFilterConfiguration(builder.conf); addGlobalFilter("safety", QuotingInputFilter.class.getName(), config); - final FilterInitializer[] initializers = getFilterInitializers(conf); + final FilterInitializer[] initializers = getFilterInitializers(builder.conf); if (initializers != null) { - conf.set(BIND_ADDRESS, hostName); + builder.conf.set(BIND_ADDRESS, builder.hostName); org.apache.hadoop.conf.Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + LegacyHadoopConfigurationSource.asHadoopConfiguration(builder.conf); Map filterConfig = getFilterConfigMap(hadoopConf, - authFilterConfigPrefix); + builder.authFilterConfigurationPrefix); for (FilterInitializer c : initializers) { - if ((c instanceof AuthenticationFilterInitializer) && securityEnabled) { + if ((c instanceof AuthenticationFilterInitializer) && builder.securityEnabled) { addFilter("authentication", AuthenticationFilter.class.getName(), filterConfig); } else { @@ -650,10 +653,12 @@ private void initializeWebServer(String name, String hostName, } } - addDefaultServlets(); + if (!builder.skipDefaultApps) { + addDefaultServlets(); + } - if (pathSpecs != null) { - for (String path : pathSpecs) { + if (builder.pathSpecs != null) { + for (String path : builder.pathSpecs) { LOG.info("adding path spec: {}", path); addFilterPathMapping(path, webAppContext); } diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 7676f1f45f1..c05614456e7 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,16 +21,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-client 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop client dependencies - - Apache Ozone HDDS Hadoop Client dependencies jar + Apache Ozone HDDS Hadoop Client dependencies + Apache Ozone Distributed Data Store Hadoop client dependencies - true + + true + + com.nimbusds + nimbus-jose-jwt + org.apache.hadoop hadoop-annotations @@ -44,108 +45,100 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${hadoop.version} - com.nimbusds - nimbus-jose-jwt - - - org.xerial.snappy - snappy-java - - - org.apache.hadoop - hadoop-annotations - - - com.google.guava - guava - - - - org.apache.commons - commons-math3 + ch.qos.reload4j + reload4j - commons-codec - commons-codec + com.fasterxml.jackson.core + jackson-databind - commons-io - commons-io + com.github.pjfanning + jersey-json - commons-net - commons-net + com.google.code.findbugs + jsr305 - commons-collections - commons-collections + com.google.code.gson + gson - javax.servlet - javax.servlet-api + com.google.guava + guava - org.eclipse.jetty - jetty-server + com.jcraft + jsch - org.eclipse.jetty - jetty-util + com.nimbusds + * - org.eclipse.jetty - jetty-servlet + com.nimbusds + nimbus-jose-jwt - org.eclipse.jetty - jetty-webapp + com.sun.jersey + jersey-core - com.github.pjfanning + com.sun.jersey jersey-json - - com.google.code.findbugs - jsr305 - com.sun.jersey - jersey-core + jersey-server com.sun.jersey jersey-servlet - com.sun.jersey - jersey-json + commons-beanutils + commons-beanutils + - com.sun.jersey - jersey-server + commons-codec + commons-codec + + + commons-collections + commons-collections + + + commons-io + commons-io commons-logging commons-logging - log4j - log4j + commons-net + commons-net - ch.qos.reload4j - reload4j + dnsjava + dnsjava - commons-beanutils - commons-beanutils + javax.servlet + javax.servlet-api - org.apache.commons - commons-lang3 + javax.servlet.jsp + * - org.slf4j + log4j + log4j + + + net.minidev * @@ -153,59 +146,62 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> avro - com.google.code.gson - gson + org.apache.commons + commons-compress - com.jcraft - jsch + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-math3 org.apache.curator * - org.apache.zookeeper - zookeeper + org.apache.curator + * - org.apache.commons - commons-compress + org.apache.hadoop + hadoop-annotations org.apache.kerby kerb-simplekdc - com.fasterxml.jackson.core - jackson-databind + org.apache.zookeeper + zookeeper - dnsjava - dnsjava + org.eclipse.jetty + jetty-server - com.nimbusds - * + org.eclipse.jetty + jetty-servlet - net.minidev - * + org.eclipse.jetty + jetty-util - org.apache.curator - * + org.eclipse.jetty + jetty-webapp - javax.servlet.jsp + org.slf4j * + + org.xerial.snappy + snappy-java + - - - - com.nimbusds - nimbus-jose-jwt org.apache.hadoop @@ -214,20 +210,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> compile - com.google.guava - guava + ch.qos.reload4j + reload4j - org.eclipse.jetty - jetty-server + com.fasterxml.jackson.core + jackson-databind - org.eclipse.jetty - jetty-util + com.google.guava + guava - org.eclipse.jetty - jetty-util-ajax + com.google.protobuf + protobuf-java com.sun.jersey @@ -245,6 +241,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-codec commons-codec + + commons-daemon + commons-daemon + commons-io commons-io @@ -254,48 +254,44 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging - commons-daemon - 
commons-daemon - - - log4j - log4j + io.netty + netty - ch.qos.reload4j - reload4j + io.netty + netty-all - org.slf4j - slf4j-reload4j + javax.servlet + javax.servlet-api - com.google.protobuf - protobuf-java + log4j + log4j - javax.servlet - javax.servlet-api + org.apache.htrace + htrace-core4 - io.netty - netty + org.eclipse.jetty + jetty-server - io.netty - netty-all + org.eclipse.jetty + jetty-util - org.apache.htrace - htrace-core4 + org.eclipse.jetty + jetty-util-ajax org.fusesource.leveldbjni leveldbjni-all - com.fasterxml.jackson.core - jackson-databind + org.slf4j + slf4j-reload4j diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 6be31002b09..324b21ef668 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,64 +21,68 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-server 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop server dependencies - - Apache Ozone HDDS Hadoop Server dependencies jar + Apache Ozone HDDS Hadoop Server dependencies + Apache Ozone Distributed Data Store Hadoop server dependencies - true + + true + + com.nimbusds + nimbus-jose-jwt + + + + commons-cli + commons-cli + org.apache.hadoop hadoop-annotations org.apache.hadoop - hadoop-common + hadoop-auth ${hadoop.version} - com.nimbusds - nimbus-jose-jwt + ch.qos.reload4j + reload4j - org.xerial.snappy - snappy-java + log4j + log4j org.apache.curator * - org.apache.avro - avro + org.apache.kerby + kerb-simplekdc org.apache.zookeeper zookeeper - org.apache.commons + org.slf4j * + + + + org.apache.hadoop + hadoop-common + ${hadoop.version} + - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-core-asl - - - org.codehaus.jackson - jackson-jaxrs - - - org.codehaus.jackson - jackson-xc + ch.qos.reload4j + reload4j com.github.pjfanning @@ -92,32 +93,25 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jsr305 - com.sun.jersey - * + com.nimbusds + nimbus-jose-jwt - org.apache.kerby - kerb-simplekdc + com.sun.jersey + * log4j log4j - ch.qos.reload4j - reload4j + org.apache.avro + avro - org.slf4j + org.apache.commons * - - - - org.apache.hadoop - hadoop-auth - ${hadoop.version} - org.apache.curator * @@ -131,34 +125,41 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> zookeeper - log4j - log4j + org.codehaus.jackson + jackson-core-asl - ch.qos.reload4j - reload4j + org.codehaus.jackson + jackson-jaxrs + + + org.codehaus.jackson + jackson-mapper-asl + + + org.codehaus.jackson + jackson-xc org.slf4j * + + org.xerial.snappy + snappy-java + - - com.nimbusds - nimbus-jose-jwt - - - - commons-cli - commons-cli - org.apache.hadoop hadoop-hdfs ${hadoop.version} compile + + ch.qos.reload4j + reload4j + com.sun.jersey * @@ -167,17 +168,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.netty * - - org.fusesource.leveldbjni - leveldbjni-all - log4j log4j - ch.qos.reload4j - reload4j + org.fusesource.leveldbjni + leveldbjni-all org.slf4j diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index f04e45a0340..48bdff714fb 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations 
under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,16 +21,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-test 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop test dependencies - - Apache Ozone HDDS Hadoop Test dependencies jar + Apache Ozone HDDS Hadoop Test dependencies + Apache Ozone Distributed Data Store Hadoop test dependencies - true + + true + + commons-codec + commons-codec + + + org.apache.commons + commons-compress + org.apache.hadoop hadoop-common @@ -58,14 +63,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - commons-codec - commons-codec - - - org.apache.commons - commons-compress - org.assertj diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index f3197dc8965..047db244faa 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,14 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-admin 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Admin interface - - Apache Ozone HDDS Admin Interface jar + Apache Ozone HDDS Admin Interface + Apache Ozone Distributed Data Store Admin interface - true - true + + true + + true @@ -71,9 +69,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ target/generated-sources/java false diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 1a61dfa930e..da6dec5cda4 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,14 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-client 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Client interface - - Apache Ozone HDDS Client Interface jar + Apache Ozone HDDS Client Interface + Apache Ozone Distributed Data Store Client interface - true - true + + true + + true @@ -39,6 +37,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.protobuf protobuf-java + + javax.annotation + javax.annotation-api + org.apache.hadoop.thirdparty hadoop-shaded-protobuf_3_25 @@ -48,10 +50,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ratis-thirdparty-misc ${ratis.thirdparty.version} - - javax.annotation - javax.annotation-api - @@ -81,9 +79,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile-custom - - com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ DatanodeClientProtocol.proto @@ -92,9 +88,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> target/generated-sources/java false grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -104,9 +98,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ hdds.proto @@ -123,9 +115,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ hdds.proto @@ -142,38 +132,21 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${maven-antrun-plugin.version} + + run + generate-sources - - - - - - - - - - - - + + + + + + + - - run - diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index 47bde5a0bc7..83aa5f72e36 100644 --- a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,21 +21,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-server 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Server interface - - Apache Ozone HDDS Server Interface jar + Apache Ozone HDDS Server Interface + Apache Ozone Distributed Data Store Server interface - true - true + + true + + true - - org.apache.ratis - ratis-thirdparty-misc - com.google.protobuf protobuf-java @@ -49,6 +43,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-interface-client + + org.apache.ratis + ratis-thirdparty-misc + @@ -78,9 +76,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile-custom - - com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ InterSCMProtocol.proto @@ -89,9 +85,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> target/generated-sources/java false grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -101,9 +95,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ InterSCMProtocol.proto @@ -120,26 +112,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${maven-antrun-plugin.version} + + run + generate-sources - - - - - - + + + - - run - diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml index 40ad920647a..82b58b5a4db 100644 --- a/hadoop-hdds/managed-rocksdb/pom.xml +++ b/hadoop-hdds/managed-rocksdb/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -23,25 +21,32 @@ hdds-managed-rocksdb 2.0.0-SNAPSHOT - Apache Ozone Managed RocksDB library - Apache Ozone HDDS Managed RocksDB jar + Apache Ozone HDDS Managed RocksDB + Apache Ozone Managed RocksDB library - true + + true + + com.google.guava + guava + + + jakarta.annotation + jakarta.annotation-api + org.apache.ozone hdds-common - org.apache.ratis ratis-common - org.rocksdb rocksdbjni @@ -50,16 +55,6 @@ org.slf4j slf4j-api - - - com.google.guava - guava - - - - jakarta.annotation - jakarta.annotation-api - diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index b3aa6ff6952..7e4fbd32db3 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -25,157 +22,144 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Project - Apache Ozone HDDS pom - - - true - + Apache Ozone HDDS + Apache Ozone Distributed Data Store Project annotations - hadoop-dependency-client - hadoop-dependency-test - hadoop-dependency-server - interface-client - interface-admin - interface-server client common + config + container-service crypto-api crypto-default + docs + erasurecode framework + hadoop-dependency-client + hadoop-dependency-server + hadoop-dependency-test + interface-admin + interface-client + interface-server managed-rocksdb + rocks-native rocksdb-checkpoint-differ - container-service server-scm - tools - docs - config test-utils - erasurecode - rocks-native + tools - org.apache.ozone - hdds-common + hdds-annotation-processing ${hdds.version} org.apache.ozone - hdds-managed-rocksdb + hdds-client ${hdds.version} org.apache.ozone - hdds-hadoop-dependency-client + hdds-common ${hdds.version} org.apache.ozone - hdds-hadoop-dependency-server + hdds-config ${hdds.version} org.apache.ozone - hdds-hadoop-dependency-test + hdds-container-service ${hdds.version} - test org.apache.ozone - hdds-interface-server + hdds-docs ${hdds.version} org.apache.ozone - hdds-interface-client + hdds-erasurecode ${hdds.version} org.apache.ozone - hdds-interface-admin + hdds-hadoop-dependency-client ${hdds.version} org.apache.ozone - hdds-erasurecode + hdds-hadoop-dependency-server ${hdds.version} org.apache.ozone - hdds-client + hdds-interface-admin ${hdds.version} org.apache.ozone - hdds-tools + hdds-interface-client ${hdds.version} org.apache.ozone - hdds-server-framework + hdds-interface-server ${hdds.version} - - org.apache.ozone - rocksdb-checkpoint-differ - ${hdds.version} - - org.apache.ozone - hdds-server-scm + hdds-managed-rocksdb ${hdds.version} org.apache.ozone - hdds-container-service - ${hdds.version} + hdds-rocks-native + ${hdds.rocks.native.version} org.apache.ozone - hdds-docs + hdds-server-framework ${hdds.version} org.apache.ozone - hdds-config + hdds-server-scm ${hdds.version} org.apache.ozone - hdds-annotation-processing + hdds-tools ${hdds.version} org.apache.ozone - hdds-test-utils + rocksdb-checkpoint-differ ${hdds.version} - test @@ -194,18 +178,26 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test + + org.apache.ozone + hdds-hadoop-dependency-test + ${hdds.version} + test + + org.apache.ozone hdds-server-scm - test-jar ${hdds.version} + test-jar test org.apache.ozone - hdds-rocks-native - ${hdds.rocks.native.version} + hdds-test-utils + ${hdds.version} + test diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 4c751e0b10a..087dc8c0235 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -12,19 +12,25 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + + 4.0.0 - hdds org.apache.ozone + hdds 2.0.0-SNAPSHOT - 4.0.0 - Apache Ozone HDDS RocksDB Tools hdds-rocks-native + Apache Ozone HDDS RocksDB Tools + + com.google.guava + guava + + + org.apache.commons + commons-lang3 + org.apache.ozone hdds-common @@ -33,12 +39,6 @@ org.apache.ozone hdds-managed-rocksdb - - - org.apache.commons - commons-lang3 - - org.eclipse.jetty jetty-io @@ -52,11 +52,6 @@ slf4j-api - - com.google.guava - guava - - org.apache.ozone @@ -104,10 +99,10 @@ get-cpu-count - generate-sources cpu-count + generate-sources system.numCores @@ -136,10 +131,10 @@ set-property - initialize java + initialize org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter @@ -155,10 +150,10 @@ read-property-from-file - initialize read-project-properties + initialize ${project.build.directory}/propertyFile.txt @@ -173,10 +168,10 @@ unpack-dependency - initialize unpack + initialize @@ -197,10 +192,10 @@ rocksdb source download - generate-sources wget + generate-sources https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz rocksdb-v${rocksdb.version}.tar.gz @@ -221,10 +216,10 @@ patch - process-sources apply + process-sources @@ -234,70 +229,71 @@ unzip-artifact + + run + generate-sources - + - - run - build-rocksjava + + run + generate-resources - - - + + + - - - - - - + + + + + + - - run - build-rocks-tools + + run + process-classes - - - - - - - - - - - - - + + + + + + + + + + + + + - - + + - - run - copy-lib-file + + run + process-classes @@ -306,9 +302,6 @@ - - run - @@ -352,10 +345,10 @@ native-maven-plugin - compile javah + compile ${env.JAVA_HOME}/bin/javah @@ -386,10 +379,10 @@ copy-dependencies - process-sources copy-dependencies + process-sources ${project.build.directory}/dependency runtime diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index c4284a4e85d..cb7ff3acd59 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -25,17 +22,30 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> rocksdb-checkpoint-differ 2.0.0-SNAPSHOT - RocksDB Checkpoint Differ - RocksDB Checkpoint Differ jar - - - + RocksDB Checkpoint Differ + RocksDB Checkpoint Differ - org.rocksdb - rocksdbjni + com.github.vlsi.mxgraph + jgraphx + + + com.google.guava + guava + + + com.google.protobuf + protobuf-java + + + commons-collections + commons-collections + + + org.apache.commons + commons-lang3 org.apache.ozone @@ -57,15 +67,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-rocks-native - - - com.google.guava - guava - - - org.apache.commons - commons-lang3 - org.apache.ratis ratis-common @@ -78,6 +79,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.jgrapht jgrapht-ext + + org.rocksdb + rocksdbjni + org.slf4j slf4j-api @@ -87,20 +92,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> slf4j-reload4j - - com.github.vlsi.mxgraph - jgraphx - - - com.google.protobuf - protobuf-java - - - - commons-collections - commons-collections - - org.apache.ozone @@ -109,15 +100,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-test-utils + hdds-rocks-native + ${project.version} + test-jar test org.apache.ozone - hdds-rocks-native - ${project.version} + hdds-test-utils test - test-jar @@ -142,7 +133,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> depcheck - + diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 4c2e40c3759..4137f443c71 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,37 +21,97 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-server-scm 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Storage Container Manager Server - Apache Ozone HDDS SCM Server jar + Apache Ozone HDDS SCM Server + Apache Ozone Distributed Data Store Storage Container Manager Server false + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.google.guava + guava + com.google.protobuf protobuf-java compile + + commons-collections + commons-collections + + + commons-io + commons-io + + + info.picocli + picocli + + + io.dropwizard.metrics + metrics-core + + + jakarta.annotation + jakarta.annotation-api + + + javax.servlet + javax.servlet-api + + + org.apache.commons + commons-compress + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-text + + + org.apache.hadoop + hadoop-hdfs-client + + + com.squareup.okhttp + okhttp + + + org.apache.ozone - hdds-common + hdds-client org.apache.ozone - hdds-config + hdds-common - org.apache.ozone - hdds-container-service + hdds-config - org.apache.ozone - hdds-client + hdds-container-service org.apache.ozone @@ -76,13 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-server-framework - - - org.apache.ozone - hdds-docs - provided - - org.apache.ratis ratis-client @@ -105,17 +155,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis - ratis-server-api + ratis-server org.apache.ratis - ratis-server + ratis-server-api org.apache.ratis ratis-thirdparty-misc - org.bouncycastle bcpkix-jdk18on @@ -124,35 +173,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle bcprov-jdk18on - - io.dropwizard.metrics - 
metrics-core - - - - org.apache.commons - commons-compress - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-text - - - - org.apache.hadoop - hadoop-hdfs-client - - - com.squareup.okhttp - okhttp - - - - org.eclipse.jetty jetty-webapp @@ -161,44 +181,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j slf4j-api - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava - - - - info.picocli - picocli - - - jakarta.annotation - jakarta.annotation-api - - - javax.servlet - javax.servlet-api - - - commons-collections - commons-collections - - - commons-io - commons-io + org.apache.ozone + hdds-docs + provided @@ -215,14 +201,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-hadoop-dependency-test + hdds-container-service + test-jar test org.apache.ozone - hdds-container-service + hdds-hadoop-dependency-test test - test-jar org.apache.ozone @@ -231,6 +217,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + ${basedir}/../../hdds/common/src/main/resources + + + ${basedir}/src/test/resources + + org.apache.maven.plugins @@ -259,7 +253,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -280,24 +275,22 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> copy-common-html - prepare-package unpack + prepare-package org.apache.ozone hdds-server-framework - ${project.build.outputDirectory} - + ${project.build.outputDirectory} webapps/static/**/*.* org.apache.ozone hdds-docs - ${project.build.outputDirectory}/webapps/scm - + ${project.build.outputDirectory}/webapps/scm docs/**/*.* @@ -314,13 +307,5 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - ${basedir}/../../hdds/common/src/main/resources - - - ${basedir}/src/test/resources - - diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 6b6a888f424..2ddcb223bf9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -24,6 +24,7 @@ import java.util.Set; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -72,6 +73,8 @@ default List getContainers() { List getContainers(ContainerID startID, int count); + List getContainers(ReplicationType type); + /** * Returns all the containers which are in the specified state. 
* diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index d61f9ee366b..113903e647b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; @@ -147,6 +148,12 @@ public ContainerInfo getContainer(final ContainerID id) id + " not found.")); } + + @Override + public List getContainers(ReplicationType type) { + return toContainers(containerStateManager.getContainerIDs(type)); + } + @Override public List getContainers(final ContainerID startID, final int count) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index 4f478b201cd..263dc14469a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -23,6 +23,7 @@ import java.util.Set; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.metadata.Replicate; @@ -114,6 +115,12 @@ public interface ContainerStateManager { */ Set getContainerIDs(LifeCycleState state); + + /** + * Returns the IDs of the Containers whose ReplicationType matches the given type. 
+ */ + Set getContainerIDs(ReplicationType type); + /** * */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java index 28a732795b1..f2cbe451ba7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java @@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -287,6 +288,13 @@ public Set getContainerIDs(final LifeCycleState state) { } } + @Override + public Set getContainerIDs(final ReplicationType type) { + try (AutoCloseableLock ignored = readLock()) { + return containers.getContainerIDsByType(type); + } + } + @Override public ContainerInfo getContainer(final ContainerID id) { try (AutoCloseableLock ignored = readLock(id)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index 438e9709bff..4e6f0ed67ce 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -316,7 +316,7 @@ NavigableSet getContainerIDsByOwner(final String ownerName) { * @param type - Replication type -- StandAlone, Ratis etc. * @return NavigableSet */ - NavigableSet getContainerIDsByType(final ReplicationType type) { + public NavigableSet getContainerIDsByType(final ReplicationType type) { Preconditions.checkNotNull(type); return typeMap.getCollection(type); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index bdd7160de4c..b66b6e9f0f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,11 @@ import java.util.stream.Collectors; import com.google.common.collect.Sets; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -48,55 +48,53 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT; + /** * Class defining Safe mode exit criteria for Containers. */ public class ContainerSafeModeRule extends SafeModeExitRule { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerSafeModeRule.class); + public static final Logger LOG = LoggerFactory.getLogger(ContainerSafeModeRule.class); + private final ContainerManager containerManager; // Required cutoff % for containers with at least 1 reported replica. - private double safeModeCutoff; + private final double safeModeCutoff; // Containers read from scm db (excluding containers in ALLOCATED state). - private Set ratisContainers; - private Set ecContainers; - private Map> ecContainerDNsMap; + private final Set ratisContainers; + private final Set ecContainers; + private final Map> ecContainerDNsMap; + private final AtomicLong ratisContainerWithMinReplicas = new AtomicLong(0); + private final AtomicLong ecContainerWithMinReplicas = new AtomicLong(0); + private double ratisMaxContainer; private double ecMaxContainer; - private AtomicLong ratisContainerWithMinReplicas = new AtomicLong(0); - private AtomicLong ecContainerWithMinReplicas = new AtomicLong(0); - private final ContainerManager containerManager; - - public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, - ConfigurationSource conf, - ContainerManager containerManager, SCMSafeModeManager manager) { - this(ruleName, eventQueue, conf, containerManager.getContainers(), containerManager, manager); - } - public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, - ConfigurationSource conf, - List containers, - ContainerManager containerManager, SCMSafeModeManager manager) { + public ContainerSafeModeRule(final String ruleName, + final EventQueue eventQueue, + final ConfigurationSource conf, + final ContainerManager containerManager, + final SCMSafeModeManager manager) { super(manager, ruleName, eventQueue); + this.safeModeCutoff = getSafeModeCutoff(conf); this.containerManager = containerManager; - safeModeCutoff = conf.getDouble( - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT); - - Preconditions.checkArgument( - (safeModeCutoff >= 0.0 && safeModeCutoff <= 1.0), - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT + - " value should be >= 0.0 and <= 1.0"); + this.ratisContainers = new HashSet<>(); + this.ecContainers = new HashSet<>(); + this.ecContainerDNsMap = new ConcurrentHashMap<>(); + initializeRule(); + } - ratisContainers = new HashSet<>(); - ecContainers = new HashSet<>(); - ecContainerDNsMap = new 
ConcurrentHashMap<>(); - initializeRule(containers); + private static double getSafeModeCutoff(ConfigurationSource conf) { + final double cutoff = conf.getDouble(HDDS_SCM_SAFEMODE_THRESHOLD_PCT, + HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT); + Preconditions.checkArgument((cutoff >= 0.0 && cutoff <= 1.0), + HDDS_SCM_SAFEMODE_THRESHOLD_PCT + + " value should be >= 0.0 and <= 1.0"); + return cutoff; } - @Override protected TypedEvent getEventType() { return SCMEvents.CONTAINER_REGISTRATION_REPORT; @@ -104,45 +102,59 @@ protected TypedEvent getEventType() { @Override protected synchronized boolean validate() { - return (getCurrentContainerThreshold() >= safeModeCutoff) && - (getCurrentECContainerThreshold() >= safeModeCutoff); + if (validateBasedOnReportProcessing()) { + return (getCurrentContainerThreshold() >= safeModeCutoff) && + (getCurrentECContainerThreshold() >= safeModeCutoff); + } + + // TODO: Split ContainerSafeModeRule into RatisContainerSafeModeRule and + // ECContainerSafeModeRule + final List containers = containerManager.getContainers( + ReplicationType.RATIS); + + return containers.stream() + .filter(this::isClosed) + .map(ContainerInfo::containerID) + .noneMatch(this::isMissing); } - @VisibleForTesting - public synchronized double getCurrentContainerThreshold() { - if (ratisMaxContainer == 0) { - return 1; + /** + * Checks if the container has any replica. + */ + private boolean isMissing(ContainerID id) { + try { + return containerManager.getContainerReplicas(id).isEmpty(); + } catch (ContainerNotFoundException ex) { + /* + * This should never happen, in case this happens the container + * somehow got removed from SCM. + * Safemode rule doesn't have to log/fix this. We will just exclude this + * from the rule validation. + */ + return false; + } - return (ratisContainerWithMinReplicas.doubleValue() / ratisMaxContainer); } @VisibleForTesting - public synchronized double getCurrentECContainerThreshold() { - if (ecMaxContainer == 0) { - return 1; - } - return (ecContainerWithMinReplicas.doubleValue() / ecMaxContainer); + public double getCurrentContainerThreshold() { + return ratisMaxContainer == 0 ? 1 : + (ratisContainerWithMinReplicas.doubleValue() / ratisMaxContainer); } - private synchronized double getEcMaxContainer() { - if (ecMaxContainer == 0) { - return 1; - } - return ecMaxContainer; + @VisibleForTesting + public double getCurrentECContainerThreshold() { + return ecMaxContainer == 0 ? 1 : + (ecContainerWithMinReplicas.doubleValue() / ecMaxContainer); } - private synchronized double getRatisMaxContainer() { - if (ratisMaxContainer == 0) { - return 1; - } - return ratisMaxContainer; - } + // TODO: Report processing logic will be removed in future. HDDS-11958. @Override protected synchronized void process( - NodeRegistrationContainerReport reportsProto) { - DatanodeDetails datanodeDetails = reportsProto.getDatanodeDetails(); - UUID datanodeUUID = datanodeDetails.getUuid(); + final NodeRegistrationContainerReport reportsProto) { + final DatanodeDetails datanodeDetails = reportsProto.getDatanodeDetails(); + final UUID datanodeUUID = datanodeDetails.getUuid(); StorageContainerDatanodeProtocolProtos.ContainerReportsProto report = reportsProto.getReport(); report.getReportsList().forEach(c -> { @@ -166,9 +178,7 @@ protected synchronized void process( SCMSafeModeManager.getLogger().info( "SCM in safe mode. 
{} % containers [Ratis] have at least one" + " reported replica, {} % containers [EC] have at N reported replica.", - ((ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100), - ((ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100) - ); + getCurrentContainerThreshold() * 100, getCurrentECContainerThreshold() * 100); } } @@ -246,8 +256,8 @@ public String getStatusText() { String status = String.format( "%1.2f%% of [Ratis] Containers(%s / %s) with at least one reported replica (=%1.2f) >= " + "safeModeCutoff (=%1.2f);", - (ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100, - ratisContainerWithMinReplicas, (long) getRatisMaxContainer(), + getCurrentContainerThreshold() * 100, + ratisContainerWithMinReplicas, (long) ratisMaxContainer, getCurrentContainerThreshold(), this.safeModeCutoff); Set sampleRatisContainers = ratisContainers.stream(). @@ -264,8 +274,8 @@ public String getStatusText() { String ecStatus = String.format( "%1.2f%% of [EC] Containers(%s / %s) with at least N reported replica (=%1.2f) >= " + "safeModeCutoff (=%1.2f);", - (ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100, - ecContainerWithMinReplicas, (long) getEcMaxContainer(), + getCurrentECContainerThreshold() * 100, + ecContainerWithMinReplicas, (long) ecMaxContainer, getCurrentECContainerThreshold(), this.safeModeCutoff); status = status.concat("\n").concat(ecStatus); @@ -295,25 +305,19 @@ public String getStatusText() { @Override public synchronized void refresh(boolean forceRefresh) { - List containers = containerManager.getContainers(); - if (forceRefresh) { - initializeRule(containers); - } else { - if (!validate()) { - initializeRule(containers); - } + if (forceRefresh || !validate()) { + initializeRule(); } } - private boolean checkContainerState(LifeCycleState state) { - if (state == LifeCycleState.QUASI_CLOSED || state == LifeCycleState.CLOSED) { - return true; - } - return false; + private boolean isClosed(ContainerInfo container) { + final LifeCycleState state = container.getState(); + return state == LifeCycleState.QUASI_CLOSED || + state == LifeCycleState.CLOSED; } - private void initializeRule(List containers) { - + private void initializeRule() { + final List containers = containerManager.getContainers(); // Clean up the related data in the map. ratisContainers.clear(); ecContainers.clear(); @@ -325,10 +329,9 @@ private void initializeRule(List containers) { // created by the client. We are not considering these containers for // now. These containers can be handled by tracking pipelines. 
- LifeCycleState containerState = container.getState(); HddsProtos.ReplicationType replicationType = container.getReplicationType(); - if (checkContainerState(containerState) && container.getNumberOfKeys() > 0) { + if (isClosed(container) && container.getNumberOfKeys() > 0) { // If it's of type Ratis if (replicationType.equals(HddsProtos.ReplicationType.RATIS)) { ratisContainers.add(container.getContainerID()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java index 78ce994af73..f4e6f6ee2cf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java @@ -19,7 +19,6 @@ import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -28,7 +27,6 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMContext; @@ -105,7 +103,6 @@ public class SCMSafeModeManager implements SafeModeManager { private Set validatedPreCheckRules = new HashSet<>(1); private final EventQueue eventPublisher; - private final PipelineManager pipelineManager; private final SCMServiceManager serviceManager; private final SCMContext scmContext; @@ -114,12 +111,10 @@ public class SCMSafeModeManager implements SafeModeManager { // TODO: Remove allContainers argument. (HDDS-11795) public SCMSafeModeManager(ConfigurationSource conf, - List allContainers, ContainerManager containerManager, PipelineManager pipelineManager, EventQueue eventQueue, SCMServiceManager serviceManager, SCMContext scmContext) { this.config = conf; - this.pipelineManager = pipelineManager; this.eventPublisher = eventQueue; this.serviceManager = serviceManager; this.scmContext = scmContext; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java index 69c1a86ac37..746e825f34b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java @@ -41,6 +41,10 @@ public abstract class SafeModeExitRule implements EventHandler { protected static final int SAMPLE_CONTAINER_DISPLAY_LIMIT = 5; protected static final int SAMPLE_PIPELINE_DISPLAY_LIMIT = 5; + // TODO: Report processing logic will be removed in future. HDDS-11958. + // This flag is to add new code without breaking Safemode logic until we have HDDS-11958. 
+ private boolean validateBasedOnReportProcessing = true; + public SafeModeExitRule(SCMSafeModeManager safeModeManager, String ruleName, EventQueue eventQueue) { this.safeModeManager = safeModeManager; @@ -48,6 +52,14 @@ public SafeModeExitRule(SCMSafeModeManager safeModeManager, eventQueue.addHandler(getEventType(), this); } + public void setValidateBasedOnReportProcessing(boolean newValue) { + validateBasedOnReportProcessing = newValue; + } + + protected boolean validateBasedOnReportProcessing() { + return validateBasedOnReportProcessing; + } + /** * Return's the name of this SafeModeExit Rule. * @return ruleName diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index d117e891c4b..52148c3d683 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -841,8 +841,8 @@ private void initializeSystemManagers(OzoneConfiguration conf, scmSafeModeManager = configurator.getScmSafeModeManager(); } else { scmSafeModeManager = new SCMSafeModeManager(conf, - containerManager.getContainers(), containerManager, - pipelineManager, eventQueue, serviceManager, scmContext); + containerManager, pipelineManager, eventQueue, + serviceManager, scmContext); } scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, containerManager, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java index e258c8ee66e..353c6c50104 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java @@ -37,6 +37,7 @@ import picocli.CommandLine.Command; import java.io.IOException; +import java.util.concurrent.Callable; import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY; @@ -49,7 +50,7 @@ hidden = true, description = "Start or initialize the scm server.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class StorageContainerManagerStarter extends GenericCli { +public class StorageContainerManagerStarter extends GenericCli implements Callable { private OzoneConfiguration conf; private SCMStarterInterface receiver; @@ -91,8 +92,8 @@ public Void call() throws Exception { versionProvider = HddsVersionProvider.class) public void generateClusterId() { commonInit(); - System.out.println("Generating new cluster id:"); - System.out.println(receiver.generateClusterId()); + out().println("Generating new cluster id:"); + out().println(receiver.generateClusterId()); } /** @@ -150,7 +151,7 @@ private void startScm() throws Exception { * is set and print the startup banner message. 
*/ private void commonInit() { - conf = createOzoneConfiguration(); + conf = getOzoneConf(); TracingUtil.initTracing("StorageContainerManager", conf); String[] originalArgs = getCmd().getParseResult().originalArgs() diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 621c9297e7e..528891623df 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -162,8 +162,7 @@ void setUp(@TempDir File tempDir) throws Exception { new ContainerReplicaPendingOps( Clock.system(ZoneId.systemDefault()))); SCMSafeModeManager safeModeManager = new SCMSafeModeManager(conf, - containerManager.getContainers(), containerManager, - pipelineManager, eventQueue, serviceManager, scmContext) { + containerManager, pipelineManager, eventQueue, serviceManager, scmContext) { @Override public void emitSafeModeStatus() { // skip diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 2a012cbe180..4fb323d7451 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -112,7 +111,6 @@ public class TestDeletedBlockLog { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); replicationManager = mock(ReplicationManager.class); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java similarity index 80% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java index 75a943ee8da..2d9a18c5a8e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java @@ -18,13 +18,11 @@ package org.apache.hadoop.hdds.scm.ha; import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmRatisServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import 
org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.common.Storage; @@ -35,13 +33,10 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.UUID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY; @@ -63,8 +58,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -72,7 +65,7 @@ /** * Test for SCM HA-related configuration. */ -class TestSCMHAConfiguration { +class TestSCMConfiguration { private OzoneConfiguration conf; @TempDir private File tempDir; @@ -85,7 +78,7 @@ void setup() { } @Test - public void testSCMHAConfig() throws Exception { + public void testSCMConfig() throws Exception { String scmServiceId = "scmserviceId"; conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); @@ -225,7 +218,7 @@ public void testSCMHAConfig() throws Exception { @Test - public void testHAWithSamePortConfig() throws Exception { + public void testSamePortConfig() throws Exception { String scmServiceId = "scmserviceId"; conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); @@ -301,25 +294,7 @@ public void testHAWithSamePortConfig() throws Exception { } @Test - public void testRatisEnabledDefaultConfigWithoutInitializedSCM() - throws IOException { - SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); - when(scmStorageConfig.getState()).thenReturn(Storage.StorageState.NOT_INITIALIZED); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertEquals(SCMHAUtils.isSCMHAEnabled(conf), - ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertFalse(SCMHAUtils.isSCMHAEnabled(conf)); - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertTrue(SCMHAUtils.isSCMHAEnabled(conf)); - } - - @Test - public void testRatisEnabledDefaultConfigWithInitializedSCM() + public void testDefaultConfigWithInitializedSCM() throws IOException { SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); when(scmStorageConfig.getState()) @@ -333,44 +308,4 @@ public void testRatisEnabledDefaultConfigWithInitializedSCM() DefaultConfigManager.clearDefaultConfigs(); assertTrue(SCMHAUtils.isSCMHAEnabled(conf)); } - - @Test - public void testRatisEnabledDefaultConflictConfigWithInitializedSCM() { - SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); - when(scmStorageConfig.getState()) - .thenReturn(Storage.StorageState.INITIALIZED); - 
when(scmStorageConfig.isSCMHAEnabled()).thenReturn(true); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - assertThrows(ConfigurationException.class, - () -> SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig)); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testHAConfig(boolean ratisEnabled) throws IOException { - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ratisEnabled); - SCMStorageConfig scmStorageConfig = newStorageConfig(ratisEnabled); - StorageContainerManager.scmInit(conf, scmStorageConfig.getClusterID()); - assertEquals(ratisEnabled, DefaultConfigManager.getValue( - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, !ratisEnabled)); - } - - @Test - void testInvalidHAConfig() throws IOException { - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - SCMStorageConfig scmStorageConfig = newStorageConfig(true); - String clusterID = scmStorageConfig.getClusterID(); - assertThrows(ConfigurationException.class, - () -> StorageContainerManager.scmInit(conf, clusterID)); - } - - private SCMStorageConfig newStorageConfig( - boolean ratisEnabled) throws IOException { - final SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf); - scmStorageConfig.setClusterId(UUID.randomUUID().toString()); - scmStorageConfig.setSCMHAFlag(ratisEnabled); - scmStorageConfig.initialize(); - return scmStorageConfig; - } - } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java index 4e69f46b6e9..33da298423d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java @@ -20,7 +20,6 @@ import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; @@ -48,7 +47,6 @@ public class TestStatefulServiceStateManagerImpl { @BeforeEach void setup(@TempDir File testDir) throws IOException { conf = SCMTestUtils.getConf(testDir); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); statefulServiceConfig = SCMDBDefinition.STATEFUL_SERVICE_CONFIG.getTable(dbStore); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 6d11cb5fe58..e4e4a57232f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -184,7 +184,6 @@ OzoneConfiguration getConf() { TimeUnit.MILLISECONDS); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); return conf; } @@ -283,7 +282,7 @@ public void testScmLayoutOnHeartbeat() throws Exception { 1, TimeUnit.DAYS); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - assertTrue(scm.checkLeader()); + 
assertTrue(scm.getScmContext().isLeader()); // Register 2 nodes correctly. // These will be used with a faulty node to test pipeline creation. DatanodeDetails goodNode1 = registerWithCapacity(nodeManager); @@ -402,7 +401,7 @@ public void testScmLayoutOnRegister() 1, TimeUnit.DAYS); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - assertTrue(scm.checkLeader()); + assertTrue(scm.getScmContext().isLeader()); // Nodes with mismatched SLV cannot join the cluster. registerWithCapacity(nodeManager, LARGER_SLV_LAYOUT_PROTO, errorNodeNotPermitted); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 1dfbfd32785..900b09c0146 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -358,7 +358,7 @@ public void testClosePipelineShouldFailOnFollower() throws Exception { public void testPipelineReport() throws Exception { try (PipelineManagerImpl pipelineManager = createPipelineManager(true)) { SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(conf, new ArrayList<>(), + new SCMSafeModeManager(conf, mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); Pipeline pipeline = pipelineManager @@ -469,7 +469,7 @@ public void testPipelineOpenOnlyWhenLeaderReported() throws Exception { pipelineManager.getPipeline(pipeline.getId()).getPipelineState()); SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(new OzoneConfiguration(), new ArrayList<>(), + new SCMSafeModeManager(new OzoneConfiguration(), mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); PipelineReportHandler pipelineReportHandler = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java index 13eb4be724c..b2b3530c1e7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java @@ -99,7 +99,7 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, eventQueue, + config, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -179,7 +179,7 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, eventQueue, + config, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -275,7 +275,7 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - 
config, containers, containerManager, pipelineManager, eventQueue, + config, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index 76bafa8b1fb..44594740210 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -120,7 +120,7 @@ private void setup(int nodes, int pipelineFactorThreeCount, HddsProtos.ReplicationFactor.ONE); SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(ozoneConfiguration, containers, containerManager, + new SCMSafeModeManager(ozoneConfiguration, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); rule = scmSafeModeManager.getOneReplicaPipelineSafeModeRule(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index fc8ec9c1912..1d9a41b683f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -136,7 +136,7 @@ private void testSafeMode(int numContainers) throws Exception { ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, null, queue, + config, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -175,7 +175,7 @@ public void testSafeModeExitRule() throws Exception { ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, null, queue, + config, containerManager, null, queue, serviceManager, scmContext); long cutOff = (long) Math.ceil(numContainers * config.getDouble( @@ -244,7 +244,7 @@ public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, - () -> new SCMSafeModeManager(conf, containers, containerManager, + () -> new SCMSafeModeManager(conf, containerManager, pipelineManager, queue, serviceManager, scmContext)); assertThat(exception).hasMessageEndingWith("value should be >= 0.0 and <= 1.0"); } @@ -311,7 +311,7 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, containerManager, pipelineManager, queue, serviceManager, + conf, containerManager, pipelineManager, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -447,7 +447,7 @@ public void testDisableSafeMode() { ContainerManager containerManager = 
mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, containerManager, pipelineManager, queue, serviceManager, + conf, containerManager, pipelineManager, queue, serviceManager, scmContext); assertFalse(scmSafeModeManager.getInSafeMode()); } @@ -489,7 +489,7 @@ public void testContainerSafeModeRule() throws Exception { when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, null, queue, serviceManager, scmContext); + config, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -565,7 +565,7 @@ public void testContainerSafeModeRuleEC(int data, int parity) throws Exception { new ContainerReplicaPendingOps(Clock.system(ZoneId.systemDefault()))); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); + config, containerManager, pipelineManager, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); // Only 20 containers are involved in the calculation, @@ -588,7 +588,7 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, containerManager, null, queue, + conf, containerManager, null, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. @@ -702,7 +702,7 @@ public void testSafeModePipelineExitRule() throws Exception { when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, queue, serviceManager, + config, containerManager, pipelineManager, queue, serviceManager, scmContext); SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = @@ -757,7 +757,7 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, queue, serviceManager, + config, containerManager, pipelineManager, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. 
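To see the effect of the SCMSafeModeManager signature change in one place, here is a minimal sketch of the new test wiring (the helper class name and a few import locations are assumptions, not shown in the patch): the container list no longer goes through the constructor; tests hand a mocked ContainerManager to the manager and the safemode rules fetch containers on demand.

import java.util.List;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
import org.apache.hadoop.hdds.server.events.EventQueue;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/** Hypothetical test helper; only the constructor shape is taken from this patch. */
final class SafeModeTestWiring {

  private SafeModeTestWiring() {
  }

  static SCMSafeModeManager newSafeModeManager(OzoneConfiguration conf,
      List<ContainerInfo> containers, PipelineManager pipelineManager,
      EventQueue eventQueue, SCMServiceManager serviceManager,
      SCMContext scmContext) {
    // The List<ContainerInfo> constructor argument is gone; the safemode rules
    // query the (here mocked) ContainerManager for containers when needed.
    ContainerManager containerManager = mock(ContainerManager.class);
    when(containerManager.getContainers()).thenReturn(containers);
    return new SCMSafeModeManager(conf, containerManager, pipelineManager,
        eventQueue, serviceManager, scmContext);
  }
}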
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java index 8b4bc906e0d..91dfaa1dafb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.upgrade; -import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsTestUtils; @@ -26,19 +25,16 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.upgrade.UpgradeException; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -62,20 +58,12 @@ public static void setup() { ExitUtils.disableSystemExit(); } - @ParameterizedTest - @CsvSource({ - "true, true", - "true, false", - "false, true", - "false, false", - }) - public void testUpgrade(boolean haEnabledBefore, - boolean haEnabledPreFinalized, @TempDir Path dataPath) throws Exception { + @Test + public void testUpgrade(@TempDir Path dataPath) throws Exception { // Write version file for original version. OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, haEnabledBefore); conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dataPath.toString()); conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, dataPath.toString()); // This init should always succeed, since SCM is not pre-finalized yet. @@ -83,43 +71,17 @@ public void testUpgrade(boolean haEnabledBefore, boolean initResult1 = StorageContainerManager.scmInit(conf, CLUSTER_ID); assertTrue(initResult1); - // Set up new pre-finalized SCM. 
- conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - haEnabledPreFinalized); - /* Clusters from Ratis SCM -> Non Ratis SCM - Ratis SCM -> Non Ratis SCM not supported - */ - if (haEnabledPreFinalized != haEnabledBefore) { - if (haEnabledBefore) { - assertThrows(ConfigurationException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - } else { - assertThrows(UpgradeException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - } - return; - } StorageContainerManager scm = HddsTestUtils.getScm(conf); assertEquals(UpgradeFinalizer.Status.FINALIZATION_REQUIRED, scm.getFinalizationManager().getUpgradeFinalizer().getStatus()); - final boolean shouldFail = !haEnabledBefore && haEnabledPreFinalized; + DefaultConfigManager.clearDefaultConfigs(); - if (shouldFail) { - // Start on its own should fail. - assertThrows(UpgradeException.class, scm::start); + boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID); + assertTrue(initResult2); + scm.start(); + scm.stop(); - // Init followed by start should both fail. - // Init is not necessary here, but is allowed to be run. - assertThrows(UpgradeException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - assertThrows(UpgradeException.class, scm::start); - } else { - boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID); - assertTrue(initResult2); - scm.start(); - scm.stop(); - } } } diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 6ff87083c03..0c4d5598192 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,18 +21,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Test Utils - Apache Ozone HDDS Test Utils jar - - - - + Apache Ozone HDDS Test Utils + Apache Ozone Distributed Data Store Test Utils - org.assertj - assertj-core + ch.qos.reload4j + reload4j com.google.guava @@ -50,25 +43,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging - org.junit.jupiter - junit-jupiter-api + jakarta.annotation + jakarta.annotation-api - org.junit.platform - junit-platform-launcher - provided + org.apache.commons + commons-lang3 - ch.qos.reload4j - reload4j + org.apache.logging.log4j + log4j-api - jakarta.annotation - jakarta.annotation-api + org.assertj + assertj-core - org.apache.commons - commons-lang3 + org.junit.jupiter + junit-jupiter-api org.slf4j @@ -85,10 +77,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.logging.log4j - log4j-api - org.apache.logging.log4j log4j-core @@ -99,6 +87,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.jacoco.core provided + + org.junit.platform + junit-platform-launcher + provided + org.mockito mockito-core diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index 8a770424766..959326e210f 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -20,6 +20,7 @@ import java.io.ByteArrayOutputStream; import java.io.File; +import java.io.InputStream; import java.io.OutputStream; import java.io.PrintStream; import java.io.StringWriter; @@ -34,6 
+35,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; +import org.apache.commons.io.input.CharSequenceInputStream; import org.apache.commons.lang3.tuple.Pair; import org.apache.log4j.Layout; import org.apache.log4j.Level; @@ -45,6 +47,8 @@ import java.lang.reflect.Modifier; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import static java.nio.charset.StandardCharsets.UTF_8; @@ -353,87 +357,102 @@ private static long monotonicNow() { return System.nanoTime() / NANOSECONDS_PER_MILLISECOND; } - /** - * Capture output printed to {@link System#err}. - * <p>
- * Usage: - * <pre>
-   *   try (SystemErrCapturer capture = new SystemErrCapturer()) {
-   *     ...
-   *     // Call capture.getOutput() to get the output string
-   *   }
-   * </pre>
- * <p>
- * TODO: Add lambda support once Java 8 is common. - * {@code - * SystemErrCapturer.withCapture(capture -> { - * ... - * }) - * } - */ - public static class SystemErrCapturer implements AutoCloseable { + public static PrintStreamCapturer captureOut() { + return new SystemOutCapturer(); + } + + public static PrintStreamCapturer captureErr() { + return new SystemErrCapturer(); + } + + /** Capture contents of a {@code PrintStream}, until {@code close()}d. */ + public abstract static class PrintStreamCapturer implements AutoCloseable, Supplier { private final ByteArrayOutputStream bytes; private final PrintStream bytesPrintStream; - private final PrintStream oldErr; + private final PrintStream old; + private final Consumer restore; - public SystemErrCapturer() throws UnsupportedEncodingException { + protected PrintStreamCapturer(PrintStream out, Consumer install) { + old = out; bytes = new ByteArrayOutputStream(); - bytesPrintStream = new PrintStream(bytes, false, UTF_8.name()); - oldErr = System.err; - System.setErr(new TeePrintStream(oldErr, bytesPrintStream)); + try { + bytesPrintStream = new PrintStream(bytes, false, UTF_8.name()); + install.accept(new TeePrintStream(out, bytesPrintStream)); + restore = install; + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException(e); + } + } + + @Override + public String get() { + return getOutput(); + } + + public String getOutput() { + try { + return bytes.toString(UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException(e); + } } - public String getOutput() throws UnsupportedEncodingException { - return bytes.toString(UTF_8.name()); + public void reset() { + bytes.reset(); } @Override public void close() throws Exception { IOUtils.closeQuietly(bytesPrintStream); - System.setErr(oldErr); + restore.accept(old); } } /** - * Capture output printed to {@link System#out}. + * Capture output printed to {@link System#err}. *

* Usage: * <pre>
-   *   try (SystemOutCapturer capture = new SystemOutCapturer()) {
+   *   try (PrintStreamCapturer capture = captureErr()) {
    *     ...
    *     // Call capture.getOutput() to get the output string
    *   }
    * </pre>
- * <p>
- * TODO: Add lambda support once Java 8 is common. - * {@code - * SystemOutCapturer.withCapture(capture -> { - * ... - * }) - * } */ - public static class SystemOutCapturer implements AutoCloseable { - private final ByteArrayOutputStream bytes; - private final PrintStream bytesPrintStream; - private final PrintStream oldOut; - - public SystemOutCapturer() throws - UnsupportedEncodingException { - bytes = new ByteArrayOutputStream(); - bytesPrintStream = new PrintStream(bytes, false, UTF_8.name()); - oldOut = System.out; - System.setOut(new TeePrintStream(oldOut, bytesPrintStream)); + public static class SystemErrCapturer extends PrintStreamCapturer { + public SystemErrCapturer() { + super(System.err, System::setErr); } + } - public String getOutput() throws UnsupportedEncodingException { - return bytes.toString(UTF_8.name()); + /** + * Capture output printed to {@link System#out}. + *

+ * Usage: + * <pre>
+   *   try (PrintStreamCapturer capture = captureOut()) {
+   *     ...
+   *     // Call capture.getOutput() to get the output string
+   *   }
+   * </pre>
+ */ + public static class SystemOutCapturer extends PrintStreamCapturer { + public SystemOutCapturer() { + super(System.out, System::setOut); } + } - @Override - public void close() throws Exception { - IOUtils.closeQuietly(bytesPrintStream); - System.setOut(oldOut); - } + /** + * Replaces {@link System#in} with a stream that provides {@code lines} as input. + * @return an {@code AutoCloseable} to restore the original {@link System#in} stream + */ + public static AutoCloseable supplyOnSystemIn(String... lines) { + final InputStream original = System.in; + final InputStream in = CharSequenceInputStream.builder() + .setCharSequence(String.join("\n", lines)) + .get(); + System.setIn(in); + return () -> System.setIn(original); } /** diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/IntLambda.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/IntLambda.java new file mode 100644 index 00000000000..912b7b051b2 --- /dev/null +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/IntLambda.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import java.util.function.IntSupplier; + +/** Test utilities for working with lambdas returning int value. */ +public interface IntLambda { + + static ToIntExecutable withTextFromSystemIn(String... lines) { + return runnable -> { + try (AutoCloseable ignored = GenericTestUtils.supplyOnSystemIn(lines)) { + return runnable.getAsInt(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + } + + /** Function that takes a block of code returning int, executes it, and returns the value. */ + @FunctionalInterface + interface ToIntExecutable { + int execute(IntSupplier code); + } + +} diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 5b77f394c96..6a7dd1e9706 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -25,14 +22,55 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-tools 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Tools - Apache Ozone HDDS Tools jar - - - + Apache Ozone HDDS Tools + Apache Ozone Distributed Data Store Tools + + ch.qos.reload4j + reload4j + + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + com.google.guava + guava + + + commons-cli + commons-cli + + + commons-io + commons-io + + + info.picocli + picocli + + + org.apache.commons + commons-lang3 + + + org.apache.ozone + hdds-client + org.apache.ozone hdds-common @@ -59,20 +97,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-client - - - - org.apache.commons - commons-lang3 + hdds-server-scm org.apache.ratis ratis-common - ratis-tools org.apache.ratis + ratis-tools ${ratis.version} @@ -81,22 +114,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - commons-cli - commons-cli - - - ch.qos.reload4j - reload4j - org.kohsuke.metainf-services metainf-services - - org.xerial - sqlite-jdbc - org.slf4j slf4j-api @@ -107,39 +128,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${slf4j.version} - org.apache.ozone - hdds-server-scm - - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.google.guava - guava - - - - info.picocli - picocli + org.xerial + sqlite-jdbc - - commons-io - commons-io + commons-codec + commons-codec + test @@ -152,8 +147,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-container-service - test test-jar + test org.apache.ozone @@ -165,11 +160,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - commons-codec - commons-codec - test - @@ -201,7 +191,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java new file mode 100644 index 00000000000..1201f2058c6 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import picocli.CommandLine; + +import static picocli.CommandLine.Spec.Target.MIXEE; + +/** Base functionality for all Ozone CLI mixins. 
*/ +@CommandLine.Command +public abstract class AbstractMixin { + + @CommandLine.Spec(MIXEE) + private CommandLine.Model.CommandSpec spec; + + protected CommandLine.Model.CommandSpec spec() { + return spec; + } + + protected GenericParentCommand rootCommand() { + return AbstractSubcommand.findRootCommand(spec); + } + + protected OzoneConfiguration getOzoneConf() { + return rootCommand().getOzoneConf(); + } + +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java new file mode 100644 index 00000000000..00d907c5ce5 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.ratis.util.MemoizedSupplier; +import picocli.CommandLine; + +import java.io.PrintWriter; +import java.util.function.Supplier; + +/** Base functionality for all Ozone subcommands. */ +@CommandLine.Command( + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class +) +public abstract class AbstractSubcommand { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + private final Supplier rootSupplier = + MemoizedSupplier.valueOf(() -> findRootCommand(spec)); + + protected CommandLine.Model.CommandSpec spec() { + return spec; + } + + /** Get the Ozone object annotated with {@link CommandLine.Command}) that was used to run this command. + * Usually this is some subclass of {@link GenericCli}, but in unit tests it could be any subcommand. */ + protected GenericParentCommand rootCommand() { + return rootSupplier.get(); + } + + protected boolean isVerbose() { + return rootCommand().isVerbose(); + } + + /** @see GenericParentCommand#getOzoneConf() */ + protected OzoneConfiguration getOzoneConf() { + return rootCommand().getOzoneConf(); + } + + static GenericParentCommand findRootCommand(CommandLine.Model.CommandSpec spec) { + Object root = spec.root().userObject(); + return root instanceof GenericParentCommand + ? (GenericParentCommand) root + : new NoParentCommand(); + } + + /** No-op implementation for unit tests, which may bypass creation of GenericCli object. 
*/ + private static class NoParentCommand implements GenericParentCommand { + + private final OzoneConfiguration conf = new OzoneConfiguration(); + + @Override + public boolean isVerbose() { + return false; + } + + @Override + public OzoneConfiguration getOzoneConf() { + return conf; + } + } + + protected PrintWriter out() { + return spec().commandLine().getOut(); + } + + protected PrintWriter err() { + return spec().commandLine().getErr(); + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java index 2264f096a28..408f5a53d64 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.hdds.scm.cli; import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; - -import java.util.concurrent.Callable; /** * Subcommand to group container balancer related operations. @@ -90,14 +85,6 @@ ContainerBalancerStatusSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class ContainerBalancerCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class ContainerBalancerCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java index a16e5227514..b5c962d0090 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group replication manager related operations. 
@@ -42,14 +37,6 @@ ReplicationManagerStatusSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class ReplicationManagerCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class ReplicationManagerCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java index 49f73e6faea..de0c1e64a70 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group safe mode related operations. @@ -42,14 +37,6 @@ SafeModeWaitSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class SafeModeCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class SafeModeCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java index dea8ac0ec87..faff193fa93 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java @@ -19,7 +19,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericParentCommand; +import org.apache.hadoop.hdds.cli.AbstractMixin; import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -33,15 +33,11 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; -import static picocli.CommandLine.Spec.Target.MIXEE; /** * Defines command-line option for SCM address. 
*/ -public class ScmOption { - - @CommandLine.Spec(MIXEE) - private CommandLine.Model.CommandSpec spec; +public class ScmOption extends AbstractMixin { @CommandLine.Option(names = {"--scm"}, description = "The destination scm (host:port)") @@ -53,9 +49,7 @@ public class ScmOption { private String scmServiceId; public ScmClient createScmClient() throws IOException { - GenericParentCommand parent = (GenericParentCommand) - spec.root().userObject(); - OzoneConfiguration conf = parent.createOzoneConfiguration(); + OzoneConfiguration conf = getOzoneConf(); checkAndSetSCMAddressArg(conf); return new ContainerOperationClient(conf); @@ -91,13 +85,10 @@ private void checkAndSetSCMAddressArg(MutableConfigurationSource conf) { public SCMSecurityProtocol createScmSecurityClient() { try { - GenericParentCommand parent = (GenericParentCommand) - spec.root().userObject(); - return getScmSecurityClient(parent.createOzoneConfiguration()); + return getScmSecurityClient(getOzoneConf()); } catch (IOException ex) { throw new IllegalArgumentException( "Can't create SCM Security client", ex); } } - } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java index 6dc09c2cbec..a0afddd9a40 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.cli; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; @@ -26,7 +27,7 @@ /** * Base class for admin commands that connect via SCM client. */ -public abstract class ScmSubcommand implements Callable { +public abstract class ScmSubcommand extends AbstractSubcommand implements Callable { @CommandLine.Mixin private ScmOption scmOption; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java index 211e3bb0925..c78ec1ed020 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli.cert; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Sub command for certificate related operations. 
@@ -43,14 +38,5 @@ }) @MetaInfServices(AdminSubcommand.class) -public class CertCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } +public class CertCommands implements AdminSubcommand { } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index a38b98c53a9..393d7e88f2d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -17,18 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group container related operations. @@ -47,21 +40,6 @@ UpgradeSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class ContainerCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; - - @ParentCommand - private OzoneAdmin parent; - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } +public class ContainerCommands implements AdminSubcommand { - public OzoneAdmin getParent() { - return parent; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 0e67661bba1..3665a7d3fa7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -27,7 +27,6 @@ import java.util.Scanner; import java.util.stream.Collectors; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -47,9 +46,7 @@ import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Parameters; -import picocli.CommandLine.Spec; /** * This is the handler that process container info command. @@ -61,9 +58,6 @@ versionProvider = HddsVersionProvider.class) public class InfoSubcommand extends ScmSubcommand { - @Spec - private CommandSpec spec; - @CommandLine.Option(names = { "--json" }, defaultValue = "false", description = "Format output as JSON") @@ -181,10 +175,7 @@ private void printDetails(ScmClient scmClient, long containerID, } else { // Print container report info. 
System.out.printf("Container id: %s%n", containerID); - boolean verbose = spec != null - && spec.root().userObject() instanceof GenericParentCommand - && ((GenericParentCommand) spec.root().userObject()).isVerbose(); - if (verbose) { + if (isVerbose()) { System.out.printf("Pipeline Info: %s%n", container.getPipeline()); } else { System.out.printf("Pipeline id: %s%n", container.getPipeline().getId().getId()); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 88ccef702b3..cf338c7d774 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -38,7 +38,6 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -82,9 +81,6 @@ public class ListSubcommand extends ScmSubcommand { private static final ObjectWriter WRITER; - @ParentCommand - private ContainerCommands parent; - static { ObjectMapper mapper = new ObjectMapper() .registerModule(new JavaTimeModule()) @@ -116,7 +112,7 @@ public void execute(ScmClient scmClient) throws IOException { replication, new OzoneConfiguration()); } - int maxCountAllowed = parent.getParent().getOzoneConf() + int maxCountAllowed = getOzoneConf() .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java index a94f631b5bc..3aeb7813a09 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -40,7 +41,6 @@ import java.io.File; import java.io.InputStreamReader; -import java.io.PrintWriter; import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.List; @@ -56,14 +56,11 @@ "for this datanode.", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class UpgradeSubcommand implements Callable { +public class UpgradeSubcommand extends AbstractSubcommand implements Callable { private static final Logger LOG = LoggerFactory.getLogger(UpgradeSubcommand.class); - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; - @CommandLine.Option(names = {"--volume"}, required = false, description = "volume path") @@ -194,12 +191,4 @@ private OzoneConfiguration getConfiguration() { } return ozoneConfiguration; } - - private static PrintWriter err() { - return spec.commandLine().getErr(); - } - - private static PrintWriter out() { - return spec.commandLine().getOut(); - } } diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index 6c020e46f37..b01a8996b28 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.hdds.scm.cli.datanode; import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; - -import java.util.concurrent.Callable; /** * Subcommand for datanode related operations. @@ -44,14 +39,6 @@ UsageInfoSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class DatanodeCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class DatanodeCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index 31123ae81b5..d9474d7355a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; @@ -25,9 +24,7 @@ import picocli.CommandLine; import picocli.CommandLine.Command; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.Scanner; /** * Decommission one or more datanodes. @@ -39,14 +36,8 @@ versionProvider = HddsVersionProvider.class) public class DecommissionSubCommand extends ScmSubcommand { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @CommandLine.Parameters(description = "One or more host names separated by spaces. 
" + - "To read from stdin, specify '-' and supply the host names " + - "separated by newlines.", - paramLabel = "") - private List parameters = new ArrayList<>(); + @CommandLine.Mixin + private HostNameParameters hostNameParams; @CommandLine.Option(names = { "--force" }, defaultValue = "false", @@ -55,33 +46,22 @@ public class DecommissionSubCommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { - if (parameters.size() > 0) { - List hosts; - // Whether to read from stdin - if (parameters.get(0).equals("-")) { - hosts = new ArrayList<>(); - Scanner scanner = new Scanner(System.in, "UTF-8"); - while (scanner.hasNextLine()) { - hosts.add(scanner.nextLine().trim()); - } - } else { - hosts = parameters; - } - List errors = scmClient.decommissionNodes(hosts, force); - System.out.println("Started decommissioning datanode(s):\n" + - String.join("\n", hosts)); - if (errors.size() > 0) { - for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() + ": " - + error.getError()); - } - // Throwing the exception will cause a non-zero exit status for the - // command. - throw new IOException( - "Some nodes could not enter the decommission workflow"); + List hosts = hostNameParams.getHostNames(); + List errors = scmClient.decommissionNodes(hosts, force); + System.out.println("Started decommissioning datanode(s):\n" + + String.join("\n", hosts)); + showErrors(errors, "Some nodes could not enter the decommission workflow"); + } + + static void showErrors(List errors, String message) throws IOException { + if (!errors.isEmpty()) { + for (DatanodeAdminError error : errors) { + System.err.println("Error: " + error.getHostname() + ": " + + error.getError()); } - } else { - GenericCli.missingSubcommand(spec); + // Throwing the exception will cause a non-zero exit status for the + // command. + throw new IOException(message); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/HostNameParameters.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/HostNameParameters.java new file mode 100644 index 00000000000..4020d256bc6 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/HostNameParameters.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.datanode; + +import picocli.CommandLine; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Scanner; + +/** Parameter for specifying list of hostnames. */ +@CommandLine.Command +public class HostNameParameters { + + @CommandLine.Parameters(description = "One or more host names separated by spaces. 
" + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + arity = "1..*", + paramLabel = "") + private List parameters = new ArrayList<>(); + + public List getHostNames() { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, StandardCharsets.UTF_8.name()); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } + return hosts; + } + +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java index b07af660a8f..23b91323edb 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; @@ -25,9 +24,9 @@ import picocli.CommandLine; import picocli.CommandLine.Command; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.Scanner; + +import static org.apache.hadoop.hdds.scm.cli.datanode.DecommissionSubCommand.showErrors; /** * Place one or more datanodes into Maintenance Mode. @@ -39,14 +38,8 @@ versionProvider = HddsVersionProvider.class) public class MaintenanceSubCommand extends ScmSubcommand { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @CommandLine.Parameters(description = "One or more host names separated by spaces. " + - "To read from stdin, specify '-' and supply the host names " + - "separated by newlines.", - paramLabel = "") - private List parameters = new ArrayList<>(); + @CommandLine.Mixin + private HostNameParameters hostNameParams; @CommandLine.Option(names = {"--end"}, description = "Automatically end maintenance after the given hours. " + @@ -61,34 +54,11 @@ public class MaintenanceSubCommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { - if (parameters.size() > 0) { - List hosts; - // Whether to read from stdin - if (parameters.get(0).equals("-")) { - hosts = new ArrayList<>(); - Scanner scanner = new Scanner(System.in, "UTF-8"); - while (scanner.hasNextLine()) { - hosts.add(scanner.nextLine().trim()); - } - } else { - hosts = parameters; - } - List errors = - scmClient.startMaintenanceNodes(hosts, endInHours, force); - System.out.println("Entering maintenance mode on datanode(s):\n" + - String.join("\n", hosts)); - if (errors.size() > 0) { - for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() + ": " - + error.getError()); - } - // Throwing the exception will cause a non-zero exit status for the - // command. 
- throw new IOException( - "Some nodes could not start the maintenance workflow"); - } - } else { - GenericCli.missingSubcommand(spec); - } + List hosts = hostNameParams.getHostNames(); + List errors = + scmClient.startMaintenanceNodes(hosts, endInHours, force); + System.out.println("Entering maintenance mode on datanode(s):\n" + + String.join("\n", hosts)); + showErrors(errors, "Some nodes could not start the maintenance workflow"); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java index e21d61ed3d7..37f902f6830 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; @@ -25,9 +24,9 @@ import picocli.CommandLine; import picocli.CommandLine.Command; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.Scanner; + +import static org.apache.hadoop.hdds.scm.cli.datanode.DecommissionSubCommand.showErrors; /** * Recommission one or more datanodes. @@ -40,44 +39,15 @@ versionProvider = HddsVersionProvider.class) public class RecommissionSubCommand extends ScmSubcommand { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @CommandLine.Parameters(description = "One or more host names separated by spaces. " + - "To read from stdin, specify '-' and supply the host names " + - "separated by newlines.", - paramLabel = "") - private List parameters = new ArrayList<>(); + @CommandLine.Mixin + private HostNameParameters hostNameParams; @Override public void execute(ScmClient scmClient) throws IOException { - if (parameters.size() > 0) { - List hosts; - // Whether to read from stdin - if (parameters.get(0).equals("-")) { - hosts = new ArrayList<>(); - Scanner scanner = new Scanner(System.in, "UTF-8"); - while (scanner.hasNextLine()) { - hosts.add(scanner.nextLine().trim()); - } - } else { - hosts = parameters; - } - List errors = scmClient.recommissionNodes(hosts); - System.out.println("Started recommissioning datanode(s):\n" + - String.join("\n", hosts)); - if (errors.size() > 0) { - for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() + ": " - + error.getError()); - } - // Throwing the exception will cause a non-zero exit status for the - // command. 
- throw new IOException( - "Some nodes could be recommissioned"); - } - } else { - GenericCli.missingSubcommand(spec); - } + List hosts = hostNameParams.getHostNames(); + List errors = scmClient.recommissionNodes(hosts); + System.out.println("Started recommissioning datanode(s):\n" + + String.join("\n", hosts)); + showErrors(errors, "Some nodes could not be recommissioned"); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index b33a5d1ea96..ab4b98d6818 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -17,11 +17,8 @@ * limitations under the License. */ -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import picocli.CommandLine; import picocli.CommandLine.Command; -import java.util.concurrent.Callable; /** * View status of one or more datanodes. @@ -35,14 +32,6 @@ DecommissionStatusSubCommand.class }) -public class StatusSubCommand implements Callable { - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } +public class StatusSubCommand { - } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java index 9c391035560..531c8fd9e65 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli.pipeline; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group pipeline related operations.
@@ -44,14 +39,6 @@ ClosePipelineSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class PipelineCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class PipelineCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 6b5a1ac0c8b..427237eeaed 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -28,6 +28,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Client jar + true diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java index 76baefd71dd..6181ac55fdc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java @@ -156,8 +156,6 @@ protected void setChecksumType(ContainerProtos.ChecksumType type) { protected abstract AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, long blockLength); - protected abstract String populateBlockChecksumBuf(ByteBuffer blockChecksumByteBuffer) throws IOException; - protected abstract List getChunkInfos( OmKeyLocationInfo keyLocationInfo) throws IOException; @@ -167,6 +165,39 @@ protected ByteBuffer getBlockChecksumFromChunkChecksums(AbstractBlockChecksumCom return blockChecksumComputer.getOutByteBuffer(); } + /** + * Parses out the raw blockChecksum bytes from {@code checksumData} byte + * buffer according to the blockChecksumType and populates the cumulative + * blockChecksumBuf with it. + * + * @return a debug-string representation of the parsed checksum if + * debug is enabled, otherwise null. + */ + + protected String populateBlockChecksumBuf(ByteBuffer blockChecksumByteBuffer) throws IOException { + String blockChecksumForDebug = null; + switch (getCombineMode()) { + case MD5MD5CRC: + final MD5Hash md5 = new MD5Hash(blockChecksumByteBuffer.array()); + md5.write(getBlockChecksumBuf()); + if (LOG.isDebugEnabled()) { + blockChecksumForDebug = md5.toString(); + } + break; + case COMPOSITE_CRC: + byte[] crcBytes = blockChecksumByteBuffer.array(); + if (LOG.isDebugEnabled()) { + blockChecksumForDebug = CrcUtil.toMultiCrcString(crcBytes); + } + getBlockChecksumBuf().write(crcBytes); + break; + default: + throw new IOException( + "Unknown combine mode: " + getCombineMode()); + } + return blockChecksumForDebug; + }; + /** * Compute block checksums block by block and append the raw bytes of the * block checksums into getBlockChecksumBuf(). 
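Illustration (not part of the patch): the CLI hunks above replace per-command @Spec fields and Callable boilerplate with the new AbstractSubcommand base class, which exposes the root command's configuration, verbose flag and picocli writers. A minimal sketch of a subcommand built on it follows; the class name ExampleSubcommand, the command name "print-conf" and the --key option are invented for this example, while getOzoneConf(), isVerbose(), out(), err() and rootCommand() come from the patch.

import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.AbstractSubcommand;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import picocli.CommandLine;

// Hypothetical subcommand used only to illustrate the new base class.
@CommandLine.Command(name = "print-conf", description = "Print a configuration value.")
public class ExampleSubcommand extends AbstractSubcommand implements Callable<Void> {

  @CommandLine.Option(names = "--key", defaultValue = "ozone.om.address",
      description = "Configuration key to print.")
  private String key;

  @Override
  public Void call() {
    OzoneConfiguration conf = getOzoneConf(); // resolved through rootCommand()
    out().println(key + " = " + conf.get(key));
    if (isVerbose()) {
      err().println("root command: " + rootCommand().getClass().getSimpleName());
    }
    return null;
  }
}

If such a command runs outside a GenericCli-based tool (for example in a unit test), findRootCommand falls back to the NoParentCommand stub, so getOzoneConf() still returns a fresh OzoneConfiguration.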
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java index db36b9837ad..8f9daed6c0e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -37,7 +36,6 @@ import org.apache.hadoop.security.token.Token; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -60,33 +58,6 @@ protected AbstractBlockChecksumComputer getBlockChecksumComputer(List getChunkInfos(OmKeyLocationInfo keyLocationInfo) throws IOException { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java index 9c2df0fdb47..27a08617618 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -36,7 +35,6 @@ import org.apache.hadoop.security.token.Token; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.List; /** @@ -107,48 +105,4 @@ protected List getChunkInfos( return chunks; } - - /** - * Parses out the raw blockChecksum bytes from {@code checksumData} byte - * buffer according to the blockChecksumType and populates the cumulative - * blockChecksumBuf with it. - * - * @return a debug-string representation of the parsed checksum if - * debug is enabled, otherwise null. 
- */ - @Override - protected String populateBlockChecksumBuf(ByteBuffer checksumData) - throws IOException { - String blockChecksumForDebug = null; - switch (getCombineMode()) { - case MD5MD5CRC: - //read md5 - final MD5Hash md5 = new MD5Hash(checksumData.array()); - md5.write(getBlockChecksumBuf()); - if (LOG.isDebugEnabled()) { - blockChecksumForDebug = md5.toString(); - } - break; - case COMPOSITE_CRC: - // TODO: abort if chunk checksum type is not CRC32/CRC32C - //BlockChecksumType returnedType = PBHelperClient.convert( - // checksumData.getBlockChecksumOptions().getBlockChecksumType()); - /*if (returnedType != BlockChecksumType.COMPOSITE_CRC) { - throw new IOException(String.format( - "Unexpected blockChecksumType '%s', expecting COMPOSITE_CRC", - returnedType)); - }*/ - byte[] crcBytes = checksumData.array(); - if (LOG.isDebugEnabled()) { - blockChecksumForDebug = CrcUtil.toSingleCrcString(crcBytes); - } - getBlockChecksumBuf().write(crcBytes); - break; - default: - throw new IOException( - "Unknown combine mode: " + getCombineMode()); - } - - return blockChecksumForDebug; - } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java index 4e5a35a539c..67fc205cbf7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java @@ -116,6 +116,18 @@ public void flush() throws IOException { } } + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override + public void hsync() throws IOException { + if (this.byteBufferStreamOutput != null) { + this.byteBufferStreamOutput.hsync(); + } + } + @Override public void close() throws IOException { if (this.byteBufferStreamOutput != null) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java index 8e80b381041..153d514cfef 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.client.io; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; @@ -62,6 +63,7 @@ public class BlockDataStreamOutputEntryPool implements KeyMetadataAware { private final long openID; private final ExcludeList excludeList; private List bufferList; + private ContainerBlockID lastUpdatedBlockId = new ContainerBlockID(-1, -1); @SuppressWarnings({"parameternumber", "squid:S00107"}) public BlockDataStreamOutputEntryPool( @@ -152,6 +154,33 @@ public List getLocationInfoList() { return locationInfoList; } + void hsyncKey(long offset) throws IOException { + if (keyArgs != null) { + // in test, this could be null + keyArgs.setDataSize(offset); + keyArgs.setLocationInfoList(getLocationInfoList()); + // When the key is multipart upload part file upload, we should not + // commit the key, as this is not an actual key, this is a just a + // partial key of a large 
file. + if (keyArgs.getIsMultipartKey()) { + throw new IOException("Hsync is unsupported for multipart keys."); + } else { + if (keyArgs.getLocationInfoList().size() == 0) { + omClient.hsyncKey(keyArgs, openID); + } else { + ContainerBlockID lastBLockId = keyArgs.getLocationInfoList().get(keyArgs.getLocationInfoList().size() - 1) + .getBlockID().getContainerBlockID(); + if (!lastUpdatedBlockId.equals(lastBLockId)) { + omClient.hsyncKey(keyArgs, openID); + lastUpdatedBlockId = lastBLockId; + } + } + } + } else { + LOG.warn("Closing KeyOutputStream, but key args is null"); + } + } + /** * Discards the subsequent pre allocated blocks and removes the streamEntries * from the streamEntries list for the container which is closed. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java index e5a43819a3c..811435b8489 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java @@ -65,7 +65,7 @@ public class KeyDataStreamOutput extends AbstractDataStreamOutput * Defines stream action while calling handleFlushOrClose. */ enum StreamAction { - FLUSH, CLOSE, FULL + FLUSH, HSYNC, CLOSE, FULL } public static final Logger LOG = @@ -234,6 +234,21 @@ private int writeToDataStreamOutput(BlockDataStreamOutputEntry current, return writeLen; } + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override + public void hsync() throws IOException { + checkNotClosed(); + final long hsyncPos = writeOffset; + handleFlushOrClose(KeyDataStreamOutput.StreamAction.HSYNC); + Preconditions.checkState(offset >= hsyncPos, + "offset = %s < hsyncPos = %s", offset, hsyncPos); + blockDataStreamOutputEntryPool.hsyncKey(hsyncPos); + } + /** * It performs following actions : * a. Updates the committed length at datanode for the current stream in @@ -394,6 +409,9 @@ private void handleStreamAction(BlockDataStreamOutputEntry entry, case FLUSH: entry.flush(); break; + case HSYNC: + entry.hsync(); + break; default: throw new IOException("Invalid Operation"); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java index c0af1c53010..da61b3e30ef 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.client.io; import org.apache.hadoop.crypto.CryptoOutputStream; +import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -24,6 +25,8 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.Map; +import java.util.Objects; +import java.util.Optional; /** * OzoneDataStreamOutput is used to write data into Ozone. @@ -32,14 +35,52 @@ public class OzoneDataStreamOutput extends ByteBufferOutputStream implements KeyMetadataAware { private final ByteBufferStreamOutput byteBufferStreamOutput; + private boolean enableHsync; + private final Syncable syncable; /** - * Constructs OzoneDataStreamOutput with KeyDataStreamOutput. 
+ * Constructs an instance with a {@link Syncable} {@link OutputStream}. * - * @param byteBufferStreamOutput the underlying ByteBufferStreamOutput + * @param outputStream an {@link OutputStream} which is {@link Syncable}. + * @param enableHsync if false, hsync() executes flush() instead. */ - public OzoneDataStreamOutput(ByteBufferStreamOutput byteBufferStreamOutput) { - this.byteBufferStreamOutput = byteBufferStreamOutput; + public OzoneDataStreamOutput(Syncable outputStream, boolean enableHsync) { + this(Optional.of(Objects.requireNonNull(outputStream, + "outputStream == null")) + .filter(s -> s instanceof OzoneDataStreamOutput) + .map(s -> (OzoneDataStreamOutput)s) + .orElseThrow(() -> new IllegalArgumentException( + "The parameter syncable is not an OutputStream")), + outputStream, enableHsync); + } + + /** + * Constructs an instance with a (non-{@link Syncable}) {@link ByteBufferStreamOutput} + * with an optional {@link Syncable} object. + * + * @param byteBufferStreamOutput for writing data. + * @param syncable an optional parameter + * for accessing the {@link Syncable} feature. + */ + public OzoneDataStreamOutput(ByteBufferStreamOutput byteBufferStreamOutput, Syncable syncable) { + this(byteBufferStreamOutput, syncable, false); + } + + /** + * Constructs an instance with a (non-{@link Syncable}) {@link ByteBufferStreamOutput} + * with an optional {@link Syncable} object. + * + * @param byteBufferStreamOutput for writing data. + * @param syncable an optional parameter + * for accessing the {@link Syncable} feature. + * @param enableHsync if false, hsync() executes flush() instead. + */ + public OzoneDataStreamOutput(ByteBufferStreamOutput byteBufferStreamOutput, Syncable syncable, + boolean enableHsync) { + this.byteBufferStreamOutput = Objects.requireNonNull(byteBufferStreamOutput, + "byteBufferStreamOutput == null"); + this.syncable = syncable != null ? syncable : byteBufferStreamOutput; + this.enableHsync = enableHsync; } @Override @@ -93,6 +134,27 @@ public KeyDataStreamOutput getKeyDataStreamOutput() { return null; } + public void hflush() throws IOException { + hsync(); + } + + public void hsync() throws IOException { + // Disable the feature flag restores the prior behavior. + if (!enableHsync) { + byteBufferStreamOutput.flush(); + return; + } + if (syncable != null) { + if (byteBufferStreamOutput != syncable) { + byteBufferStreamOutput.flush(); + } + syncable.hsync(); + } else { + throw new UnsupportedOperationException(byteBufferStreamOutput.getClass() + + " is not " + Syncable.class.getSimpleName()); + } + } + public ByteBufferStreamOutput getByteBufStreamOutput() { return byteBufferStreamOutput; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java index bd056185e75..f161d80c834 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java @@ -105,6 +105,12 @@ public synchronized void close() throws IOException { outputStream.close(); } + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override public void hsync() throws IOException { // Disable the feature flag restores the prior behavior. 
if (!enableHsync) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 93c675d9b90..3d52e3f6972 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1765,16 +1765,21 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName) @Override public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName, int partNumber) throws IOException { - OmKeyInfo keyInfo = getS3KeyInfo(bucketName, keyName, false); - List filteredKeyLocationInfo = keyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly().stream() - .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() == - partNumber) - .collect(Collectors.toList()); - keyInfo.updateLocationInfoList(filteredKeyLocationInfo, false); - keyInfo.setDataSize(filteredKeyLocationInfo.stream() - .mapToLong(OmKeyLocationInfo::getLength) - .sum()); + OmKeyInfo keyInfo; + if (omVersion.compareTo(OzoneManagerVersion.S3_PART_AWARE_GET) >= 0) { + keyInfo = getS3PartKeyInfo(bucketName, keyName, partNumber); + } else { + keyInfo = getS3KeyInfo(bucketName, keyName, false); + List filteredKeyLocationInfo = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly().stream() + .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() == + partNumber) + .collect(Collectors.toList()); + keyInfo.updateLocationInfoList(filteredKeyLocationInfo, true, true); + keyInfo.setDataSize(filteredKeyLocationInfo.stream() + .mapToLong(OmKeyLocationInfo::getLength) + .sum()); + } return getOzoneKeyDetails(keyInfo); } @@ -1801,6 +1806,29 @@ private OmKeyInfo getS3KeyInfo( return keyInfoWithS3Context.getKeyInfo(); } + @Nonnull + private OmKeyInfo getS3PartKeyInfo( + String bucketName, String keyName, int partNumber) throws IOException { + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + // Volume name is not important, as we call GetKeyInfo with + // assumeS3Context = true, OM will infer the correct s3 volume. 
+ .setVolumeName(OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT) + .setBucketName(bucketName) + .setKeyName(keyName) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) + .setLatestVersionLocation(getLatestVersionLocation) + .setForceUpdateContainerCacheFromSCM(false) + .setMultipartUploadPartNumber(partNumber) + .build(); + KeyInfoWithVolumeContext keyInfoWithS3Context = + ozoneManagerClient.getKeyInfo(keyArgs, true); + keyInfoWithS3Context.getUserPrincipal().ifPresent(this::updateS3Principal); + return keyInfoWithS3Context.getKeyInfo(); + } + private OmKeyInfo getKeyInfo( String volumeName, String bucketName, String keyName, boolean forceUpdateContainerCache) throws IOException { @@ -1996,7 +2024,7 @@ public OzoneDataStreamOutput createMultipartStreamKey( } else { out = createMultipartOutputStream(openKey, uploadID, partNumber); } - return new OzoneDataStreamOutput(out); + return new OzoneDataStreamOutput(out, out); } @Override @@ -2417,7 +2445,7 @@ private OzoneDataStreamOutput createDataStreamOutput(OpenKeySession openKey) } else { out = createOutputStream(openKey); } - return new OzoneDataStreamOutput(out); + return new OzoneDataStreamOutput(out, out); } private KeyDataStreamOutput.Builder newKeyOutputStreamBuilder() { diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index f7f60dcd1d1..1084e418069 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -29,6 +29,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar + true diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 8d24f2de155..cd7c6ff6186 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -923,7 +923,7 @@ public static boolean isBucketSnapshotIndicator(String key) { } public static List> format( - List nodes, int port, String leaderId) { + List nodes, int port, String leaderId, String leaderReadiness) { List> omInfoList = new ArrayList<>(); // Ensuring OM's are printed in correct order List omNodes = nodes.stream() @@ -940,6 +940,7 @@ public static List> format( omInfo.add(info.getOmRoleInfo().getNodeId()); omInfo.add(String.valueOf(port)); omInfo.add(role); + omInfo.add(leaderReadiness); omInfoList.add(omInfo); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java index c7e20fb7e8b..540efe1f88c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.client.io; +import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; import org.apache.ratis.util.function.CheckedFunction; @@ -37,7 +38,7 @@ * @param The underlying {@link OutputStream} type. */ public class SelectorOutputStream - extends OutputStream implements Syncable { + extends OutputStream implements Syncable, StreamCapabilities { /** A buffer backed by a byte[]. 
*/ static final class ByteArrayBuffer { private byte[] array; @@ -182,6 +183,20 @@ public void hsync() throws IOException { } } + @Override + public boolean hasCapability(String capability) { + try { + final OUT out = select(); + if (out instanceof StreamCapabilities) { + return ((StreamCapabilities) out).hasCapability(capability); + } else { + return false; + } + } catch (Exception e) { + return false; + } + } + @Override public void close() throws IOException { select().close(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 880fe8614b2..e274d822b63 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -177,10 +177,6 @@ private OMConfigKeys() { /** * OM Ratis related configurations. */ - public static final String OZONE_OM_RATIS_ENABLE_KEY - = "ozone.om.ratis.enable"; - public static final boolean OZONE_OM_RATIS_ENABLE_DEFAULT - = true; public static final String OZONE_OM_RATIS_PORT_KEY = "ozone.om.ratis.port"; public static final int OZONE_OM_RATIS_PORT_DEFAULT diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index ba28b45a0e5..106ef6a06ab 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -213,6 +213,7 @@ public OmKeyArgs.Builder toBuilder() { if (expectedDataGeneration != null) { builder.setExpectedDataGeneration(expectedDataGeneration); } + return builder; } @@ -227,7 +228,11 @@ public KeyArgs toProtobuf() { .setLatestVersionLocation(getLatestVersionLocation()) .setHeadOp(isHeadOp()) .setForceUpdateContainerCacheFromSCM( - isForceUpdateContainerCacheFromSCM()); + isForceUpdateContainerCacheFromSCM() + ); + if (multipartUploadPartNumber != 0) { + builder.setMultipartNumber(multipartUploadPartNumber); + } if (expectedDataGeneration != null) { builder.setExpectedDataGeneration(expectedDataGeneration); } @@ -308,8 +313,8 @@ public Builder setMultipartUploadID(String uploadID) { return this; } - public Builder setMultipartUploadPartNumber(int partNumber) { - this.multipartUploadPartNumber = partNumber; + public Builder setMultipartUploadPartNumber(int multipartUploadPartNumber) { + this.multipartUploadPartNumber = multipartUploadPartNumber; return this; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index bf4ffa9d8de..5c258ab670d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -318,7 +318,7 @@ public static boolean canEnableHsync(ConfigurationSource conf, boolean isClient) return confHsyncEnabled; } else { if (confHsyncEnabled) { - LOG.warn("Ignoring {} = {} because HBase enhancements are disallowed. To enable it, set {} = true as well.", + LOG.debug("Ignoring {} = {} because HBase enhancements are disallowed. 
To enable it, set {} = true as well.", OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true, confKey); } diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index ba66c5d5272..2c5bb5d7f96 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -31,6 +31,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> false true + true diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java index dbafccf4fd2..13fedd061c6 100644 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java @@ -54,7 +54,7 @@ public class CsiServer extends GenericCli implements Callable { public Void call() throws Exception { String[] originalArgs = getCmd().getParseResult().originalArgs() .toArray(new String[0]); - OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); + OzoneConfiguration ozoneConfiguration = getOzoneConf(); HddsServerUtil.startupShutdownMessage(OzoneVersionInfo.OZONE_VERSION_INFO, CsiServer.class, originalArgs, LOG, ozoneConfiguration); CsiConfig csiConfig = ozoneConfiguration.getObject(CsiConfig.class); diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 733f0837fda..2c98b3b8500 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -30,6 +30,7 @@ false true true + true diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 0249c7a498d..8b7ed939b27 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -63,6 +63,7 @@ grep -A1 'Crashed tests' "${REPORT_DIR}/output.log" \ cat "${crashes}" >> "${tempfile}" # Check for tests that started but were not finished +timeouts=${REPORT_DIR}/timeouts.txt if grep -q 'There was a timeout.*in the fork' "${REPORT_DIR}/output.log"; then diff -uw \ <(grep -e 'Running org' "${REPORT_DIR}/output.log" \ @@ -75,7 +76,8 @@ if grep -q 'There was a timeout.*in the fork' "${REPORT_DIR}/output.log"; then | sort -u -k2) \ | grep '^- ' \ | awk '{ print $3 }' \ - >> "${tempfile}" + > "${timeouts}" + cat "${timeouts}" >> "${tempfile}" fi sort -u "${tempfile}" | tee "${REPORT_DIR}/summary.txt" @@ -118,5 +120,11 @@ if [[ -s "${crashes}" ]]; then fi rm -f "${crashes}" +if [[ -s "${timeouts}" ]]; then + printf "# Fork Timeout\n\n" >> "$SUMMARY_FILE" + cat "${timeouts}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${timeouts}" + ## generate counter wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures" diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml index ea4a4da516e..f6d6278ac67 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml @@ -16,7 +16,7 @@ --> -