, GenericParentCommand {
+@CommandLine.Command
+public abstract class GenericCli implements GenericParentCommand {
public static final int EXECUTION_ERROR_EXIT_CODE = -1;
@@ -71,15 +70,6 @@ public GenericCli(CommandLine.IFactory factory) {
ExtensibleParentCommand.addSubcommands(cmd);
}
- /**
- * Handle the error when subcommand is required but not set.
- */
- public static void missingSubcommand(CommandSpec spec) {
- System.err.println("Incomplete command");
- spec.commandLine().usage(System.err);
- System.exit(EXECUTION_ERROR_EXIT_CODE);
- }
-
public void run(String[] argv) {
int exitCode = execute(argv);
@@ -103,11 +93,6 @@ protected void printError(Throwable error) {
}
}
- @Override
- public Void call() throws Exception {
- throw new MissingSubcommandException(cmd);
- }
-
@Override
public OzoneConfiguration getOzoneConf() {
return config;
@@ -121,7 +106,7 @@ public UserGroupInformation getUser() throws IOException {
}
@VisibleForTesting
- public picocli.CommandLine getCmd() {
+ public CommandLine getCmd() {
return cmd;
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
deleted file mode 100644
index 759476579e9..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import picocli.CommandLine;
-
-/**
- * Exception to throw if subcommand is not selected but required.
- */
-public class MissingSubcommandException extends CommandLine.ParameterException {
-
- public MissingSubcommandException(CommandLine cmd) {
- super(cmd, "Incomplete command");
- }
-
-}
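
Note: with GenericCli no longer implementing Callable and MissingSubcommandException removed, a bare parent invocation is handled by picocli itself, which prints its standard "Missing required subcommand" message (the admin.robot smoketest below is updated accordingly). Entry points that are directly runnable now declare Callable on their own, as the service starters in the following hunks do. A minimal sketch of that pattern, using a hypothetical subclass name:

import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import picocli.CommandLine;

@CommandLine.Command(name = "example-starter",
    description = "Hypothetical service starter",
    mixinStandardHelpOptions = true)
public class ExampleStarter extends GenericCli implements Callable<Void> {

  @Override
  public Void call() throws Exception {
    // start the service using the configuration GenericCli provides
    OzoneConfiguration conf = getOzoneConf();
    return null;
  }

  public static void main(String[] args) {
    // GenericCli.run() parses the arguments and executes the picocli command
    new ExampleStarter().run(args);
  }
}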
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index bf33b9780d2..a6980e232b1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -26,6 +26,7 @@
import java.util.List;
import java.util.Map;
import java.util.UUID;
+import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -97,7 +98,7 @@
hidden = true, description = "Start the datanode for ozone",
versionProvider = HddsVersionProvider.class,
mixinStandardHelpOptions = true)
-public class HddsDatanodeService extends GenericCli implements ServicePlugin {
+public class HddsDatanodeService extends GenericCli implements Callable<Void>, ServicePlugin {
private static final Logger LOG = LoggerFactory.getLogger(
HddsDatanodeService.class);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
index 8c0044f66a9..1eef7bce14c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
@@ -37,6 +37,7 @@
import picocli.CommandLine.Command;
import java.io.IOException;
+import java.util.concurrent.Callable;
import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY;
@@ -49,7 +50,7 @@
hidden = true, description = "Start or initialize the scm server.",
versionProvider = HddsVersionProvider.class,
mixinStandardHelpOptions = true)
-public class StorageContainerManagerStarter extends GenericCli {
+public class StorageContainerManagerStarter extends GenericCli implements Callable<Void> {
private OzoneConfiguration conf;
private SCMStarterInterface receiver;
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot
index a28888b23f4..2f1d0825b39 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot
@@ -22,7 +22,7 @@ Test Timeout 5 minutes
*** Test Cases ***
Incomplete command
${output} = Execute And Ignore Error ozone admin
- Should contain ${output} Incomplete command
+ Should contain ${output} Missing required subcommand
Should contain ${output} container
Should contain ${output} datanode
Should contain ${output} om
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index 27cb8d8aa3c..a587a628533 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -33,6 +33,7 @@
import picocli.CommandLine.Command;
import java.io.IOException;
+import java.util.concurrent.Callable;
import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY;
@@ -44,7 +45,7 @@
hidden = true, description = "Start or initialize the Ozone Manager.",
versionProvider = HddsVersionProvider.class,
mixinStandardHelpOptions = true)
-public class OzoneManagerStarter extends GenericCli {
+public class OzoneManagerStarter extends GenericCli implements Callable<Void> {
private OzoneConfiguration conf;
private OMStarterInterface receiver;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index 0970c2da687..fc0dc18cce9 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -56,6 +56,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.hdds.ratis.RatisHelper.newJvmPauseMonitor;
@@ -70,7 +71,7 @@
/**
* Recon server main class that stops and starts recon services.
*/
-public class ReconServer extends GenericCli {
+public class ReconServer extends GenericCli implements Callable<Void> {
private static final Logger LOG = LoggerFactory.getLogger(ReconServer.class);
private Injector injector;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java
index c20c9b496f0..511592d3a04 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.util.concurrent.Callable;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.cli.GenericCli;
@@ -53,7 +54,7 @@
hidden = true, description = "S3 compatible rest server.",
versionProvider = HddsVersionProvider.class,
mixinStandardHelpOptions = true)
-public class Gateway extends GenericCli {
+public class Gateway extends GenericCli implements Callable<Void> {
private static final Logger LOG = LoggerFactory.getLogger(Gateway.class);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
index 927e9186ff5..c88b6b2d698 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
@@ -41,6 +41,8 @@
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.Callable;
+
/**
* GenerateOzoneRequiredConfigurations - A tool to generate ozone-site.xml
* This tool generates an ozone-site.xml with minimally required configs.
@@ -56,7 +58,7 @@
description = "Tool to generate template ozone-site.xml",
versionProvider = HddsVersionProvider.class,
mixinStandardHelpOptions = true)
-public final class GenerateOzoneRequiredConfigurations extends GenericCli {
+public final class GenerateOzoneRequiredConfigurations extends GenericCli implements Callable<Void> {
@Parameters(arity = "1..1",
description = "Directory path where ozone-site file should be generated.")
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java
index f19548a1fa7..b6b5cc989b9 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java
@@ -25,6 +25,7 @@
import picocli.CommandLine;
import java.util.Collections;
+import java.util.concurrent.Callable;
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
@@ -33,7 +34,7 @@
*/
@CommandLine.Command(name = "ozone checknative",
description = "Checks if native libraries are loaded")
-public class CheckNative extends GenericCli {
+public class CheckNative extends GenericCli implements Callable<Void> {
public static void main(String[] argv) {
new CheckNative().run(argv);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java
index be8b4ceed17..d1a3518769f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java
@@ -19,13 +19,11 @@
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
-import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.server.JsonUtils;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExtendedUserAccessIdInfo;
import org.apache.hadoop.ozone.shell.OzoneAddress;
-import org.jooq.tools.StringUtils;
import picocli.CommandLine;
import java.io.IOException;
@@ -38,9 +36,6 @@
description = "Get tenant related information of a user")
public class GetUserInfoHandler extends TenantHandler {
- @CommandLine.Spec
- private CommandLine.Model.CommandSpec spec;
-
@CommandLine.Parameters(description = "User name (principal)", arity = "1..1")
private String userPrincipal;
@@ -52,11 +47,6 @@ public class GetUserInfoHandler extends TenantHandler {
protected void execute(OzoneClient client, OzoneAddress address)
throws IOException {
- if (StringUtils.isEmpty(userPrincipal)) {
- GenericCli.missingSubcommand(spec);
- return;
- }
-
final TenantUserInfoValue tenantUserInfo =
client.getObjectStore().tenantGetUserInfo(userPrincipal);
final List<ExtendedUserAccessIdInfo> accessIdInfoList =
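
Note: the explicit empty-string check removed above duplicated validation that picocli already performs — with arity = "1..1" the positional parameter is required, so parsing fails before execute() runs. A rough, self-contained illustration of that behaviour (the class below is hypothetical and not part of this patch):

import java.util.concurrent.Callable;
import picocli.CommandLine;

@CommandLine.Command(name = "user-info")
class UserInfoExample implements Callable<Integer> {

  @CommandLine.Parameters(arity = "1..1", description = "User name (principal)")
  private String userPrincipal;

  @Override
  public Integer call() {
    System.out.println("user: " + userPrincipal);
    return 0;
  }

  public static void main(String[] args) {
    // Invoked with no arguments, picocli reports the missing parameter and
    // prints usage, returning a non-zero exit code; call() is never reached.
    System.exit(new CommandLine(new UserInfoExample()).execute(args));
  }
}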
From 6b8b844dfa7eb30df29ab849e834e0a00d3bc97e Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Tue, 7 Jan 2025 07:42:06 +0100
Subject: [PATCH 07/15] HDDS-12009. Merge FSORepairTool and FSORepairCLI
(#7639)
---
.../ozone/repair/om/TestFSORepairTool.java | 25 +-
.../hadoop/ozone/repair/om/FSORepairCLI.java | 78 --
.../hadoop/ozone/repair/om/FSORepairTool.java | 752 +++++++++---------
.../hadoop/ozone/repair/om/OMRepair.java | 2 +-
4 files changed, 404 insertions(+), 453 deletions(-)
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java
index d37f8ce57fb..fb6472d7bc7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.repair.OzoneRepair;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.BeforeAll;
@@ -89,8 +88,8 @@ public class TestFSORepairTool {
private static FSORepairTool.Report fullReport;
private static FSORepairTool.Report emptyReport;
- private GenericTestUtils.PrintStreamCapturer out;
- private GenericTestUtils.PrintStreamCapturer err;
+ private static GenericTestUtils.PrintStreamCapturer out;
+ private static GenericTestUtils.PrintStreamCapturer err;
@BeforeAll
public static void setup() throws Exception {
@@ -103,6 +102,8 @@ public static void setup() throws Exception {
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
fs = FileSystem.get(conf);
+ out = GenericTestUtils.captureOut();
+ err = GenericTestUtils.captureErr();
cmd = new OzoneRepair().getCmd();
dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath();
@@ -147,19 +148,13 @@ public static void setup() throws Exception {
@BeforeEach
public void init() throws Exception {
- out = GenericTestUtils.captureOut();
- err = GenericTestUtils.captureErr();
- }
-
- @AfterEach
- public void clean() throws Exception {
- // reset stream after each unit test
- IOUtils.closeQuietly(out, err);
+ out.reset();
+ err.reset();
}
@AfterAll
public static void reset() throws IOException {
- IOUtils.closeQuietly(fs, client, cluster);
+ IOUtils.closeQuietly(fs, client, cluster, out, err);
}
/**
@@ -239,7 +234,7 @@ public void testNonExistentBucket() {
// When a non-existent bucket filter is passed
int exitCode = dryRun("--volume", "/vol1", "--bucket", "bucket3");
assertEquals(0, exitCode);
- String cliOutput = out.getOutput();
+ String cliOutput = err.getOutput();
assertThat(cliOutput).contains("Bucket 'bucket3' does not exist in volume '/vol1'.");
}
@@ -249,7 +244,7 @@ public void testNonExistentVolume() {
// When a non-existent volume filter is passed
int exitCode = dryRun("--volume", "/vol5");
assertEquals(0, exitCode);
- String cliOutput = out.getOutput();
+ String cliOutput = err.getOutput();
assertThat(cliOutput).contains("Volume '/vol5' does not exist.");
}
@@ -259,7 +254,7 @@ public void testBucketFilterWithoutVolume() {
// When bucket filter is passed without the volume filter.
int exitCode = dryRun("--bucket", "bucket1");
assertEquals(0, exitCode);
- String cliOutput = out.getOutput();
+ String cliOutput = err.getOutput();
assertThat(cliOutput).contains("--bucket flag cannot be used without specifying --volume.");
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java
deleted file mode 100644
index fd6d75c7136..00000000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.repair.om;
-
-import org.apache.hadoop.ozone.repair.RepairTool;
-import picocli.CommandLine;
-
-/**
- * Parser for scm.db file.
- */
-@CommandLine.Command(
- name = "fso-tree",
- description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " +
- "OM should be stopped while this tool is run."
-)
-public class FSORepairCLI extends RepairTool {
-
- @CommandLine.Option(names = {"--db"},
- required = true,
- description = "Path to OM RocksDB")
- private String dbPath;
-
- @CommandLine.Option(names = {"-r", "--repair"},
- defaultValue = "false",
- description = "Run in repair mode to move unreferenced files and directories to deleted tables.")
- private boolean repair;
-
- @CommandLine.Option(names = {"-v", "--volume"},
- description = "Filter by volume name. Add '/' before the volume name.")
- private String volume;
-
- @CommandLine.Option(names = {"-b", "--bucket"},
- description = "Filter by bucket name")
- private String bucket;
-
- @CommandLine.Option(names = {"--verbose"},
- description = "Verbose output. Show all intermediate steps and deleted keys info.")
- private boolean verbose;
-
- @Override
- public void execute() throws Exception {
- if (checkIfServiceIsRunning("OM")) {
- return;
- }
- if (repair) {
- info("FSO Repair Tool is running in repair mode");
- } else {
- info("FSO Repair Tool is running in debug mode");
- }
- try {
- FSORepairTool
- repairTool = new FSORepairTool(dbPath, repair, volume, bucket, verbose);
- repairTool.run();
- } catch (Exception ex) {
- throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage());
- }
-
- if (verbose) {
- info("FSO repair finished.");
- }
- }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
index 7e0fb23f5aa..a4068415db6 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
@@ -36,9 +36,11 @@
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.helpers.WithObjectID;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.repair.RepairTool;
import org.apache.ratis.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import picocli.CommandLine;
import java.io.File;
import java.io.IOException;
@@ -69,402 +71,471 @@
* The tool is idempotent. reachable.db will not be deleted automatically when the tool finishes,
* in case users want to manually inspect it. It can be safely deleted once the tool finishes.
*/
-public class FSORepairTool {
+@CommandLine.Command(
+ name = "fso-tree",
+ description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " +
+ "OM should be stopped while this tool is run."
+)
+public class FSORepairTool extends RepairTool {
public static final Logger LOG = LoggerFactory.getLogger(FSORepairTool.class);
-
- private final String omDBPath;
- private final DBStore store;
- private final Table<String, OmVolumeArgs> volumeTable;
- private final Table<String, OmBucketInfo> bucketTable;
- private final Table<String, OmDirectoryInfo> directoryTable;
- private final Table<String, OmKeyInfo> fileTable;
- private final Table<String, OmKeyInfo> deletedDirectoryTable;
- private final Table<String, RepeatedOmKeyInfo> deletedTable;
- private final Table<String, SnapshotInfo> snapshotInfoTable;
- private final String volumeFilter;
- private final String bucketFilter;
private static final String REACHABLE_TABLE = "reachable";
- private DBStore reachableDB;
- private final ReportStatistics reachableStats;
- private final ReportStatistics unreachableStats;
- private final ReportStatistics unreferencedStats;
- private final boolean repair;
- private final boolean verbose;
-
- public FSORepairTool(String dbPath, boolean repair, String volume, String bucket, boolean verbose)
- throws IOException {
- this(getStoreFromPath(dbPath), dbPath, repair, volume, bucket, verbose);
- }
- /**
- * Allows passing RocksDB instance from a MiniOzoneCluster directly to this class for testing.
- */
- public FSORepairTool(DBStore dbStore, String dbPath, boolean repair, String volume, String bucket, boolean verbose)
- throws IOException {
- this.reachableStats = new ReportStatistics(0, 0, 0);
- this.unreachableStats = new ReportStatistics(0, 0, 0);
- this.unreferencedStats = new ReportStatistics(0, 0, 0);
-
- this.store = dbStore;
- this.omDBPath = dbPath;
- this.repair = repair;
- this.volumeFilter = volume;
- this.bucketFilter = bucket;
- this.verbose = verbose;
- volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE,
- String.class,
- OmVolumeArgs.class);
- bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE,
- String.class,
- OmBucketInfo.class);
- directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE,
- String.class,
- OmDirectoryInfo.class);
- fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE,
- String.class,
- OmKeyInfo.class);
- deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE,
- String.class,
- OmKeyInfo.class);
- deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE,
- String.class,
- RepeatedOmKeyInfo.class);
- snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE,
- String.class,
- SnapshotInfo.class);
- }
+ @CommandLine.Option(names = {"--db"},
+ required = true,
+ description = "Path to OM RocksDB")
+ private String omDBPath;
- protected static DBStore getStoreFromPath(String dbPath) throws IOException {
- File omDBFile = new File(dbPath);
- if (!omDBFile.exists() || !omDBFile.isDirectory()) {
- throw new IOException(String.format("Specified OM DB instance %s does " +
- "not exist or is not a RocksDB directory.", dbPath));
- }
- // Load RocksDB and tables needed.
- return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1);
- }
+ @CommandLine.Option(names = {"-r", "--repair"},
+ defaultValue = "false",
+ description = "Run in repair mode to move unreferenced files and directories to deleted tables.")
+ private boolean repair;
+
+ @CommandLine.Option(names = {"-v", "--volume"},
+ description = "Filter by volume name. Add '/' before the volume name.")
+ private String volumeFilter;
- public FSORepairTool.Report run() throws Exception {
+ @CommandLine.Option(names = {"-b", "--bucket"},
+ description = "Filter by bucket name")
+ private String bucketFilter;
+
+ @CommandLine.Option(names = {"--verbose"},
+ description = "Verbose output. Show all intermediate steps and deleted keys info.")
+ private boolean verbose;
+
+ @Override
+ public void execute() throws Exception {
+ if (checkIfServiceIsRunning("OM")) {
+ return;
+ }
+ if (repair) {
+ info("FSO Repair Tool is running in repair mode");
+ } else {
+ info("FSO Repair Tool is running in debug mode");
+ }
try {
- if (bucketFilter != null && volumeFilter == null) {
- System.out.println("--bucket flag cannot be used without specifying --volume.");
- return null;
- }
+ Impl repairTool = new Impl();
+ repairTool.run();
+ } catch (Exception ex) {
+ throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage());
+ }
- if (volumeFilter != null) {
- OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter);
- if (volumeArgs == null) {
- System.out.println("Volume '" + volumeFilter + "' does not exist.");
+ if (verbose) {
+ info("FSO repair finished.");
+ }
+ }
+
+ private class Impl {
+
+ private final DBStore store;
+ private final Table<String, OmVolumeArgs> volumeTable;
+ private final Table<String, OmBucketInfo> bucketTable;
+ private final Table<String, OmDirectoryInfo> directoryTable;
+ private final Table<String, OmKeyInfo> fileTable;
+ private final Table<String, OmKeyInfo> deletedDirectoryTable;
+ private final Table<String, RepeatedOmKeyInfo> deletedTable;
+ private final Table<String, SnapshotInfo> snapshotInfoTable;
+ private DBStore reachableDB;
+ private final ReportStatistics reachableStats;
+ private final ReportStatistics unreachableStats;
+ private final ReportStatistics unreferencedStats;
+
+ Impl() throws IOException {
+ this.reachableStats = new ReportStatistics(0, 0, 0);
+ this.unreachableStats = new ReportStatistics(0, 0, 0);
+ this.unreferencedStats = new ReportStatistics(0, 0, 0);
+
+ this.store = getStoreFromPath(omDBPath);
+ volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE,
+ String.class,
+ OmVolumeArgs.class);
+ bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE,
+ String.class,
+ OmBucketInfo.class);
+ directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE,
+ String.class,
+ OmDirectoryInfo.class);
+ fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE,
+ String.class,
+ OmKeyInfo.class);
+ deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE,
+ String.class,
+ OmKeyInfo.class);
+ deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE,
+ String.class,
+ RepeatedOmKeyInfo.class);
+ snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE,
+ String.class,
+ SnapshotInfo.class);
+ }
+
+ public Report run() throws Exception {
+ try {
+ if (bucketFilter != null && volumeFilter == null) {
+ error("--bucket flag cannot be used without specifying --volume.");
return null;
}
- }
- // Iterate all volumes or a specific volume if specified
- try (TableIterator<String, ? extends Table.KeyValue<String, OmVolumeArgs>>
- volumeIterator = volumeTable.iterator()) {
- try {
- openReachableDB();
- } catch (IOException e) {
- System.out.println("Failed to open reachable database: " + e.getMessage());
- throw e;
+ if (volumeFilter != null) {
+ OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter);
+ if (volumeArgs == null) {
+ error("Volume '" + volumeFilter + "' does not exist.");
+ return null;
+ }
}
- while (volumeIterator.hasNext()) {
- Table.KeyValue<String, OmVolumeArgs> volumeEntry = volumeIterator.next();
- String volumeKey = volumeEntry.getKey();
- if (volumeFilter != null && !volumeFilter.equals(volumeKey)) {
- continue;
+ // Iterate all volumes or a specific volume if specified
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmVolumeArgs>>
+ volumeIterator = volumeTable.iterator()) {
+ try {
+ openReachableDB();
+ } catch (IOException e) {
+ error("Failed to open reachable database: " + e.getMessage());
+ throw e;
}
+ while (volumeIterator.hasNext()) {
+ Table.KeyValue<String, OmVolumeArgs> volumeEntry = volumeIterator.next();
+ String volumeKey = volumeEntry.getKey();
- System.out.println("Processing volume: " + volumeKey);
-
- if (bucketFilter != null) {
- OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter);
- if (bucketInfo == null) {
- //Bucket does not exist in the volume
- System.out.println("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'.");
- return null;
- }
-
- if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) {
- System.out.println("Skipping non-FSO bucket " + bucketFilter);
+ if (volumeFilter != null && !volumeFilter.equals(volumeKey)) {
continue;
}
- processBucket(volumeEntry.getValue(), bucketInfo);
- } else {
+ info("Processing volume: " + volumeKey);
- // Iterate all buckets in the volume.
- try (TableIterator<String, ? extends Table.KeyValue<String, OmBucketInfo>>
- bucketIterator = bucketTable.iterator()) {
- bucketIterator.seek(volumeKey);
- while (bucketIterator.hasNext()) {
- Table.KeyValue<String, OmBucketInfo> bucketEntry = bucketIterator.next();
- String bucketKey = bucketEntry.getKey();
- OmBucketInfo bucketInfo = bucketEntry.getValue();
-
- if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) {
- System.out.println("Skipping non-FSO bucket " + bucketKey);
- continue;
- }
+ if (bucketFilter != null) {
+ OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter);
+ if (bucketInfo == null) {
+ //Bucket does not exist in the volume
+ error("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'.");
+ return null;
+ }
- // Stop this loop once we have seen all buckets in the current
- // volume.
- if (!bucketKey.startsWith(volumeKey)) {
- break;
- }
+ if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) {
+ info("Skipping non-FSO bucket " + bucketFilter);
+ continue;
+ }
- processBucket(volumeEntry.getValue(), bucketInfo);
+ processBucket(volumeEntry.getValue(), bucketInfo);
+ } else {
+
+ // Iterate all buckets in the volume.
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmBucketInfo>>
+ bucketIterator = bucketTable.iterator()) {
+ bucketIterator.seek(volumeKey);
+ while (bucketIterator.hasNext()) {
+ Table.KeyValue<String, OmBucketInfo> bucketEntry = bucketIterator.next();
+ String bucketKey = bucketEntry.getKey();
+ OmBucketInfo bucketInfo = bucketEntry.getValue();
+
+ if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) {
+ info("Skipping non-FSO bucket " + bucketKey);
+ continue;
+ }
+
+ // Stop this loop once we have seen all buckets in the current
+ // volume.
+ if (!bucketKey.startsWith(volumeKey)) {
+ break;
+ }
+
+ processBucket(volumeEntry.getValue(), bucketInfo);
+ }
}
}
}
}
+ } catch (IOException e) {
+ error("An error occurred while processing" + e.getMessage());
+ throw e;
+ } finally {
+ closeReachableDB();
+ store.close();
}
- } catch (IOException e) {
- System.out.println("An error occurred while processing" + e.getMessage());
- throw e;
- } finally {
- closeReachableDB();
- store.close();
+
+ return buildReportAndLog();
}
- return buildReportAndLog();
- }
+ private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException {
+ if (snapshotInfoTable == null) {
+ return false;
+ }
- private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException {
- if (snapshotInfoTable == null) {
+ try (TableIterator<String, ? extends Table.KeyValue<String, SnapshotInfo>> iterator =
+ snapshotInfoTable.iterator()) {
+ while (iterator.hasNext()) {
+ SnapshotInfo snapshotInfo = iterator.next().getValue();
+ String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", "");
+ if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) {
+ return true;
+ }
+ }
+ }
return false;
}
- try (TableIterator<String, ? extends Table.KeyValue<String, SnapshotInfo>> iterator =
- snapshotInfoTable.iterator()) {
- while (iterator.hasNext()) {
- SnapshotInfo snapshotInfo = iterator.next().getValue();
- String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", "");
- if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) {
- return true;
+ private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException {
+ info("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName());
+ if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) {
+ if (!repair) {
+ info(
+ "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. ");
+ } else {
+ info(
+ "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " +
+ "due to snapshot presence.");
+ return;
}
}
+ markReachableObjectsInBucket(volume, bucketInfo);
+ handleUnreachableAndUnreferencedObjects(volume, bucketInfo);
}
- return false;
- }
- private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException {
- System.out.println("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName());
- if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) {
- if (!repair) {
- System.out.println(
- "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. ");
- } else {
- System.out.println(
- "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " +
- "due to snapshot presence.");
- return;
- }
+ private Report buildReportAndLog() {
+ Report report = new Report.Builder()
+ .setReachable(reachableStats)
+ .setUnreachable(unreachableStats)
+ .setUnreferenced(unreferencedStats)
+ .build();
+
+ info("\n" + report);
+ return report;
}
- markReachableObjectsInBucket(volume, bucketInfo);
- handleUnreachableAndUnreferencedObjects(volume, bucketInfo);
- }
- private Report buildReportAndLog() {
- Report report = new Report.Builder()
- .setReachable(reachableStats)
- .setUnreachable(unreachableStats)
- .setUnreferenced(unreferencedStats)
- .build();
+ private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException {
+ // Only put directories in the stack.
+ // Directory keys should have the form /volumeID/bucketID/parentID/name.
+ Stack<String> dirKeyStack = new Stack<>();
- System.out.println("\n" + report);
- return report;
- }
+ // Since the tool uses parent directories to check for reachability, add
+ // a reachable entry for the bucket as well.
+ addReachableEntry(volume, bucket, bucket);
+ // Initialize the stack with all immediate child directories of the
+ // bucket, and mark them all as reachable.
+ Collection<String> childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket);
+ dirKeyStack.addAll(childDirs);
+
+ while (!dirKeyStack.isEmpty()) {
+ // Get one directory and process its immediate children.
+ String currentDirKey = dirKeyStack.pop();
+ OmDirectoryInfo currentDir = directoryTable.get(currentDirKey);
+ if (currentDir == null) {
+ info("Directory key" + currentDirKey + "to be processed was not found in the directory table.");
+ continue;
+ }
- private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException {
- // Only put directories in the stack.
- // Directory keys should have the form /volumeID/bucketID/parentID/name.
- Stack<String> dirKeyStack = new Stack<>();
-
- // Since the tool uses parent directories to check for reachability, add
- // a reachable entry for the bucket as well.
- addReachableEntry(volume, bucket, bucket);
- // Initialize the stack with all immediate child directories of the
- // bucket, and mark them all as reachable.
- Collection<String> childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket);
- dirKeyStack.addAll(childDirs);
-
- while (!dirKeyStack.isEmpty()) {
- // Get one directory and process its immediate children.
- String currentDirKey = dirKeyStack.pop();
- OmDirectoryInfo currentDir = directoryTable.get(currentDirKey);
- if (currentDir == null) {
- System.out.println("Directory key" + currentDirKey + "to be processed was not found in the directory table.");
- continue;
+ // TODO revisit this for a more memory efficient implementation,
+ // possibly making better use of RocksDB iterators.
+ childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir);
+ dirKeyStack.addAll(childDirs);
}
+ }
- // TODO revisit this for a more memory efficient implementation,
- // possibly making better use of RocksDB iterators.
- childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir);
- dirKeyStack.addAll(childDirs);
+ private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException {
+ return deletedDirectoryTable.isExist(dirKey);
}
- }
- private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException {
- return deletedDirectoryTable.isExist(dirKey);
- }
+ private boolean isFileKeyInDeletedTable(String fileKey) throws IOException {
+ return deletedTable.isExist(fileKey);
+ }
- private boolean isFileKeyInDeletedTable(String fileKey) throws IOException {
- return deletedTable.isExist(fileKey);
- }
+ private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException {
+ // Check for unreachable and unreferenced directories in the bucket.
+ String bucketPrefix = OM_KEY_PREFIX +
+ volume.getObjectID() +
+ OM_KEY_PREFIX +
+ bucket.getObjectID();
- private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException {
- // Check for unreachable and unreferenced directories in the bucket.
- String bucketPrefix = OM_KEY_PREFIX +
- volume.getObjectID() +
- OM_KEY_PREFIX +
- bucket.getObjectID();
-
- try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>> dirIterator =
- directoryTable.iterator()) {
- dirIterator.seek(bucketPrefix);
- while (dirIterator.hasNext()) {
- Table.KeyValue<String, OmDirectoryInfo> dirEntry = dirIterator.next();
- String dirKey = dirEntry.getKey();
-
- // Only search directories in this bucket.
- if (!dirKey.startsWith(bucketPrefix)) {
- break;
- }
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>> dirIterator =
+ directoryTable.iterator()) {
+ dirIterator.seek(bucketPrefix);
+ while (dirIterator.hasNext()) {
+ Table.KeyValue<String, OmDirectoryInfo> dirEntry = dirIterator.next();
+ String dirKey = dirEntry.getKey();
+
+ // Only search directories in this bucket.
+ if (!dirKey.startsWith(bucketPrefix)) {
+ break;
+ }
- if (!isReachable(dirKey)) {
- if (!isDirectoryInDeletedDirTable(dirKey)) {
- System.out.println("Found unreferenced directory: " + dirKey);
- unreferencedStats.addDir();
+ if (!isReachable(dirKey)) {
+ if (!isDirectoryInDeletedDirTable(dirKey)) {
+ info("Found unreferenced directory: " + dirKey);
+ unreferencedStats.addDir();
- if (!repair) {
- if (verbose) {
- System.out.println("Marking unreferenced directory " + dirKey + " for deletion.");
+ if (!repair) {
+ if (verbose) {
+ info("Marking unreferenced directory " + dirKey + " for deletion.");
+ }
+ } else {
+ info("Deleting unreferenced directory " + dirKey);
+ OmDirectoryInfo dirInfo = dirEntry.getValue();
+ markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo);
}
} else {
- System.out.println("Deleting unreferenced directory " + dirKey);
- OmDirectoryInfo dirInfo = dirEntry.getValue();
- markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo);
+ unreachableStats.addDir();
}
- } else {
- unreachableStats.addDir();
}
}
}
- }
- // Check for unreachable and unreferenced files
- try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- fileIterator = fileTable.iterator()) {
- fileIterator.seek(bucketPrefix);
- while (fileIterator.hasNext()) {
- Table.KeyValue<String, OmKeyInfo> fileEntry = fileIterator.next();
- String fileKey = fileEntry.getKey();
- // Only search files in this bucket.
- if (!fileKey.startsWith(bucketPrefix)) {
- break;
- }
+ // Check for unreachable and unreferenced files
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ fileIterator = fileTable.iterator()) {
+ fileIterator.seek(bucketPrefix);
+ while (fileIterator.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> fileEntry = fileIterator.next();
+ String fileKey = fileEntry.getKey();
+ // Only search files in this bucket.
+ if (!fileKey.startsWith(bucketPrefix)) {
+ break;
+ }
- OmKeyInfo fileInfo = fileEntry.getValue();
- if (!isReachable(fileKey)) {
- if (!isFileKeyInDeletedTable(fileKey)) {
- System.out.println("Found unreferenced file: " + fileKey);
- unreferencedStats.addFile(fileInfo.getDataSize());
+ OmKeyInfo fileInfo = fileEntry.getValue();
+ if (!isReachable(fileKey)) {
+ if (!isFileKeyInDeletedTable(fileKey)) {
+ info("Found unreferenced file: " + fileKey);
+ unreferencedStats.addFile(fileInfo.getDataSize());
- if (!repair) {
- if (verbose) {
- System.out.println("Marking unreferenced file " + fileKey + " for deletion." + fileKey);
+ if (!repair) {
+ if (verbose) {
+ info("Marking unreferenced file " + fileKey + " for deletion." + fileKey);
+ }
+ } else {
+ info("Deleting unreferenced file " + fileKey);
+ markFileForDeletion(fileKey, fileInfo);
}
} else {
- System.out.println("Deleting unreferenced file " + fileKey);
- markFileForDeletion(fileKey, fileInfo);
+ unreachableStats.addFile(fileInfo.getDataSize());
}
} else {
- unreachableStats.addFile(fileInfo.getDataSize());
+ // NOTE: We are deserializing the proto of every reachable file
+ // just to log it's size. If we don't need this information we could
+ // save time by skipping this step.
+ reachableStats.addFile(fileInfo.getDataSize());
}
- } else {
- // NOTE: We are deserializing the proto of every reachable file
- // just to log it's size. If we don't need this information we could
- // save time by skipping this step.
- reachableStats.addFile(fileInfo.getDataSize());
}
}
}
- }
- protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException {
- try (BatchOperation batch = store.initBatchOperation()) {
- fileTable.deleteWithBatch(batch, fileKey);
-
- RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey);
- RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
- fileInfo, fileInfo.getUpdateID(), true);
- // NOTE: The FSO code seems to write the open key entry with the whole
- // path, using the object's names instead of their ID. This would only
- // be possible when the file is deleted explicitly, and not part of a
- // directory delete. It is also not possible here if the file's parent
- // is gone. The name of the key does not matter so just use IDs.
- deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo);
- if (verbose) {
- System.out.println("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo);
+ protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException {
+ try (BatchOperation batch = store.initBatchOperation()) {
+ fileTable.deleteWithBatch(batch, fileKey);
+
+ RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey);
+ RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+ fileInfo, fileInfo.getUpdateID(), true);
+ // NOTE: The FSO code seems to write the open key entry with the whole
+ // path, using the object's names instead of their ID. This would only
+ // be possible when the file is deleted explicitly, and not part of a
+ // directory delete. It is also not possible here if the file's parent
+ // is gone. The name of the key does not matter so just use IDs.
+ deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo);
+ if (verbose) {
+ info("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo);
+ }
+ store.commitBatchOperation(batch);
}
- store.commitBatchOperation(batch);
}
- }
- protected void markDirectoryForDeletion(String volumeName, String bucketName,
- String dirKeyName, OmDirectoryInfo dirInfo) throws IOException {
- try (BatchOperation batch = store.initBatchOperation()) {
- directoryTable.deleteWithBatch(batch, dirKeyName);
- // HDDS-7592: Make directory entries in deleted dir table unique.
- String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID();
+ protected void markDirectoryForDeletion(String volumeName, String bucketName,
+ String dirKeyName, OmDirectoryInfo dirInfo) throws IOException {
+ try (BatchOperation batch = store.initBatchOperation()) {
+ directoryTable.deleteWithBatch(batch, dirKeyName);
+ // HDDS-7592: Make directory entries in deleted dir table unique.
+ String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID();
- // Convert the directory to OmKeyInfo for deletion.
- OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName());
- deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo);
+ // Convert the directory to OmKeyInfo for deletion.
+ OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName());
+ deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo);
- store.commitBatchOperation(batch);
+ store.commitBatchOperation(batch);
+ }
}
- }
- private Collection<String> getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket,
- WithObjectID currentDir) throws IOException {
-
- Collection<String> childDirs = new ArrayList<>();
-
- try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
- dirIterator = directoryTable.iterator()) {
- String dirPrefix = buildReachableKey(volume, bucket, currentDir);
- // Start searching the directory table at the current directory's
- // prefix to get its immediate children.
- dirIterator.seek(dirPrefix);
- while (dirIterator.hasNext()) {
- Table.KeyValue<String, OmDirectoryInfo> childDirEntry = dirIterator.next();
- String childDirKey = childDirEntry.getKey();
- // Stop processing once we have seen all immediate children of this
- // directory.
- if (!childDirKey.startsWith(dirPrefix)) {
- break;
+ private Collection<String> getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket,
+ WithObjectID currentDir) throws IOException {
+
+ Collection<String> childDirs = new ArrayList<>();
+
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+ dirIterator = directoryTable.iterator()) {
+ String dirPrefix = buildReachableKey(volume, bucket, currentDir);
+ // Start searching the directory table at the current directory's
+ // prefix to get its immediate children.
+ dirIterator.seek(dirPrefix);
+ while (dirIterator.hasNext()) {
+ Table.KeyValue<String, OmDirectoryInfo> childDirEntry = dirIterator.next();
+ String childDirKey = childDirEntry.getKey();
+ // Stop processing once we have seen all immediate children of this
+ // directory.
+ if (!childDirKey.startsWith(dirPrefix)) {
+ break;
+ }
+ // This directory was reached by search.
+ addReachableEntry(volume, bucket, childDirEntry.getValue());
+ childDirs.add(childDirKey);
+ reachableStats.addDir();
}
- // This directory was reached by search.
- addReachableEntry(volume, bucket, childDirEntry.getValue());
- childDirs.add(childDirKey);
- reachableStats.addDir();
}
+
+ return childDirs;
+ }
+
+ /**
+ * Add the specified object to the reachable table, indicating it is part
+ * of the connected FSO tree.
+ */
+ private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException {
+ String reachableKey = buildReachableKey(volume, bucket, object);
+ // No value is needed for this table.
+ reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{});
+ }
+
+ /**
+ * @param fileOrDirKey The key of a file or directory in RocksDB.
+ * @return true if the entry's parent is in the reachable table.
+ */
+ protected boolean isReachable(String fileOrDirKey) throws IOException {
+ String reachableParentKey = buildReachableParentKey(fileOrDirKey);
+
+ return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null;
+ }
+
+ private void openReachableDB() throws IOException {
+ File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db");
+ info("Creating database of reachable directories at " + reachableDBFile);
+ // Delete the DB from the last run if it exists.
+ if (reachableDBFile.exists()) {
+ FileUtils.deleteDirectory(reachableDBFile);
+ }
+
+ ConfigurationSource conf = new OzoneConfiguration();
+ reachableDB = DBStoreBuilder.newBuilder(conf)
+ .setName("reachable.db")
+ .setPath(reachableDBFile.getParentFile().toPath())
+ .addTable(REACHABLE_TABLE)
+ .build();
}
- return childDirs;
+ private void closeReachableDB() throws IOException {
+ if (reachableDB != null) {
+ reachableDB.close();
+ }
+ File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db");
+ if (reachableDBFile.exists()) {
+ FileUtils.deleteDirectory(reachableDBFile);
+ }
+ }
}
- /**
- * Add the specified object to the reachable table, indicating it is part
- * of the connected FSO tree.
- */
- private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException {
- String reachableKey = buildReachableKey(volume, bucket, object);
- // No value is needed for this table.
- reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{});
+ protected static DBStore getStoreFromPath(String dbPath) throws IOException {
+ File omDBFile = new File(dbPath);
+ if (!omDBFile.exists() || !omDBFile.isDirectory()) {
+ throw new IOException(String.format("Specified OM DB instance %s does " +
+ "not exist or is not a RocksDB directory.", dbPath));
+ }
+ // Load RocksDB and tables needed.
+ return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1);
}
/**
@@ -480,17 +551,6 @@ private static String buildReachableKey(OmVolumeArgs volume, OmBucketInfo bucket
object.getObjectID();
}
- /**
- *
- * @param fileOrDirKey The key of a file or directory in RocksDB.
- * @return true if the entry's parent is in the reachable table.
- */
- protected boolean isReachable(String fileOrDirKey) throws IOException {
- String reachableParentKey = buildReachableParentKey(fileOrDirKey);
-
- return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null;
- }
-
/**
* Build an entry in the reachable table for the current object's parent
* object. The object could be a file or directory.
@@ -512,32 +572,6 @@ private static String buildReachableParentKey(String fileOrDirKey) {
parentID;
}
- private void openReachableDB() throws IOException {
- File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db");
- System.out.println("Creating database of reachable directories at " + reachableDBFile);
- // Delete the DB from the last run if it exists.
- if (reachableDBFile.exists()) {
- FileUtils.deleteDirectory(reachableDBFile);
- }
-
- ConfigurationSource conf = new OzoneConfiguration();
- reachableDB = DBStoreBuilder.newBuilder(conf)
- .setName("reachable.db")
- .setPath(reachableDBFile.getParentFile().toPath())
- .addTable(REACHABLE_TABLE)
- .build();
- }
-
- private void closeReachableDB() throws IOException {
- if (reachableDB != null) {
- reachableDB.close();
- }
- File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db");
- if (reachableDBFile.exists()) {
- FileUtils.deleteDirectory(reachableDBFile);
- }
- }
-
/**
* Define a Report to be created.
*/
@@ -549,19 +583,19 @@ public static class Report {
/**
* Builds one report that is the aggregate of multiple others.
*/
- public Report(FSORepairTool.Report... reports) {
+ public Report(Report... reports) {
reachable = new ReportStatistics();
unreachable = new ReportStatistics();
unreferenced = new ReportStatistics();
- for (FSORepairTool.Report report : reports) {
+ for (Report report : reports) {
reachable.add(report.reachable);
unreachable.add(report.unreachable);
unreferenced.add(report.unreferenced);
}
}
- private Report(FSORepairTool.Report.Builder builder) {
+ private Report(Report.Builder builder) {
this.reachable = builder.reachable;
this.unreachable = builder.unreachable;
this.unreferenced = builder.unreferenced;
@@ -591,7 +625,7 @@ public boolean equals(Object other) {
if (other == null || getClass() != other.getClass()) {
return false;
}
- FSORepairTool.Report report = (FSORepairTool.Report) other;
+ Report report = (Report) other;
// Useful for testing.
System.out.println("Comparing reports\nExpect:\n" + this + "\nActual:\n" + report);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java
index 3b880f87543..9e20f6b9d1f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java
@@ -27,7 +27,7 @@
*/
@CommandLine.Command(name = "om",
subcommands = {
- FSORepairCLI.class,
+ FSORepairTool.class,
SnapshotRepair.class,
TransactionInfoRepair.class
},
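
Note: registering FSORepairTool directly keeps ozone repair om fso-tree as the user-facing entry point; only the implementing class changes. A hedged sketch of driving the merged subcommand programmatically, mirroring how TestFSORepairTool exercises it (the database path is illustrative):

CommandLine cmd = new OzoneRepair().getCmd();
int exitCode = cmd.execute(
    "om", "fso-tree",
    "--db", "/path/to/om.db",    // hypothetical OM RocksDB location
    "--volume", "/vol1",
    "--bucket", "bucket1");
// exit code 0 indicates the dry run completed; add "--repair" to actually move
// unreferenced files and directories to the deleted tables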
From 8a774a57df907c1e5c6c274054cfde21f914a33b Mon Sep 17 00:00:00 2001
From: Chung En Lee
Date: Tue, 7 Jan 2025 15:15:26 +0800
Subject: [PATCH 08/15] HDDS-11989. Enable SCM Ratis in tests related to
DeletedBlockLog (#7615)
---
.../hdds/scm/TestStorageContainerManager.java | 12 +----
.../apache/hadoop/ozone/OzoneTestUtils.java | 33 +++++++++++++
.../rpc/TestDeleteWithInAdequateDN.java | 5 ++
.../commandhandler/TestBlockDeletion.java | 48 ++++++++++---------
.../TestDeleteContainerHandler.java | 8 ++++
5 files changed, 72 insertions(+), 34 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index 94c8f914294..47f6d3823d2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -321,17 +321,7 @@ public void testBlockDeletionTransactions() throws Exception {
// after sometime, all the TX should be proceed and by then
// the number of containerBlocks of all known containers will be
// empty again.
- GenericTestUtils.waitFor(() -> {
- try {
- if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
- cluster.getStorageContainerManager().getScmHAManager()
- .asSCMHADBTransactionBuffer().flush();
- }
- return delLog.getNumOfValidTransactions() == 0;
- } catch (IOException e) {
- return false;
- }
- }, 1000, 22000);
+ OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager());
assertTrue(verifyBlocksWithTxnTable(cluster, conf, containerBlocks));
// Continue the work, add some TXs that with known container names,
// but unknown block IDs.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index 884e435d25e..0a5f7114c40 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -161,4 +161,37 @@ public static void closeContainer(StorageContainerManager scm,
container.getState() == HddsProtos.LifeCycleState.CLOSED,
200, 30000);
}
+
+ /**
+ * Flush deleted block log & wait till something was flushed.
+ */
+ public static void flushAndWaitForDeletedBlockLog(StorageContainerManager scm)
+ throws InterruptedException, TimeoutException {
+ GenericTestUtils.waitFor(() -> {
+ try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
+ if (scm.getScmBlockManager().getDeletedBlockLog().getNumOfValidTransactions() > 0) {
+ return true;
+ }
+ } catch (IOException e) {
+ }
+ return false;
+ }, 100, 3000);
+ }
+
+ /**
+ * Wait till all blocks are removed.
+ */
+ public static void waitBlockDeleted(StorageContainerManager scm)
+ throws InterruptedException, TimeoutException {
+ GenericTestUtils.waitFor(() -> {
+ try {
+ if (scm.getScmBlockManager().getDeletedBlockLog().getNumOfValidTransactions() == 0) {
+ return true;
+ }
+ } catch (IOException e) {
+ }
+ return false;
+ }, 1000, 60000);
+ }
}
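
Note: both helpers poll through GenericTestUtils.waitFor — flushAndWaitForDeletedBlockLog flushes the SCM HA transaction buffer until at least one valid deleted-block transaction is visible, while waitBlockDeleted waits for the log to drain completely. A typical call sequence from a test, based on the usages elsewhere in this patch (client, cluster and the key name come from the surrounding test setup):

// delete a key, make sure the deletion reaches SCM's deleted-block log,
// then wait until all pending block-deletion transactions are processed
client.getObjectStore().getVolume(volumeName).getBucket(bucketName).deleteKey("ratis");
OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager());
OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager());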
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index bb42d8a0f57..2b199306b76 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneTestUtils;
import org.apache.hadoop.ozone.RatisTestHelper;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
@@ -65,6 +66,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
@@ -73,6 +75,7 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
+
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeAll;
@@ -103,6 +106,7 @@ public static void init() throws Exception {
conf = new OzoneConfiguration();
+ conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
@@ -281,6 +285,7 @@ void testDeleteKeyWithInAdequateDN() throws Exception {
//cluster.getOzoneManager().deleteKey(keyArgs);
client.getObjectStore().getVolume(volumeName).getBucket(bucketName).
deleteKey("ratis");
+ OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager());
// make sure the chunk was never deleted on the leader even though
// deleteBlock handler is invoked
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index cf7d26847bb..e38312e02e6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -47,7 +47,6 @@
import org.apache.hadoop.hdds.scm.block.ScmBlockDeletingServiceMetrics;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
@@ -95,6 +94,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@@ -133,6 +133,7 @@ public void init() throws Exception {
GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);
GenericTestUtils.setLogLevel(ReplicationManager.LOG, Level.DEBUG);
+ conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true);
conf.set("ozone.replication.allowed-configs",
"^(RATIS/THREE)|(EC/2-1-256k)$");
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
@@ -239,6 +240,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
// verify key blocks were created in DN.
GenericTestUtils.waitFor(() -> {
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
verifyBlocksCreated(omKeyLocationInfoGroupList);
return true;
} catch (Throwable t) {
@@ -283,6 +285,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
// The blocks should be deleted in the DN.
GenericTestUtils.waitFor(() -> {
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
verifyBlocksDeleted(omKeyLocationInfoGroupList);
return true;
} catch (Throwable t) {
@@ -299,6 +302,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
// Verify transactions committed
GenericTestUtils.waitFor(() -> {
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
verifyTransactionsCommitted();
return true;
} catch (Throwable t) {
@@ -380,10 +384,16 @@ public void testContainerStatisticsAfterDelete() throws Exception {
writeClient.deleteKey(keyArgs);
// Wait for blocks to be deleted and container reports to be processed
- GenericTestUtils.waitFor(() ->
- scm.getContainerManager().getContainers().stream()
- .allMatch(c -> c.getUsedBytes() == 0 &&
- c.getNumberOfKeys() == 0), 500, 20000);
+ GenericTestUtils.waitFor(() -> {
+ try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ return scm.getContainerManager().getContainers().stream()
+ .allMatch(c -> c.getUsedBytes() == 0 &&
+ c.getNumberOfKeys() == 0);
+ }, 500, 20000);
Thread.sleep(5000);
// Verify that pending block delete num are as expected with resent cmds
cluster.getHddsDatanodes().forEach(dn -> {
@@ -425,6 +435,7 @@ public void testContainerStatisticsAfterDelete() throws Exception {
assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
@@ -516,14 +527,14 @@ public void testContainerStateAfterDNRestart() throws Exception {
GenericTestUtils.waitFor(() -> {
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
return scm.getContainerManager().getContainerReplicas(
containerId).stream().
allMatch(replica -> replica.isEmpty());
- } catch (ContainerNotFoundException e) {
+ } catch (IOException e) {
throw new RuntimeException(e);
}
- },
- 100, 10 * 1000);
+ }, 100, 10 * 1000);
// Container state should be empty now as key got deleted
assertTrue(getContainerFromDN(
@@ -546,6 +557,7 @@ public void testContainerStateAfterDNRestart() throws Exception {
assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
@@ -560,7 +572,6 @@ public void testContainerStateAfterDNRestart() throws Exception {
}
return true;
}, 500, 30000);
- LOG.info(metrics.toString());
}
/**
@@ -646,14 +657,14 @@ public void testContainerDeleteWithInvalidKeyCount()
// Ensure isEmpty are true for all replica after delete key
GenericTestUtils.waitFor(() -> {
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
return scm.getContainerManager().getContainerReplicas(
containerId).stream()
.allMatch(replica -> replica.isEmpty());
- } catch (ContainerNotFoundException e) {
+ } catch (IOException e) {
throw new RuntimeException(e);
}
- },
- 500, 5 * 2000);
+ }, 500, 5 * 2000);
// Update container replica by making invalid keyCount in one replica
ContainerReplica replicaOne = ContainerReplica.newBuilder()
@@ -683,6 +694,7 @@ public void testContainerDeleteWithInvalidKeyCount()
assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
+ scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
@@ -812,17 +824,7 @@ public void testBlockDeleteCommandParallelProcess() throws Exception {
}
// Wait for block delete command sent from OM
- GenericTestUtils.waitFor(() -> {
- try {
- if (scm.getScmBlockManager().getDeletedBlockLog()
- .getNumOfValidTransactions() > 0) {
- return true;
- }
- } catch (IOException e) {
- }
- return false;
- }, 100, 5000);
-
+ OzoneTestUtils.flushAndWaitForDeletedBlockLog(scm);
long start = System.currentTimeMillis();
    // Wait for all blocks to be deleted.
GenericTestUtils.waitFor(() -> {
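A pattern repeated throughout this file: each waitFor predicate first flushes the SCM HA transaction buffer so the condition it checks observes the latest replicated state. Condensed from the hunks above (scm and the container check are as used in testContainerStatisticsAfterDelete):

    GenericTestUtils.waitFor(() -> {
      try {
        // Flush buffered Ratis transactions so SCM metadata reflects recent deletes.
        scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
      return scm.getContainerManager().getContainers().stream()
          .allMatch(c -> c.getUsedBytes() == 0 && c.getNumberOfKeys() == 0);
    }, 500, 20000);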
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 192c933f53c..705ef1e0d86 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -75,6 +75,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -97,6 +98,7 @@ public class TestDeleteContainerHandler {
@BeforeAll
public static void setup() throws Exception {
conf = new OzoneConfiguration();
+ conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true);
conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
0, StorageUnit.MB);
@@ -196,6 +198,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
.getBucket(bucketName).deleteKey(keyName);
+ OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager());
+ OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager());
// Ensure isEmpty flag is true when key is deleted and container is empty
GenericTestUtils.waitFor(() -> getContainerfromDN(
@@ -313,6 +317,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
.getBucket(bucketName).deleteKey(keyName);
+ OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager());
+ OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager());
// Ensure isEmpty flag is true when key is deleted and container is empty
GenericTestUtils.waitFor(() -> getContainerfromDN(
@@ -652,6 +658,8 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
.getBucket(bucketName).deleteKey(keyName);
+ OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager());
+ OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager());
// Ensure isEmpty flag is true when key is deleted
GenericTestUtils.waitFor(() -> getContainerfromDN(
From 44ba9a3f5d689d003cc8770ad62815d04d2596a2 Mon Sep 17 00:00:00 2001
From: Chung En Lee
Date: Tue, 7 Jan 2025 16:14:24 +0800
Subject: [PATCH 09/15] HDDS-12023. Enable SCM Ratis in TestContainerCommandsEC
(#7650)
---
.../hadoop/hdds/scm/storage/TestContainerCommandsEC.java | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index 1b7eb837cf8..bf40a600e29 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -70,6 +70,7 @@
import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.ec.reconstruction.ECContainerOperationClient;
import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator;
import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics;
@@ -170,6 +171,7 @@ public class TestContainerCommandsEC {
@BeforeAll
public static void init() throws Exception {
config = new OzoneConfiguration();
+ config.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
config.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
config.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
config.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true);
@@ -320,8 +322,10 @@ public void testOrphanBlock() throws Exception {
.setTxID(1L)
.setCount(10)
.build()));
- dn2Service.getDatanodeStateMachine().getContext()
- .addCommand(deleteBlocksCommand);
+ StateContext context = dn2Service.getDatanodeStateMachine().getContext();
+ deleteBlocksCommand.setTerm(context.getTermOfLeaderSCM().isPresent() ?
+ context.getTermOfLeaderSCM().getAsLong() : 0);
+ context.addCommand(deleteBlocksCommand);
try (XceiverClientGrpc client = new XceiverClientGrpc(
createSingleNodePipeline(orphanPipeline, dn2, 1), cluster.getConf())) {
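With SCM Ratis enabled, datanode command handling compares a command's term against the leader term the datanode currently knows, so the test now stamps the synthetic DeleteBlocksCommand before queueing it. A sketch of that pattern using the StateContext accessors from the hunk above; dnService and command stand in for the test's local variables:

    StateContext context = dnService.getDatanodeStateMachine().getContext();
    // Use the leader term known to this datanode; fall back to 0 if none is known yet.
    java.util.OptionalLong term = context.getTermOfLeaderSCM();
    command.setTerm(term.isPresent() ? term.getAsLong() : 0);
    context.addCommand(command);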
From e8d96f422efe094b9191dc2d65459a29e8a8faac Mon Sep 17 00:00:00 2001
From: Chung En Lee
Date: Tue, 7 Jan 2025 17:26:12 +0800
Subject: [PATCH 10/15] HDDS-12022. Enable SCM Ratis in
TestStorageContainerManager (#7651)
---
.../hdds/scm/TestStorageContainerManager.java | 71 ++++++-------------
1 file changed, 22 insertions(+), 49 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index 47f6d3823d2..14df7670f67 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -45,6 +45,7 @@
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.ha.RatisUtil;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
@@ -92,7 +93,6 @@
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
@@ -142,15 +142,12 @@
import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser;
import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.any;
@@ -191,11 +188,13 @@ public void cleanupDefaults() {
public void testRpcPermission() throws Exception {
// Test with default configuration
OzoneConfiguration defaultConf = new OzoneConfiguration();
+ defaultConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
testRpcPermissionWithConf(defaultConf, any -> false, "unknownUser");
// Test with ozone.administrators defined in configuration
String admins = "adminUser1, adminUser2";
OzoneConfiguration ozoneConf = new OzoneConfiguration();
+ ozoneConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS, admins);
// Non-admin user will get permission denied.
// Admin user will pass the permission check.
@@ -267,6 +266,7 @@ private void verifyPermissionDeniedException(Exception e, String userName) {
public void testBlockDeletionTransactions() throws Exception {
int numKeys = 5;
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
TimeUnit.MILLISECONDS);
DatanodeConfiguration datanodeConfiguration = conf.getObject(
@@ -358,6 +358,7 @@ public void testBlockDeletionTransactions() throws Exception {
@Test
public void testOldDNRegistersToReInitialisedSCM() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS);
@@ -369,10 +370,13 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception {
cluster.waitForClusterToBeReady();
HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0);
StorageContainerManager scm = cluster.getStorageContainerManager();
+ File dbDir = scm.getScmMetadataStore().getStore().getDbLocation();
scm.stop();
// re-initialise SCM with new clusterID
+ GenericTestUtils.deleteDirectory(new File(SCMHAUtils.getRatisStorageDir(conf)));
+ GenericTestUtils.deleteDirectory(dbDir);
GenericTestUtils.deleteDirectory(
new File(scm.getScmStorageConfig().getStorageDir()));
String newClusterId = UUID.randomUUID().toString();
@@ -413,7 +417,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception {
datanode.getDatanodeDetails());
GenericTestUtils.waitFor(
() -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100,
- 5000);
+ 30000);
ExitUtil.disableSystemExit();
// As part of processing response for re-register, DN EndpointStateMachine
// goes to GET-VERSION state which checks if there is already existing
@@ -432,6 +436,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception {
assertThat(versionEndPointTaskLog.getOutput()).contains(
"org.apache.hadoop.ozone.common" +
".InconsistentStorageStateException: Mismatched ClusterIDs");
+ scm.stop();
}
}
@@ -439,6 +444,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception {
public void testBlockDeletingThrottling() throws Exception {
int numKeys = 15;
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
@@ -549,6 +555,7 @@ private Map> createDeleteTXLog(
@Test
public void testSCMInitialization(@TempDir Path tempDir) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
Path scmPath = tempDir.resolve("scm-meta");
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
@@ -564,27 +571,13 @@ public void testSCMInitialization(@TempDir Path tempDir) throws Exception {
assertEquals(NodeType.SCM, scmStore.getNodeType());
assertEquals(testClusterId, scmStore.getClusterID());
assertTrue(scmStore.isSCMHAEnabled());
- }
-
- @Test
- public void testSCMInitializationWithHAEnabled(@TempDir Path tempDir) throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
- conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
- Path scmPath = tempDir.resolve("scm-meta");
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-
- final UUID clusterId = UUID.randomUUID();
- // This will initialize SCM
- StorageContainerManager.scmInit(conf, clusterId.toString());
- SCMStorageConfig scmStore = new SCMStorageConfig(conf);
- assertTrue(scmStore.isSCMHAEnabled());
validateRatisGroupExists(conf, clusterId.toString());
}
@Test
public void testSCMReinitialization(@TempDir Path tempDir) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
Path scmPath = tempDir.resolve("scm-meta");
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
//This will set the cluster id in the version file
@@ -646,6 +639,7 @@ public static void validateRatisGroupExists(OzoneConfiguration conf,
@Test
void testSCMInitializationFailure(@TempDir Path tempDir) {
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
Path scmPath = tempDir.resolve("scm-meta");
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
@@ -658,15 +652,21 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
Path scmPath = tempDir.resolve("scm-meta");
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
SCMStorageConfig scmStore = new SCMStorageConfig(conf);
String clusterId = UUID.randomUUID().toString();
String scmId = UUID.randomUUID().toString();
scmStore.setClusterId(clusterId);
scmStore.setScmId(scmId);
+ scmStore.setSCMHAFlag(true);
// writes the version file properties
scmStore.initialize();
+ SCMRatisServerImpl.initialize(clusterId, scmId,
+ SCMHANodeDetails.loadSCMHAConfig(conf, scmStore)
+ .getLocalNodeDetails(), conf);
StorageContainerManager scm = HddsTestUtils.getScmSimple(conf);
+ scm.start();
//Reads the SCM Info from SCM instance
ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
assertEquals(clusterId, scmInfo.getClusterId());
@@ -684,6 +684,7 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception {
public void testScmProcessDatanodeHeartbeat() throws Exception {
String rackName = "/rack1";
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);
StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)),
@@ -726,6 +727,7 @@ public void testScmProcessDatanodeHeartbeat() throws Exception {
public void testCloseContainerCommandOnRestart() throws Exception {
int numKeys = 15;
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
@@ -943,35 +945,6 @@ public void testIncrementalContainerReportQueue() throws Exception {
containerReportExecutors.close();
}
- @Test
- public void testNonRatisToRatis()
- throws IOException, AuthenticationException, InterruptedException,
- TimeoutException {
- final OzoneConfiguration conf = new OzoneConfiguration();
- try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
- .build()) {
- final StorageContainerManager nonRatisSCM = cluster
- .getStorageContainerManager();
- assertNull(nonRatisSCM.getScmHAManager().getRatisServer());
- assertFalse(nonRatisSCM.getScmStorageConfig().isSCMHAEnabled());
- nonRatisSCM.stop();
- nonRatisSCM.join();
-
- DefaultConfigManager.clearDefaultConfigs();
- conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- StorageContainerManager.scmInit(conf, cluster.getClusterId());
- conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, getFreePort());
- conf.unset(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
- cluster.restartStorageContainerManager(false);
-
- final StorageContainerManager ratisSCM = cluster
- .getStorageContainerManager();
- assertNotNull(ratisSCM.getScmHAManager().getRatisServer());
- assertTrue(ratisSCM.getScmStorageConfig().isSCMHAEnabled());
- }
- }
-
private void addTransactions(StorageContainerManager scm,
DeletedBlockLog delLog,
Map> containerBlocksMap)
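The testScmInfo change above illustrates the bootstrap an HA-enabled SCM needs before start(): the storage config must persist the HA flag and the local Ratis group must be initialized. A condensed sketch of that sequence, using the same calls as the hunk; scmPath is a temp directory and clusterId/scmId are freshly generated UUID strings, as in the test:

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());

    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
    scmStore.setClusterId(clusterId);
    scmStore.setScmId(scmId);
    scmStore.setSCMHAFlag(true);   // record the HA decision in the version file
    scmStore.initialize();

    // Create the local Ratis group before the first SCM start.
    SCMRatisServerImpl.initialize(clusterId, scmId,
        SCMHANodeDetails.loadSCMHAConfig(conf, scmStore).getLocalNodeDetails(), conf);

    StorageContainerManager scm = HddsTestUtils.getScmSimple(conf);
    scm.start();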
From 3dfd2410a04259e58afc99c08c89adb4abcea30b Mon Sep 17 00:00:00 2001
From: Wei-Chiu Chuang
Date: Tue, 7 Jan 2025 10:21:17 -0800
Subject: [PATCH 11/15] HDDS-11753. Deprecate file per chunk layout from
datanode code. (#7654)
---
.../container/common/impl/ContainerLayoutVersion.java | 3 ++-
.../ozone/container/keyvalue/KeyValueHandler.java | 10 ++++++++++
.../ozone/container/keyvalue/TestKeyValueHandler.java | 8 ++++++++
3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java
index 210c538f274..99f56baa799 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java
@@ -34,6 +34,7 @@
*/
public enum ContainerLayoutVersion {
+ @Deprecated /* Use FILE_PER_BLOCK instead */
FILE_PER_CHUNK(1, "One file per chunk") {
@Override
public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) {
@@ -47,7 +48,7 @@ public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) {
}
};
- private static final ContainerLayoutVersion
+ public static final ContainerLayoutVersion
DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK;
private static final List CONTAINER_LAYOUT_VERSIONS =
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 9cae71e9baf..0ef8d5e68a0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -40,6 +40,7 @@
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -124,6 +125,7 @@
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest;
import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion;
import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST;
+import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.DEFAULT_LAYOUT;
import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult;
import org.apache.hadoop.util.Time;
@@ -191,6 +193,14 @@ public KeyValueHandler(ConfigurationSource config,
byteBufferToByteString =
ByteStringConversion
.createByteBufferConversion(isUnsafeByteBufferConversionEnabled);
+
+ if (ContainerLayoutVersion.getConfiguredVersion(conf) ==
+ ContainerLayoutVersion.FILE_PER_CHUNK) {
+ LOG.warn("FILE_PER_CHUNK layout is not supported. Falling back to default : {}.",
+ DEFAULT_LAYOUT.name());
+ OzoneConfiguration.of(conf).set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY,
+ DEFAULT_LAYOUT.name());
+ }
}
@VisibleForTesting
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 655ecbb48b4..d02910358de 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -57,6 +57,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -292,6 +293,13 @@ public void testVolumeSetInKeyValueHandler() throws Exception {
keyValueHandler.getVolumeChoosingPolicyForTesting()
.getClass().getName());
+ // Ensures that KeyValueHandler falls back to FILE_PER_BLOCK.
+ conf.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, "FILE_PER_CHUNK");
+ new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet,
+ metrics, c -> { });
+ assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK,
+ conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK));
+
//Set a class which is not of sub class of VolumeChoosingPolicy
conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
"org.apache.hadoop.ozone.container.common.impl.HddsDispatcher");
From 984027cc250448418a253dc1f2ea3e8b596263de Mon Sep 17 00:00:00 2001
From: Nandakumar Vadivelu
Date: Wed, 8 Jan 2025 02:05:10 +0530
Subject: [PATCH 12/15] HDDS-12035. Enable sortpom in
hdds-hadoop-dependency-server and -test (#7659)
---
hadoop-hdds/erasurecode/pom.xml | 4 +-
hadoop-hdds/hadoop-dependency-client/pom.xml | 2 +-
hadoop-hdds/hadoop-dependency-server/pom.xml | 124 +++++++++----------
hadoop-hdds/hadoop-dependency-test/pom.xml | 30 ++---
4 files changed, 76 insertions(+), 84 deletions(-)
diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml
index bb98efe1894..b84b6e087c3 100644
--- a/hadoop-hdds/erasurecode/pom.xml
+++ b/hadoop-hdds/erasurecode/pom.xml
@@ -38,13 +38,13 @@
org.slf4j
slf4j-api
+
+
org.apache.ozone
hdds-config
test
-
-
org.apache.ozone
hdds-hadoop-dependency-test
diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml
index 276f6935584..c05614456e7 100644
--- a/hadoop-hdds/hadoop-dependency-client/pom.xml
+++ b/hadoop-hdds/hadoop-dependency-client/pom.xml
@@ -100,6 +100,7 @@
commons-beanutils
commons-beanutils
+
commons-codec
commons-codec
@@ -152,7 +153,6 @@
org.apache.commons
commons-lang3
-
org.apache.commons
commons-math3
diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml
index 05923dab2cd..324b21ef668 100644
--- a/hadoop-hdds/hadoop-dependency-server/pom.xml
+++ b/hadoop-hdds/hadoop-dependency-server/pom.xml
@@ -12,10 +12,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
-
+
4.0.0
org.apache.ozone
@@ -24,65 +21,68 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
hdds-hadoop-dependency-server
2.0.0-SNAPSHOT
- Apache Ozone Distributed Data Store Hadoop server dependencies
-
- Apache Ozone HDDS Hadoop Server dependencies
jar
+ Apache Ozone HDDS Hadoop Server dependencies
+ Apache Ozone Distributed Data Store Hadoop server dependencies
- true
- true
+
+ true
+
+ com.nimbusds
+ nimbus-jose-jwt
+
+
+
+ commons-cli
+ commons-cli
+
org.apache.hadoop
hadoop-annotations
org.apache.hadoop
- hadoop-common
+ hadoop-auth
${hadoop.version}
- com.nimbusds
- nimbus-jose-jwt
+ ch.qos.reload4j
+ reload4j
- org.xerial.snappy
- snappy-java
+ log4j
+ log4j
org.apache.curator
*
- org.apache.avro
- avro
+ org.apache.kerby
+ kerb-simplekdc
org.apache.zookeeper
zookeeper
- org.apache.commons
+ org.slf4j
*
+
+
+
+ org.apache.hadoop
+ hadoop-common
+ ${hadoop.version}
+
- org.codehaus.jackson
- jackson-mapper-asl
-
-
- org.codehaus.jackson
- jackson-core-asl
-
-
- org.codehaus.jackson
- jackson-jaxrs
-
-
- org.codehaus.jackson
- jackson-xc
+ ch.qos.reload4j
+ reload4j
com.github.pjfanning
@@ -93,32 +93,25 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jsr305
- com.sun.jersey
- *
+ com.nimbusds
+ nimbus-jose-jwt
- org.apache.kerby
- kerb-simplekdc
+ com.sun.jersey
+ *
log4j
log4j
- ch.qos.reload4j
- reload4j
+ org.apache.avro
+ avro
- org.slf4j
+ org.apache.commons
*
-
-
-
- org.apache.hadoop
- hadoop-auth
- ${hadoop.version}
-
org.apache.curator
*
@@ -132,34 +125,41 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
zookeeper
- log4j
- log4j
+ org.codehaus.jackson
+ jackson-core-asl
- ch.qos.reload4j
- reload4j
+ org.codehaus.jackson
+ jackson-jaxrs
+
+
+ org.codehaus.jackson
+ jackson-mapper-asl
+
+
+ org.codehaus.jackson
+ jackson-xc
org.slf4j
*
+
+ org.xerial.snappy
+ snappy-java
+
-
- com.nimbusds
- nimbus-jose-jwt
-
-
-
- commons-cli
- commons-cli
-
org.apache.hadoop
hadoop-hdfs
${hadoop.version}
compile
+
+ ch.qos.reload4j
+ reload4j
+
com.sun.jersey
*
@@ -168,17 +168,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
io.netty
*
-
- org.fusesource.leveldbjni
- leveldbjni-all
-
log4j
log4j
- ch.qos.reload4j
- reload4j
+ org.fusesource.leveldbjni
+ leveldbjni-all
org.slf4j
diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml
index 5df30c7dfdd..48bdff714fb 100644
--- a/hadoop-hdds/hadoop-dependency-test/pom.xml
+++ b/hadoop-hdds/hadoop-dependency-test/pom.xml
@@ -12,10 +12,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
-
+
4.0.0
org.apache.ozone
@@ -24,17 +21,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
hdds-hadoop-dependency-test
2.0.0-SNAPSHOT
- Apache Ozone Distributed Data Store Hadoop test dependencies
-
- Apache Ozone HDDS Hadoop Test dependencies
jar
+ Apache Ozone HDDS Hadoop Test dependencies
+ Apache Ozone Distributed Data Store Hadoop test dependencies
- true
- true
+
+ true
+
+ commons-codec
+ commons-codec
+
+
+ org.apache.commons
+ commons-compress
+
org.apache.hadoop
hadoop-common
@@ -59,14 +63,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
-
- commons-codec
- commons-codec
-
-
- org.apache.commons
- commons-compress
-
org.assertj
From 2fc9c6e5f16724ef4bdc4ba4f9e1988e959a25cf Mon Sep 17 00:00:00 2001
From: Nandakumar Vadivelu
Date: Wed, 8 Jan 2025 10:03:33 +0530
Subject: [PATCH 13/15] HDDS-12034. Enable sortpom in hdds-interface-admin,
-client and -server. (#7660)
---
hadoop-hdds/interface-admin/pom.xml | 21 +++-----
hadoop-hdds/interface-client/pom.xml | 78 +++++++++-------------------
hadoop-hdds/interface-server/pom.xml | 58 +++++++--------------
3 files changed, 53 insertions(+), 104 deletions(-)
diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml
index 94122423085..047db244faa 100644
--- a/hadoop-hdds/interface-admin/pom.xml
+++ b/hadoop-hdds/interface-admin/pom.xml
@@ -12,10 +12,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
-
+
4.0.0
org.apache.ozone
@@ -24,15 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
hdds-interface-admin
2.0.0-SNAPSHOT
- Apache Ozone Distributed Data Store Admin interface
-
- Apache Ozone HDDS Admin Interface
jar
+ Apache Ozone HDDS Admin Interface
+ Apache Ozone Distributed Data Store Admin interface
- true
- true
- true
+
+ true
+
+ true
@@ -72,9 +69,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
test-compile
-
- com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
-
+ com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
${basedir}/src/main/proto/
target/generated-sources/java
false
diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml
index b373d11d507..da6dec5cda4 100644
--- a/hadoop-hdds/interface-client/pom.xml
+++ b/hadoop-hdds/interface-client/pom.xml
@@ -12,10 +12,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
-
+
4.0.0
org.apache.ozone
@@ -24,15 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
hdds-interface-client
2.0.0-SNAPSHOT
- Apache Ozone Distributed Data Store Client interface
-
- Apache Ozone HDDS Client Interface
jar
+ Apache Ozone HDDS Client Interface
+ Apache Ozone Distributed Data Store Client interface
- true
- true
- true
+
+ true
+
+ true
@@ -40,6 +37,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
com.google.protobuf
protobuf-java
+
+ javax.annotation
+ javax.annotation-api
+
org.apache.hadoop.thirdparty
hadoop-shaded-protobuf_3_25
@@ -49,10 +50,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
ratis-thirdparty-misc
${ratis.thirdparty.version}
-
- javax.annotation
- javax.annotation-api
-
@@ -82,9 +79,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
test-compile-custom
-
- com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier}
-
+ com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier}
${basedir}/src/main/proto/
DatanodeClientProtocol.proto
@@ -93,9 +88,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
target/generated-sources/java
false
grpc-java
-
- io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier}
-
+ io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier}
@@ -105,9 +98,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
test-compile
-
- com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
-
+ com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
${basedir}/src/main/proto/
hdds.proto
@@ -124,9 +115,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
test-compile
-
- com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
-
+ com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
${basedir}/src/main/proto/
hdds.proto
@@ -143,38 +132,21 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
${maven-antrun-plugin.version}
+
+ run
+
generate-sources
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
- run
-
diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml
index 539a0a5430e..83aa5f72e36 100644
--- a/hadoop-hdds/interface-server/pom.xml
+++ b/hadoop-hdds/interface-server/pom.xml
@@ -12,10 +12,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
-
+
4.0.0
org.apache.ozone
@@ -24,22 +21,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
hdds-interface-server
2.0.0-SNAPSHOT
- Apache Ozone Distributed Data Store Server interface
-
- Apache Ozone HDDS Server Interface
jar
+ Apache Ozone HDDS Server Interface
+ Apache Ozone Distributed Data Store Server interface
- true
- true
- true
+
+ true
+
+ true
-
- org.apache.ratis
- ratis-thirdparty-misc
-
com.google.protobuf
protobuf-java
@@ -50,6 +43,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
org.apache.ozone
hdds-interface-client
+
+ org.apache.ratis
+ ratis-thirdparty-misc
+
@@ -79,9 +76,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
test-compile-custom
-
- com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier}
-
+ com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier}
${basedir}/src/main/proto/
InterSCMProtocol.proto
@@ -90,9 +85,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
target/generated-sources/java
false
grpc-java
-
- io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier}
-
+ io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier}
@@ -102,9 +95,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
test-compile
-
- com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
-
+ com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier}
${basedir}/src/main/proto/
InterSCMProtocol.proto
@@ -121,26 +112,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
${maven-antrun-plugin.version}
+
+ run
+
generate-sources
-
-
-
-
-
-
+
+
+
-
- run
-
From 36a430db458a0b9b913a51c28199694f9273c64f Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Wed, 8 Jan 2025 06:43:36 +0100
Subject: [PATCH 14/15] HDDS-7307. Move S3 Gateway web content to separate port
(#6898)
---
.../src/main/resources/ozone-default.xml | 42 ++++
.../hdds/server/http/BaseHttpServer.java | 22 ++-
.../hadoop/hdds/server/http/HttpServer2.java | 47 +++--
.../main/compose/ozone/docker-compose.yaml | 1 +
.../src/main/compose/ozone/prometheus.yml | 4 +-
.../src/main/smoketest/s3/bucketcreate.robot | 12 +-
.../src/main/smoketest/s3/commonawslib.robot | 14 ++
.../smoketest/s3/s3_compatbility_check.sh | 1 +
.../main/smoketest/s3/secretgenerate.robot | 2 +-
.../src/main/smoketest/s3/secretrevoke.robot | 3 +-
.../dist/src/main/smoketest/s3/webui.robot | 13 +-
.../dist/src/main/smoketest/spnego/web.robot | 2 +-
.../hadoop/ozone/s3/AuthorizationFilter.java | 6 -
.../org/apache/hadoop/ozone/s3/Gateway.java | 6 +
.../hadoop/ozone/s3/GatewayApplication.java | 2 +-
.../ozone/s3/RootPageDisplayFilter.java | 64 ------
.../hadoop/ozone/s3/S3GatewayConfigKeys.java | 14 ++
.../hadoop/ozone/s3/S3GatewayHttpServer.java | 80 +-------
.../ozone/s3/S3GatewayWebAdminServer.java | 186 ++++++++++++++++++
.../ozone/s3/VirtualHostStyleFilter.java | 7 -
.../hadoop/ozone/s3secret/Application.java} | 16 +-
.../s3secret/S3SecretManagementEndpoint.java | 2 +-
.../resources/webapps/s3g-web/WEB-INF/web.xml | 33 ++++
.../{static => s3g-web}/images/ozone.ico | Bin
.../webapps/{static => s3g-web}/index.html | 24 +--
.../webapps/s3gateway/WEB-INF/web.xml | 11 +-
.../ozone/s3/TestAuthorizationFilter.java | 22 ---
.../ozone/s3/TestVirtualHostStyleFilter.java | 23 ---
28 files changed, 388 insertions(+), 271 deletions(-)
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java
create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java
rename hadoop-ozone/s3gateway/src/main/{resources/webapps/static/s3g.js => java/org/apache/hadoop/ozone/s3secret/Application.java} (75%)
create mode 100644 hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml
rename hadoop-ozone/s3gateway/src/main/resources/webapps/{static => s3g-web}/images/ozone.ico (100%)
rename hadoop-ozone/s3gateway/src/main/resources/webapps/{static => s3g-web}/index.html (74%)
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 1fcef139daf..dfd058f5d70 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1892,6 +1892,48 @@
interfaces by setting it to 0.0.0.0.