From 8f2689dfe5f9b79e64f7d78acbaaf300c1ccbd1c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 6 Jan 2025 08:40:12 +0100 Subject: [PATCH 01/15] HDDS-11864. Remove config from OM for disabling Ratis (#7640) --- .../src/main/resources/ozone-default.xml | 9 ------ hadoop-hdds/docs/content/feature/OM-HA.md | 8 ------ hadoop-hdds/docs/content/feature/OM-HA.zh.md | 9 ------ .../apache/hadoop/ozone/om/OMConfigKeys.java | 4 --- .../main/compose/ozone-balancer/docker-config | 1 - .../src/main/compose/ozone-ha/docker-config | 1 - .../main/compose/ozone-om-ha/docker-config | 1 - .../compose/ozone-om-prepare/docker-config | 1 - .../main/compose/ozonesecure-ha/docker-config | 1 - .../compose/upgrade/compose/ha/docker-config | 1 - .../AbstractRootedOzoneFileSystemTest.java | 15 +--------- ...tractRootedOzoneFileSystemTestWithFSO.java | 4 +-- .../org/apache/hadoop/fs/ozone/TestOFS.java | 2 +- .../hadoop/fs/ozone/TestOFSWithCacheOnly.java | 28 ------------------- .../hadoop/fs/ozone/TestOFSWithFSO.java | 2 +- .../fs/ozone/TestOFSWithFSOAndCacheOnly.java | 27 ------------------ .../hadoop/fs/ozone/TestOFSWithFSPaths.java | 2 +- .../ratis/TestOzoneManagerRatisRequest.java | 18 +++++------- .../ozone/om/OmMetadataManagerImpl.java | 7 +---- .../apache/hadoop/ozone/om/OzoneManager.java | 7 +---- ...ManagerProtocolServerSideTranslatorPB.java | 15 +--------- .../OzoneDelegationTokenSecretManager.java | 5 +--- .../impl/OzoneManagerServiceProviderImpl.java | 4 --- 23 files changed, 17 insertions(+), 155 deletions(-) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index fdeb5c1c043..a0fdcd4b683 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2045,15 +2045,6 @@ - - ozone.om.ratis.enable - true - OZONE, OM, RATIS, MANAGEMENT - Property to enable or disable Ratis server on OM. - Please note - this is a temporary property to disable OM Ratis server. - - - ozone.om.ratis.port 9872 diff --git a/hadoop-hdds/docs/content/feature/OM-HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md index 3872c387335..cf8ca4351f3 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.md @@ -41,14 +41,6 @@ Client connects to the Leader Ozone Manager which process the request and schedu ## Configuration -HA mode of Ozone Manager can be enabled with the following settings in `ozone-site.xml`: - -```XML - - ozone.om.ratis.enable - true - -``` One Ozone configuration (`ozone-site.xml`) can support multiple Ozone HA cluster. To select between the available HA clusters a logical name is required for each of the clusters which can be resolved to the IP addresses (and domain names) of the Ozone Managers. 
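As a minimal sketch (the service id `omservice` and node names `om1`-`om3` are illustrative, mirroring the compose files updated later in this patch), the resolution can look like:

```XML
<property>
   <name>ozone.om.service.ids</name>
   <value>omservice</value>
</property>
<property>
   <name>ozone.om.nodes.omservice</name>
   <value>om1,om2,om3</value>
</property>
<property>
   <name>ozone.om.address.omservice.om1</name>
   <value>om1.example.com</value>
</property>
```

Note that `ozone.om.ratis.enable` no longer appears in such a sketch: after this patch, OM Ratis is always enabled.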
This logical name is called `serviceId` and can be configured in the `ozone-site.xml` diff --git a/hadoop-hdds/docs/content/feature/OM-HA.zh.md b/hadoop-hdds/docs/content/feature/OM-HA.zh.md index 2ce92087a0c..fae76ef03b4 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.zh.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.zh.md @@ -42,15 +42,6 @@ Ozone Manager 和 Storage Container Manager 都支持 HA。在这种模式下, ## 配置 -可以在 `ozone-site.xml` 中配置以下设置来启用 Ozone Manager 的高可用模式: - -```XML - - ozone.om.ratis.enable - true - -``` - 一个 Ozone 的配置(`ozone-site.xml`)支持多个 Ozone 高可用集群。为了支持在多个高可用集群之间进行选择,每个集群都需要一个逻辑名称,该逻辑名称可以解析为 Ozone Manager 的 IP 地址(和域名)。 该逻辑名称叫做 `serviceId`,可以在 `ozone-site.xml` 中进行配置: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 880fe8614b2..e274d822b63 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -177,10 +177,6 @@ private OMConfigKeys() { /** * OM Ratis related configurations. */ - public static final String OZONE_OM_RATIS_ENABLE_KEY - = "ozone.om.ratis.enable"; - public static final boolean OZONE_OM_RATIS_ENABLE_DEFAULT - = true; public static final String OZONE_OM_RATIS_PORT_KEY = "ozone.om.ratis.port"; public static final int OZONE_OM_RATIS_PORT_DEFAULT diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config index 6e0781a1d9e..3d0cfce1eaa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config @@ -26,7 +26,6 @@ OZONE-SITE.XML_ozone.om.nodes.om=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.om.om1=om1 OZONE-SITE.XML_ozone.om.address.om.om2=om2 OZONE-SITE.XML_ozone.om.address.om.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index ebf2ce532bd..92a71eea3c1 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -26,7 +26,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index ae2fb092be6..b0ebb395f9a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -21,7 +21,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm diff --git 
a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config index f0ec8fcaa1a..8550e618501 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config @@ -21,7 +21,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index 1495e89813a..a4f030d45f5 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -30,7 +30,6 @@ OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.http-address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.http-address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.http-address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index d06d3279dc9..bb68e9bf60f 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -23,7 +23,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index a3b59824548..c2697ef541a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -157,12 +157,11 @@ abstract class AbstractRootedOzoneFileSystemTest { private OzoneClient client; AbstractRootedOzoneFileSystemTest(BucketLayout bucketLayout, boolean setDefaultFs, - boolean isAclEnabled, boolean noFlush) { + boolean isAclEnabled) { // Initialize the cluster before EACH set of parameters this.bucketLayout = bucketLayout; enabledFileSystemPaths = setDefaultFs; enableAcl = isAclEnabled; - useOnlyCache = noFlush; isBucketFSOptimized = bucketLayout.isFileSystemOptimized(); } @@ -204,8 +203,6 @@ public Path getBucketPath() { private final boolean isBucketFSOptimized; private final boolean enableAcl; - private final boolean useOnlyCache; - private OzoneConfiguration conf; private MiniOzoneCluster cluster; private FileSystem fs; @@ -279,10 +276,6 @@ void initClusterAndEnv() throws IOException, InterruptedException, TimeoutExcept userOfs = UGI_USER1.doAs( (PrivilegedExceptionAction)() -> (RootedOzoneFileSystem) FileSystem.get(conf)); - - if 
(useOnlyCache) { - cluster.getOzoneManager().getOmServerProtocol().setShouldFlushCache(true); - } } protected OMMetrics getOMMetrics() { @@ -2361,9 +2354,6 @@ private Path createAndGetBucketPath() @Test void testSnapshotRead() throws Exception { - if (useOnlyCache) { - return; - } // Init data OzoneBucket bucket1 = TestDataUtil.createVolumeAndBucket(client, bucketLayout); @@ -2410,9 +2400,6 @@ void testFileSystemDeclaresCapability() throws Throwable { @Test void testSnapshotDiff() throws Exception { - if (useOnlyCache) { - return; - } OzoneBucket bucket1 = TestDataUtil.createVolumeAndBucket(client, bucketLayout); Path volumePath1 = new Path(OZONE_URI_DELIMITER, bucket1.getVolumeName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java index 40ef0bff7ec..1698b814617 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java @@ -47,8 +47,8 @@ abstract class AbstractRootedOzoneFileSystemTestWithFSO extends AbstractRootedOz private static final Logger LOG = LoggerFactory.getLogger(AbstractRootedOzoneFileSystemTestWithFSO.class); - AbstractRootedOzoneFileSystemTestWithFSO(boolean enableOMRatis, boolean isAclEnabled, boolean noFlush) { - super(BucketLayout.FILE_SYSTEM_OPTIMIZED, true, isAclEnabled, noFlush); + AbstractRootedOzoneFileSystemTestWithFSO(boolean isAclEnabled) { + super(BucketLayout.FILE_SYSTEM_OPTIMIZED, true, isAclEnabled); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java index e9f734a426c..7c5460b29fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFS extends AbstractRootedOzoneFileSystemTest { TestOFS() { - super(BucketLayout.LEGACY, false, false, false); + super(BucketLayout.LEGACY, false, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java deleted file mode 100644 index 58b1f97a8d8..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithCacheOnly extends AbstractRootedOzoneFileSystemTest { - TestOFSWithCacheOnly() { - super(BucketLayout.LEGACY, false, false, true); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java index de38b786f31..1c81c6e5bf9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java @@ -22,6 +22,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFSWithFSO extends AbstractRootedOzoneFileSystemTestWithFSO { TestOFSWithFSO() { - super(false, false, false); + super(false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java deleted file mode 100644 index 99e08f9eeff..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithFSOAndCacheOnly extends AbstractRootedOzoneFileSystemTestWithFSO { - TestOFSWithFSOAndCacheOnly() { - super(false, false, true); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java index 75c09467237..37937bf5e63 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFSWithFSPaths extends AbstractRootedOzoneFileSystemTest { TestOFSWithFSPaths() { - super(BucketLayout.LEGACY, true, false, false); + super(BucketLayout.LEGACY, true, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java index 6d396cf3af0..3fc991ff580 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java @@ -114,7 +114,6 @@ public void testUnknownRequestHandling() OzoneManagerRatisServer ratisServer = mock(OzoneManagerRatisServer.class); ProtocolMessageMetrics protocolMessageMetrics = mock(ProtocolMessageMetrics.class); - long lastTransactionIndexForNonRatis = 100L; OzoneManagerProtocolProtos.OMResponse expectedResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder() @@ -126,17 +125,14 @@ public void testUnknownRequestHandling() omRequest.getCmdType()) .build(); - boolean[] enableRatisValues = {true, false}; - for (boolean enableRatis : enableRatisValues) { - OzoneManagerProtocolServerSideTranslatorPB serverSideTranslatorPB = - new OzoneManagerProtocolServerSideTranslatorPB(ozoneManager, - ratisServer, protocolMessageMetrics, enableRatis, - lastTransactionIndexForNonRatis); + OzoneManagerProtocolServerSideTranslatorPB serverSideTranslatorPB = + new OzoneManagerProtocolServerSideTranslatorPB(ozoneManager, + ratisServer, protocolMessageMetrics, true, + 100L); - OzoneManagerProtocolProtos.OMResponse actualResponse = - serverSideTranslatorPB.processRequest(omRequest); + OzoneManagerProtocolProtos.OMResponse actualResponse = + serverSideTranslatorPB.processRequest(omRequest); - assertEquals(expectedResponse, actualResponse); - } + assertEquals(expectedResponse, actualResponse); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6698ece4a8d..8f4c070b76c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -351,12 +351,7 @@ public OmMetadataManagerImpl(OzoneConfiguration conf, this.ozoneManager = ozoneManager; this.perfMetrics = perfMetrics; this.lock = new OzoneManagerLock(conf); - // TODO: This is a temporary check. Once fully implemented, all OM state - // change should go through Ratis - be it standalone (for non-HA) or - // replicated (for HA). - isRatisEnabled = conf.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + isRatisEnabled = true; this.omEpoch = OmUtils.getOMEpoch(isRatisEnabled); // For test purpose only ignorePipelineinKey = conf.getBoolean( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 6720f314748..d26546e47ee 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -582,12 +582,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) OZONE_OM_NAMESPACE_STRICT_S3, OZONE_OM_NAMESPACE_STRICT_S3_DEFAULT); - // TODO: This is a temporary check. Once fully implemented, all OM state - // change should go through Ratis - be it standalone (for non-HA) or - // replicated (for HA). - isRatisEnabled = configuration.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + isRatisEnabled = true; // Ratis server comes with JvmPauseMonitor, no need to start another jvmPauseMonitor = !isRatisEnabled ? newJvmPauseMonitor(omId) : null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 6b55b7384bd..654610f81dc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -85,9 +85,6 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements OzoneManagerP private final RequestValidations requestValidations; private final OMPerformanceMetrics perfMetrics; - // always true, only used in tests - private boolean shouldFlushCache = true; - private OMRequest lastRequestToSubmit; @@ -313,9 +310,7 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { return createErrorResponse(request, ex); } try { - if (shouldFlushCache) { - omClientResponse.getFlushFuture().get(); - } + omClientResponse.getFlushFuture().get(); if (LOG.isTraceEnabled()) { LOG.trace("Future for {} is completed", request); } @@ -365,12 +360,4 @@ public static Logger getLog() { public void awaitDoubleBufferFlush() throws InterruptedException { ozoneManagerDoubleBuffer.awaitFlush(); } - - @VisibleForTesting - public void setShouldFlushCache(boolean shouldFlushCache) { - if (ozoneManagerDoubleBuffer != null) { - ozoneManagerDoubleBuffer.stopDaemon(); - } - this.shouldFlushCache = shouldFlushCache; - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index 420cb6c6dcb..c496da41eb4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -105,9 +104,7 @@ public OzoneDelegationTokenSecretManager(Builder b) throws IOException { this.ozoneManager = b.ozoneManager; this.store = new OzoneSecretStore(b.ozoneConf, this.ozoneManager.getMetadataManager()); - isRatisEnabled = b.ozoneConf.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + isRatisEnabled = true; this.secretKeyClient = b.secretKeyClient; loadTokenSecretState(store.loadState()); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index 491d631249c..6bff4344f45 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -352,10 +352,6 @@ public void stop() throws Exception { */ @VisibleForTesting public String getOzoneManagerSnapshotUrl() throws IOException { - if (!configuration.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false)) { - return omDBSnapshotUrl; - } String omLeaderUrl = omDBSnapshotUrl; List serviceList = ozoneManagerClient.getServiceList(); From ab161dd72afe9fcef060f11e0d2721b16b8fd34d Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Mon, 6 Jan 2025 13:33:28 +0530 Subject: [PATCH 02/15] HDDS-11949. Ozone Recon - Update Recon OM Sync default configs and docker configs. (#7600) --- hadoop-hdds/common/src/main/resources/ozone-default.xml | 6 +++--- .../apache/hadoop/ozone/recon/ReconServerConfigKeys.java | 6 +++--- .../recon/spi/impl/OzoneManagerServiceProviderImpl.java | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a0fdcd4b683..1fcef139daf 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -3314,7 +3314,7 @@ ozone.recon.om.snapshot.task.interval.delay - 10m + 5s OZONE, RECON, OM Interval in MINUTES by Recon to request OM DB Snapshot. @@ -3330,7 +3330,7 @@ recon.om.delta.update.limit - 2000 + 50000 OZONE, RECON Recon each time get a limited delta updates from OM. 
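Taken together, the two delta-update settings bound how much data Recon pulls from OM in one sync round: at the new defaults, 50000 updates per request times 50 requests per loop, i.e. at most 2,500,000 delta updates per cycle. A minimal, self-contained sketch of reading these keys (the class name and standalone main are illustrative only; the real wiring is in OzoneManagerServiceProviderImpl further down in this patch):

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

// Illustrative sketch: shows how the two new defaults combine to bound
// a single Recon-OM delta sync cycle.
public final class ReconDeltaLimitSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // New default from this change: 50000 updates fetched per request...
    long perRequest = conf.getLong("recon.om.delta.update.limit", 50000L);
    // ...in at most 50 requests per sync loop.
    int loopLimit = conf.getInt("recon.om.delta.update.loop.limit", 50);
    // Upper bound on delta updates applied in one sync cycle: 2,500,000.
    System.out.println("max updates per sync = " + perRequest * loopLimit);
  }
}
```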
@@ -3351,7 +3351,7 @@ recon.om.delta.update.loop.limit - 10 + 50 OZONE, RECON The sync between Recon and OM consists of several small diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index 5c9e4039635..02060c03ef8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -85,7 +85,7 @@ public final class ReconServerConfigKeys { public static final String OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY = "ozone.recon.om.snapshot.task.interval.delay"; public static final String OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT - = "10m"; + = "5s"; @Deprecated public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY = "recon.om.snapshot.task.interval.delay"; @@ -98,10 +98,10 @@ public final class ReconServerConfigKeys { public static final String RECON_OM_DELTA_UPDATE_LIMIT = "recon.om.delta.update.limit"; - public static final long RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT = 2000; + public static final long RECON_OM_DELTA_UPDATE_LIMIT_DEFAULT = 50000; public static final String RECON_OM_DELTA_UPDATE_LOOP_LIMIT = "recon.om.delta.update.loop.limit"; - public static final int RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT = 10; + public static final int RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFAULT = 50; public static final String OZONE_RECON_TASK_THREAD_COUNT_KEY = "ozone.recon.task.thread.count"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index 6bff4344f45..d5b7b1cfc91 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -91,9 +91,9 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.recon.ReconUtils.convertNumericToSymbolic; import static org.apache.ratis.proto.RaftProtos.RaftPeerRole.LEADER; @@ -178,10 +178,10 @@ public OzoneManagerServiceProviderImpl( .OZONE_OM_HTTPS_ADDRESS_KEY); long deltaUpdateLimits = configuration.getLong(RECON_OM_DELTA_UPDATE_LIMIT, - RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT); + RECON_OM_DELTA_UPDATE_LIMIT_DEFAULT); int deltaUpdateLoopLimits = configuration.getInt( RECON_OM_DELTA_UPDATE_LOOP_LIMIT, - RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT); + RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFAULT); omSnapshotDBParentDir = 
reconUtils.getReconDbDir(configuration, OZONE_RECON_OM_SNAPSHOT_DB_DIR); From 3d35b01e625d86864a7bec99e5ac490b9e51b573 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Mon, 6 Jan 2025 17:46:01 +0530 Subject: [PATCH 03/15] HDDS-12011. Show PID of running service. (#7648) --- hadoop-ozone/dist/src/shell/ozone/ozone | 4 +++- .../apache/hadoop/ozone/repair/RepairTool.java | 15 +++++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 5bb05fee724..d3d226a8b89 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -257,8 +257,10 @@ function check_running_ozone_services for service in "${services[@]}"; do for pid_file in ${OZONE_PID_DIR}/ozone-*-${service}.pid; do if [[ -f "${pid_file}" ]]; then - if kill -0 "$(cat "${pid_file}")" 2>/dev/null; then + pid=$(cat "${pid_file}") + if kill -0 "${pid}" 2>/dev/null; then export "OZONE_${service^^}_RUNNING=true" + export "OZONE_${service^^}_PID=${pid}" fi fi done diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index d8a976b2fd8..20a30f0b187 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -40,15 +40,18 @@ public final Void call() throws Exception { } protected boolean checkIfServiceIsRunning(String serviceName) { - String envVariable = String.format("OZONE_%s_RUNNING", serviceName); - String runningServices = System.getenv(envVariable); - if ("true".equals(runningServices)) { + String runningEnvVar = String.format("OZONE_%s_RUNNING", serviceName); + String pidEnvVar = String.format("OZONE_%s_PID", serviceName); + String isServiceRunning = System.getenv(runningEnvVar); + String servicePid = System.getenv(pidEnvVar); + if ("true".equals(isServiceRunning)) { if (!force) { - error("Error: %s is currently running on this host. " + - "Stop the service before running the repair tool.", serviceName); + error("Error: %s is currently running on this host with PID %s. " + + "Stop the service before running the repair tool.", serviceName, servicePid); return true; } else { - info("Warning: --force flag used. Proceeding despite %s being detected as running.", serviceName); + info("Warning: --force flag used. Proceeding despite %s being detected as running with PID %s.", + serviceName, servicePid); } } else { info("No running %s service detected. Proceeding with repair.", serviceName); From 2ec05cb122f8ae8036d0a55e03f623fcbb8cf93b Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Tue, 7 Jan 2025 00:54:20 +0530 Subject: [PATCH 04/15] HDDS-11987. 
Remove duplicate Quota In Bytes field from DU metadata (#7649) --- .../src/v2/components/duMetadata/duMetadata.tsx | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx index f2c740f7dbc..e46282f1856 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx @@ -179,11 +179,6 @@ const DUMetadata: React.FC = ({ values.push(moment(objectInfo.modificationTime).format('ll LTS')); } - if (objectInfo?.quotaInBytes !== undefined && objectInfo?.quotaInBytes !== -1) { - keys.push('Quota In Bytes'); - values.push(byteToSize(objectInfo.quotaInBytes, 3)); - } - if (objectInfo?.quotaInNamespace !== undefined && objectInfo?.quotaInNamespace !== -1) { keys.push('Quota In Namespace'); values.push(byteToSize(objectInfo.quotaInNamespace, 3)); From 5354cec5d46deab4fa54feb1f4b3fd7b100bce46 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 6 Jan 2025 22:13:56 +0100 Subject: [PATCH 05/15] HDDS-12027. Mark TestBlockDataStreamOutput#testMultiBlockWrite as flaky --- .../hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index ecc9e8fae46..c1345207d99 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -152,6 +153,7 @@ public void testMultiChunkWrite() throws Exception { } @Test + @Flaky("HDDS-12027") public void testMultiBlockWrite() throws Exception { testWrite(blockSize + 50); testWriteWithFailure(blockSize + 50); From ae9a56fc476df9d6dbad0109f1543bdbf1c43595 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 7 Jan 2025 07:16:07 +0100 Subject: [PATCH 06/15] HDDS-11991. 
Use picocli built-in for missing subcommand of GenericCli (#7635) --- .../apache/hadoop/hdds/cli/GenericCli.java | 21 ++----------- .../hdds/cli/MissingSubcommandException.java | 31 ------------------- .../hadoop/ozone/HddsDatanodeService.java | 3 +- .../StorageContainerManagerStarter.java | 3 +- .../src/main/smoketest/admincli/admin.robot | 2 +- .../hadoop/ozone/om/OzoneManagerStarter.java | 3 +- .../hadoop/ozone/recon/ReconServer.java | 3 +- .../org/apache/hadoop/ozone/s3/Gateway.java | 3 +- .../GenerateOzoneRequiredConfigurations.java | 4 ++- .../ozone/shell/checknative/CheckNative.java | 3 +- .../shell/tenant/GetUserInfoHandler.java | 10 ------ 11 files changed, 19 insertions(+), 67 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index 3afda85498b..c698a9f3d50 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.util.Map; -import java.util.concurrent.Callable; import com.google.common.base.Strings; import org.apache.hadoop.fs.Path; @@ -28,13 +27,13 @@ import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine; import picocli.CommandLine.ExitCode; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Option; /** * This is a generic parent class for all the ozone related cli tools. */ -public class GenericCli implements Callable, GenericParentCommand { +@CommandLine.Command +public abstract class GenericCli implements GenericParentCommand { public static final int EXECUTION_ERROR_EXIT_CODE = -1; @@ -71,15 +70,6 @@ public GenericCli(CommandLine.IFactory factory) { ExtensibleParentCommand.addSubcommands(cmd); } - /** - * Handle the error when subcommand is required but not set. - */ - public static void missingSubcommand(CommandSpec spec) { - System.err.println("Incomplete command"); - spec.commandLine().usage(System.err); - System.exit(EXECUTION_ERROR_EXIT_CODE); - } - public void run(String[] argv) { int exitCode = execute(argv); @@ -103,11 +93,6 @@ protected void printError(Throwable error) { } } - @Override - public Void call() throws Exception { - throw new MissingSubcommandException(cmd); - } - @Override public OzoneConfiguration getOzoneConf() { return config; @@ -121,7 +106,7 @@ public UserGroupInformation getUser() throws IOException { } @VisibleForTesting - public picocli.CommandLine getCmd() { + public CommandLine getCmd() { return cmd; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java deleted file mode 100644 index 759476579e9..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.cli; - -import picocli.CommandLine; - -/** - * Exception to throw if subcommand is not selected but required. - */ -public class MissingSubcommandException extends CommandLine.ParameterException { - - public MissingSubcommandException(CommandLine cmd) { - super(cmd, "Incomplete command"); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index bf33b9780d2..a6980e232b1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -97,7 +98,7 @@ hidden = true, description = "Start the datanode for ozone", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class HddsDatanodeService extends GenericCli implements ServicePlugin { +public class HddsDatanodeService extends GenericCli implements Callable, ServicePlugin { private static final Logger LOG = LoggerFactory.getLogger( HddsDatanodeService.class); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java index 8c0044f66a9..1eef7bce14c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java @@ -37,6 +37,7 @@ import picocli.CommandLine.Command; import java.io.IOException; +import java.util.concurrent.Callable; import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY; @@ -49,7 +50,7 @@ hidden = true, description = "Start or initialize the scm server.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class StorageContainerManagerStarter extends GenericCli { +public class StorageContainerManagerStarter extends GenericCli implements Callable { private OzoneConfiguration conf; private SCMStarterInterface receiver; diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot index a28888b23f4..2f1d0825b39 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot @@ -22,7 +22,7 @@ Test Timeout 5 minutes *** Test Cases *** Incomplete command ${output} = Execute And Ignore Error ozone admin - Should contain ${output} Incomplete command + Should contain ${output} Missing required subcommand Should contain ${output} container Should contain ${output} datanode Should contain ${output} om diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java index 27cb8d8aa3c..a587a628533 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java @@ -33,6 +33,7 @@ import picocli.CommandLine.Command; import java.io.IOException; +import java.util.concurrent.Callable; import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY; @@ -44,7 +45,7 @@ hidden = true, description = "Start or initialize the Ozone Manager.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneManagerStarter extends GenericCli { +public class OzoneManagerStarter extends GenericCli implements Callable { private OzoneConfiguration conf; private OMStarterInterface receiver; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java index 0970c2da687..fc0dc18cce9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java @@ -56,6 +56,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.hdds.ratis.RatisHelper.newJvmPauseMonitor; @@ -70,7 +71,7 @@ /** * Recon server main class that stops and starts recon services. */ -public class ReconServer extends GenericCli { +public class ReconServer extends GenericCli implements Callable { private static final Logger LOG = LoggerFactory.getLogger(ReconServer.class); private Injector injector; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java index c20c9b496f0..511592d3a04 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.concurrent.Callable; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.cli.GenericCli; @@ -53,7 +54,7 @@ hidden = true, description = "S3 compatible rest server.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class Gateway extends GenericCli { +public class Gateway extends GenericCli implements Callable { private static final Logger LOG = LoggerFactory.getLogger(Gateway.class); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java index 927e9186ff5..c88b6b2d698 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java @@ -41,6 +41,8 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.Callable; + /** * GenerateOzoneRequiredConfigurations - A tool 
to generate ozone-site.xml
* This tool generates an ozone-site.xml with minimally required configs. @@ -56,7 +58,7 @@ description = "Tool to generate template ozone-site.xml", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public final class GenerateOzoneRequiredConfigurations extends GenericCli { +public final class GenerateOzoneRequiredConfigurations extends GenericCli implements Callable { @Parameters(arity = "1..1", description = "Directory path where ozone-site file should be generated.") diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java index f19548a1fa7..b6b5cc989b9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java @@ -25,6 +25,7 @@ import picocli.CommandLine; import java.util.Collections; +import java.util.concurrent.Callable; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; @@ -33,7 +34,7 @@ */ @CommandLine.Command(name = "ozone checknative", description = "Checks if native libraries are loaded") -public class CheckNative extends GenericCli { +public class CheckNative extends GenericCli implements Callable { public static void main(String[] argv) { new CheckNative().run(argv); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java index be8b4ceed17..d1a3518769f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java @@ -19,13 +19,11 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExtendedUserAccessIdInfo; import org.apache.hadoop.ozone.shell.OzoneAddress; -import org.jooq.tools.StringUtils; import picocli.CommandLine; import java.io.IOException; @@ -38,9 +36,6 @@ description = "Get tenant related information of a user") public class GetUserInfoHandler extends TenantHandler { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - @CommandLine.Parameters(description = "User name (principal)", arity = "1..1") private String userPrincipal; @@ -52,11 +47,6 @@ public class GetUserInfoHandler extends TenantHandler { protected void execute(OzoneClient client, OzoneAddress address) throws IOException { - if (StringUtils.isEmpty(userPrincipal)) { - GenericCli.missingSubcommand(spec); - return; - } - final TenantUserInfoValue tenantUserInfo = client.getObjectStore().tenantGetUserInfo(userPrincipal); final List accessIdInfoList = From 6b8b844dfa7eb30df29ab849e834e0a00d3bc97e Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 7 Jan 2025 07:42:06 +0100 Subject: [PATCH 07/15] HDDS-12009. 
Merge FSORepairTool and FSORepairCLI (#7639) --- .../ozone/repair/om/TestFSORepairTool.java | 25 +- .../hadoop/ozone/repair/om/FSORepairCLI.java | 78 -- .../hadoop/ozone/repair/om/FSORepairTool.java | 752 +++++++++--------- .../hadoop/ozone/repair/om/OMRepair.java | 2 +- 4 files changed, 404 insertions(+), 453 deletions(-) delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java index d37f8ce57fb..fb6472d7bc7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.repair.OzoneRepair; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -89,8 +88,8 @@ public class TestFSORepairTool { private static FSORepairTool.Report fullReport; private static FSORepairTool.Report emptyReport; - private GenericTestUtils.PrintStreamCapturer out; - private GenericTestUtils.PrintStreamCapturer err; + private static GenericTestUtils.PrintStreamCapturer out; + private static GenericTestUtils.PrintStreamCapturer err; @BeforeAll public static void setup() throws Exception { @@ -103,6 +102,8 @@ public static void setup() throws Exception { conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); fs = FileSystem.get(conf); + out = GenericTestUtils.captureOut(); + err = GenericTestUtils.captureErr(); cmd = new OzoneRepair().getCmd(); dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); @@ -147,19 +148,13 @@ public static void setup() throws Exception { @BeforeEach public void init() throws Exception { - out = GenericTestUtils.captureOut(); - err = GenericTestUtils.captureErr(); - } - - @AfterEach - public void clean() throws Exception { - // reset stream after each unit test - IOUtils.closeQuietly(out, err); + out.reset(); + err.reset(); } @AfterAll public static void reset() throws IOException { - IOUtils.closeQuietly(fs, client, cluster); + IOUtils.closeQuietly(fs, client, cluster, out, err); } /** @@ -239,7 +234,7 @@ public void testNonExistentBucket() { // When a non-existent bucket filter is passed int exitCode = dryRun("--volume", "/vol1", "--bucket", "bucket3"); assertEquals(0, exitCode); - String cliOutput = out.getOutput(); + String cliOutput = err.getOutput(); assertThat(cliOutput).contains("Bucket 'bucket3' does not exist in volume '/vol1'."); } @@ -249,7 +244,7 @@ public void testNonExistentVolume() { // When a non-existent volume filter is passed int exitCode = dryRun("--volume", "/vol5"); assertEquals(0, exitCode); - String cliOutput = out.getOutput(); + String cliOutput = err.getOutput(); assertThat(cliOutput).contains("Volume '/vol5' does not exist."); } @@ -259,7 +254,7 @@ public void testBucketFilterWithoutVolume() { // When bucket filter is passed without the volume filter. 
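// Editor's aside (inferred from this patch, not stated explicitly): the
// assertions below now read err.getOutput() instead of out.getOutput()
// because the merged tool reports filter problems through RepairTool#error,
// which evidently writes to stderr rather than stdout.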
int exitCode = dryRun("--bucket", "bucket1"); assertEquals(0, exitCode); - String cliOutput = out.getOutput(); + String cliOutput = err.getOutput(); assertThat(cliOutput).contains("--bucket flag cannot be used without specifying --volume."); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java deleted file mode 100644 index fd6d75c7136..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.repair.om; - -import org.apache.hadoop.ozone.repair.RepairTool; -import picocli.CommandLine; - -/** - * Parser for scm.db file. - */ -@CommandLine.Command( - name = "fso-tree", - description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " + - "OM should be stopped while this tool is run." -) -public class FSORepairCLI extends RepairTool { - - @CommandLine.Option(names = {"--db"}, - required = true, - description = "Path to OM RocksDB") - private String dbPath; - - @CommandLine.Option(names = {"-r", "--repair"}, - defaultValue = "false", - description = "Run in repair mode to move unreferenced files and directories to deleted tables.") - private boolean repair; - - @CommandLine.Option(names = {"-v", "--volume"}, - description = "Filter by volume name. Add '/' before the volume name.") - private String volume; - - @CommandLine.Option(names = {"-b", "--bucket"}, - description = "Filter by bucket name") - private String bucket; - - @CommandLine.Option(names = {"--verbose"}, - description = "Verbose output. 
Show all intermediate steps and deleted keys info.") - private boolean verbose; - - @Override - public void execute() throws Exception { - if (checkIfServiceIsRunning("OM")) { - return; - } - if (repair) { - info("FSO Repair Tool is running in repair mode"); - } else { - info("FSO Repair Tool is running in debug mode"); - } - try { - FSORepairTool - repairTool = new FSORepairTool(dbPath, repair, volume, bucket, verbose); - repairTool.run(); - } catch (Exception ex) { - throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage()); - } - - if (verbose) { - info("FSO repair finished."); - } - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java index 7e0fb23f5aa..a4068415db6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java @@ -36,9 +36,11 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.repair.RepairTool; import org.apache.ratis.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import picocli.CommandLine; import java.io.File; import java.io.IOException; @@ -69,402 +71,471 @@ * The tool is idempotent. reachable.db will not be deleted automatically when the tool finishes, * in case users want to manually inspect it. It can be safely deleted once the tool finishes. */ -public class FSORepairTool { +@CommandLine.Command( + name = "fso-tree", + description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " + + "OM should be stopped while this tool is run." +) +public class FSORepairTool extends RepairTool { public static final Logger LOG = LoggerFactory.getLogger(FSORepairTool.class); - - private final String omDBPath; - private final DBStore store; - private final Table volumeTable; - private final Table bucketTable; - private final Table directoryTable; - private final Table fileTable; - private final Table deletedDirectoryTable; - private final Table deletedTable; - private final Table snapshotInfoTable; - private final String volumeFilter; - private final String bucketFilter; private static final String REACHABLE_TABLE = "reachable"; - private DBStore reachableDB; - private final ReportStatistics reachableStats; - private final ReportStatistics unreachableStats; - private final ReportStatistics unreferencedStats; - private final boolean repair; - private final boolean verbose; - - public FSORepairTool(String dbPath, boolean repair, String volume, String bucket, boolean verbose) - throws IOException { - this(getStoreFromPath(dbPath), dbPath, repair, volume, bucket, verbose); - } - /** - * Allows passing RocksDB instance from a MiniOzoneCluster directly to this class for testing. 
- */ - public FSORepairTool(DBStore dbStore, String dbPath, boolean repair, String volume, String bucket, boolean verbose) - throws IOException { - this.reachableStats = new ReportStatistics(0, 0, 0); - this.unreachableStats = new ReportStatistics(0, 0, 0); - this.unreferencedStats = new ReportStatistics(0, 0, 0); - - this.store = dbStore; - this.omDBPath = dbPath; - this.repair = repair; - this.volumeFilter = volume; - this.bucketFilter = bucket; - this.verbose = verbose; - volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE, - String.class, - OmVolumeArgs.class); - bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE, - String.class, - OmBucketInfo.class); - directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE, - String.class, - OmDirectoryInfo.class); - fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE, - String.class, - OmKeyInfo.class); - deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE, - String.class, - OmKeyInfo.class); - deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE, - String.class, - RepeatedOmKeyInfo.class); - snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, - String.class, - SnapshotInfo.class); - } + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Path to OM RocksDB") + private String omDBPath; - protected static DBStore getStoreFromPath(String dbPath) throws IOException { - File omDBFile = new File(dbPath); - if (!omDBFile.exists() || !omDBFile.isDirectory()) { - throw new IOException(String.format("Specified OM DB instance %s does " + - "not exist or is not a RocksDB directory.", dbPath)); - } - // Load RocksDB and tables needed. - return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1); - } + @CommandLine.Option(names = {"-r", "--repair"}, + defaultValue = "false", + description = "Run in repair mode to move unreferenced files and directories to deleted tables.") + private boolean repair; + + @CommandLine.Option(names = {"-v", "--volume"}, + description = "Filter by volume name. Add '/' before the volume name.") + private String volumeFilter; - public FSORepairTool.Report run() throws Exception { + @CommandLine.Option(names = {"-b", "--bucket"}, + description = "Filter by bucket name") + private String bucketFilter; + + @CommandLine.Option(names = {"--verbose"}, + description = "Verbose output. 
Show all intermediate steps and deleted keys info.") + private boolean verbose; + + @Override + public void execute() throws Exception { + if (checkIfServiceIsRunning("OM")) { + return; + } + if (repair) { + info("FSO Repair Tool is running in repair mode"); + } else { + info("FSO Repair Tool is running in debug mode"); + } try { - if (bucketFilter != null && volumeFilter == null) { - System.out.println("--bucket flag cannot be used without specifying --volume."); - return null; - } + Impl repairTool = new Impl(); + repairTool.run(); + } catch (Exception ex) { + throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage()); + } - if (volumeFilter != null) { - OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter); - if (volumeArgs == null) { - System.out.println("Volume '" + volumeFilter + "' does not exist."); + if (verbose) { + info("FSO repair finished."); + } + } + + private class Impl { + + private final DBStore store; + private final Table volumeTable; + private final Table bucketTable; + private final Table directoryTable; + private final Table fileTable; + private final Table deletedDirectoryTable; + private final Table deletedTable; + private final Table snapshotInfoTable; + private DBStore reachableDB; + private final ReportStatistics reachableStats; + private final ReportStatistics unreachableStats; + private final ReportStatistics unreferencedStats; + + Impl() throws IOException { + this.reachableStats = new ReportStatistics(0, 0, 0); + this.unreachableStats = new ReportStatistics(0, 0, 0); + this.unreferencedStats = new ReportStatistics(0, 0, 0); + + this.store = getStoreFromPath(omDBPath); + volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE, + String.class, + OmVolumeArgs.class); + bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE, + String.class, + OmBucketInfo.class); + directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE, + String.class, + OmDirectoryInfo.class); + fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE, + String.class, + OmKeyInfo.class); + deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE, + String.class, + OmKeyInfo.class); + deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE, + String.class, + RepeatedOmKeyInfo.class); + snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, + String.class, + SnapshotInfo.class); + } + + public Report run() throws Exception { + try { + if (bucketFilter != null && volumeFilter == null) { + error("--bucket flag cannot be used without specifying --volume."); return null; } - } - // Iterate all volumes or a specific volume if specified - try (TableIterator> - volumeIterator = volumeTable.iterator()) { - try { - openReachableDB(); - } catch (IOException e) { - System.out.println("Failed to open reachable database: " + e.getMessage()); - throw e; + if (volumeFilter != null) { + OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter); + if (volumeArgs == null) { + error("Volume '" + volumeFilter + "' does not exist."); + return null; + } } - while (volumeIterator.hasNext()) { - Table.KeyValue volumeEntry = volumeIterator.next(); - String volumeKey = volumeEntry.getKey(); - if (volumeFilter != null && !volumeFilter.equals(volumeKey)) { - continue; + // Iterate all volumes or a specific volume if specified + try (TableIterator> + volumeIterator = volumeTable.iterator()) { + try { + openReachableDB(); + } catch (IOException e) { + error("Failed to open reachable database: " + 
e.getMessage()); + throw e; } + while (volumeIterator.hasNext()) { + Table.KeyValue volumeEntry = volumeIterator.next(); + String volumeKey = volumeEntry.getKey(); - System.out.println("Processing volume: " + volumeKey); - - if (bucketFilter != null) { - OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter); - if (bucketInfo == null) { - //Bucket does not exist in the volume - System.out.println("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'."); - return null; - } - - if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { - System.out.println("Skipping non-FSO bucket " + bucketFilter); + if (volumeFilter != null && !volumeFilter.equals(volumeKey)) { continue; } - processBucket(volumeEntry.getValue(), bucketInfo); - } else { + info("Processing volume: " + volumeKey); - // Iterate all buckets in the volume. - try (TableIterator> - bucketIterator = bucketTable.iterator()) { - bucketIterator.seek(volumeKey); - while (bucketIterator.hasNext()) { - Table.KeyValue bucketEntry = bucketIterator.next(); - String bucketKey = bucketEntry.getKey(); - OmBucketInfo bucketInfo = bucketEntry.getValue(); - - if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { - System.out.println("Skipping non-FSO bucket " + bucketKey); - continue; - } + if (bucketFilter != null) { + OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter); + if (bucketInfo == null) { + //Bucket does not exist in the volume + error("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'."); + return null; + } - // Stop this loop once we have seen all buckets in the current - // volume. - if (!bucketKey.startsWith(volumeKey)) { - break; - } + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + info("Skipping non-FSO bucket " + bucketFilter); + continue; + } - processBucket(volumeEntry.getValue(), bucketInfo); + processBucket(volumeEntry.getValue(), bucketInfo); + } else { + + // Iterate all buckets in the volume. + try (TableIterator> + bucketIterator = bucketTable.iterator()) { + bucketIterator.seek(volumeKey); + while (bucketIterator.hasNext()) { + Table.KeyValue bucketEntry = bucketIterator.next(); + String bucketKey = bucketEntry.getKey(); + OmBucketInfo bucketInfo = bucketEntry.getValue(); + + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + info("Skipping non-FSO bucket " + bucketKey); + continue; + } + + // Stop this loop once we have seen all buckets in the current + // volume. 
+ if (!bucketKey.startsWith(volumeKey)) { + break; + } + + processBucket(volumeEntry.getValue(), bucketInfo); + } } } } } + } catch (IOException e) { + error("An error occurred while processing" + e.getMessage()); + throw e; + } finally { + closeReachableDB(); + store.close(); } - } catch (IOException e) { - System.out.println("An error occurred while processing" + e.getMessage()); - throw e; - } finally { - closeReachableDB(); - store.close(); + + return buildReportAndLog(); } - return buildReportAndLog(); - } + private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException { + if (snapshotInfoTable == null) { + return false; + } - private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException { - if (snapshotInfoTable == null) { + try (TableIterator> iterator = + snapshotInfoTable.iterator()) { + while (iterator.hasNext()) { + SnapshotInfo snapshotInfo = iterator.next().getValue(); + String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", ""); + if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) { + return true; + } + } + } return false; } - try (TableIterator> iterator = - snapshotInfoTable.iterator()) { - while (iterator.hasNext()) { - SnapshotInfo snapshotInfo = iterator.next().getValue(); - String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", ""); - if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) { - return true; + private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException { + info("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); + if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) { + if (!repair) { + info( + "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. "); + } else { + info( + "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + + "due to snapshot presence."); + return; } } + markReachableObjectsInBucket(volume, bucketInfo); + handleUnreachableAndUnreferencedObjects(volume, bucketInfo); } - return false; - } - private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException { - System.out.println("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); - if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) { - if (!repair) { - System.out.println( - "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. "); - } else { - System.out.println( - "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + - "due to snapshot presence."); - return; - } + private Report buildReportAndLog() { + Report report = new Report.Builder() + .setReachable(reachableStats) + .setUnreachable(unreachableStats) + .setUnreferenced(unreferencedStats) + .build(); + + info("\n" + report); + return report; } - markReachableObjectsInBucket(volume, bucketInfo); - handleUnreachableAndUnreferencedObjects(volume, bucketInfo); - } - private Report buildReportAndLog() { - Report report = new Report.Builder() - .setReachable(reachableStats) - .setUnreachable(unreachableStats) - .setUnreferenced(unreferencedStats) - .build(); + private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Only put directories in the stack. + // Directory keys should have the form /volumeID/bucketID/parentID/name. 
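A note on the traversal that the relocated markReachableObjectsInBucket performs: it is an iterative depth-first walk driven by an explicit stack and keyed by parent object ID, so it needs neither recursion nor the whole tree in memory. Below is a minimal, self-contained sketch of the same pattern; the TreeMap stands in for the RocksDB directory table, and all names (ReachabilityWalk, markReachable) are illustrative, not the Ozone API.

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

/** Stack-based reachability walk over a table keyed /volId/bucketId/parentId/name. */
public final class ReachabilityWalk {

  /** Values are each entry's own object ID; returns every key reachable from the bucket. */
  static List<String> markReachable(TreeMap<String, Long> dirTable,
      long volId, long bucketId, long bucketObjectId) {
    List<String> reachable = new ArrayList<>();
    Deque<Long> stack = new ArrayDeque<>();
    stack.push(bucketObjectId);                  // the bucket acts as the root directory
    while (!stack.isEmpty()) {
      long parentId = stack.pop();
      String prefix = "/" + volId + "/" + bucketId + "/" + parentId + "/";
      // A sorted scan from the prefix yields exactly the immediate children,
      // mirroring iterator.seek(prefix) plus the startsWith/break guard above.
      for (Map.Entry<String, Long> e : dirTable.tailMap(prefix).entrySet()) {
        if (!e.getKey().startsWith(prefix)) {
          break;                                 // left this parent's key range
        }
        reachable.add(e.getKey());               // child reached from its parent
        stack.push(e.getValue());                // visit its own children later
      }
    }
    return reachable;
  }

  public static void main(String[] args) {
    TreeMap<String, Long> dirs = new TreeMap<>();
    dirs.put("/1/2/100/a", 101L);      // bucket (object ID 100) -> a
    dirs.put("/1/2/101/b", 102L);      // a -> b
    dirs.put("/1/2/999/orphan", 103L); // parent 999 is never reached
    System.out.println(markReachable(dirs, 1, 2, 100L)); // [/1/2/100/a, /1/2/101/b]
  }
}
```

One sorted prefix scan per popped directory keeps the walk roughly linear in table size, which is presumably why the TODO about a more memory-efficient implementation concerns the stack and child collections rather than the scans.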
+ Stack dirKeyStack = new Stack<>(); - System.out.println("\n" + report); - return report; - } + // Since the tool uses parent directories to check for reachability, add + // a reachable entry for the bucket as well. + addReachableEntry(volume, bucket, bucket); + // Initialize the stack with all immediate child directories of the + // bucket, and mark them all as reachable. + Collection childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket); + dirKeyStack.addAll(childDirs); + + while (!dirKeyStack.isEmpty()) { + // Get one directory and process its immediate children. + String currentDirKey = dirKeyStack.pop(); + OmDirectoryInfo currentDir = directoryTable.get(currentDirKey); + if (currentDir == null) { + info("Directory key" + currentDirKey + "to be processed was not found in the directory table."); + continue; + } - private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { - // Only put directories in the stack. - // Directory keys should have the form /volumeID/bucketID/parentID/name. - Stack dirKeyStack = new Stack<>(); - - // Since the tool uses parent directories to check for reachability, add - // a reachable entry for the bucket as well. - addReachableEntry(volume, bucket, bucket); - // Initialize the stack with all immediate child directories of the - // bucket, and mark them all as reachable. - Collection childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket); - dirKeyStack.addAll(childDirs); - - while (!dirKeyStack.isEmpty()) { - // Get one directory and process its immediate children. - String currentDirKey = dirKeyStack.pop(); - OmDirectoryInfo currentDir = directoryTable.get(currentDirKey); - if (currentDir == null) { - System.out.println("Directory key" + currentDirKey + "to be processed was not found in the directory table."); - continue; + // TODO revisit this for a more memory efficient implementation, + // possibly making better use of RocksDB iterators. + childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir); + dirKeyStack.addAll(childDirs); } + } - // TODO revisit this for a more memory efficient implementation, - // possibly making better use of RocksDB iterators. - childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir); - dirKeyStack.addAll(childDirs); + private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException { + return deletedDirectoryTable.isExist(dirKey); } - } - private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException { - return deletedDirectoryTable.isExist(dirKey); - } + private boolean isFileKeyInDeletedTable(String fileKey) throws IOException { + return deletedTable.isExist(fileKey); + } - private boolean isFileKeyInDeletedTable(String fileKey) throws IOException { - return deletedTable.isExist(fileKey); - } + private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Check for unreachable and unreferenced directories in the bucket. + String bucketPrefix = OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID(); - private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { - // Check for unreachable and unreferenced directories in the bucket. 
- String bucketPrefix = OM_KEY_PREFIX + - volume.getObjectID() + - OM_KEY_PREFIX + - bucket.getObjectID(); - - try (TableIterator> dirIterator = - directoryTable.iterator()) { - dirIterator.seek(bucketPrefix); - while (dirIterator.hasNext()) { - Table.KeyValue dirEntry = dirIterator.next(); - String dirKey = dirEntry.getKey(); - - // Only search directories in this bucket. - if (!dirKey.startsWith(bucketPrefix)) { - break; - } + try (TableIterator> dirIterator = + directoryTable.iterator()) { + dirIterator.seek(bucketPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue dirEntry = dirIterator.next(); + String dirKey = dirEntry.getKey(); + + // Only search directories in this bucket. + if (!dirKey.startsWith(bucketPrefix)) { + break; + } - if (!isReachable(dirKey)) { - if (!isDirectoryInDeletedDirTable(dirKey)) { - System.out.println("Found unreferenced directory: " + dirKey); - unreferencedStats.addDir(); + if (!isReachable(dirKey)) { + if (!isDirectoryInDeletedDirTable(dirKey)) { + info("Found unreferenced directory: " + dirKey); + unreferencedStats.addDir(); - if (!repair) { - if (verbose) { - System.out.println("Marking unreferenced directory " + dirKey + " for deletion."); + if (!repair) { + if (verbose) { + info("Marking unreferenced directory " + dirKey + " for deletion."); + } + } else { + info("Deleting unreferenced directory " + dirKey); + OmDirectoryInfo dirInfo = dirEntry.getValue(); + markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo); } } else { - System.out.println("Deleting unreferenced directory " + dirKey); - OmDirectoryInfo dirInfo = dirEntry.getValue(); - markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo); + unreachableStats.addDir(); } - } else { - unreachableStats.addDir(); } } } - } - // Check for unreachable and unreferenced files - try (TableIterator> - fileIterator = fileTable.iterator()) { - fileIterator.seek(bucketPrefix); - while (fileIterator.hasNext()) { - Table.KeyValue fileEntry = fileIterator.next(); - String fileKey = fileEntry.getKey(); - // Only search files in this bucket. - if (!fileKey.startsWith(bucketPrefix)) { - break; - } + // Check for unreachable and unreferenced files + try (TableIterator> + fileIterator = fileTable.iterator()) { + fileIterator.seek(bucketPrefix); + while (fileIterator.hasNext()) { + Table.KeyValue fileEntry = fileIterator.next(); + String fileKey = fileEntry.getKey(); + // Only search files in this bucket. + if (!fileKey.startsWith(bucketPrefix)) { + break; + } - OmKeyInfo fileInfo = fileEntry.getValue(); - if (!isReachable(fileKey)) { - if (!isFileKeyInDeletedTable(fileKey)) { - System.out.println("Found unreferenced file: " + fileKey); - unreferencedStats.addFile(fileInfo.getDataSize()); + OmKeyInfo fileInfo = fileEntry.getValue(); + if (!isReachable(fileKey)) { + if (!isFileKeyInDeletedTable(fileKey)) { + info("Found unreferenced file: " + fileKey); + unreferencedStats.addFile(fileInfo.getDataSize()); - if (!repair) { - if (verbose) { - System.out.println("Marking unreferenced file " + fileKey + " for deletion." + fileKey); + if (!repair) { + if (verbose) { + info("Marking unreferenced file " + fileKey + " for deletion." 
+ fileKey); + } + } else { + info("Deleting unreferenced file " + fileKey); + markFileForDeletion(fileKey, fileInfo); } } else { - System.out.println("Deleting unreferenced file " + fileKey); - markFileForDeletion(fileKey, fileInfo); + unreachableStats.addFile(fileInfo.getDataSize()); } } else { - unreachableStats.addFile(fileInfo.getDataSize()); + // NOTE: We are deserializing the proto of every reachable file + // just to log it's size. If we don't need this information we could + // save time by skipping this step. + reachableStats.addFile(fileInfo.getDataSize()); } - } else { - // NOTE: We are deserializing the proto of every reachable file - // just to log it's size. If we don't need this information we could - // save time by skipping this step. - reachableStats.addFile(fileInfo.getDataSize()); } } } - } - protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException { - try (BatchOperation batch = store.initBatchOperation()) { - fileTable.deleteWithBatch(batch, fileKey); - - RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey); - RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - fileInfo, fileInfo.getUpdateID(), true); - // NOTE: The FSO code seems to write the open key entry with the whole - // path, using the object's names instead of their ID. This would only - // be possible when the file is deleted explicitly, and not part of a - // directory delete. It is also not possible here if the file's parent - // is gone. The name of the key does not matter so just use IDs. - deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo); - if (verbose) { - System.out.println("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo); + protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + fileTable.deleteWithBatch(batch, fileKey); + + RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey); + RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + fileInfo, fileInfo.getUpdateID(), true); + // NOTE: The FSO code seems to write the open key entry with the whole + // path, using the object's names instead of their ID. This would only + // be possible when the file is deleted explicitly, and not part of a + // directory delete. It is also not possible here if the file's parent + // is gone. The name of the key does not matter so just use IDs. + deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo); + if (verbose) { + info("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo); + } + store.commitBatchOperation(batch); } - store.commitBatchOperation(batch); } - } - protected void markDirectoryForDeletion(String volumeName, String bucketName, - String dirKeyName, OmDirectoryInfo dirInfo) throws IOException { - try (BatchOperation batch = store.initBatchOperation()) { - directoryTable.deleteWithBatch(batch, dirKeyName); - // HDDS-7592: Make directory entries in deleted dir table unique. - String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID(); + protected void markDirectoryForDeletion(String volumeName, String bucketName, + String dirKeyName, OmDirectoryInfo dirInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + directoryTable.deleteWithBatch(batch, dirKeyName); + // HDDS-7592: Make directory entries in deleted dir table unique. 
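The HDDS-7592 reference just after this point is the one subtle line in markDirectoryForDeletion: the deleted-dir key is the path-based key suffixed with the directory's immutable object ID, so deleting, recreating, and re-deleting the same path can never collide in the deleted-dir table. A sketch of the key construction; the '/' separator is an assumption standing in for OM_KEY_PREFIX:

```java
final class DeletedDirKeys {
  private static final String OM_KEY_PREFIX = "/";  // assumed separator

  /** Unique deleted-dir key: the path-based key plus the entry's immutable object ID. */
  static String deletedDirKey(String dirKey, long objectId) {
    return dirKey + OM_KEY_PREFIX + objectId;
  }
  // deletedDirKey("/vol/bucket/foo", 1234L) -> "/vol/bucket/foo/1234".
  // A directory later recreated at the same path has a different object ID,
  // so its eventual delete record gets a distinct key.
}
```

Note also that the delete from the directory table and the put into the deleted-dir table travel in one RocksDB write batch, so the move is atomic: a crash between the two steps cannot lose the entry.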
+ String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID(); - // Convert the directory to OmKeyInfo for deletion. - OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName()); - deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo); + // Convert the directory to OmKeyInfo for deletion. + OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName()); + deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo); - store.commitBatchOperation(batch); + store.commitBatchOperation(batch); + } } - } - private Collection getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket, - WithObjectID currentDir) throws IOException { - - Collection childDirs = new ArrayList<>(); - - try (TableIterator> - dirIterator = directoryTable.iterator()) { - String dirPrefix = buildReachableKey(volume, bucket, currentDir); - // Start searching the directory table at the current directory's - // prefix to get its immediate children. - dirIterator.seek(dirPrefix); - while (dirIterator.hasNext()) { - Table.KeyValue childDirEntry = dirIterator.next(); - String childDirKey = childDirEntry.getKey(); - // Stop processing once we have seen all immediate children of this - // directory. - if (!childDirKey.startsWith(dirPrefix)) { - break; + private Collection getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket, + WithObjectID currentDir) throws IOException { + + Collection childDirs = new ArrayList<>(); + + try (TableIterator> + dirIterator = directoryTable.iterator()) { + String dirPrefix = buildReachableKey(volume, bucket, currentDir); + // Start searching the directory table at the current directory's + // prefix to get its immediate children. + dirIterator.seek(dirPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue childDirEntry = dirIterator.next(); + String childDirKey = childDirEntry.getKey(); + // Stop processing once we have seen all immediate children of this + // directory. + if (!childDirKey.startsWith(dirPrefix)) { + break; + } + // This directory was reached by search. + addReachableEntry(volume, bucket, childDirEntry.getValue()); + childDirs.add(childDirKey); + reachableStats.addDir(); } - // This directory was reached by search. - addReachableEntry(volume, bucket, childDirEntry.getValue()); - childDirs.add(childDirKey); - reachableStats.addDir(); } + + return childDirs; + } + + /** + * Add the specified object to the reachable table, indicating it is part + * of the connected FSO tree. + */ + private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException { + String reachableKey = buildReachableKey(volume, bucket, object); + // No value is needed for this table. + reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{}); + } + + /** + * @param fileOrDirKey The key of a file or directory in RocksDB. + * @return true if the entry's parent is in the reachable table. 
+ */ + protected boolean isReachable(String fileOrDirKey) throws IOException { + String reachableParentKey = buildReachableParentKey(fileOrDirKey); + + return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null; + } + + private void openReachableDB() throws IOException { + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + info("Creating database of reachable directories at " + reachableDBFile); + // Delete the DB from the last run if it exists. + if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + + ConfigurationSource conf = new OzoneConfiguration(); + reachableDB = DBStoreBuilder.newBuilder(conf) + .setName("reachable.db") + .setPath(reachableDBFile.getParentFile().toPath()) + .addTable(REACHABLE_TABLE) + .build(); } - return childDirs; + private void closeReachableDB() throws IOException { + if (reachableDB != null) { + reachableDB.close(); + } + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + } } - /** - * Add the specified object to the reachable table, indicating it is part - * of the connected FSO tree. - */ - private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException { - String reachableKey = buildReachableKey(volume, bucket, object); - // No value is needed for this table. - reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{}); + protected static DBStore getStoreFromPath(String dbPath) throws IOException { + File omDBFile = new File(dbPath); + if (!omDBFile.exists() || !omDBFile.isDirectory()) { + throw new IOException(String.format("Specified OM DB instance %s does " + + "not exist or is not a RocksDB directory.", dbPath)); + } + // Load RocksDB and tables needed. + return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1); } /** @@ -480,17 +551,6 @@ private static String buildReachableKey(OmVolumeArgs volume, OmBucketInfo bucket object.getObjectID(); } - /** - * - * @param fileOrDirKey The key of a file or directory in RocksDB. - * @return true if the entry's parent is in the reachable table. - */ - protected boolean isReachable(String fileOrDirKey) throws IOException { - String reachableParentKey = buildReachableParentKey(fileOrDirKey); - - return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null; - } - /** * Build an entry in the reachable table for the current object's parent * object. The object could be a file or directory. @@ -512,32 +572,6 @@ private static String buildReachableParentKey(String fileOrDirKey) { parentID; } - private void openReachableDB() throws IOException { - File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); - System.out.println("Creating database of reachable directories at " + reachableDBFile); - // Delete the DB from the last run if it exists. 
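The open/close pair being moved in this hunk treats reachable.db purely as a scratch store: any stale copy from a previous run is wiped before a fresh one is built, and the new closeReachableDB removes it again on close (superseding the older class comment saying the database was left behind for inspection). The lifecycle, reduced to plain java.nio.file with no RocksDB dependency; ScratchDir is an illustrative name, not the tool's actual DBStoreBuilder code:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

/** Scratch-store lifecycle: recreate on open, remove on close. */
final class ScratchDir implements AutoCloseable {
  private final Path dir;

  ScratchDir(Path parent, String name) throws IOException {
    dir = parent.resolve(name);
    deleteRecursively(dir);         // stale copy from a previous run
    Files.createDirectories(dir);
  }

  @Override
  public void close() throws IOException {
    deleteRecursively(dir);         // nothing needs to survive between runs
  }

  private static void deleteRecursively(Path p) throws IOException {
    if (!Files.exists(p)) {
      return;
    }
    try (Stream<Path> walk = Files.walk(p)) {
      // Delete children before parents by walking in reverse order.
      walk.sorted(Comparator.reverseOrder()).forEach(q -> {
        try {
          Files.delete(q);
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      });
    }
  }
}
```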
- if (reachableDBFile.exists()) { - FileUtils.deleteDirectory(reachableDBFile); - } - - ConfigurationSource conf = new OzoneConfiguration(); - reachableDB = DBStoreBuilder.newBuilder(conf) - .setName("reachable.db") - .setPath(reachableDBFile.getParentFile().toPath()) - .addTable(REACHABLE_TABLE) - .build(); - } - - private void closeReachableDB() throws IOException { - if (reachableDB != null) { - reachableDB.close(); - } - File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); - if (reachableDBFile.exists()) { - FileUtils.deleteDirectory(reachableDBFile); - } - } - /** * Define a Report to be created. */ @@ -549,19 +583,19 @@ public static class Report { /** * Builds one report that is the aggregate of multiple others. */ - public Report(FSORepairTool.Report... reports) { + public Report(Report... reports) { reachable = new ReportStatistics(); unreachable = new ReportStatistics(); unreferenced = new ReportStatistics(); - for (FSORepairTool.Report report : reports) { + for (Report report : reports) { reachable.add(report.reachable); unreachable.add(report.unreachable); unreferenced.add(report.unreferenced); } } - private Report(FSORepairTool.Report.Builder builder) { + private Report(Report.Builder builder) { this.reachable = builder.reachable; this.unreachable = builder.unreachable; this.unreferenced = builder.unreferenced; @@ -591,7 +625,7 @@ public boolean equals(Object other) { if (other == null || getClass() != other.getClass()) { return false; } - FSORepairTool.Report report = (FSORepairTool.Report) other; + Report report = (Report) other; // Useful for testing. System.out.println("Comparing reports\nExpect:\n" + this + "\nActual:\n" + report); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java index 3b880f87543..9e20f6b9d1f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -27,7 +27,7 @@ */ @CommandLine.Command(name = "om", subcommands = { - FSORepairCLI.class, + FSORepairTool.class, SnapshotRepair.class, TransactionInfoRepair.class }, From 8a774a57df907c1e5c6c274054cfde21f914a33b Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Tue, 7 Jan 2025 15:15:26 +0800 Subject: [PATCH 08/15] HDDS-11989. Enable SCM Ratis in tests related to DeletedBlockLog (#7615) --- .../hdds/scm/TestStorageContainerManager.java | 12 +---- .../apache/hadoop/ozone/OzoneTestUtils.java | 33 +++++++++++++ .../rpc/TestDeleteWithInAdequateDN.java | 5 ++ .../commandhandler/TestBlockDeletion.java | 48 ++++++++++--------- .../TestDeleteContainerHandler.java | 8 ++++ 5 files changed, 72 insertions(+), 34 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 94c8f914294..47f6d3823d2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -321,17 +321,7 @@ public void testBlockDeletionTransactions() throws Exception { // after sometime, all the TX should be proceed and by then // the number of containerBlocks of all known containers will be // empty again. 
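The patch here (HDDS-11989) replaces hand-rolled wait loops like the one below with shared helpers in OzoneTestUtils. Every one of them reduces to the same poll-until-true-or-timeout primitive, which GenericTestUtils.waitFor provides in the Hadoop test utilities; a minimal standalone equivalent for reference:

```java
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

/** Minimal poll-until-true helper in the spirit of GenericTestUtils.waitFor. */
final class Wait {
  static void until(BooleanSupplier condition, long intervalMs, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);     // back off between probes
    }
  }
}
```

The new helpers deliberately swallow IOException inside the probe: while Ratis state is still settling, "failed this round" and "not ready yet" are the same answer to the retry loop.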
- GenericTestUtils.waitFor(() -> { - try { - if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) { - cluster.getStorageContainerManager().getScmHAManager() - .asSCMHADBTransactionBuffer().flush(); - } - return delLog.getNumOfValidTransactions() == 0; - } catch (IOException e) { - return false; - } - }, 1000, 22000); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); assertTrue(verifyBlocksWithTxnTable(cluster, conf, containerBlocks)); // Continue the work, add some TXs that with known container names, // but unknown block IDs. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index 884e435d25e..0a5f7114c40 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -161,4 +161,37 @@ public static void closeContainer(StorageContainerManager scm, container.getState() == HddsProtos.LifeCycleState.CLOSED, 200, 30000); } + + /** + * Flush deleted block log & wait till something was flushed. + */ + public static void flushAndWaitForDeletedBlockLog(StorageContainerManager scm) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(() -> { + try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); + if (scm.getScmBlockManager().getDeletedBlockLog().getNumOfValidTransactions() > 0) { + return true; + } + } catch (IOException e) { + } + return false; + }, 100, 3000); + } + + /** + * Wait till all blocks are removed. + */ + public static void waitBlockDeleted(StorageContainerManager scm) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(() -> { + try { + if (scm.getScmBlockManager().getDeletedBlockLog().getNumOfValidTransactions() == 0) { + return true; + } + } catch (IOException e) { + } + return false; + }, 1000, 60000); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index bb42d8a0f57..2b199306b76 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneTestUtils; import org.apache.hadoop.ozone.RatisTestHelper; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; @@ -65,6 +66,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -73,6 +75,7 @@ import static 
org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.BeforeAll; @@ -103,6 +106,7 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -281,6 +285,7 @@ void testDeleteKeyWithInAdequateDN() throws Exception { //cluster.getOzoneManager().deleteKey(keyArgs); client.getObjectStore().getVolume(volumeName).getBucket(bucketName). deleteKey("ratis"); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); // make sure the chunk was never deleted on the leader even though // deleteBlock handler is invoked diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index cf7d26847bb..e38312e02e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hdds.scm.block.ScmBlockDeletingServiceMetrics; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; @@ -95,6 +94,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -133,6 +133,7 @@ public void init() throws Exception { GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(ReplicationManager.LOG, Level.DEBUG); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.set("ozone.replication.allowed-configs", "^(RATIS/THREE)|(EC/2-1-256k)$"); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, @@ -239,6 +240,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { // verify key blocks were created in DN. 
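A recurring shape in the TestBlockDeletion hunks that follow: with SCM Ratis enabled, state-machine updates are batched in the SCM HA DB transaction buffer and written to RocksDB lazily, so each polling predicate now forces a flush before inspecting the store. The pattern, as the patch applies it (scm is the mini cluster's StorageContainerManager; the timeouts are illustrative):

```java
GenericTestUtils.waitFor(() -> {
  try {
    // Make buffered deleted-block transactions visible in RocksDB first,
    // then check the observable condition; any failure means "retry".
    scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
    return scm.getScmBlockManager().getDeletedBlockLog()
        .getNumOfValidTransactions() == 0;
  } catch (Exception e) {
    return false;
  }
}, 500, 20000);
```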
GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); verifyBlocksCreated(omKeyLocationInfoGroupList); return true; } catch (Throwable t) { @@ -283,6 +285,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { // The blocks should be deleted in the DN. GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); verifyBlocksDeleted(omKeyLocationInfoGroupList); return true; } catch (Throwable t) { @@ -299,6 +302,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { // Verify transactions committed GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); verifyTransactionsCommitted(); return true; } catch (Throwable t) { @@ -380,10 +384,16 @@ public void testContainerStatisticsAfterDelete() throws Exception { writeClient.deleteKey(keyArgs); // Wait for blocks to be deleted and container reports to be processed - GenericTestUtils.waitFor(() -> - scm.getContainerManager().getContainers().stream() - .allMatch(c -> c.getUsedBytes() == 0 && - c.getNumberOfKeys() == 0), 500, 20000); + GenericTestUtils.waitFor(() -> { + try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); + } catch (IOException e) { + throw new RuntimeException(e); + } + return scm.getContainerManager().getContainers().stream() + .allMatch(c -> c.getUsedBytes() == 0 && + c.getNumberOfKeys() == 0); + }, 500, 20000); Thread.sleep(5000); // Verify that pending block delete num are as expected with resent cmds cluster.getHddsDatanodes().forEach(dn -> { @@ -425,6 +435,7 @@ public void testContainerStatisticsAfterDelete() throws Exception { assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); @@ -516,14 +527,14 @@ public void testContainerStateAfterDNRestart() throws Exception { GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); return scm.getContainerManager().getContainerReplicas( containerId).stream(). 
allMatch(replica -> replica.isEmpty()); - } catch (ContainerNotFoundException e) { + } catch (IOException e) { throw new RuntimeException(e); } - }, - 100, 10 * 1000); + }, 100, 10 * 1000); // Container state should be empty now as key got deleted assertTrue(getContainerFromDN( @@ -546,6 +557,7 @@ public void testContainerStateAfterDNRestart() throws Exception { assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); @@ -560,7 +572,6 @@ public void testContainerStateAfterDNRestart() throws Exception { } return true; }, 500, 30000); - LOG.info(metrics.toString()); } /** @@ -646,14 +657,14 @@ public void testContainerDeleteWithInvalidKeyCount() // Ensure isEmpty are true for all replica after delete key GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); return scm.getContainerManager().getContainerReplicas( containerId).stream() .allMatch(replica -> replica.isEmpty()); - } catch (ContainerNotFoundException e) { + } catch (IOException e) { throw new RuntimeException(e); } - }, - 500, 5 * 2000); + }, 500, 5 * 2000); // Update container replica by making invalid keyCount in one replica ContainerReplica replicaOne = ContainerReplica.newBuilder() @@ -683,6 +694,7 @@ public void testContainerDeleteWithInvalidKeyCount() assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); @@ -812,17 +824,7 @@ public void testBlockDeleteCommandParallelProcess() throws Exception { } // Wait for block delete command sent from OM - GenericTestUtils.waitFor(() -> { - try { - if (scm.getScmBlockManager().getDeletedBlockLog() - .getNumOfValidTransactions() > 0) { - return true; - } - } catch (IOException e) { - } - return false; - }, 100, 5000); - + OzoneTestUtils.flushAndWaitForDeletedBlockLog(scm); long start = System.currentTimeMillis(); // Wait for all blocks been deleted. 
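The TestDeleteContainerHandler hunks below always pair the two new helpers in the same order, which is worth making explicit: first confirm the delete transactions actually reached the deleted-block log, then wait for the datanodes to drain it to zero.

```java
// 1. Flush, and wait until delete transactions appear in the log.
OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager());
// 2. Wait until every pending transaction has been processed.
OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager());
```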
GenericTestUtils.waitFor(() -> { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index 192c933f53c..705ef1e0d86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -75,6 +75,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -97,6 +98,7 @@ public class TestDeleteContainerHandler { @BeforeAll public static void setup() throws Exception { conf = new OzoneConfiguration(); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB"); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB); @@ -196,6 +198,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) .getBucket(bucketName).deleteKey(keyName); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); // Ensure isEmpty flag is true when key is deleted and container is empty GenericTestUtils.waitFor(() -> getContainerfromDN( @@ -313,6 +317,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) .getBucket(bucketName).deleteKey(keyName); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); // Ensure isEmpty flag is true when key is deleted and container is empty GenericTestUtils.waitFor(() -> getContainerfromDN( @@ -652,6 +658,8 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) .getBucket(bucketName).deleteKey(keyName); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); // Ensure isEmpty flag is true when key is deleted GenericTestUtils.waitFor(() -> getContainerfromDN( From 44ba9a3f5d689d003cc8770ad62815d04d2596a2 Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Tue, 7 Jan 2025 16:14:24 +0800 Subject: [PATCH 09/15] HDDS-12023. 
Enable SCM Ratis in TestContainerCommandsEC (#7650) --- .../hadoop/hdds/scm/storage/TestContainerCommandsEC.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index 1b7eb837cf8..bf40a600e29 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -70,6 +70,7 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ec.reconstruction.ECContainerOperationClient; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics; @@ -170,6 +171,7 @@ public class TestContainerCommandsEC { @BeforeAll public static void init() throws Exception { config = new OzoneConfiguration(); + config.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); config.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); config.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); config.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true); @@ -320,8 +322,10 @@ public void testOrphanBlock() throws Exception { .setTxID(1L) .setCount(10) .build())); - dn2Service.getDatanodeStateMachine().getContext() - .addCommand(deleteBlocksCommand); + StateContext context = dn2Service.getDatanodeStateMachine().getContext(); + deleteBlocksCommand.setTerm(context.getTermOfLeaderSCM().isPresent() ? + context.getTermOfLeaderSCM().getAsLong() : 0); + context.addCommand(deleteBlocksCommand); try (XceiverClientGrpc client = new XceiverClientGrpc( createSingleNodePipeline(orphanPipeline, dn2, 1), cluster.getConf())) { From e8d96f422efe094b9191dc2d65459a29e8a8faac Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Tue, 7 Jan 2025 17:26:12 +0800 Subject: [PATCH 10/15] HDDS-12022. 
Enable SCM Ratis in TestStorageContainerManager (#7651) --- .../hdds/scm/TestStorageContainerManager.java | 71 ++++++------------- 1 file changed, 22 insertions(+), 49 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 47f6d3823d2..14df7670f67 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.RatisUtil; import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; @@ -92,7 +93,6 @@ import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; @@ -142,15 +142,12 @@ import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; @@ -191,11 +188,13 @@ public void cleanupDefaults() { public void testRpcPermission() throws Exception { // Test with default configuration OzoneConfiguration defaultConf = new OzoneConfiguration(); + defaultConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); testRpcPermissionWithConf(defaultConf, any -> false, "unknownUser"); // Test with ozone.administrators defined in configuration String admins = "adminUser1, adminUser2"; OzoneConfiguration ozoneConf = new OzoneConfiguration(); + ozoneConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS, admins); // Non-admin user will get permission denied. // Admin user will pass the permission check. 
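Worth annotating before the remaining TestStorageContainerManager hunks: the TestContainerCommandsEC fix in the previous patch (HDDS-12023) is the flip side of enabling SCM Ratis everywhere. A datanode compares the Ratis term carried by each SCMCommand against the leader it last heard from and discards stale ones, so a test that injects a command by hand must stamp it first. The shape of that fix, with context being the datanode's StateContext and getTermOfLeaderSCM() returning a java.util.OptionalLong:

```java
OptionalLong term = context.getTermOfLeaderSCM();
// Stamp the hand-injected command with the live leader term, or 0 when no
// leader is known yet, so the datanode does not discard it as stale.
deleteBlocksCommand.setTerm(term.isPresent() ? term.getAsLong() : 0);
context.addCommand(deleteBlocksCommand);
```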
@@ -267,6 +266,7 @@ private void verifyPermissionDeniedException(Exception e, String userName) { public void testBlockDeletionTransactions() throws Exception { int numKeys = 5; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); DatanodeConfiguration datanodeConfiguration = conf.getObject( @@ -358,6 +358,7 @@ public void testBlockDeletionTransactions() throws Exception { @Test public void testOldDNRegistersToReInitialisedSCM() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); @@ -369,10 +370,13 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { cluster.waitForClusterToBeReady(); HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); StorageContainerManager scm = cluster.getStorageContainerManager(); + File dbDir = scm.getScmMetadataStore().getStore().getDbLocation(); scm.stop(); // re-initialise SCM with new clusterID + GenericTestUtils.deleteDirectory(new File(SCMHAUtils.getRatisStorageDir(conf))); + GenericTestUtils.deleteDirectory(dbDir); GenericTestUtils.deleteDirectory( new File(scm.getScmStorageConfig().getStorageDir())); String newClusterId = UUID.randomUUID().toString(); @@ -413,7 +417,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { datanode.getDatanodeDetails()); GenericTestUtils.waitFor( () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100, - 5000); + 30000); ExitUtil.disableSystemExit(); // As part of processing response for re-register, DN EndpointStateMachine // goes to GET-VERSION state which checks if there is already existing @@ -432,6 +436,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { assertThat(versionEndPointTaskLog.getOutput()).contains( "org.apache.hadoop.ozone.common" + ".InconsistentStorageStateException: Mismatched ClusterIDs"); + scm.stop(); } } @@ -439,6 +444,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { public void testBlockDeletingThrottling() throws Exception { int numKeys = 15; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -549,6 +555,7 @@ private Map> createDeleteTXLog( @Test public void testSCMInitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); @@ -564,27 +571,13 @@ public void testSCMInitialization(@TempDir Path tempDir) throws Exception { assertEquals(NodeType.SCM, scmStore.getNodeType()); assertEquals(testClusterId, scmStore.getClusterID()); assertTrue(scmStore.isSCMHAEnabled()); - } - - @Test - public void testSCMInitializationWithHAEnabled(@TempDir Path tempDir) throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - 
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - Path scmPath = tempDir.resolve("scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - - final UUID clusterId = UUID.randomUUID(); - // This will initialize SCM - StorageContainerManager.scmInit(conf, clusterId.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertTrue(scmStore.isSCMHAEnabled()); validateRatisGroupExists(conf, clusterId.toString()); } @Test public void testSCMReinitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); //This will set the cluster id in the version file @@ -646,6 +639,7 @@ public static void validateRatisGroupExists(OzoneConfiguration conf, @Test void testSCMInitializationFailure(@TempDir Path tempDir) { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); @@ -658,15 +652,21 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); Path scmPath = tempDir.resolve("scm-meta"); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); SCMStorageConfig scmStore = new SCMStorageConfig(conf); String clusterId = UUID.randomUUID().toString(); String scmId = UUID.randomUUID().toString(); scmStore.setClusterId(clusterId); scmStore.setScmId(scmId); + scmStore.setSCMHAFlag(true); // writes the version file properties scmStore.initialize(); + SCMRatisServerImpl.initialize(clusterId, scmId, + SCMHANodeDetails.loadSCMHAConfig(conf, scmStore) + .getLocalNodeDetails(), conf); StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); + scm.start(); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); assertEquals(clusterId, scmInfo.getClusterId()); @@ -684,6 +684,7 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { public void testScmProcessDatanodeHeartbeat() throws Exception { String rackName = "/rack1"; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)), @@ -726,6 +727,7 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { public void testCloseContainerCommandOnRestart() throws Exception { int numKeys = 15; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -943,35 +945,6 @@ public void testIncrementalContainerReportQueue() throws Exception { containerReportExecutors.close(); } - @Test - public void testNonRatisToRatis() - throws IOException, AuthenticationException, InterruptedException, - TimeoutException { - final OzoneConfiguration conf = new OzoneConfiguration(); - try (MiniOzoneCluster cluster = 
MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build()) { - final StorageContainerManager nonRatisSCM = cluster - .getStorageContainerManager(); - assertNull(nonRatisSCM.getScmHAManager().getRatisServer()); - assertFalse(nonRatisSCM.getScmStorageConfig().isSCMHAEnabled()); - nonRatisSCM.stop(); - nonRatisSCM.join(); - - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - StorageContainerManager.scmInit(conf, cluster.getClusterId()); - conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, getFreePort()); - conf.unset(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); - cluster.restartStorageContainerManager(false); - - final StorageContainerManager ratisSCM = cluster - .getStorageContainerManager(); - assertNotNull(ratisSCM.getScmHAManager().getRatisServer()); - assertTrue(ratisSCM.getScmStorageConfig().isSCMHAEnabled()); - } - } - private void addTransactions(StorageContainerManager scm, DeletedBlockLog delLog, Map> containerBlocksMap) From 3dfd2410a04259e58afc99c08c89adb4abcea30b Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 7 Jan 2025 10:21:17 -0800 Subject: [PATCH 11/15] HDDS-11753. Deprecate file per chunk layout from datanode code. (#7654) --- .../container/common/impl/ContainerLayoutVersion.java | 3 ++- .../ozone/container/keyvalue/KeyValueHandler.java | 10 ++++++++++ .../ozone/container/keyvalue/TestKeyValueHandler.java | 8 ++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java index 210c538f274..99f56baa799 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java @@ -34,6 +34,7 @@ */ public enum ContainerLayoutVersion { + @Deprecated /* Use FILE_PER_BLOCK instead */ FILE_PER_CHUNK(1, "One file per chunk") { @Override public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { @@ -47,7 +48,7 @@ public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { } }; - private static final ContainerLayoutVersion + public static final ContainerLayoutVersion DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK; private static final List CONTAINER_LAYOUT_VERSIONS = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 9cae71e9baf..0ef8d5e68a0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; @@ -124,6 +125,7 @@ import static 
org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.DEFAULT_LAYOUT; import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult; import org.apache.hadoop.util.Time; @@ -191,6 +193,14 @@ public KeyValueHandler(ConfigurationSource config, byteBufferToByteString = ByteStringConversion .createByteBufferConversion(isUnsafeByteBufferConversionEnabled); + + if (ContainerLayoutVersion.getConfiguredVersion(conf) == + ContainerLayoutVersion.FILE_PER_CHUNK) { + LOG.warn("FILE_PER_CHUNK layout is not supported. Falling back to default : {}.", + DEFAULT_LAYOUT.name()); + OzoneConfiguration.of(conf).set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, + DEFAULT_LAYOUT.name()); + } } @VisibleForTesting diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 655ecbb48b4..d02910358de 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -292,6 +293,13 @@ public void testVolumeSetInKeyValueHandler() throws Exception { keyValueHandler.getVolumeChoosingPolicyForTesting() .getClass().getName()); + // Ensures that KeyValueHandler falls back to FILE_PER_BLOCK. + conf.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, "FILE_PER_CHUNK"); + new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, + metrics, c -> { }); + assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK, + conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK)); + //Set a class which is not of sub class of VolumeChoosingPolicy conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); From 984027cc250448418a253dc1f2ea3e8b596263de Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Wed, 8 Jan 2025 02:05:10 +0530 Subject: [PATCH 12/15] HDDS-12035. 
Enable sortpom in hdds-hadoop-dependency-server and -test (#7659) --- hadoop-hdds/erasurecode/pom.xml | 4 +- hadoop-hdds/hadoop-dependency-client/pom.xml | 2 +- hadoop-hdds/hadoop-dependency-server/pom.xml | 124 +++++++++---------- hadoop-hdds/hadoop-dependency-test/pom.xml | 30 ++--- 4 files changed, 76 insertions(+), 84 deletions(-) diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index bb98efe1894..b84b6e087c3 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -38,13 +38,13 @@ org.slf4j slf4j-api + + org.apache.ozone hdds-config test - - org.apache.ozone hdds-hadoop-dependency-test diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 276f6935584..c05614456e7 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -100,6 +100,7 @@ commons-beanutils commons-beanutils + commons-codec commons-codec @@ -152,7 +153,6 @@ org.apache.commons commons-lang3 - org.apache.commons commons-math3 diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 05923dab2cd..324b21ef668 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,65 +21,68 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-server 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop server dependencies - - Apache Ozone HDDS Hadoop Server dependencies jar + Apache Ozone HDDS Hadoop Server dependencies + Apache Ozone Distributed Data Store Hadoop server dependencies - true - true + + true + + com.nimbusds + nimbus-jose-jwt + + + + commons-cli + commons-cli + org.apache.hadoop hadoop-annotations org.apache.hadoop - hadoop-common + hadoop-auth ${hadoop.version} - com.nimbusds - nimbus-jose-jwt + ch.qos.reload4j + reload4j - org.xerial.snappy - snappy-java + log4j + log4j org.apache.curator * - org.apache.avro - avro + org.apache.kerby + kerb-simplekdc org.apache.zookeeper zookeeper - org.apache.commons + org.slf4j * + + + + org.apache.hadoop + hadoop-common + ${hadoop.version} + - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-core-asl - - - org.codehaus.jackson - jackson-jaxrs - - - org.codehaus.jackson - jackson-xc + ch.qos.reload4j + reload4j com.github.pjfanning @@ -93,32 +93,25 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jsr305 - com.sun.jersey - * + com.nimbusds + nimbus-jose-jwt - org.apache.kerby - kerb-simplekdc + com.sun.jersey + * log4j log4j - ch.qos.reload4j - reload4j + org.apache.avro + avro - org.slf4j + org.apache.commons * - - - - org.apache.hadoop - hadoop-auth - ${hadoop.version} - org.apache.curator * @@ -132,34 +125,41 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> zookeeper - log4j - log4j + org.codehaus.jackson + jackson-core-asl - ch.qos.reload4j - reload4j + org.codehaus.jackson + jackson-jaxrs + + + org.codehaus.jackson + jackson-mapper-asl + + + org.codehaus.jackson + jackson-xc org.slf4j * + + org.xerial.snappy + snappy-java + - - com.nimbusds - nimbus-jose-jwt - - - - commons-cli - commons-cli - org.apache.hadoop hadoop-hdfs ${hadoop.version} compile + + ch.qos.reload4j + reload4j + com.sun.jersey * @@ -168,17 +168,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.netty 
* - - org.fusesource.leveldbjni - leveldbjni-all - log4j log4j - ch.qos.reload4j - reload4j + org.fusesource.leveldbjni + leveldbjni-all org.slf4j diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 5df30c7dfdd..48bdff714fb 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,17 +21,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-test 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop test dependencies - - Apache Ozone HDDS Hadoop Test dependencies jar + Apache Ozone HDDS Hadoop Test dependencies + Apache Ozone Distributed Data Store Hadoop test dependencies - true - true + + true + + commons-codec + commons-codec + + + org.apache.commons + commons-compress + org.apache.hadoop hadoop-common @@ -59,14 +63,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - commons-codec - commons-codec - - - org.apache.commons - commons-compress - org.assertj From 2fc9c6e5f16724ef4bdc4ba4f9e1988e959a25cf Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Wed, 8 Jan 2025 10:03:33 +0530 Subject: [PATCH 13/15] HDDS-12034. Enable sortpom in hdds-interface-admin, -client and -server. (#7660) --- hadoop-hdds/interface-admin/pom.xml | 21 +++----- hadoop-hdds/interface-client/pom.xml | 78 +++++++++------------------- hadoop-hdds/interface-server/pom.xml | 58 +++++++-------------- 3 files changed, 53 insertions(+), 104 deletions(-) diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index 94122423085..047db244faa 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,15 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-admin 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Admin interface - - Apache Ozone HDDS Admin Interface jar + Apache Ozone HDDS Admin Interface + Apache Ozone Distributed Data Store Admin interface - true - true - true + + true + + true @@ -72,9 +69,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ target/generated-sources/java false diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index b373d11d507..da6dec5cda4 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,15 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-client 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Client interface - - Apache Ozone HDDS Client Interface jar + Apache Ozone HDDS Client Interface + Apache Ozone Distributed Data Store Client interface - true - true - true + + true + + true @@ -40,6 +37,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.protobuf protobuf-java + + javax.annotation + javax.annotation-api + org.apache.hadoop.thirdparty hadoop-shaded-protobuf_3_25 @@ -49,10 +50,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ratis-thirdparty-misc ${ratis.thirdparty.version} - - javax.annotation - javax.annotation-api - @@ -82,9 +79,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile-custom - - com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ DatanodeClientProtocol.proto @@ -93,9 +88,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> target/generated-sources/java false grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -105,9 +98,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ hdds.proto @@ -124,9 +115,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ hdds.proto @@ -143,38 +132,21 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${maven-antrun-plugin.version} + + run + generate-sources - - - - - - - - - - - - + + + + + + + - - run - diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index 539a0a5430e..83aa5f72e36 100644 --- a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,22 +21,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-server 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Server interface - - Apache Ozone HDDS Server Interface jar + Apache Ozone HDDS Server Interface + Apache Ozone Distributed Data Store Server interface - true - true - true + + true + + true - - org.apache.ratis - ratis-thirdparty-misc - com.google.protobuf protobuf-java @@ -50,6 +43,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-interface-client + + org.apache.ratis + ratis-thirdparty-misc + @@ -79,9 +76,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile-custom - - com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ InterSCMProtocol.proto @@ -90,9 +85,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> target/generated-sources/java false grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -102,9 +95,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ InterSCMProtocol.proto @@ -121,26 +112,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${maven-antrun-plugin.version} + + run + generate-sources - - - - - - + + + - - run - From 36a430db458a0b9b913a51c28199694f9273c64f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 8 Jan 2025 06:43:36 +0100 Subject: [PATCH 14/15] HDDS-7307. 
Move S3 Gateway web content to separate port (#6898) --- .../src/main/resources/ozone-default.xml | 42 ++++ .../hdds/server/http/BaseHttpServer.java | 22 ++- .../hadoop/hdds/server/http/HttpServer2.java | 47 +++-- .../main/compose/ozone/docker-compose.yaml | 1 + .../src/main/compose/ozone/prometheus.yml | 4 +- .../src/main/smoketest/s3/bucketcreate.robot | 12 +- .../src/main/smoketest/s3/commonawslib.robot | 14 ++ .../smoketest/s3/s3_compatbility_check.sh | 1 + .../main/smoketest/s3/secretgenerate.robot | 2 +- .../src/main/smoketest/s3/secretrevoke.robot | 3 +- .../dist/src/main/smoketest/s3/webui.robot | 13 +- .../dist/src/main/smoketest/spnego/web.robot | 2 +- .../hadoop/ozone/s3/AuthorizationFilter.java | 6 - .../org/apache/hadoop/ozone/s3/Gateway.java | 6 + .../hadoop/ozone/s3/GatewayApplication.java | 2 +- .../ozone/s3/RootPageDisplayFilter.java | 64 ------ .../hadoop/ozone/s3/S3GatewayConfigKeys.java | 14 ++ .../hadoop/ozone/s3/S3GatewayHttpServer.java | 80 +------- .../ozone/s3/S3GatewayWebAdminServer.java | 186 ++++++++++++++++++ .../ozone/s3/VirtualHostStyleFilter.java | 7 - .../hadoop/ozone/s3secret/Application.java} | 16 +- .../s3secret/S3SecretManagementEndpoint.java | 2 +- .../resources/webapps/s3g-web/WEB-INF/web.xml | 33 ++++ .../{static => s3g-web}/images/ozone.ico | Bin .../webapps/{static => s3g-web}/index.html | 24 +-- .../webapps/s3gateway/WEB-INF/web.xml | 11 +- .../ozone/s3/TestAuthorizationFilter.java | 22 --- .../ozone/s3/TestVirtualHostStyleFilter.java | 23 --- 28 files changed, 388 insertions(+), 271 deletions(-) delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java rename hadoop-ozone/s3gateway/src/main/{resources/webapps/static/s3g.js => java/org/apache/hadoop/ozone/s3secret/Application.java} (75%) create mode 100644 hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml rename hadoop-ozone/s3gateway/src/main/resources/webapps/{static => s3g-web}/images/ozone.ico (100%) rename hadoop-ozone/s3gateway/src/main/resources/webapps/{static => s3g-web}/index.html (74%) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 1fcef139daf..dfd058f5d70 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1892,6 +1892,48 @@ interfaces by setting it to 0.0.0.0. + + ozone.s3g.webadmin.http.enabled + true + OZONE, S3GATEWAY + This option can be used to disable the web server which serves additional content in Ozone S3 Gateway. + + + + ozone.s3g.webadmin.https-address + + OZONE, S3GATEWAY + Ozone S3Gateway content server's HTTPS address and port. + + + + ozone.s3g.webadmin.https-bind-host + + OZONE, S3GATEWAY + The actual address the HTTPS server will bind to. If this optional address + is set, it overrides only the hostname portion of ozone.s3g.webadmin.https-address. + This is useful for making the Ozone S3Gateway HTTPS server listen on all + interfaces by setting it to 0.0.0.0. + + + + ozone.s3g.webadmin.http-address + 0.0.0.0:19878 + OZONE, S3GATEWAY + The address and port where Ozone S3Gateway serves + web content. + + + + ozone.s3g.webadmin.http-bind-host + 0.0.0.0 + OZONE, S3GATEWAY + The actual address the HTTP server will bind to. 
If this optional address + is set, it overrides only the hostname portion of ozone.s3g.webadmin.http-address. + This is useful for making the Ozone S3Gateway HTTP server listen on all + interfaces by setting it to 0.0.0.0. + + ozone.s3g.http.auth.kerberos.principal HTTP/_HOST@REALM diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java index 44c18231549..ffa91404688 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java @@ -139,14 +139,23 @@ public BaseHttpServer(MutableConfigurationSource conf, String name) builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue); + boolean addDefaultApps = shouldAddDefaultApps(); + if (!addDefaultApps) { + builder.withoutDefaultApps(); + } + httpServer = builder.build(); - httpServer.addServlet("conf", "/conf", HddsConfServlet.class); - httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class); - prometheusSupport = + // TODO move these to HttpServer2.addDefaultApps + if (addDefaultApps) { + httpServer.addServlet("conf", "/conf", HddsConfServlet.class); + httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class); + } + + prometheusSupport = addDefaultApps && conf.getBoolean(HddsConfigKeys.HDDS_PROMETHEUS_ENABLED, true); - profilerSupport = + profilerSupport = addDefaultApps && conf.getBoolean(HddsConfigKeys.HDDS_PROFILER_ENABLED, false); if (prometheusSupport) { @@ -477,4 +486,9 @@ public boolean isSecurityEnabled() { protected abstract String getHttpAuthConfigPrefix(); + /** Override to disable the default servlets. 
*/ + protected boolean shouldAddDefaultApps() { + return true; + } + } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index 9d037fed6bc..691f5374e6f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -250,6 +250,7 @@ public static class Builder { private boolean xFrameEnabled; private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN; + private boolean skipDefaultApps; public Builder setName(String serverName) { this.name = serverName; @@ -446,6 +447,11 @@ private void loadSSLConfiguration() throws IOException { excludeCiphers = sslConf.get(SSLFactory.SSL_SERVER_EXCLUDE_CIPHER_LIST); } + public Builder withoutDefaultApps() { + this.skipDefaultApps = true; + return this; + } + public HttpServer2 build() throws IOException { Preconditions.checkNotNull(name, "name is not set"); Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified"); @@ -592,18 +598,13 @@ private HttpServer2(final Builder b) throws IOException { this.findPort = b.findPort; this.portRanges = b.portRanges; - initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs, - b.authFilterConfigurationPrefix, b.securityEnabled); + initializeWebServer(b); } - private void initializeWebServer(String name, String hostName, - MutableConfigurationSource conf, String[] pathSpecs, - String authFilterConfigPrefix, - boolean securityEnabled) throws IOException { - + private void initializeWebServer(Builder builder) throws IOException { Preconditions.checkNotNull(webAppContext); - int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY, -1); + int maxThreads = builder.conf.getInt(HTTP_MAX_THREADS_KEY, -1); // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the // default value (currently 250). 
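Taken together, the two changes above give servers a clean opt-out: a BaseHttpServer subclass overrides shouldAddDefaultApps(), which maps onto the new Builder#withoutDefaultApps(). A minimal sketch of the resulting Builder usage follows; the name, endpoint and the setConf call are illustrative assumptions, not taken from this patch.

// Illustrative sketch, not part of the patch: building an HttpServer2 that
// skips the default servlets (/conf, /logstream, /prom, /prof), as the S3 API
// server now does. Name, endpoint and the setConf call are assumptions.
import java.net.URI;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.server.http.HttpServer2;

public final class BareHttpServerExample {
  public static void main(String[] args) throws Exception {
    HttpServer2 server = new HttpServer2.Builder()
        .setName("example")                              // build() requires a name
        .addEndpoint(URI.create("http://0.0.0.0:9878"))  // build() requires at least one endpoint
        .setConf(new OzoneConfiguration())               // assumed setter for the Builder's conf field
        .withoutDefaultApps()                            // new in this patch: sets skipDefaultApps
        .build();
    server.start();
    server.stop();
  }
}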
@@ -613,13 +614,13 @@ private void initializeWebServer(String name, String hostName, threadPool.setMaxThreads(maxThreads); } - metrics = HttpServer2Metrics.create(threadPool, name); + metrics = HttpServer2Metrics.create(threadPool, builder.name); SessionHandler handler = webAppContext.getSessionHandler(); handler.setHttpOnly(true); handler.getSessionCookieConfig().setSecure(true); ContextHandlerCollection contexts = new ContextHandlerCollection(); - RequestLog requestLog = HttpRequestLog.getRequestLog(name); + RequestLog requestLog = HttpRequestLog.getRequestLog(builder.name); handlers.addHandler(contexts); if (requestLog != null) { @@ -628,20 +629,22 @@ private void initializeWebServer(String name, String hostName, handlers.addHandler(requestLogHandler); } handlers.addHandler(webAppContext); - final String appDir = getWebAppsPath(name); - addDefaultApps(contexts, appDir, conf); + final String appDir = getWebAppsPath(builder.name); + if (!builder.skipDefaultApps) { + addDefaultApps(contexts, appDir, builder.conf); + } webServer.setHandler(handlers); - Map config = generateFilterConfiguration(conf); + Map config = generateFilterConfiguration(builder.conf); addGlobalFilter("safety", QuotingInputFilter.class.getName(), config); - final FilterInitializer[] initializers = getFilterInitializers(conf); + final FilterInitializer[] initializers = getFilterInitializers(builder.conf); if (initializers != null) { - conf.set(BIND_ADDRESS, hostName); + builder.conf.set(BIND_ADDRESS, builder.hostName); org.apache.hadoop.conf.Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + LegacyHadoopConfigurationSource.asHadoopConfiguration(builder.conf); Map filterConfig = getFilterConfigMap(hadoopConf, - authFilterConfigPrefix); + builder.authFilterConfigurationPrefix); for (FilterInitializer c : initializers) { - if ((c instanceof AuthenticationFilterInitializer) && securityEnabled) { + if ((c instanceof AuthenticationFilterInitializer) && builder.securityEnabled) { addFilter("authentication", AuthenticationFilter.class.getName(), filterConfig); } else { @@ -650,10 +653,12 @@ private void initializeWebServer(String name, String hostName, } } - addDefaultServlets(); + if (!builder.skipDefaultApps) { + addDefaultServlets(); + } - if (pathSpecs != null) { - for (String path : pathSpecs) { + if (builder.pathSpecs != null) { + for (String path : builder.pathSpecs) { LOG.info("adding path spec: {}", path); addFilterPathMapping(path, webAppContext); } diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml index b5424cc7c88..3aa99da311e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml @@ -73,6 +73,7 @@ services: <<: *replication ports: - 9878:9878 + - 19878:19878 command: ["ozone","s3g"] recon: <<: *common-config diff --git a/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml b/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml index a88c30d57f4..562bcb9e71b 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml +++ b/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml @@ -28,7 +28,7 @@ scrape_configs: labels: component: om - targets: - - "s3g:9878" + - "s3g:19878" labels: component: s3g - targets: # During compose bring up the number of datanodes can be specific, adding 10 nodes to account for that. 
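The scrape target moves from s3g:9878 to s3g:19878 because the /prom servlet now lives on the web-admin server. A small sketch of how that address comes out of the new keys; the key string and default are the ones added to ozone-default.xml above, the rest is illustrative.

// Illustrative sketch: resolving the new S3 Gateway web-admin address from
// configuration. The key and default value match this patch; the printout is
// just for demonstration.
import java.net.InetSocketAddress;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.net.NetUtils;

public final class WebAdminAddressExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    String addr = conf.get("ozone.s3g.webadmin.http-address", "0.0.0.0:19878");
    InetSocketAddress socket = NetUtils.createSocketAddr(addr);
    // docker-compose, Prometheus and the smoketests in this patch all use this port.
    System.out.println("S3 Gateway web admin on port " + socket.getPort());
  }
}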
@@ -60,4 +60,4 @@ scrape_configs: - targets: - "recon:9888" labels: - component: recon \ No newline at end of file + component: recon diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot index 39ddbde41b0..b8c0820bfd5 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot @@ -21,7 +21,7 @@ Resource ../commonlib.robot Resource commonawslib.robot Test Timeout 5 minutes Suite Setup Setup s3 tests -Default Tags no-bucket-type +Test Tags no-bucket-type *** Variables *** ${ENDPOINT_URL} http://s3g:9878 @@ -43,6 +43,7 @@ Create bucket with invalid bucket name Should contain ${result} InvalidBucketName Create new bucket and check default group ACL + [tags] aws-skip ${bucket} = Create bucket ${acl} = Execute ozone sh bucket getacl s3v/${bucket} ${group} = Get Regexp Matches ${acl} "GROUP" @@ -53,3 +54,12 @@ Create new bucket and check default group ACL Should contain ${json}[1][aclList] READ Should contain ${json}[1][aclList] LIST END + +Test buckets named like web endpoints + [tags] aws-skip + ${path} = Create Random File KB 64 + + FOR ${name} IN conf jmx logs logstream prof prom secret stacks static + Create bucket with name ${name} + Put object to bucket bucket=${name} key=testkey path=${path} + END diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index ac64ee36537..7b5bee321bb 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -172,6 +172,20 @@ Generate random prefix ${random} = Generate Ozone String Set Global Variable ${PREFIX} ${random} +# Verify object put by listing and getting it +Put object to bucket + [arguments] ${bucket} ${key} ${path} + + Execute AWSS3ApiCli put-object --bucket ${bucket} --key ${key} --body ${path} + + ${result} = Execute AWSS3ApiCli list-objects --bucket ${bucket} + Should contain ${result} ${key} + + Execute AWSS3ApiCli get-object --bucket ${bucket} --key ${key} ${path}.verify + Compare files ${path} ${path}.verify + + [teardown] Remove File ${path}.verify + Revoke S3 secrets Execute and Ignore Error ozone s3 revokesecret -y Execute and Ignore Error ozone s3 revokesecret -y -u testuser diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh index ab2807167d0..f147de90852 100755 --- a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh +++ b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh @@ -60,6 +60,7 @@ run_robot_test() { TEST_NAME=$1 robot \ --nostatusrc \ + --exclude aws-skip \ -v ENDPOINT_URL:https://s3.$OZONE_TEST_S3_REGION.amazonaws.com \ -v BUCKET:$OZONE_TEST_S3_BUCKET1 \ -v DESTBUCKET:$OZONE_TEST_S3_BUCKET2 \ diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index e0c2fc7f818..db561397e1c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -27,7 +27,7 @@ Test Setup Run Keywords Kinit test user testuser testuser.k Test Teardown Run Keyword Revoke S3 secrets *** Variables *** -${ENDPOINT_URL} http://s3g:9878 +${ENDPOINT_URL} http://s3g:19878 ${SECURITY_ENABLED} true *** Test Cases *** diff --git 
a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index ffb03a85a8a..ed66a27a578 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -24,9 +24,10 @@ Test Timeout 5 minutes Default Tags no-bucket-type Test Setup Run Keywords Kinit test user testuser testuser.keytab ... AND Revoke S3 secrets +Suite Teardown Setup v4 headers *** Variables *** -${ENDPOINT_URL} http://s3g:9878 +${ENDPOINT_URL} http://s3g:19878 ${SECURITY_ENABLED} true *** Test Cases *** diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot b/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot index 43bd76659bd..896a86c7e4e 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot @@ -24,15 +24,12 @@ Suite Setup Setup s3 tests Default Tags no-bucket-type *** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated + +${S3G_WEB_UI} http://s3g:19878 + *** Test Cases *** -S3 Gateway Web UI - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL} - Should contain ${result} Location: ignore_case=True - Should contain ${result} /static/ - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL}/static/index.html +Check web UI + ${result} = Execute curl --negotiate -u : -v ${S3G_WEB_UI} Should contain ${result} Apache Ozone S3 diff --git a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot index 654f8aef675..b18a99443ce 100644 --- a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot +++ b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot @@ -27,7 +27,7 @@ ${SCM} scm ${OM_URL} http://om:9874 ${RECON_URL} http://recon:9888 -${S3G_URL} http://s3g:9878 +${S3G_URL} http://s3g:19878 ${SCM_URL} http://${SCM}:9876 @{BASE_URLS} ${OM_URL} ${RECON_URL} ${S3G_URL} ${SCM_URL} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index cc63663bf22..d71dc83a049 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -65,12 +65,6 @@ public class AuthorizationFilter implements ContainerRequestFilter { @Override public void filter(ContainerRequestContext context) throws IOException { - // Skip authentication if the uri is hitting S3Secret generation or - // revocation endpoint. 
- if (context.getUriInfo().getRequestUri().getPath().startsWith("/secret")) { - return; - } - try { signatureInfo.initialize(signatureProcessor.parseSignature()); if (signatureInfo.getVersion() == Version.V4) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java index 511592d3a04..96effa757b5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.http.BaseHttpServer; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.ozone.OzoneSecurityUtil; @@ -59,6 +60,8 @@ public class Gateway extends GenericCli implements Callable { private static final Logger LOG = LoggerFactory.getLogger(Gateway.class); private S3GatewayHttpServer httpServer; + /** Servlets and static content on separate port. */ + private BaseHttpServer contentServer; private S3GatewayMetrics metrics; private final JvmPauseMonitor jvmPauseMonitor = newJvmPauseMonitor("S3G"); @@ -80,6 +83,7 @@ public Void call() throws Exception { loginS3GUser(OzoneConfigurationHolder.configuration()); setHttpBaseDir(OzoneConfigurationHolder.configuration()); httpServer = new S3GatewayHttpServer(OzoneConfigurationHolder.configuration(), "s3gateway"); + contentServer = new S3GatewayWebAdminServer(OzoneConfigurationHolder.configuration(), "s3g-web"); metrics = S3GatewayMetrics.create(OzoneConfigurationHolder.configuration()); start(); @@ -103,11 +107,13 @@ public void start() throws IOException { HddsServerUtil.initializeMetrics(OzoneConfigurationHolder.configuration(), "S3Gateway"); jvmPauseMonitor.start(); httpServer.start(); + contentServer.start(); } public void stop() throws Exception { LOG.info("Stopping Ozone S3 gateway"); httpServer.stop(); + contentServer.stop(); jvmPauseMonitor.stop(); S3GatewayMetrics.unRegister(); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java index 778b375a66e..c5a291b4450 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java @@ -24,6 +24,6 @@ */ public class GatewayApplication extends ResourceConfig { public GatewayApplication() { - packages("org.apache.hadoop.ozone.s3", "org.apache.hadoop.ozone.s3secret"); + packages("org.apache.hadoop.ozone.s3"); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java deleted file mode 100644 index 5cd3bd85f00..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.servlet.Filter; -import javax.servlet.FilterChain; -import javax.servlet.FilterConfig; -import javax.servlet.ServletException; -import javax.servlet.ServletRequest; -import javax.servlet.ServletResponse; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.IOException; - -/** - * This redirect helps to show and info page in case the endpoint is opened - * from the browser. - */ -public class RootPageDisplayFilter implements Filter { - - @Override - public void init(FilterConfig filterConfig) throws ServletException { - - } - - @Override - public void doFilter(ServletRequest servletRequest, - ServletResponse servletResponse, FilterChain filterChain) - throws IOException, ServletException { - HttpServletRequest httpRequest = (HttpServletRequest) servletRequest; - String httpMethod = httpRequest.getMethod(); - String uri = httpRequest.getRequestURI(); - String authorizationHeader = httpRequest.getHeader("Authorization"); - if (httpMethod.equalsIgnoreCase("GET") && !containsAWSAuth(authorizationHeader) && uri.equals("/")) { - ((HttpServletResponse) servletResponse).sendRedirect("/static/"); - } else { - filterChain.doFilter(httpRequest, servletResponse); - } - } - - private boolean containsAWSAuth(String authorizationHeader) { - return authorizationHeader != null && authorizationHeader.startsWith("AWS"); - } - - @Override - public void destroy() { - - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java index 9160025a016..3b9155298c6 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java @@ -38,10 +38,24 @@ public final class S3GatewayConfigKeys { public static final String OZONE_S3G_HTTPS_ADDRESS_KEY = "ozone.s3g.https-address"; + public static final String OZONE_S3G_WEBADMIN_HTTP_ENABLED_KEY = + "ozone.s3g.webadmin.http.enabled"; + public static final String OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY = + "ozone.s3g.webadmin.http-bind-host"; + public static final String OZONE_S3G_WEBADMIN_HTTPS_BIND_HOST_KEY = + "ozone.s3g.webadmin.https-bind-host"; + public static final String OZONE_S3G_WEBADMIN_HTTP_ADDRESS_KEY = + "ozone.s3g.webadmin.http-address"; + public static final String OZONE_S3G_WEBADMIN_HTTPS_ADDRESS_KEY = + "ozone.s3g.webadmin.https-address"; + public static final String OZONE_S3G_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; public static final int OZONE_S3G_HTTP_BIND_PORT_DEFAULT = 9878; public static final int OZONE_S3G_HTTPS_BIND_PORT_DEFAULT = 9879; + public static final int OZONE_S3G_WEBADMIN_HTTP_BIND_PORT_DEFAULT = 19878; + public static final int OZONE_S3G_WEBADMIN_HTTPS_BIND_PORT_DEFAULT = 19879; + public static 
final String OZONE_S3G_DOMAIN_NAME = "ozone.s3g.domain.name"; public static final String OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX = diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java index 8b6af74e072..ae7b428d363 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java @@ -17,34 +17,12 @@ */ package org.apache.hadoop.ozone.s3; -import com.google.common.base.Strings; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.server.http.BaseHttpServer; -import org.apache.hadoop.hdds.server.http.ServletElementsFactory; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.eclipse.jetty.servlet.FilterHolder; -import org.eclipse.jetty.servlet.FilterMapping; -import org.eclipse.jetty.servlet.ServletHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_KEY; -import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE; -import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT; - /** * Http server to provide S3-compatible API. 
*/ @@ -61,50 +39,11 @@ public class S3GatewayHttpServer extends BaseHttpServer { public S3GatewayHttpServer(MutableConfigurationSource conf, String name) throws IOException { super(conf, name); - addServlet("icon", "/favicon.ico", IconServlet.class); - addSecretAuthentication(conf); } - private void addSecretAuthentication(MutableConfigurationSource conf) - throws IOException { - - if (conf.getBoolean(OZONE_S3G_SECRET_HTTP_ENABLED_KEY, - OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT)) { - String authType = conf.get(OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY, - OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT); - - if (UserGroupInformation.isSecurityEnabled() - && authType.equals("kerberos")) { - ServletHandler handler = getWebAppContext().getServletHandler(); - Map params = new HashMap<>(); - - String principalInConf = - conf.get(OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL); - if (!Strings.isNullOrEmpty(principalInConf)) { - params.put("kerberos.principal", SecurityUtil.getServerPrincipal( - principalInConf, conf.get(OZONE_S3G_HTTP_BIND_HOST_KEY))); - } - String httpKeytab = conf.get(OZONE_S3G_KEYTAB_FILE); - if (!Strings.isNullOrEmpty(httpKeytab)) { - params.put("kerberos.keytab", httpKeytab); - } - params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); - - FilterHolder holder = ServletElementsFactory.createFilterHolder( - "secretAuthentication", AuthenticationFilter.class.getName(), - params); - FilterMapping filterMapping = - ServletElementsFactory.createFilterMapping( - "secretAuthentication", - new String[]{"/secret/*"}); - - handler.addFilter(holder, filterMapping); - } else { - LOG.error("Secret Endpoint should be secured with Kerberos"); - throw new IllegalStateException("Secret Endpoint should be secured" - + " with Kerberos"); - } - } + @Override + protected boolean shouldAddDefaultApps() { + return false; } @Override @@ -167,17 +106,4 @@ protected String getHttpAuthConfigPrefix() { return S3GatewayConfigKeys.OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX; } - /** - * Servlet for favicon.ico. - */ - public static class IconServlet extends HttpServlet { - private static final long serialVersionUID = -1L; - - @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - response.setContentType("image/png"); - response.sendRedirect("/static/images/ozone.ico"); - } - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java new file mode 100644 index 00000000000..81c9a4f8ca3 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.s3; + +import com.google.common.base.Strings; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.hdds.conf.MutableConfigurationSource; +import org.apache.hadoop.hdds.server.http.BaseHttpServer; +import org.apache.hadoop.hdds.server.http.ServletElementsFactory; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.eclipse.jetty.servlet.FilterHolder; +import org.eclipse.jetty.servlet.FilterMapping; +import org.eclipse.jetty.servlet.ServletHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTPS_BIND_HOST_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTPS_BIND_PORT_DEFAULT; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_BIND_PORT_DEFAULT; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_ENABLED_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_AUTH_TYPE; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_DEFAULT; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT; +import static org.apache.hadoop.security.authentication.server.AuthenticationFilter.AUTH_TYPE; + +/** + * HTTP server for serving static content and Ozone-specific endpoints (/conf, etc.). 
+ */ +class S3GatewayWebAdminServer extends BaseHttpServer { + + private static final Logger LOG = + LoggerFactory.getLogger(S3GatewayWebAdminServer.class); + + S3GatewayWebAdminServer(MutableConfigurationSource conf, String name) throws IOException { + super(conf, name); + addServlet("icon", "/favicon.ico", IconServlet.class); + addSecretAuthentication(conf); + } + + private void addSecretAuthentication(MutableConfigurationSource conf) + throws IOException { + + if (conf.getBoolean(OZONE_S3G_SECRET_HTTP_ENABLED_KEY, + OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT)) { + String authType = conf.get(OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY, + OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT); + + if (UserGroupInformation.isSecurityEnabled() + && authType.equals("kerberos")) { + ServletHandler handler = getWebAppContext().getServletHandler(); + Map params = new HashMap<>(); + + String principalInConf = + conf.get(OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL); + if (!Strings.isNullOrEmpty(principalInConf)) { + params.put("kerberos.principal", SecurityUtil.getServerPrincipal( + principalInConf, conf.get(OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY))); + } + String httpKeytab = conf.get(OZONE_S3G_KEYTAB_FILE); + if (!Strings.isNullOrEmpty(httpKeytab)) { + params.put("kerberos.keytab", httpKeytab); + } + params.put(AUTH_TYPE, "kerberos"); + + FilterHolder holder = ServletElementsFactory.createFilterHolder( + "secretAuthentication", AuthenticationFilter.class.getName(), + params); + FilterMapping filterMapping = + ServletElementsFactory.createFilterMapping( + "secretAuthentication", + new String[]{"/secret/*"}); + + handler.addFilter(holder, filterMapping); + } else { + LOG.error("Secret Endpoint should be secured with Kerberos"); + throw new IllegalStateException("Secret Endpoint should be secured" + + " with Kerberos"); + } + } + } + + @Override + protected String getHttpAddressKey() { + return OZONE_S3G_WEBADMIN_HTTP_ADDRESS_KEY; + } + + @Override + protected String getHttpBindHostKey() { + return OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY; + } + + @Override + protected String getHttpsAddressKey() { + return OZONE_S3G_WEBADMIN_HTTPS_ADDRESS_KEY; + } + + @Override + protected String getHttpsBindHostKey() { + return OZONE_S3G_WEBADMIN_HTTPS_BIND_HOST_KEY; + } + + @Override + protected String getBindHostDefault() { + return OZONE_S3G_HTTP_BIND_HOST_DEFAULT; + } + + @Override + protected int getHttpBindPortDefault() { + return OZONE_S3G_WEBADMIN_HTTP_BIND_PORT_DEFAULT; + } + + @Override + protected int getHttpsBindPortDefault() { + return OZONE_S3G_WEBADMIN_HTTPS_BIND_PORT_DEFAULT; + } + + @Override + protected String getKeytabFile() { + return OZONE_S3G_KEYTAB_FILE; + } + + @Override + protected String getSpnegoPrincipal() { + return OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; + } + + @Override + protected String getEnabledKey() { + return OZONE_S3G_WEBADMIN_HTTP_ENABLED_KEY; + } + + @Override + protected String getHttpAuthType() { + return OZONE_S3G_HTTP_AUTH_TYPE; + } + + @Override + protected String getHttpAuthConfigPrefix() { + return OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX; + } + + /** + * Servlet for favicon.ico. 
+ */ + public static class IconServlet extends HttpServlet { + private static final long serialVersionUID = -1L; + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws IOException { + response.setContentType("image/png"); + response.sendRedirect("/images/ozone.ico"); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index 32c1eb9eb23..6b4ae47db71 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -63,13 +63,6 @@ public class VirtualHostStyleFilter implements ContainerRequestFilter { @Override public void filter(ContainerRequestContext requestContext) throws IOException { - // Skip this filter if the uri is hitting S3Secret generation or - // revocation endpoint. - if (requestContext.getUriInfo().getRequestUri().getPath() - .startsWith("/secret")) { - return; - } - domains = conf.getTrimmedStrings(OZONE_S3G_DOMAIN_NAME); if (domains.length == 0) { diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/Application.java similarity index 75% rename from hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js rename to hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/Application.java index f32b47fd823..07c8f919214 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/Application.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.ozone.s3secret; + +import org.glassfish.jersey.server.ResourceConfig; -window.onload = function () { - document.getElementById('s3gurl').innerHTML = window.location.origin; -}; +/** + * JaxRS resource definition. + */ +public class Application extends ResourceConfig { + public Application() { + packages("org.apache.hadoop.ozone.s3secret"); + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 739dadfb28e..e5ad1c3a57f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -38,7 +38,7 @@ /** * Endpoint to manage S3 secret. 
*/ -@Path("/secret") +@Path("/") @S3SecretEnabled @S3AdminEndpoint public class S3SecretManagementEndpoint extends S3SecretEndpointBase { diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml new file mode 100644 index 00000000000..092c8a41af5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml @@ -0,0 +1,33 @@ + + + + secret + org.glassfish.jersey.servlet.ServletContainer + + javax.ws.rs.Application + org.apache.hadoop.ozone.s3secret.Application + + 1 + + + secret + /secret/* + + + org.jboss.weld.environment.servlet.Listener + + diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/images/ozone.ico similarity index 100% rename from hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico rename to hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/images/ozone.ico diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/index.html similarity index 74% rename from hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html rename to hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/index.html index 32e02172a64..7489f93b6f7 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/index.html @@ -21,15 +21,14 @@ - S3 gateway -- Apache Ozone - - - + + + @@ -50,7 +49,7 @@

@@ -63,21 +62,12 @@
         <h1>S3 gateway</h1>

-        <p>This is an endpoint of Apache Ozone S3 gateway. Use it with any
-            AWS S3 compatible tool
-            with setting this url as an endpoint</p>
-
-        <p>For example with aws-cli:</p>
-
-        <pre>aws s3api --endpoint <span id="s3gurl"></span> create-bucket --bucket=wordcount</pre>
-
-        <p>For more information, please check the documentation.</p>
+        <p>For more information, please check the documentation.</p>
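
A quick usage sketch (not part of the patch): the secret endpoints keep their
pre-patch URLs, because the new s3g-web web.xml mounts the Jersey servlet at
/secret/* while the resource class is now rooted at "/". The snippet below
assumes a gateway reachable at s3g:9878 (the host:port used by the removed
tests) with security disabled; on a secured cluster these @S3AdminEndpoint
resources would additionally require admin credentials. The class name is
hypothetical.

    import java.net.HttpURLConnection;
    import java.net.URL;

    /** Minimal sketch of calling the relocated S3 secret endpoint. */
    public final class S3SecretClientSketch {
      public static void main(String[] args) throws Exception {
        // Same external path as before the patch: servlet prefix /secret
        // plus @Path("/") on S3SecretManagementEndpoint.
        URL generate = new URL("http://s3g:9878/secret/generate");
        HttpURLConnection conn = (HttpURLConnection) generate.openConnection();
        conn.setRequestMethod("POST");  // generation is a POST, per the removed tests
        System.out.println("generate -> HTTP " + conn.getResponseCode());
        conn.disconnect();
      }
    }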

diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml
index 79bf7b9855c..b3d7a72b2cd 100644
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml
+++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml
@@ -32,22 +32,13 @@
     <filter-class>org.apache.hadoop.ozone.s3.EmptyContentTypeFilter</filter-class>
   </filter>
-  <filter>
-    <filter-name>info-page-redirect</filter-name>
-    <filter-class>org.apache.hadoop.ozone.s3.RootPageDisplayFilter</filter-class>
-  </filter>
   <filter-mapping>
     <filter-name>optional-content-type</filter-name>
     <url-pattern>/*</url-pattern>
   </filter-mapping>
-  <filter-mapping>
-    <filter-name>info-page-redirect</filter-name>
-    <url-pattern>/*</url-pattern>
-  </filter-mapping>
   <listener>
     <listener-class>org.jboss.weld.environment.servlet.Listener</listener-class>
   </listener>
-</web-app>
\ No newline at end of file
+</web-app>
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java
index c42036cb1a3..294d50fe69f 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java
@@ -207,28 +207,6 @@ void testAuthFilterFailures(
            DATETIME,
            "",
            "/key1"
-        ),
-        // S3 secret generation endpoint
-        arguments(
-            "POST",
-            null,
-            null,
-            "s3g:9878",
-            null,
-            null,
-            "",
-            "/secret/generate"
-        ),
-        // S3 secret generation endpoint
-        arguments(
-            "POST",
-            null,
-            null,
-            "s3g:9878",
-            null,
-            null,
-            "",
-            "/secret/revoke"
        )
    );
  }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index 3051fb2276e..89679e9f845 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -125,29 +125,6 @@ public void testPathStyle() throws Exception {

  }

-  @Test
-  public void testS3SecretEndpoint() throws Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        ".localhost:9878", "/secret/generate",
-        null, true);
-    virtualHostStyleFilter.filter(containerRequest);
-    URI expected = new URI("http://" + s3HttpAddr + "/secret/generate");
-    assertEquals(expected, containerRequest.getRequestUri());
-
-    containerRequest = createContainerRequest("mybucket" +
-        ".localhost:9878", "/secret/revoke",
-        null, true);
-    virtualHostStyleFilter.filter(containerRequest);
-    expected = new URI("http://" + s3HttpAddr + "/secret/revoke");
-    assertEquals(expected, containerRequest.getRequestUri());
-
-  }
-
  @Test
  public void testVirtualHostStyleWithCreateBucketRequest()
      throws Exception {

From f8394cf5b7afa07f4cf1752a82723d49d063bf0b Mon Sep 17 00:00:00 2001
From: Sumit Agrawal
Date: Wed, 8 Jan 2025 11:58:29 +0530
Subject: [PATCH 15/15] HDDS-11975.
wrap TermIndex in ExecutionContext (#7602) --- .../flowcontrol/ExecutionContext.java | 48 +++++++++++++++++++ .../execution/flowcontrol/package-info.java | 22 +++++++++ .../om/ratis/OzoneManagerStateMachine.java | 4 +- .../ozone/om/request/OMClientRequest.java | 8 ++-- .../request/bucket/OMBucketCreateRequest.java | 6 +-- .../request/bucket/OMBucketDeleteRequest.java | 6 +-- .../bucket/OMBucketSetOwnerRequest.java | 6 +-- .../bucket/OMBucketSetPropertyRequest.java | 6 +-- .../bucket/acl/OMBucketAclRequest.java | 6 +-- .../bucket/acl/OMBucketAddAclRequest.java | 6 +-- .../bucket/acl/OMBucketRemoveAclRequest.java | 6 +-- .../bucket/acl/OMBucketSetAclRequest.java | 6 +-- .../file/OMDirectoryCreateRequest.java | 6 +-- .../file/OMDirectoryCreateRequestWithFSO.java | 6 +-- .../om/request/file/OMFileCreateRequest.java | 6 +-- .../file/OMFileCreateRequestWithFSO.java | 6 +-- .../request/file/OMRecoverLeaseRequest.java | 6 +-- .../request/key/OMAllocateBlockRequest.java | 6 +-- .../key/OMAllocateBlockRequestWithFSO.java | 6 +-- .../key/OMDirectoriesPurgeRequestWithFSO.java | 12 ++--- .../om/request/key/OMKeyCommitRequest.java | 6 +-- .../key/OMKeyCommitRequestWithFSO.java | 6 +-- .../om/request/key/OMKeyCreateRequest.java | 6 +-- .../key/OMKeyCreateRequestWithFSO.java | 6 +-- .../om/request/key/OMKeyDeleteRequest.java | 6 +-- .../key/OMKeyDeleteRequestWithFSO.java | 6 +-- .../om/request/key/OMKeyPurgeRequest.java | 8 ++-- .../om/request/key/OMKeyRenameRequest.java | 6 +-- .../key/OMKeyRenameRequestWithFSO.java | 6 +-- .../om/request/key/OMKeySetTimesRequest.java | 6 +-- .../key/OMKeySetTimesRequestWithFSO.java | 6 +-- .../om/request/key/OMKeysDeleteRequest.java | 6 +-- .../om/request/key/OMKeysRenameRequest.java | 6 +-- .../request/key/OMOpenKeysDeleteRequest.java | 6 +-- .../om/request/key/acl/OMKeyAclRequest.java | 6 +-- .../key/acl/OMKeyAclRequestWithFSO.java | 6 +-- .../request/key/acl/OMKeyAddAclRequest.java | 6 +-- .../key/acl/OMKeyAddAclRequestWithFSO.java | 6 +-- .../key/acl/OMKeyRemoveAclRequest.java | 6 +-- .../key/acl/OMKeyRemoveAclRequestWithFSO.java | 6 +-- .../request/key/acl/OMKeySetAclRequest.java | 6 +-- .../key/acl/OMKeySetAclRequestWithFSO.java | 6 +-- .../key/acl/prefix/OMPrefixAclRequest.java | 6 +-- ...S3ExpiredMultipartUploadsAbortRequest.java | 6 +-- .../S3InitiateMultipartUploadRequest.java | 6 +-- ...InitiateMultipartUploadRequestWithFSO.java | 6 +-- .../S3MultipartUploadAbortRequest.java | 6 +-- .../S3MultipartUploadCommitPartRequest.java | 6 +-- .../S3MultipartUploadCompleteRequest.java | 6 +-- .../s3/security/OMSetSecretRequest.java | 6 +-- .../s3/security/S3GetSecretRequest.java | 6 +-- .../s3/security/S3RevokeSecretRequest.java | 4 +- .../tagging/S3DeleteObjectTaggingRequest.java | 6 +-- .../S3DeleteObjectTaggingRequestWithFSO.java | 6 +-- .../s3/tagging/S3PutObjectTaggingRequest.java | 6 +-- .../S3PutObjectTaggingRequestWithFSO.java | 6 +-- .../OMSetRangerServiceVersionRequest.java | 6 +-- .../s3/tenant/OMTenantAssignAdminRequest.java | 6 +-- .../OMTenantAssignUserAccessIdRequest.java | 6 +-- .../s3/tenant/OMTenantCreateRequest.java | 6 +-- .../s3/tenant/OMTenantDeleteRequest.java | 6 +-- .../s3/tenant/OMTenantRevokeAdminRequest.java | 6 +-- .../OMTenantRevokeUserAccessIdRequest.java | 6 +-- .../OMCancelDelegationTokenRequest.java | 6 +-- .../security/OMGetDelegationTokenRequest.java | 6 +-- .../OMRenewDelegationTokenRequest.java | 6 +-- .../snapshot/OMSnapshotCreateRequest.java | 8 ++-- .../snapshot/OMSnapshotDeleteRequest.java | 6 +-- 
.../OMSnapshotMoveDeletedKeysRequest.java | 12 ++--- .../OMSnapshotMoveTableKeysRequest.java | 12 ++--- .../snapshot/OMSnapshotPurgeRequest.java | 10 ++-- .../snapshot/OMSnapshotRenameRequest.java | 9 ++-- .../OMSnapshotSetPropertyRequest.java | 6 +-- .../upgrade/OMCancelPrepareRequest.java | 8 ++-- .../upgrade/OMFinalizeUpgradeRequest.java | 6 +-- .../om/request/upgrade/OMPrepareRequest.java | 10 ++-- .../request/util/OMEchoRPCWriteRequest.java | 4 +- .../request/volume/OMQuotaRepairRequest.java | 6 +-- .../request/volume/OMVolumeCreateRequest.java | 6 +-- .../request/volume/OMVolumeDeleteRequest.java | 6 +-- .../volume/OMVolumeSetOwnerRequest.java | 6 +-- .../volume/OMVolumeSetQuotaRequest.java | 6 +-- .../volume/acl/OMVolumeAclRequest.java | 6 +-- .../volume/acl/OMVolumeAddAclRequest.java | 6 +-- .../volume/acl/OMVolumeRemoveAclRequest.java | 6 +-- .../volume/acl/OMVolumeSetAclRequest.java | 6 +-- ...ManagerProtocolServerSideTranslatorPB.java | 4 +- .../OzoneManagerRequestHandler.java | 11 +++-- .../ozone/protocolPB/RequestHandler.java | 14 +++--- ...zoneManagerDoubleBufferWithOMResponse.java | 10 ++-- 90 files changed, 362 insertions(+), 282 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java new file mode 100644 index 00000000000..ba21dec10db --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.execution.flowcontrol; + +import org.apache.ratis.server.protocol.TermIndex; + +/** + * Context required for execution of a request. + */ +public final class ExecutionContext { + private final long index; + private final TermIndex termIndex; + + private ExecutionContext(long index, TermIndex termIndex) { + this.index = index; + if (null == termIndex) { + termIndex = TermIndex.valueOf(-1, index); + } + this.termIndex = termIndex; + } + + public static ExecutionContext of(long index, TermIndex termIndex) { + return new ExecutionContext(index, termIndex); + } + + public long getIndex() { + return index; + } + + public TermIndex getTermIndex() { + return termIndex; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java new file mode 100644 index 00000000000..7818bc628d8 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.execution.flowcontrol; + +/** + * This package contains classes for the execution flow handling. 
+ */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 6a5274ca01f..62e548f408b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.lock.OMLockDetails; @@ -555,8 +556,9 @@ public void close() { */ private OMResponse runCommand(OMRequest request, TermIndex termIndex) { try { + ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex); final OMClientResponse omClientResponse = handler.handleWriteRequest( - request, termIndex, ozoneManagerDoubleBuffer); + request, context, ozoneManagerDoubleBuffer); OMLockDetails omLockDetails = omClientResponse.getOmLockDetails(); OMResponse omResponse = omClientResponse.getOMResponse(); if (omLockDetails != null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index c9c664b303f..2fcb19f39d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -23,7 +23,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.helpers.OMAuditLogger; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -140,12 +140,14 @@ public void handleRequestFailure(OzoneManager ozoneManager) { * * @return the response that will be returned to the client. */ - public abstract OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex); + public abstract OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context); /** For testing only. 
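   * The production path in OzoneManagerStateMachine#runCommand builds the
   * context as ExecutionContext.of(termIndex.getIndex(), termIndex); this
   * test-only overload reconstructs the same context from a bare
   * transaction log index.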
*/ @VisibleForTesting public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex) { - return validateAndUpdateCache(ozoneManager, TransactionInfo.getTermIndex(transactionLogIndex)); + ExecutionContext context = ExecutionContext.of(transactionLogIndex, + TransactionInfo.getTermIndex(transactionLogIndex)); + return validateAndUpdateCache(ozoneManager, context); } @VisibleForTesting diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 3c21a2a851b..38a4d78b538 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.ClientVersion; @@ -162,8 +162,8 @@ private static void validateMaxBucket(OzoneManager ozoneManager) } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumBucketCreates(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 22e710dc911..c984c66a259 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -23,7 +23,7 @@ import java.util.Iterator; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; @@ -81,8 +81,8 @@ public OMBucketDeleteRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumBucketDeletes(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java index 2afab85e9ae..239083a58c8 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.bucket; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -77,8 +77,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); SetBucketPropertyRequest setBucketPropertyRequest = getOmRequest().getSetBucketPropertyRequest(); Preconditions.checkNotNull(setBucketPropertyRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 941b41ca49b..e76aa0d7093 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -25,7 +25,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -110,8 +110,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); SetBucketPropertyRequest setBucketPropertyRequest = getOmRequest().getSetBucketPropertyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index 89d9dbf5dfd..23c92b8ae54 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -25,7 +25,7 @@ import java.util.function.BiPredicate; import org.apache.commons.lang3.tuple.Pair; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import 
org.apache.hadoop.ozone.audit.AuditLogger; @@ -63,8 +63,8 @@ public OMBucketAclRequest(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); // protobuf guarantees acls are non-null. List ozoneAcls = getAcls(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java index c37f83f0666..4bbf94c6e67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetrics; @@ -133,9 +133,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumAddAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java index 018692d9f0c..0647f8d58bb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OzoneManager; @@ -132,9 +132,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumRemoveAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java index 813e5a7db3c..b94fbbc4363 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OzoneManager; @@ -131,9 +131,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumSetAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 732886fa0e6..cf07bc7d4d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -26,7 +26,7 @@ import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -122,8 +122,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateDirectoryRequest createDirectoryRequest = getOmRequest() .getCreateDirectoryRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 8bef8e17928..b8d17621701 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -76,8 +76,8 @@ public OMDirectoryCreateRequestWithFSO(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + 
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateDirectoryRequest createDirectoryRequest = getOmRequest() .getCreateDirectoryRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 08b25718288..f3df379103d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -29,7 +29,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -162,8 +162,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); KeyArgs keyArgs = createFileRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index c4967d5af1f..f64454d3962 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.file; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -68,8 +68,8 @@ public OMFileCreateRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); KeyArgs keyArgs = createFileRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java index 73019af112a..0ae92f806ad 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.OmFSOFile; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -53,7 +54,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.util.Time; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; @@ -128,7 +128,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { RecoverLeaseRequest recoverLeaseRequest = getOmRequest() .getRecoverLeaseRequest(); Preconditions.checkNotNull(recoverLeaseRequest); @@ -156,7 +156,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn acquiredLock = getOmLockDetails().isLockAcquired(); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - RecoverLeaseResponse recoverLeaseResponse = doWork(ozoneManager, termIndex.getIndex()); + RecoverLeaseResponse recoverLeaseResponse = doWork(ozoneManager, context.getIndex()); // Prepare response omResponse.setRecoverLeaseResponse(recoverLeaseResponse).setCmdType(RecoverLease); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index bbf5ec0afaf..b995f793453 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -26,7 +26,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.QuotaUtil; @@ -150,8 +150,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OzoneManagerProtocolProtos.AllocateBlockRequest allocateBlockRequest = getOmRequest().getAllocateBlockRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java 
index 9db04d48b57..cba650644ad 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -73,8 +73,8 @@ public OMAllocateBlockRequestWithFSO(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); AllocateBlockRequest allocateBlockRequest = getOmRequest().getAllocateBlockRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index 29ed5d9fc7b..b24253e6f67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -33,8 +33,8 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -62,7 +62,7 @@ public OMDirectoriesPurgeRequestWithFSO(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OzoneManagerProtocolProtos.PurgeDirectoriesRequest purgeDirsRequest = getOmRequest().getPurgeDirectoriesRequest(); String fromSnapshot = purgeDirsRequest.hasSnapshotTableKey() ? 
@@ -121,7 +121,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String ozoneDbKey = omMetadataManager.getOzonePathKey(path.getVolumeId(), path.getBucketId(), keyInfo.getParentObjectID(), keyInfo.getFileName()); omMetadataManager.getDirectoryTable().addCacheEntry(new CacheKey<>(ozoneDbKey), - CacheValue.get(termIndex.getIndex())); + CacheValue.get(context.getIndex())); volBucketInfoMap.putIfAbsent(volBucketPair, omBucketInfo); } } @@ -164,15 +164,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String ozoneDbKey = omMetadataManager.getOzonePathKey(path.getVolumeId(), path.getBucketId(), keyInfo.getParentObjectID(), keyInfo.getFileName()); omMetadataManager.getFileTable().addCacheEntry(new CacheKey<>(ozoneDbKey), - CacheValue.get(termIndex.getIndex())); + CacheValue.get(context.getIndex())); volBucketInfoMap.putIfAbsent(volBucketPair, omBucketInfo); } } } if (fromSnapshotInfo != null) { - fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + CacheValue.get(context.getIndex(), fromSnapshotInfo)); } } catch (IOException ex) { // Case of IOException for fromProtobuf will not happen diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 87d126de98a..25b09a203ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -29,7 +29,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneManagerVersion; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -133,8 +133,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index 2a712bd2763..c1e686d3ce0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -23,9 +23,9 @@ import com.google.common.annotations.VisibleForTesting; import 
org.apache.hadoop.ozone.om.helpers.WithMetadata; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.util.OmKeyHSyncUtil; import org.apache.hadoop.util.Time; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -76,8 +76,8 @@ public OMKeyCommitRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index e817901c22e..4ac619a3a47 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -29,7 +29,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.OzoneManagerVersion; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -186,8 +186,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); KeyArgs keyArgs = createKeyRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 87cc151351e..31e9d088f7e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.key; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -65,8 +65,8 @@ public OMKeyCreateRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long 
trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index db1adc13893..3885c18aff3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.OMPerformanceMetrics; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -108,8 +108,8 @@ protected KeyArgs resolveBucketAndCheckAcls(OzoneManager ozoneManager, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java index e8960cd02b1..19fed5d85db 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -71,8 +71,8 @@ public OMKeyDeleteRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index a5e8cb14525..5d0af563bb2 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -26,8 +26,8 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -61,7 +61,7 @@ public OMKeyPurgeRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); List bucketDeletedKeysList = purgeKeysRequest.getDeletedKeysList(); List keysToUpdateList = purgeKeysRequest.getKeysToUpdateList(); @@ -107,9 +107,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // services. try { if (fromSnapshotInfo != null) { - fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + CacheValue.get(context.getIndex(), fromSnapshotInfo)); } } catch (IOException e) { return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 35940f5a770..1c99fc1814a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -121,8 +121,8 @@ protected KeyArgs resolveBucketAndCheckAcls(KeyArgs keyArgs, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java 
index e57b6d99fd4..5919290062e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -76,8 +76,8 @@ public OMKeyRenameRequestWithFSO(OMRequest omRequest,
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
     KeyArgs keyArgs = renameKeyRequest.getKeyArgs();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java
index 78b5c258fa4..7c548029ce0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java
@@ -23,7 +23,7 @@
 import java.util.LinkedHashMap;
 import java.util.Map;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -173,8 +173,8 @@ protected void apply(OmKeyInfo omKeyInfo) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     ozoneManager.getMetrics().incNumSetTime();
     OmKeyInfo omKeyInfo;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java
index 5e3a00886b7..630e0987aed 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.ozone.om.request.key;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -63,8 +63,8 @@ public OMKeySetTimesRequestWithFSO(
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     OmKeyInfo omKeyInfo = null;
     OzoneManagerProtocolProtos.OMResponse.Builder omResponse = onInit();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
index e090d61afd3..27fcf55ef90 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
@@ -21,7 +21,7 @@
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.util.Time;
@@ -88,8 +88,8 @@ public OMKeysDeleteRequest(OMRequest omRequest, BucketLayout bucketLayout) {
   }
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     DeleteKeysRequest deleteKeyRequest = getOmRequest().getDeleteKeysRequest();
     OzoneManagerProtocolProtos.DeleteKeyArgs deleteKeyArgs =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
index 0d105c1d227..64da8241256 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.ozone.om.request.key;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -81,8 +81,8 @@ public OMKeysRenameRequest(OMRequest omRequest, BucketLayout bucketLayout) {
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     RenameKeysRequest renameKeysRequest = getOmRequest().getRenameKeysRequest();
     RenameKeysArgs renameKeysArgs = renameKeysRequest.getRenameKeysArgs();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java
index c1bc66ade90..f6f6a0cef23 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.ozone.om.request.key;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -60,8 +60,8 @@ public OMOpenKeysDeleteRequest(OMRequest omRequest,
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     OMMetrics omMetrics = ozoneManager.getMetrics();
     omMetrics.incNumOpenKeyDeleteRequests();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
index dbc94646330..c847caa9481 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -64,8 +64,8 @@ public OMKeyAclRequest(OMRequest omRequest) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     OmKeyInfo omKeyInfo = null;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
index 5df1c0c0042..8f9cbbc6d8e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key.acl;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -58,8 +58,8 @@ public OMKeyAclRequestWithFSO(OzoneManagerProtocolProtos.OMRequest omReq,
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     OmKeyInfo omKeyInfo = null;
     OzoneManagerProtocolProtos.OMResponse.Builder omResponse = onInit();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
index 90a6dfa31ad..b9269d0c7ab 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -149,9 +149,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumAddAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java
index 854fa60089f..a3edd9ebcb1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key.acl;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -133,9 +133,9 @@ public OMKeyAddAclRequestWithFSO(
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumAddAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
index 00af126e1e4..fd5f9ea63f4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -150,9 +150,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumRemoveAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
   /**
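Every hunk in this series makes the same mechanical substitution: `validateAndUpdateCache` now receives the OM's own `ExecutionContext` instead of the raw Ratis `TermIndex`, and each request reads the transaction log index through `context.getIndex()`. Below is a minimal sketch of the wrapper these call sites assume; only `getIndex()` and `getTermIndex()` are taken from the call sites in this patch, while the constructor and `of` factory are hypothetical.

```java
// Hypothetical sketch of org.apache.hadoop.ozone.om.execution.flowcontrol
// .ExecutionContext, assuming it simply wraps the Ratis TermIndex.
import org.apache.ratis.server.protocol.TermIndex;

public final class ExecutionContext {
  private final TermIndex termIndex;

  private ExecutionContext(TermIndex termIndex) {
    this.termIndex = termIndex;
  }

  // Hypothetical factory; the real class may be built differently.
  public static ExecutionContext of(TermIndex termIndex) {
    return new ExecutionContext(termIndex);
  }

  /** Transaction log index, previously read via TermIndex#getIndex(). */
  public long getIndex() {
    return termIndex.getIndex();
  }

  /** Full term/index pair, still needed e.g. by TransactionInfo.valueOf. */
  public TermIndex getTermIndex() {
    return termIndex;
  }
}
```

Keeping the Ratis type behind an OM-owned wrapper means request handlers no longer depend on a Ratis server class directly, which fits the series' goal of decoupling OM request execution from Ratis.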
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java
index e4ba84bf4e9..171b2ed277b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key.acl;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -143,9 +143,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumRemoveAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
index 2f50fde5cb9..674dca2581b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -146,9 +146,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumSetAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java
index 2fb2aee0bb7..13c95db5f18 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key.acl;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -136,9 +136,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumSetAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
index a8490b11152..0369c0bbbcc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
@@ -53,8 +53,8 @@ public OMPrefixAclRequest(OMRequest omRequest) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     OmPrefixInfo omPrefixInfo = null;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java
index a2f68a13774..85dc33b18c4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java
@@ -20,7 +20,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -73,8 +73,8 @@ public S3ExpiredMultipartUploadsAbortRequest(OMRequest omRequest) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     OMMetrics omMetrics = ozoneManager.getMetrics();
     omMetrics.incNumExpiredMPUAbortRequests();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index 0a2703c769e..de9ff1db343 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
@@ -111,8 +111,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     MultipartInfoInitiateRequest multipartInfoInitiateRequest =
         getOmRequest().getInitiateMultiPartUploadRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index d55a7b41918..962ac06f4d7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -68,8 +68,8 @@ public S3InitiateMultipartUploadRequestWithFSO(OMRequest omRequest,
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     MultipartInfoInitiateRequest multipartInfoInitiateRequest =
         getOmRequest().getInitiateMultiPartUploadRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 268c92dbd6e..c44d95492c8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
@@ -100,8 +100,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     MultipartUploadAbortRequest multipartUploadAbortRequest = getOmRequest()
         .getAbortMultiPartUploadRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 16ab458a014..4997af5d7d5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -105,8 +105,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
         getOmRequest().getCommitMultiPartUploadRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 2bb77005c95..17b96eaf9d9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -33,9 +33,9 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -140,8 +140,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     MultipartUploadCompleteRequest multipartUploadCompleteRequest =
         getOmRequest().getCompleteMultiPartUploadRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java
index 58809723125..e9a1c7bb04c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.s3.security;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -100,7 +100,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     OMClientResponse omClientResponse = null;
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
@@ -124,7 +124,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
         // Update S3SecretTable cache entry in this case
         // Set the transactionLogIndex to be used for updating.
-        final S3SecretValue newS3SecretValue = S3SecretValue.of(accessId, secretKey, termIndex.getIndex());
+        final S3SecretValue newS3SecretValue = S3SecretValue.of(accessId, secretKey, context.getIndex());
         s3SecretManager.updateCache(accessId, newS3SecretValue);
         // Compose response
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
index 31df897513e..b7f4be1c5e3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
@@ -25,7 +25,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.OMMultiTenantManager;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.slf4j.Logger;
@@ -127,7 +127,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     OMClientResponse omClientResponse = null;
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
@@ -157,7 +157,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
         // Not found in S3SecretTable.
         if (createIfNotExist) {
           // Add new entry in this case
-          assignS3SecretValue = S3SecretValue.of(accessId, awsSecret.get(), termIndex.getIndex());
+          assignS3SecretValue = S3SecretValue.of(accessId, awsSecret.get(), context.getIndex());
           // Add cache entry first.
           s3SecretManager.updateCache(accessId, assignS3SecretValue);
         } else {
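The two S3 secret requests above share one pattern once the switch is made: the secret value is stamped with the context's index before the cache update. A hedged sketch of that shared step follows; the wrapper method and parameter names are hypothetical, while the `S3SecretValue.of` and `updateCache` calls are taken verbatim from the hunks.

```java
// Hypothetical helper illustrating the shared S3 secret pattern; the types
// S3SecretManager, S3SecretValue and ExecutionContext come from the Ozone
// codebase as used in the surrounding diff.
void cacheNewSecret(S3SecretManager s3SecretManager, String accessId,
    String secretKey, ExecutionContext context) {
  // Stamp the new secret with the transaction log index from the context.
  final S3SecretValue newS3SecretValue =
      S3SecretValue.of(accessId, secretKey, context.getIndex());
  // Per the diff's own comment, the cache entry is added first; the DB
  // write happens later in the response path.
  s3SecretManager.updateCache(accessId, newS3SecretValue);
}
```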
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java
index a1077c0e70e..c2cd011405d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.s3.security;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -78,7 +78,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     OMClientResponse omClientResponse = null;
     OMResponse.Builder omResponse =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java
index 6146e1ac105..9199494a4ea 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.OMClientRequestUtils;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -38,7 +39,6 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -86,8 +86,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     DeleteObjectTaggingRequest deleteObjectTaggingRequest =
         getOmRequest().getDeleteObjectTaggingRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java
index fb0561702a6..f80c79e8728 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -36,7 +37,6 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -59,8 +59,8 @@ public S3DeleteObjectTaggingRequestWithFSO(OMRequest omRequest,
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     DeleteObjectTaggingRequest deleteObjectTaggingRequest =
         getOmRequest().getDeleteObjectTaggingRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java
index aab67830383..4cfbe68a183 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.OMClientRequestUtils;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -39,7 +40,6 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -87,8 +87,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     PutObjectTaggingRequest putObjectTaggingRequest =
         getOmRequest().getPutObjectTaggingRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java
index 2b6ca8601cb..a7b7c363f06 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -37,7 +38,6 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,8 +60,8 @@ public S3PutObjectTaggingRequestWithFSO(OMRequest omRequest,
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     PutObjectTaggingRequest putObjectTaggingRequest =
         getOmRequest().getPutObjectTaggingRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java
index 189d39e52cb..ff866bb396e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java
@@ -18,7 +18,7 @@
  */
 package org.apache.hadoop.ozone.om.request.s3.tenant;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -52,7 +52,7 @@ public OMSetRangerServiceVersionRequest(OMRequest omRequest) {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     OMClientResponse omClientResponse;
     final OMResponse.Builder omResponse =
@@ -65,7 +65,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
     omMetadataManager.getMetaTable().addCacheEntry(
         new CacheKey<>(OzoneConsts.RANGER_OZONE_SERVICE_VERSION_KEY),
-        CacheValue.get(termIndex.getIndex(), proposedVersionStr));
+        CacheValue.get(context.getIndex(), proposedVersionStr));
     omResponse.setSetRangerServiceVersionResponse(
         SetRangerServiceVersionResponse.newBuilder().build());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java
index 15292f61c65..ecea3e6cd14 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java
@@ -20,7 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -150,7 +150,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("checkstyle:methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     final OMMultiTenantManager multiTenantManager =
         ozoneManager.getMultiTenantManager();
@@ -205,7 +205,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
           .build();
       omMetadataManager.getTenantAccessIdTable().addCacheEntry(
           new CacheKey<>(accessId),
-          CacheValue.get(termIndex.getIndex(), newOmDBAccessIdInfo));
+          CacheValue.get(context.getIndex(), newOmDBAccessIdInfo));
       // Update tenant cache
       multiTenantManager.getCacheOp().assignTenantAdmin(accessId, delegated);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java
index aefd056715e..3508ba51f13 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java
@@ -20,7 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OmUtils;
@@ -192,8 +192,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("checkstyle:methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     final OMMultiTenantManager multiTenantManager =
         ozoneManager.getMultiTenantManager();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java
index 4cab83c0a33..b68279683a5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tenant;
 import com.google.common.base.Preconditions;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -212,8 +212,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     final OMMultiTenantManager multiTenantManager =
         ozoneManager.getMultiTenantManager();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
index fa630183006..7cc7f109d43 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tenant;
 import com.google.common.base.Preconditions;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -103,8 +103,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     final OMMultiTenantManager multiTenantManager =
         ozoneManager.getMultiTenantManager();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java
index ba82ab12144..21e7cc57671 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java
@@ -20,7 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -141,7 +141,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   @Override
   @SuppressWarnings("checkstyle:methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     final OMMultiTenantManager multiTenantManager =
         ozoneManager.getMultiTenantManager();
@@ -195,7 +195,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
           .build();
      omMetadataManager.getTenantAccessIdTable().addCacheEntry(
           new CacheKey<>(accessId),
-          CacheValue.get(termIndex.getIndex(), newOmDBAccessIdInfo));
+          CacheValue.get(context.getIndex(), newOmDBAccessIdInfo));
       // Update tenant cache
       multiTenantManager.getCacheOp().revokeTenantAdmin(accessId);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
index d2b55678490..5787b44f835 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
@@ -20,7 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -152,8 +152,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     final OMMultiTenantManager multiTenantManager =
         ozoneManager.getMultiTenantManager();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
index 0ee5f6ab6b2..f57b9db5d38 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.security;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -85,8 +85,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
     Token<OzoneTokenIdentifier> token = getToken();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
index 8e2c56ab2cd..077e2bde28e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.security;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -129,7 +129,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     UpdateGetDelegationTokenRequest updateGetDelegationTokenRequest =
         getOmRequest().getUpdateGetDelegationTokenRequest();
@@ -180,7 +180,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
       // Update Cache.
       omMetadataManager.getDelegationTokenTable().addCacheEntry(
           new CacheKey<>(ozoneTokenIdentifier),
-          CacheValue.get(termIndex.getIndex(), renewTime));
+          CacheValue.get(context.getIndex(), renewTime));
       omClientResponse =
           new OMGetDelegationTokenResponse(ozoneTokenIdentifier, renewTime,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
index 7985c762d6f..e25bc57ec39 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
@@ -22,7 +22,7 @@
 import java.nio.file.InvalidPathException;
 import java.util.Map;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -127,7 +127,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     UpdateRenewDelegationTokenRequest updateRenewDelegationTokenRequest =
         getOmRequest().getUpdatedRenewDelegationTokenRequest();
@@ -166,7 +166,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
       // Update Cache.
      omMetadataManager.getDelegationTokenTable().addCacheEntry(
           new CacheKey<>(ozoneTokenIdentifier),
-          CacheValue.get(termIndex.getIndex(), renewTime));
+          CacheValue.get(context.getIndex(), renewTime));
       omClientResponse =
           new OMRenewDelegationTokenResponse(ozoneTokenIdentifier, renewTime,
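Both delegation-token requests above update the token table cache the same way, with the `CacheValue` versioned by the context's index. A sketch under the same caveat as before (the wrapper method is hypothetical; the `addCacheEntry` call is taken from the hunks):

```java
// Hypothetical wrapper around the delegation-token cache update shown in the
// hunks above; OzoneTokenIdentifier and OMMetadataManager are existing Ozone
// types referenced by the surrounding diff.
void cacheRenewTime(OMMetadataManager omMetadataManager,
    OzoneTokenIdentifier ozoneTokenIdentifier, long renewTime,
    ExecutionContext context) {
  omMetadataManager.getDelegationTokenTable().addCacheEntry(
      new CacheKey<>(ozoneTokenIdentifier),
      CacheValue.get(context.getIndex(), renewTime));
}
```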
snapshotInfo.setReferencedSize(estimateBucketDataSize(omBucketInfo)); - addSnapshotInfoToSnapshotChainAndCache(omMetadataManager, termIndex.getIndex()); + addSnapshotInfoToSnapshotChainAndCache(omMetadataManager, context.getIndex()); omResponse.setCreateSnapshotResponse( CreateSnapshotResponse.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java index 95f99c627c4..b6832545ada 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java @@ -20,7 +20,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.om.ResolvedBucket; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -115,7 +115,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumSnapshotDeletes(); @@ -185,7 +185,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(tableKey), - CacheValue.get(termIndex.getIndex(), snapshotInfo)); + CacheValue.get(context.getIndex(), snapshotInfo)); omResponse.setDeleteSnapshotResponse( DeleteSnapshotResponse.newBuilder()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java index 18055bdda40..108128b4a09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; @@ -61,7 +61,7 @@ public OMSnapshotMoveDeletedKeysRequest(OMRequest omRequest) { @Override @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -91,13 +91,13 @@ public 
OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn List movedDirs = moveDeletedKeysRequest.getDeletedDirsToMoveList(); // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. - fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshot)); + CacheValue.get(context.getIndex(), fromSnapshot)); if (nextSnapshot != null) { - nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), nextSnapshot)); + CacheValue.get(context.getIndex(), nextSnapshot)); } omClientResponse = new OMSnapshotMoveDeletedKeysResponse( omResponse.build(), fromSnapshot, nextSnapshot, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java index 0eb0d3cd166..ef9c0261d63 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -38,7 +39,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -143,7 +143,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); @@ -164,13 +164,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. 
-    fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString());
+    fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString());
     omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()),
-        CacheValue.get(termIndex.getIndex(), fromSnapshot));
+        CacheValue.get(context.getIndex(), fromSnapshot));
     if (nextSnapshot != null) {
-      nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString());
+      nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString());
       omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()),
-          CacheValue.get(termIndex.getIndex(), nextSnapshot));
+          CacheValue.get(context.getIndex(), nextSnapshot));
     }
     omClientResponse = new OMSnapshotMoveTableKeysResponse(omResponse.build(), fromSnapshot, nextSnapshot,
         moveTableKeysRequest.getDeletedKeysList(), moveTableKeysRequest.getDeletedDirsList(),
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
index 62fbb39417b..af701d361f1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
@@ -67,10 +67,10 @@ public OMSnapshotPurgeRequest(OMRequest omRequest) {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     OMMetrics omMetrics = ozoneManager.getMetrics();
 
-    final long trxnLogIndex = termIndex.getIndex();
+    final long trxnLogIndex = context.getIndex();
 
     OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl)
         ozoneManager.getMetadataManager();
@@ -116,9 +116,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
     }
 
     // Update the snapshotInfo lastTransactionInfo.
     for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) {
-      snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString());
+      snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString());
       omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()),
-          CacheValue.get(termIndex.getIndex(), snapshotInfo));
+          CacheValue.get(context.getIndex(), snapshotInfo));
     }
     omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java
index 8cf0579647c..0c721c1035f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.ozone.om.ResolvedBucket;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -52,7 +53,6 @@
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
-import org.apache.ratis.server.protocol.TermIndex;
 
 /**
  * Changes snapshot name.
@@ -111,8 +111,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     boolean acquiredBucketLock = false;
     boolean acquiredSnapshotOldLock = false;
     boolean acquiredSnapshotNewLock = false;
@@ -188,11 +187,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
 
       omMetadataManager.getSnapshotInfoTable().addCacheEntry(
           new CacheKey<>(snapshotOldTableKey),
-          CacheValue.get(termIndex.getIndex()));
+          CacheValue.get(context.getIndex()));
 
       omMetadataManager.getSnapshotInfoTable().addCacheEntry(
           new CacheKey<>(snapshotNewTableKey),
-          CacheValue.get(termIndex.getIndex(), snapshotOldInfo));
+          CacheValue.get(context.getIndex(), snapshotOldInfo));
 
       omMetadataManager.getSnapshotChainManager().updateSnapshot(snapshotOldInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java
index 53047fd8026..5fb6cb71c36 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.snapshot;
 
 import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -51,7 +51,7 @@ public OMSnapshotSetPropertyRequest(OMRequest omRequest) {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     OMMetrics omMetrics = ozoneManager.getMetrics();
     OMClientResponse omClientResponse;
 
@@ -98,7 +98,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
       // Update Table Cache
       metadataManager.getSnapshotInfoTable().addCacheEntry(
           new CacheKey<>(snapshotKey),
-          CacheValue.get(termIndex.getIndex(), updatedSnapInfo));
+          CacheValue.get(context.getIndex(), updatedSnapInfo));
       omClientResponse = new OMSnapshotSetPropertyResponse(
           omResponse.build(), updatedSnapInfo);
       omMetrics.incNumSnapshotSetProperties();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java
index c7b348c06f0..2a334d8c99c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java
@@ -20,8 +20,8 @@
 import java.util.HashMap;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
@@ -53,9 +53,9 @@ public OMCancelPrepareRequest(OMRequest omRequest) {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
 
-    LOG.info("OM {} Received cancel prepare request with log {}", ozoneManager.getOMNodeId(), termIndex);
+    LOG.info("OM {} Received cancel prepare request with log {}", ozoneManager.getOMNodeId(), context.getTermIndex());
 
     OMRequest omRequest = getOmRequest();
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
@@ -87,7 +87,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
       ozoneManager.getPrepareState().cancelPrepare();
 
       LOG.info("OM {} prepare state cancelled at log {}. Returning response {}",
-          ozoneManager.getOMNodeId(), termIndex, omResponse);
+          ozoneManager.getOMNodeId(), context.getTermIndex(), omResponse);
     } catch (IOException e) {
       exception = e;
       LOG.error("Cancel Prepare Request apply failed in {}. ",
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
index 866e7b6c67f..580d06a147f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
@@ -25,8 +25,8 @@
     .UpgradeFinalizationStatus;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -60,7 +60,7 @@ public OMFinalizeUpgradeRequest(OMRequest omRequest) {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     LOG.trace("Request: {}", getOmRequest());
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
     OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
@@ -99,7 +99,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
       int lV = ozoneManager.getVersionManager().getMetadataLayoutVersion();
       omMetadataManager.getMetaTable().addCacheEntry(
           new CacheKey<>(LAYOUT_VERSION_KEY),
-          CacheValue.get(termIndex.getIndex(), String.valueOf(lV)));
+          CacheValue.get(context.getIndex(), String.valueOf(lV)));
 
       FinalizeUpgradeResponse omResponse =
           FinalizeUpgradeResponse.newBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
index f7c223eae09..654ee55b16d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
@@ -20,7 +20,7 @@
 import java.util.HashMap;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
@@ -66,10 +66,10 @@ public OMPrepareRequest(OMRequest omRequest) {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
 
-    LOG.info("OM {} Received prepare request with log {}", ozoneManager.getOMNodeId(), termIndex);
+    LOG.info("OM {} Received prepare request with log {}", ozoneManager.getOMNodeId(), context.getTermIndex());
 
     OMRequest omRequest = getOmRequest();
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
@@ -104,7 +104,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
       // the snapshot index in the prepared state.
      OzoneManagerDoubleBuffer doubleBuffer =
          ozoneManager.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer();
-      doubleBuffer.add(response, termIndex);
+      doubleBuffer.add(response, context.getTermIndex());
 
       OzoneManagerRatisServer omRatisServer = ozoneManager.getOmRatisServer();
       final RaftServer.Division division = omRatisServer.getServerDivision();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java
index e15782acafd..6d9740f399a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.util;
 
 import com.google.protobuf.ByteString;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.util.PayloadUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
@@ -40,7 +40,7 @@ public OMEchoRPCWriteRequest(OMRequest omRequest) {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     EchoRPCRequest echoRPCRequest =
         getOmRequest().getEchoRPCRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java
index e307a1f95fd..5eadd47595f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -40,7 +41,6 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -72,8 +72,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
 
   @Override
   @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     OzoneManagerProtocolProtos.QuotaRepairRequest quotaRepairRequest =
         getOmRequest().getQuotaRepairRequest();
     Preconditions.checkNotNull(quotaRepairRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
index a22775107b9..f6cb32a45d5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
@@ -28,8 +28,8 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -98,8 +98,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
 
     CreateVolumeRequest createVolumeRequest =
         getOmRequest().getCreateVolumeRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
index d2db7ed3d4e..9f1ad0f30c7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
@@ -22,7 +22,7 @@
 import java.nio.file.InvalidPathException;
 
 import com.google.common.base.Preconditions;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos;
 import org.slf4j.Logger;
@@ -65,8 +65,8 @@ public OMVolumeDeleteRequest(OMRequest omRequest) {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
 
     DeleteVolumeRequest deleteVolumeRequest =
         getOmRequest().getDeleteVolumeRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
index 8481f2201fa..090b0186974 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.om.request.volume;
 
 import com.google.common.base.Preconditions;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -76,8 +76,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
     SetVolumePropertyRequest setVolumePropertyRequest =
         getOmRequest().getSetVolumePropertyRequest();
     Preconditions.checkNotNull(setVolumePropertyRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
index 7a962a0e2b5..2174acf63e6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
@@ -24,7 +24,7 @@
 import java.util.Map;
 
 import com.google.common.base.Preconditions;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -83,8 +83,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long transactionLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long transactionLogIndex = context.getIndex();
 
     SetVolumePropertyRequest setVolumePropertyRequest =
         getOmRequest().getSetVolumePropertyRequest();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
index b431d70fa7e..5a83720e0b0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.volume.acl;
 
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -66,8 +66,8 @@ public interface VolumeAclOp extends
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
-    final long trxnLogIndex = termIndex.getIndex();
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
+    final long trxnLogIndex = context.getIndex();
     // protobuf guarantees volume and acls are non-null.
     String volume = getVolumeName();
     List<OzoneAcl> ozoneAcls = getAcls();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
index 3eff4da0caf..3fad018ea8d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
@@ -19,7 +19,7 @@
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -141,8 +141,8 @@ void onComplete(Result result, Exception ex, long trxnLogIndex,
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumAddAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
index e0b14b4e2b2..2d862dbad4b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
@@ -19,7 +19,7 @@
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -140,8 +140,8 @@ void onComplete(Result result, Exception ex, long trxnLogIndex,
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumRemoveAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
index 687210982f7..53ba7778c89 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.volume.acl;
 
 import com.google.common.base.Preconditions;
-import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -138,8 +138,8 @@ void onComplete(Result result, Exception ex, long trxnLogIndex,
   }
 
   @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) {
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
     ozoneManager.getMetrics().incNumSetAcl();
-    return super.validateAndUpdateCache(ozoneManager, termIndex);
+    return super.validateAndUpdateCache(ozoneManager, context);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 654610f81dc..91d234d4d0b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException;
 import org.apache.hadoop.ozone.om.helpers.OMAuditLogger;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus;
@@ -303,7 +304,8 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) {
           throw ex;
         }
         final TermIndex termIndex = TransactionInfo.getTermIndex(transactionIndex.incrementAndGet());
-        omClientResponse = handler.handleWriteRequest(request, termIndex, ozoneManagerDoubleBuffer);
+        final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex);
+        omClientResponse = handler.handleWriteRequest(request, context, ozoneManagerDoubleBuffer);
       }
     } catch (IOException ex) {
       // As some preExecute returns error. So handle here.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index ab1f68d9928..09865ace27a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.helpers.OMAuditLogger;
 import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse;
 import org.apache.hadoop.ozone.util.PayloadUtils;
@@ -170,7 +171,6 @@
 import org.apache.hadoop.ozone.snapshot.ListSnapshotResponse;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
 import org.apache.hadoop.ozone.util.ProtobufUtils;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -420,19 +420,20 @@ public OMResponse handleReadRequest(OMRequest request) {
   }
 
   @Override
-  public OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException {
+  public OMClientResponse handleWriteRequestImpl(OMRequest omRequest, ExecutionContext context) throws IOException {
     injectPause();
     OMClientRequest omClientRequest =
         OzoneManagerRatisUtils.createClientRequest(omRequest, impl);
     try {
       OMClientResponse omClientResponse = captureLatencyNs(
           impl.getPerfMetrics().getValidateAndUpdateCacheLatencyNs(),
-          () -> Objects.requireNonNull(omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex),
+          () -> Objects.requireNonNull(omClientRequest.validateAndUpdateCache(getOzoneManager(), context),
              "omClientResponse returned by validateAndUpdateCache cannot be null"));
-      OMAuditLogger.log(omClientRequest.getAuditBuilder(), termIndex);
+      OMAuditLogger.log(omClientRequest.getAuditBuilder(), context.getTermIndex());
       return omClientResponse;
     } catch (Throwable th) {
-      OMAuditLogger.log(omClientRequest.getAuditBuilder(), omClientRequest, getOzoneManager(), termIndex, th);
+      OMAuditLogger.log(omClientRequest.getAuditBuilder(), omClientRequest, getOzoneManager(), context.getTermIndex(),
+          th);
       throw th;
     }
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
index 76546f2e480..033911364d8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
@@ -18,12 +18,12 @@
 package org.apache.hadoop.ozone.protocolPB;
 
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.ratis.server.protocol.TermIndex;
 
 import java.io.IOException;
 
@@ -54,15 +54,15 @@ public interface RequestHandler {
    * In non-HA this will be called from {@link OzoneManagerProtocolServerSideTranslatorPB}.
   *
   * @param omRequest the write request
-   * @param termIndex - ratis transaction term and index
+   * @param context - context containing ratis term and index
   * @param ozoneManagerDoubleBuffer for adding response
   * @return OMClientResponse
   */
-  default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex,
+  default OMClientResponse handleWriteRequest(OMRequest omRequest, ExecutionContext context,
       OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) throws IOException {
-    final OMClientResponse response = handleWriteRequestImpl(omRequest, termIndex);
+    final OMClientResponse response = handleWriteRequestImpl(omRequest, context);
     if (omRequest.getCmdType() != Type.Prepare) {
-      ozoneManagerDoubleBuffer.add(response, termIndex);
+      ozoneManagerDoubleBuffer.add(response, context.getTermIndex());
     }
     return response;
   }
@@ -71,8 +71,8 @@ default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termI
   * Implementation of {@link #handleWriteRequest}.
   *
   * @param omRequest the write request
-   * @param termIndex - ratis transaction term and index
+   * @param context - context containing ratis term and index
   * @return OMClientResponse
   */
-  OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException;
+  OMClientResponse handleWriteRequestImpl(OMRequest omRequest, ExecutionContext context) throws IOException;
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index eb13f97d237..6b9f93e08c9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
 import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest;
@@ -316,7 +317,8 @@ private OMClientResponse deleteBucket(String volumeName, String bucketName,
         new OMBucketDeleteRequest(omRequest);
 
     final TermIndex termIndex = TermIndex.valueOf(term, transactionID);
-    OMClientResponse omClientResponse = omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, termIndex);
+    final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex);
+    OMClientResponse omClientResponse = omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, context);
     doubleBuffer.add(omClientResponse, termIndex);
     return omClientResponse;
   }
@@ -459,7 +461,8 @@ private OMClientResponse createVolume(String volumeName,
     }
 
     final TermIndex termIndex = TransactionInfo.getTermIndex(transactionId);
-    OMClientResponse omClientResponse = omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, termIndex);
+    final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex);
+    OMClientResponse omClientResponse = omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, context);
     doubleBuffer.add(omClientResponse, termIndex);
     return omClientResponse;
   }
@@ -485,7 +488,8 @@ private OMBucketCreateResponse createBucket(String volumeName,
     }
 
     final TermIndex termIndex = TermIndex.valueOf(term, transactionID);
-    OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, termIndex);
+    final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex);
+    OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, context);
     doubleBuffer.add(omClientResponse, termIndex);
    return (OMBucketCreateResponse) omClientResponse;
  }
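
Note on the API migration above: the hunks only ever touch three members of the new `ExecutionContext`, the static factory `ExecutionContext.of(long, TermIndex)` used by the server-side translator and the double-buffer test, and the accessors `getIndex()` and `getTermIndex()` used by the request classes. The class itself is not part of this patch; the following is a minimal sketch of the shape those call sites imply, and everything beyond those three observed members (field names, javadoc, null checks) is an assumption rather than the actual `org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext` source.

```java
import java.util.Objects;
import org.apache.ratis.server.protocol.TermIndex;

/**
 * Hypothetical stand-in for the ExecutionContext referenced in this patch,
 * reconstructed only from its call sites: ExecutionContext.of(index, termIndex),
 * context.getIndex(), and context.getTermIndex().
 */
public final class ExecutionContext {
  // Transaction log index; the requests use it as the epoch for table cache entries.
  private final long index;
  // Ratis term and index; recorded in TransactionInfo and passed to the double buffer.
  private final TermIndex termIndex;

  private ExecutionContext(long index, TermIndex termIndex) {
    this.index = index;
    this.termIndex = Objects.requireNonNull(termIndex, "termIndex == null");
  }

  public static ExecutionContext of(long index, TermIndex termIndex) {
    return new ExecutionContext(index, termIndex);
  }

  public long getIndex() {
    return index;
  }

  public TermIndex getTermIndex() {
    return termIndex;
  }
}
```

Under that reading, wrapping both values in a single immutable holder keeps every `validateAndUpdateCache` signature stable even if the execution flow later derives the transaction index without a Ratis log entry, which appears to be the purpose of the new flowcontrol package.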