From 1774ca99da17f8702b0844bfed4b626bd018c895 Mon Sep 17 00:00:00 2001 From: elmiomar Date: Tue, 17 Dec 2024 11:45:07 -0500 Subject: [PATCH 1/8] fix package name --- .../java/gov/nist/oar/distrib/cachemgr/pdr/package-info.java | 1 + .../oar/distrib/cachemgr/storage/FilesystemCacheVolumeTest.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/main/java/gov/nist/oar/distrib/cachemgr/pdr/package-info.java b/src/main/java/gov/nist/oar/distrib/cachemgr/pdr/package-info.java index 6ed8540f..972cac7c 100644 --- a/src/main/java/gov/nist/oar/distrib/cachemgr/pdr/package-info.java +++ b/src/main/java/gov/nist/oar/distrib/cachemgr/pdr/package-info.java @@ -1,3 +1,4 @@ +package gov.nist.oar.distrib.cachemgr.pdr; /* * This software was developed at the National Institute of Standards and Technology by employees of * the Federal Government in the course of their official duties. Pursuant to title 17 Section 105 diff --git a/src/test/java/gov/nist/oar/distrib/cachemgr/storage/FilesystemCacheVolumeTest.java b/src/test/java/gov/nist/oar/distrib/cachemgr/storage/FilesystemCacheVolumeTest.java index 4258e4d1..f2caeb20 100644 --- a/src/test/java/gov/nist/oar/distrib/cachemgr/storage/FilesystemCacheVolumeTest.java +++ b/src/test/java/gov/nist/oar/distrib/cachemgr/storage/FilesystemCacheVolumeTest.java @@ -11,7 +11,7 @@ * * @author: Raymond Plante */ -package gov.nist.oar.distrib.cachemgr.inventory; +package gov.nist.oar.distrib.cachemgr.storage; import org.junit.Test; import org.junit.Before; From 610584105b2643fcb7e04ae0f3bd03b86cf84330 Mon Sep 17 00:00:00 2001 From: elmiomar Date: Tue, 17 Dec 2024 13:24:07 -0500 Subject: [PATCH 2/8] migrate aws from v1 to v2 --- pom.xml | 84 ++-- .../cachemgr/storage/AWSS3CacheVolume.java | 401 ++++++++++------- .../distrib/storage/AWSS3ClientProvider.java | 130 +++--- .../distrib/storage/AWSS3LongTermStorage.java | 403 +++++------------- .../oar/distrib/web/CacheManagerProvider.java | 6 +- 
.../distrib/web/NISTCacheManagerConfig.java | 8 +- .../distrib/web/NISTDistribServiceConfig.java | 101 +++-- .../web/RPACachingServiceProvider.java | 31 +- .../distrib/web/RPADataCachingController.java | 5 +- .../web/RPARequestHandlerController.java | 8 +- .../storage/AWSS3CacheVolumeTest.java | 375 +++++++++------- .../storage/AWSS3ClientProviderTest.java | 206 +++++---- .../storage/AWSS3LongTermStorageTest.java | 163 ++++--- .../distrib/web/CacheVolumeConfigTest.java | 4 - 14 files changed, 996 insertions(+), 929 deletions(-) diff --git a/pom.xml b/pom.xml index 9e067385..9ffb1efa 100644 --- a/pom.xml +++ b/pom.xml @@ -1,6 +1,7 @@ - + 4.0.0 oar-dist-service oar-dist-service @@ -13,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 2.1.0.RELEASE + 2.1.0.RELEASE @@ -21,7 +22,7 @@ UTF-8 Greenwich.RELEASE - + java,xml,css,web jacoco @@ -55,15 +56,18 @@ spring-cloud-aws-context --> + - com.amazonaws - aws-java-sdk-core - 1.12.100 + software.amazon.awssdk + s3 + 2.25.16 + + - com.amazonaws - aws-java-sdk-s3 - 1.12.261 + software.amazon.awssdk + aws-sdk-java + 2.29.35 @@ -90,6 +94,12 @@ org.springframework.boot spring-boot-starter-test test + + + com.vaadin.external.google + android-json + + org.springframework.boot @@ -97,53 +107,53 @@ - org.springframework - spring-test - test + org.springframework + spring-test + test org.springframework.data spring-data-commons - + org.springframework.boot spring-boot-starter-security - + commons-io commons-io - 2.8.0 + 2.17.0 - org.apache.commons - commons-lang3 - 3.12.0 + org.apache.commons + commons-lang3 + 3.12.0 - org.apache.commons - commons-lang3 - 3.8.1 + org.apache.commons + commons-lang3 + 3.8.1 - javax.inject - javax.inject - 1 + javax.inject + javax.inject + 1 - org.xerial - sqlite-jdbc - 3.41.2.2 + org.xerial + sqlite-jdbc + 3.41.2.2 - org.springdoc + org.springdoc springdoc-openapi-ui 1.5.10 - + org.springframework.boot spring-boot-devtools @@ -211,7 +221,7 @@ ${java.version} -Xlint:deprecation - 
-Xlint:unchecked + -Xlint:unchecked @@ -246,7 +256,7 @@ org.apache.maven.plugins maven-surefire-plugin - + ${surefireArgLine} -Djava.io.tmpdir=${basedir}/target/tmp @@ -255,8 +265,8 @@ 0 - ${user.home}/spm - ${project.basedir}/src/test/resources + ${user.home}/spm + ${project.basedir}/src/test/resources false @@ -327,13 +337,13 @@ org.apache.maven.plugins maven-javadoc-plugin - + protected ${basedir}/src/main/etc/stylesheet.css - + - + \ No newline at end of file diff --git a/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java b/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java index 7af0235c..6e5e9974 100644 --- a/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java +++ b/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java @@ -14,6 +14,20 @@ package gov.nist.oar.distrib.cachemgr.storage; import gov.nist.oar.distrib.cachemgr.CacheVolume; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; import gov.nist.oar.distrib.cachemgr.CacheObject; import gov.nist.oar.distrib.StorageVolumeException; import 
gov.nist.oar.distrib.StorageStateException; @@ -24,23 +38,10 @@ import java.io.ByteArrayInputStream; import java.io.FileNotFoundException; import java.net.URL; +import java.time.Duration; import java.net.MalformedURLException; import org.json.JSONObject; -import org.json.JSONException; -import org.slf4j.LoggerFactory; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.HeadBucketRequest; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.transfer.TransferManager; -import com.amazonaws.services.s3.transfer.TransferManagerBuilder; -import com.amazonaws.services.s3.transfer.Upload; /** * an implementation of the CacheVolume interface that stores its data @@ -56,7 +57,7 @@ public class AWSS3CacheVolume implements CacheVolume { public final String bucket; public final String folder; public final String name; - protected AmazonS3 s3client = null; + protected S3Client s3client = null; protected String baseurl = null; /** @@ -73,15 +74,15 @@ public class AWSS3CacheVolume implements CacheVolume { * not be automatically inserted; if a slash is needed, * it should be included as part of this base URL. * @throws FileNotFoundException if the specified bucket does not exist - * @throws AmazonServiceException if there is a problem accessing the S3 service. While + * @throws SdkServiceException if there is a problem accessing the S3 service. While * this is a runtime exception that does not have to be caught * by the caller, catching it is recommended to address * connection problems early. 
* @throws MalformedURLException if the given redirectBaseURL cannot be used to form * legal URLs */ - public AWSS3CacheVolume(String bucketname, String folder, AmazonS3 s3, String redirectBaseURL) - throws FileNotFoundException, AmazonServiceException, MalformedURLException + public AWSS3CacheVolume(String bucketname, String folder, S3Client s3, String redirectBaseURL) + throws FileNotFoundException, SdkServiceException, MalformedURLException { this(bucketname, folder, null, s3, redirectBaseURL); } @@ -101,15 +102,15 @@ public AWSS3CacheVolume(String bucketname, String folder, AmazonS3 s3, String re * not be automatically inserted; if a slash is needed, * it should be included as part of this base URL. * @throws FileNotFoundException if the specified bucket does not exist - * @throws AmazonServiceException if there is a problem accessing the S3 service. While + * @throws SdkServiceException if there is a problem accessing the S3 service. While * this is a runtime exception that does not have to be caught * by the caller, catching it is recommended to address * connection problems early. * @throws MalformedURLException if the given redirectBaseURL cannot be used to form * legal URLs */ - public AWSS3CacheVolume(String bucketname, String folder, String name, AmazonS3 s3, String redirectBaseURL) - throws FileNotFoundException, AmazonServiceException, MalformedURLException + public AWSS3CacheVolume(String bucketname, String folder, String name, S3Client s3, String redirectBaseURL) + throws FileNotFoundException, S3Exception, MalformedURLException { this(bucketname, folder, name, s3); @@ -128,13 +129,13 @@ public AWSS3CacheVolume(String bucketname, String folder, String name, AmazonS3 * root of the bucket. * @param s3 the AmazonS3 client instance to use to access the bucket * @throws FileNotFoundException if the specified bucket does not exist - * @throws AmazonServiceException if there is a problem accessing the S3 service. 
While + * @throws SdkServiceException if there is a problem accessing the S3 service. While * this is a runtime exception that does not have to be caught * by the caller, catching it is recommended to address * connection problems early. */ - public AWSS3CacheVolume(String bucketname, String folder, AmazonS3 s3) - throws FileNotFoundException, AmazonServiceException + public AWSS3CacheVolume(String bucketname, String folder, S3Client s3) + throws FileNotFoundException, SdkServiceException { this(bucketname, folder, null, s3); } @@ -149,38 +150,53 @@ public AWSS3CacheVolume(String bucketname, String folder, AmazonS3 s3) * @param name a name to refer to this volume by * @param s3 the AmazonS3 client instance to use to access the bucket * @throws FileNotFoundException if the specified bucket does not exist - * @throws AmazonServiceException if there is a problem accessing the S3 service. While + * @throws SdkServiceException if there is a problem accessing the S3 service. While * this is a runtime exception that does not have to be caught * by the caller, catching it is recommended to address * connection problems early. */ - public AWSS3CacheVolume(String bucketname, String folder, String name, AmazonS3 s3) - throws FileNotFoundException, AmazonServiceException - { + public AWSS3CacheVolume(String bucketname, String folder, String name, S3Client s3) + throws FileNotFoundException { bucket = bucketname; - if (folder != null && folder.length() == 0) + + if (folder != null && folder.length() == 0) { folder = null; + } this.folder = folder; s3client = s3; - // does bucket exist? 
+ // Check if the bucket exists try { - s3client.headBucket(new HeadBucketRequest(bucket)); - } - catch (AmazonServiceException ex) { - if (ex.getStatusCode() == 404) - throw new FileNotFoundException("Not an existing bucket: "+bucket+ - " ("+ex.getMessage()+")"); + s3client.headBucket(HeadBucketRequest.builder().bucket(bucket).build()); + } catch (S3Exception ex) { + if (ex.statusCode() == 404) { + throw new FileNotFoundException("Not an existing bucket: " + bucket + " (" + ex.getMessage() + ")"); + } throw ex; } - // does folder exist in the bucket? - if (! s3client.doesObjectExist(bucket, folder+"/")) - throw new FileNotFoundException("Not an existing folder in "+bucket+" bucket: "+folder); + // Check if the folder exists (folder is a zero-byte object with a trailing '/') + if (folder != null) { + String folderKey = folder.endsWith("/") ? folder : folder + "/"; + try { + s3client.headObject(HeadObjectRequest.builder() + .bucket(bucket) + .key(folderKey) + .build()); + } catch (S3Exception ex) { + if (ex.statusCode() == 404) { + throw new FileNotFoundException("Not an existing folder in " + bucket + " bucket: " + folder); + } + throw ex; + } + } + // Set the name field if (name == null) { name = "s3:/" + bucket + "/"; - if (folder != null) name += folder + "/"; + if (folder != null) { + name += folder + "/"; + } } this.name = name; } @@ -204,9 +220,19 @@ private String s3name(String name) { */ public boolean exists(String name) throws StorageVolumeException { try { - return s3client.doesObjectExist(bucket, s3name(name)); - } catch (AmazonServiceException ex) { - throw new StorageVolumeException("Trouble accessing bucket "+bucket+": "+ex.getMessage(), ex); + // Use headObject to check if the object exists + s3client.headObject(HeadObjectRequest.builder() + .bucket(bucket) + .key(s3name(name)) + .build()); + return true; // If no exception, the object exists + } catch (S3Exception ex) { + if (ex.statusCode() == 404) { + return false; // Object does not exist + } + 
throw new StorageVolumeException("Trouble accessing bucket " + bucket + ": " + ex.getMessage(), ex); + } catch (Exception ex) { + throw new StorageVolumeException("Unexpected error checking object existence: " + ex.getMessage(), ex); } } @@ -235,86 +261,72 @@ public boolean exists(String name) throws StorageVolumeException { * and must include the object size. * @throws StorageVolumeException if the method fails to save the object correctly. */ - public void saveAs(InputStream from, String name, JSONObject md) - throws StorageVolumeException - { - if (name == null || name.length() == 0) + public void saveAs(InputStream from, String name, JSONObject md) throws StorageVolumeException { + if (name == null || name.isEmpty()) { throw new IllegalArgumentException("AWSS3CacheVolume.saveAs(): must provide name"); + } + long size = -1L; - String ct = null, cmd5 = null; + String contentType = null; + String contentMD5 = null; + + // Extract metadata if (md != null) { try { size = md.getLong("size"); - } catch (JSONException ex) { } - try { - ct = md.getString("contentType"); - } catch (JSONException ex) { } - try { - cmd5 = md.getString("contentMD5"); - } catch (JSONException ex) { } + } catch (Exception e) { + // ignore, size is required + } + contentType = md.optString("contentType", null); + contentMD5 = md.optString("contentMD5", null); + } + + if (size <= 0) { + throw new IllegalArgumentException("AWSS3CacheVolume.saveAs(): metadata must include size property"); } - if (size < 0) - throw new IllegalArgumentException("AWSS3CacheVolume.saveAs(): metadata must be provided with " + - "size property"); - - // set some metadata for the object - ObjectMetadata omd = new ObjectMetadata(); - omd.setContentLength(size); // required - if (ct != null) - omd.setContentType(ct); // for redirect web server - if (cmd5 != null) - omd.setContentMD5(cmd5); // for on-the-fly checksum checking - - // set the name to download as (for benefit of redirect web server) - if (name.endsWith("/")) 
name = name.substring(0, name.length()-1); - String[] nmflds = name.split("/"); - omd.setContentDisposition(nmflds[nmflds.length-1]); - - Upload uplstat = null; + try { - TransferManager trxmgr = TransferManagerBuilder.standard().withS3Client(s3client) - .withMultipartUploadThreshold(200000000L) - .withMinimumUploadPartSize(100000000L) - .build(); - uplstat = trxmgr.upload(bucket, s3name(name), from, omd); - uplstat.waitForUploadResult(); - } catch (InterruptedException ex) { - throw new StorageVolumeException("Upload interrupted for object, " + s3name(name) + - ", to s3:/"+bucket+": " + ex.getMessage(), ex); - } catch (AmazonServiceException ex) { - throw new StorageVolumeException("Failure to save object, " + s3name(name) + - ", to s3:/"+bucket+": " + ex.getMessage(), ex); - } catch (AmazonClientException ex) { - if (ex.getMessage().contains("verify integrity") && ex.getMessage().contains("contentMD5")) { - // unfortunately this is how we identify a checksum error - // clean-up badly transfered file. 
- try { remove(name); } - catch (StorageVolumeException e) { } - throw new StorageVolumeException("Failure to save object, " + s3name(name) + - ", to s3:/"+bucket+": md5 transfer checksum failed"); + // Prepare the PutObjectRequest + PutObjectRequest.Builder putRequestBuilder = PutObjectRequest.builder() + .bucket(bucket) + .key(s3name(name)) + .contentLength(size); + + if (contentType != null) { + putRequestBuilder.contentType(contentType); } - if (ex.getMessage().contains("dataLength=") && ex.getMessage().contains("expectedLength=")) { - throw new StorageVolumeException("Failure to transfer correct number of bytes for " + - s3name(name) + " to s3:/"+bucket+" ("+ex.getMessage()+")."); + if (contentMD5 != null) { + putRequestBuilder.contentMD5(contentMD5); } - throw new StorageVolumeException("AWS client error: "+ex.getMessage()+"; object status unclear"); - } - - if (md != null) { - try { + + // Add Content-Disposition header (e.g., file name for web servers) + if (name.endsWith("/")) { + name = name.substring(0, name.length() - 1); + } + String[] nameFields = name.split("/"); + putRequestBuilder.contentDisposition(nameFields[nameFields.length - 1]); + + // Perform the upload + s3client.putObject(putRequestBuilder.build(), RequestBody.fromInputStream(from, size)); + + // Update metadata if provided + if (md != null) { CacheObject co = get(name); - long mod = co.getLastModified(); - if (mod > 0L) - md.put("modified", mod); - if (co.hasMetadatum("volumeChecksum")) + long modifiedTime = co.getLastModified(); + if (modifiedTime > 0L) { + md.put("modified", modifiedTime); + } + if (co.hasMetadatum("volumeChecksum")) { md.put("volumeChecksum", co.getMetadatumString("volumeChecksum", " ")); + } } - catch (ObjectNotFoundException ex) { - throw new StorageStateException("Upload apparently failed: "+ex.getMessage(), ex); - } - catch (StorageVolumeException ex) { - throw new StorageStateException("Uploaded object status unclear: "+ex.getMessage(), ex); + } catch (S3Exception 
e) { + if (e.awsErrorDetails() != null && e.awsErrorDetails().errorCode().equals("InvalidDigest")) { + throw new StorageVolumeException("MD5 checksum mismatch for object: " + s3name(name), e); } + throw new StorageVolumeException("Failed to upload object: " + s3name(name) + " (" + e.getMessage() + ")", e); + } catch (Exception e) { + throw new StorageVolumeException("Unexpected error saving object " + s3name(name) + ": " + e.getMessage(), e); } } @@ -361,15 +373,25 @@ public synchronized void saveAs(CacheObject obj, String name) throws StorageVolu * named object */ public InputStream getStream(String name) throws StorageVolumeException { - String use = s3name(name); + String key = s3name(name); try { - GetObjectRequest getObjectRequest = new GetObjectRequest(bucket, use); - S3Object s3Object = s3client.getObject(getObjectRequest); - return s3Object.getObjectContent(); - } catch (AmazonServiceException ex) { - if (ex.getStatusCode() == 404) - throw new ObjectNotFoundException("Object not found: s3:/"+bucket+"/"+use, this.getName()); - throw new StorageStateException("Trouble accessing "+name+": "+ex.getMessage(), ex); + // Create a GetObjectRequest + GetObjectRequest getObjectRequest = GetObjectRequest.builder() + .bucket(bucket) + .key(key) + .build(); + + // Get the object as a stream + ResponseInputStream s3InputStream = s3client.getObject(getObjectRequest); + + return s3InputStream; + } catch (S3Exception ex) { + if (ex.statusCode() == 404) { + throw new ObjectNotFoundException("Object not found: s3:/" + bucket + "/" + key, this.getName()); + } + throw new StorageStateException("Trouble accessing " + name + ": " + ex.getMessage(), ex); + } catch (Exception ex) { + throw new StorageVolumeException("Unexpected error accessing " + name + ": " + ex.getMessage(), ex); } } @@ -380,25 +402,36 @@ public InputStream getStream(String name) throws StorageVolumeException { * volume */ public CacheObject get(String name) throws StorageVolumeException { - String use = 
s3name(name); - ObjectMetadata omd = null; + String key = s3name(name); try { - omd = s3client.getObjectMetadata(bucket, use); - } catch (AmazonServiceException ex) { - if (ex.getStatusCode() == 404) - throw new ObjectNotFoundException("Object not found: s3:/"+bucket+"/"+use, this.getName()); - throw new StorageStateException("Trouble accessing "+name+": "+ex.getMessage(), ex); + // Use headObject to fetch metadata + HeadObjectRequest headRequest = HeadObjectRequest.builder() + .bucket(bucket) + .key(key) + .build(); + + HeadObjectResponse headResponse = s3client.headObject(headRequest); + + // Extract metadata + JSONObject md = new JSONObject(); + md.put("size", headResponse.contentLength()); + md.put("contentType", headResponse.contentType()); + md.put("modified", headResponse.lastModified().toEpochMilli()); + md.put("volumeChecksum", "etag " + headResponse.eTag()); + + return new CacheObject(name, md, this); + + } catch (S3Exception ex) { + if (ex.statusCode() == 404) { + throw new ObjectNotFoundException("Object not found: s3:/" + bucket + "/" + key, this.getName()); + } + throw new StorageStateException("Trouble accessing " + name + ": " + ex.getMessage(), ex); + } catch (Exception ex) { + throw new StorageVolumeException("Unexpected error accessing " + name + ": " + ex.getMessage(), ex); } - - JSONObject md = new JSONObject(); - md.put("size", omd.getContentLength()); - md.put("contentType", omd.getContentType()); - md.put("modified", omd.getLastModified().getTime()); - md.put("volumeChecksum", "etag " + omd.getETag()); - - return new CacheObject(name, md, this); } + /** * remove the object with the give name from this storage volume * @param name the name of the object to get @@ -408,14 +441,24 @@ public CacheObject get(String name) throws StorageVolumeException { * remove the Object */ public boolean remove(String name) throws StorageVolumeException { - String use = s3name(name); + String key = s3name(name); try { - s3client.deleteObject(bucket, use); - 
return true; - } catch (AmazonServiceException ex) { - if (ex.getStatusCode() == 404) - return false; - throw new StorageStateException("Trouble accessing "+name+": "+ex.getMessage(), ex); + // Create the delete object request + DeleteObjectRequest deleteRequest = DeleteObjectRequest.builder() + .bucket(bucket) + .key(key) + .build(); + + // Delete the object + s3client.deleteObject(deleteRequest); + return true; // If no exception, the object was successfully deleted + } catch (S3Exception ex) { + if (ex.statusCode() == 404) { + return false; // Object not found, return false + } + throw new StorageStateException("Trouble deleting " + name + ": " + ex.getMessage(), ex); + } catch (Exception ex) { + throw new StorageVolumeException("Unexpected error deleting object " + name + ": " + ex.getMessage(), ex); } } @@ -434,28 +477,42 @@ public boolean remove(String name) throws StorageVolumeException { * @throws UnsupportedOperationException always as this function is not supported */ public URL getRedirectFor(String name) throws StorageVolumeException, UnsupportedOperationException { - if (baseurl == null) + if (baseurl == null) { throw new UnsupportedOperationException("AWSS3CacheVolume: getRedirectFor not supported"); + } if (exists(name)) { try { - return s3client.getUrl(bucket, s3name(name)); - } - catch (AmazonServiceException ex) { - throw new StorageVolumeException("Failed to determine redirect URL for name="+name+": "+ - ex.getMessage(), ex); + // Generate a presigned URL using S3Presigner + // S3Presigner replaces getUrl for presigned URL generation + try (S3Presigner presigner = S3Presigner.create()) { + GetObjectRequest getObjectRequest = GetObjectRequest.builder() + .bucket(bucket) + .key(s3name(name)) + .build(); + + GetObjectPresignRequest presignRequest = GetObjectPresignRequest.builder() + .signatureDuration(Duration.ofMinutes(15)) // URL validity duration + .getObjectRequest(getObjectRequest) + .build(); + + URL presignedUrl = 
presigner.presignGetObject(presignRequest).url(); + return presignedUrl; + } + } catch (S3Exception ex) { + throw new StorageVolumeException("Failed to determine redirect URL for name=" + name + ": " + + ex.getMessage(), ex); } - } - else { + } else { try { return new URL(baseurl + name.replace(" ", "%20")); - } - catch (MalformedURLException ex) { - throw new StorageVolumeException("Failed to form legal URL: "+ex.getMessage(), ex); + } catch (MalformedURLException ex) { + throw new StorageVolumeException("Failed to form legal URL: " + ex.getMessage(), ex); } } } + /** * create a folder/subdirectory in a bucket if it already doesn't exist * @@ -463,21 +520,35 @@ public URL getRedirectFor(String name) throws StorageVolumeException, Unsupporte * @param folder the name of the folder to ensure exists * @param s3 the authenticated AmazonS3 client to use to access the bucket */ - public static boolean ensureBucketFolder(AmazonS3 s3, String bucketname, String folder) - throws AmazonServiceException - { - if (! folder.endsWith("/")) folder += "/"; - if (! 
s3.doesObjectExist(bucketname, folder)) { - ObjectMetadata md = new ObjectMetadata(); - md.setContentLength(0); - InputStream mt = new ByteArrayInputStream(new byte[0]); - try { - s3.putObject(bucketname, folder, mt, md); - return true; - } finally { - try { mt.close(); } catch (IOException ex) { } + public static boolean ensureBucketFolder(S3Client s3, String bucketname, String folder) throws S3Exception { + if (!folder.endsWith("/")) { + folder += "/"; + } + + try { + // Check if the folder exists by calling headObject + s3.headObject(HeadObjectRequest.builder() + .bucket(bucketname) + .key(folder) + .build()); + return false; // Folder already exists + } catch (S3Exception ex) { + if (ex.statusCode() != 404) { + throw ex; // Re-throw exception if it's not a 404 (Not Found) error } } - return false; + + // Folder does not exist, create it as a zero-byte object + try (InputStream emptyContent = new ByteArrayInputStream(new byte[0])) { + s3.putObject(PutObjectRequest.builder() + .bucket(bucketname) + .key(folder) + .contentLength(0L) + .build(), + RequestBody.fromInputStream(emptyContent, 0)); + return true; // Folder created successfully + } catch (Exception e) { + throw new RuntimeException("Failed to create folder in bucket " + bucketname + ": " + e.getMessage(), e); + } } } diff --git a/src/main/java/gov/nist/oar/distrib/storage/AWSS3ClientProvider.java b/src/main/java/gov/nist/oar/distrib/storage/AWSS3ClientProvider.java index 981e5144..da30e69f 100644 --- a/src/main/java/gov/nist/oar/distrib/storage/AWSS3ClientProvider.java +++ b/src/main/java/gov/nist/oar/distrib/storage/AWSS3ClientProvider.java @@ -12,14 +12,16 @@ */ package gov.nist.oar.distrib.storage; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; +import java.net.URI; -import 
org.slf4j.LoggerFactory; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ClientBuilder; +import software.amazon.awssdk.services.s3.S3Configuration; /** * A wrapper for an AmazonS3Client that regulates is resource usage. @@ -33,104 +35,100 @@ */ public class AWSS3ClientProvider implements Cloneable { - protected AmazonS3 s3 = null; - protected String reg = null; - protected AWSCredentialsProvider credpro = null; - protected int acclim = 25; + protected S3Client s3 = null; + protected String region = null; + protected AwsCredentialsProvider credProvider = null; + protected int accessLimit = 25; protected int accesses = 0; - private String ep = null; + private String endpoint = null; + static Logger log = LoggerFactory.getLogger(AWSS3ClientProvider.class); /** - * create the provider - * @param creds the credentials to use when recreating the client - * @param region the AWS region to connect to - * @param accessLimit the maximum number of accesses allowed before the client is recreated; - * a non-positive number will result in a new client with every access. + * Create the provider. + * @param creds The credentials to use for S3. + * @param region The AWS region to connect to. + * @param accessLimit Maximum number of accesses before recreating the client. */ - public AWSS3ClientProvider(AWSCredentialsProvider creds, String region, int accessLimit) { + public AWSS3ClientProvider(AwsCredentialsProvider creds, String region, int accessLimit) { this(creds, region, accessLimit, null); } /** - * create the provider. This constructor is intended for use with an mock S3 service for - * testing purposes. 
- * @param creds the credentials to use when recreating the client - * @param region the AWS region to connect to - * @param accessLimit the maximum number of accesses allowed before the client is recreated; - * a non-positive number will result in a new client with every access. - * @param endpoint the endpoint to use for the AWS s3 service + * Create the provider with a custom endpoint (e.g., for testing with a mock S3 service). + * @param creds The credentials to use for S3. + * @param region The AWS region to connect to. + * @param accessLimit Maximum number of accesses before recreating the client. + * @param endpoint The custom endpoint URL. */ - public AWSS3ClientProvider(AWSCredentialsProvider creds, String region, - int accessLimit, String endpoint) - { - credpro = creds; - reg = region; - acclim = accessLimit; - ep = endpoint; + public AWSS3ClientProvider(AwsCredentialsProvider creds, String region, int accessLimit, String endpoint) { + this.credProvider = creds; + this.region = region; + this.accessLimit = accessLimit; + this.endpoint = endpoint; refresh(); } - - /** - * return the maximum number of accesses allowed before the client is recreated - */ - public int getAccessLimit() { return acclim; } /** - * return the S3 client + * Return the S3 client. */ - public synchronized AmazonS3 client() { - if (accesses >= acclim || s3 == null) + public synchronized S3Client client() { + if (accesses >= accessLimit || s3 == null) { refresh(); + } accesses++; return s3; } /** - * free up the client resources and recreate the client + * Refresh the S3 client (recreate it). 
*/ public synchronized void refresh() { - /* - if (s3 != null) - s3.shutdown(); - */ - log.info("FYI: Refreshing the S3 client"); - AmazonS3ClientBuilder bldr = AmazonS3Client.builder().standard() - .withCredentials(credpro); - if (ep == null) - bldr.withRegion(reg); - else - bldr.withEndpointConfiguration(new EndpointConfiguration(ep, reg)) - .enablePathStyleAccess(); - s3 = bldr.build(); + log.info("Refreshing the S3 client"); + S3ClientBuilder builder = S3Client.builder() + .credentialsProvider(credProvider) + .region(Region.of(region)); - accesses = 0; - } + if (endpoint != null) { + builder.endpointOverride(URI.create(endpoint)) + .serviceConfiguration(S3Configuration.builder() + .pathStyleAccessEnabled(true) + .build()); + } - /** - * return the number of accesses are left before the client is refreshed - */ - public int accessesLeft() { - if (s3 == null) - return 0; - return acclim - accesses; + if (s3 != null) { + s3.close(); + } + + s3 = builder.build(); + accesses = 0; } /** - * free up resources by shutting down the client + * Shut down the S3 client and free resources. */ public synchronized void shutdown() { if (s3 != null) { - s3.shutdown(); + s3.close(); s3 = null; } accesses = 0; } + /** + * Return the number of accesses left before a refresh is triggered. 
+ */ + public int accessesLeft() { + if (s3 == null) return 0; + return accessLimit - accesses; + } + + @Override public Object clone() { - return new AWSS3ClientProvider(credpro, reg, acclim, ep); + return new AWSS3ClientProvider(credProvider, region, accessLimit, endpoint); } + public AWSS3ClientProvider cloneMe() { return (AWSS3ClientProvider) clone(); } -} +} \ No newline at end of file diff --git a/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java b/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java index 432732c3..d726b530 100644 --- a/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java +++ b/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java @@ -16,34 +16,28 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.io.FilterInputStream; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.List; -import java.util.regex.Pattern; -import java.util.function.Consumer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.HeadBucketRequest; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ListObjectsV2Request; -import com.amazonaws.services.s3.model.ListObjectsV2Result; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; +import gov.nist.oar.bags.preservation.BagUtils; import gov.nist.oar.distrib.Checksum; -import gov.nist.oar.distrib.LongTermStorage; -import gov.nist.oar.distrib.storage.PDRBagStorageBase; import gov.nist.oar.distrib.ResourceNotFoundException; -import gov.nist.oar.distrib.StorageVolumeException; import gov.nist.oar.distrib.StorageStateException; 
-import gov.nist.oar.bags.preservation.BagUtils; +import gov.nist.oar.distrib.StorageVolumeException; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.NoSuchKeyException; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.model.S3Object; /** @@ -53,336 +47,169 @@ */ public class AWSS3LongTermStorage extends PDRBagStorageBase { - public static long defaultChecksumSizeLimit = 50000000L; // 50 MB + public static long defaultChecksumSizeLimit = 50_000_000L; // 50 MB public final String bucket; - protected AmazonS3 s3client = null; - protected Integer pagesz = null; // null means use default page size (1000) + protected S3Client s3client; + protected Integer pagesz = null; // null means use default page size private long checksumSizeLim = defaultChecksumSizeLimit; - /** - * set the number of objects returned in a page of listing results. This can be used for testing. - * A null value means use the AWS default. 
- */ - public void setPageSize(Integer sz) { pagesz = sz; } + public void setPageSize(Integer sz) { + this.pagesz = sz; + } - /** - * create the storage instance - * @param bucketname the name of the S3 bucket that provides the storage for this interface - * @param s3 the AmazonS3 client instance to use to access the bucket - * @throws FileNotFoundException if the specified bucket does not exist - * @throws AmazonServiceException if there is a problem accessing the S3 service. While - * this is a runtime exception that does not have to be caught - * by the caller, catching it is recommended to address - * connection problems early. - */ - public AWSS3LongTermStorage(String bucketname, AmazonS3 s3) - throws FileNotFoundException, AmazonServiceException - { + public AWSS3LongTermStorage(String bucketname, S3Client s3Client) + throws FileNotFoundException, StorageVolumeException { super(bucketname); - bucket = bucketname; - s3client = s3; + this.bucket = bucketname; + this.s3client = s3Client; - // does bucket exist? + // Check if bucket exists try { - s3client.headBucket(new HeadBucketRequest(bucket)); + s3client.headBucket(HeadBucketRequest.builder().bucket(bucketname).build()); + } catch (NoSuchBucketException ex) { + throw new FileNotFoundException("Bucket not found: " + bucketname); + } catch (S3Exception ex) { + throw new StorageStateException("Error accessing bucket: " + bucketname, ex); } - catch (AmazonServiceException ex) { - if (ex.getStatusCode() == 404) - throw new FileNotFoundException("Not an existing bucket: "+bucket+ - "("+ex.getMessage()+")"); - throw ex; - } - logger.info("Creating AWSS3LongTermStorage for the bucket, " + bucket); + logger.info("Initialized AWSS3LongTermStorage for bucket: {}", bucket); } - /** - * return true if a file with the given name exists in the storage - * @param filename The name of the desired file. Note that this does not refer to files that - * may reside inside a serialized bag or other archive (e.g. zip) file. 
- */ @Override public boolean exists(String filename) throws StorageVolumeException { try { - return s3client.doesObjectExist(bucket, filename); - } catch (AmazonServiceException ex) { - throw new StorageStateException("Trouble accessing bucket "+bucket+": "+ex.getMessage(), ex); + s3client.headObject(HeadObjectRequest.builder().bucket(bucket).key(filename).build()); + return true; + } catch (NoSuchKeyException ex) { + return false; + } catch (S3Exception ex) { + throw new StorageStateException("Error checking existence of " + filename + ": " + ex.getMessage(), ex); } } - /** - * Given an exact file name in the storage, return an InputStream open at the start of the file - * @param filename The name of the desired file. Note that this does not refer to files that - may reside inside a serialized bag or other archive (e.g. zip) file. - * @return InputStream - open at the start of the file - * @throws FileNotFoundException if the file with the given filename does not exist - */ @Override public InputStream openFile(String filename) throws FileNotFoundException, StorageVolumeException { try { - GetObjectRequest getObjectRequest = new GetObjectRequest(bucket, filename); - S3Object s3Object = s3client.getObject(getObjectRequest); - return new DrainingInputStream(s3Object, logger, filename); - } catch (AmazonServiceException ex) { - if (ex.getStatusCode() == 404) - throw new FileNotFoundException("File not found in S3 bucket: "+filename); - throw new StorageStateException("Trouble accessing "+filename+": "+ex.getMessage(), ex); + GetObjectRequest request = GetObjectRequest.builder().bucket(bucket).key(filename).build(); + ResponseInputStream response = s3client.getObject(request); + return response; + } catch (NoSuchKeyException ex) { + throw new FileNotFoundException("File not found in S3 bucket: " + filename); + } catch (S3Exception ex) { + throw new StorageStateException("Error accessing " + filename + ": " + ex.getMessage(), ex); } } - /** - * return the checksum for 
 the given file
-     * @param filename  The name of the desired file.  Note that this does not refer to files that
-     *                      may reside inside a serialized bag or other archive (e.g. zip) file.
-     * @return Checksum, a container for the checksum value
-     * @throws FileNotFoundException  if the file with the given filename does not exist
-     */
     @Override
     public Checksum getChecksum(String filename) throws FileNotFoundException, StorageVolumeException {
-        S3Object s3Object = null;
-        GetObjectRequest getObjectRequest = null;
-        try {
-            getObjectRequest = new GetObjectRequest(bucket, filename+".sha256");
-            s3Object = s3client.getObject(getObjectRequest);
-        }
-        catch (AmazonServiceException ex) {
-            if (ex.getStatusCode() != 404)
-                throw new StorageStateException("Trouble accessing "+filename+": "+ex.getMessage(), ex);
-        }
-
-        if (s3Object == null) {
-            // no cached checksum, calculate it the file is not too big
-            if (! filename.endsWith(".sha256"))
-                logger.warn("No cached checksum available for "+filename);
-
-            if (getSize(filename) > checksumSizeLim)   // 10x smaller limit than for local files
-                throw new StorageStateException("No cached checksum for large file: "+filename);
-
-            // ok, calculate it on the fly
-            try (InputStream is = openFile(filename)) {
-                return Checksum.calcSHA256(is);
-            } catch (Exception ex) {
-                throw new StorageStateException("Unable to calculate checksum for small file: " +
-                                                filename + ": " + ex.getMessage());
+        String checksumKey = filename + ".sha256";
+        try (InputStream is = openFile(checksumKey);
+             InputStreamReader reader = new InputStreamReader(is)) {
+            String checksumValue = readHash(reader);
+            return Checksum.sha256(checksumValue);
+        } catch (FileNotFoundException ex) {
+            if (getSize(filename) > checksumSizeLim) {
+                throw new StorageStateException("No cached checksum for large file: " + filename);
             }
-        }
-
-        try (InputStreamReader csrdr = new InputStreamReader(s3Object.getObjectContent())) {
-            return Checksum.sha256(readHash(csrdr));
-        }
-        
catch (IOException ex) { - throw new StorageStateException("Failed to read cached checksum value from "+ filename+".sha256" + - ": " + ex.getMessage(), ex); - } - finally { - try { s3Object.close(); } - catch (IOException ex) { - logger.warn("Trouble closing S3Object (double close?): "+ex.getMessage()); + try (InputStream fileStream = openFile(filename)) { + return Checksum.calcSHA256(fileStream); + } catch (IOException e) { + throw new StorageStateException("Unable to calculate checksum: " + filename, e); } + } catch (IOException ex) { + throw new StorageStateException("Error reading checksum for " + filename, ex); } } - /** - * Return the size of the named file in bytes - * @param filename The name of the desired file. Note that this does not refer to files that - * may reside inside a serialized bag or other archive (e.g. zip) file. - * @return long, the size of the file in bytes - * @throws FileNotFoundException if the file with the given filename does not exist - */ @Override public long getSize(String filename) throws FileNotFoundException, StorageVolumeException { try { - return s3client.getObjectMetadata(bucket, filename).getContentLength(); - } catch (AmazonServiceException ex) { - if (ex.getStatusCode() == 404) - throw new FileNotFoundException("File not found in S3 bucket: "+filename); - throw new StorageStateException("Trouble accessing "+filename+": "+ex.getMessage(), ex); + HeadObjectRequest request = HeadObjectRequest.builder().bucket(bucket).key(filename).build(); + HeadObjectResponse response = s3client.headObject(request); + return response.contentLength(); + } catch (NoSuchKeyException ex) { + throw new FileNotFoundException("File not found in S3 bucket: " + filename); + } catch (S3Exception ex) { + throw new StorageStateException("Error retrieving size for " + filename + ": " + ex.getMessage(), ex); } } protected ListObjectsV2Request createListRequest(String keyprefix, Integer pagesize) { - return new 
ListObjectsV2Request().withBucketName(bucket) - .withPrefix(keyprefix) - .withMaxKeys(pagesize); + ListObjectsV2Request.Builder builder = ListObjectsV2Request.builder() + .bucket(bucket) + .prefix(keyprefix); + if (pagesize != null) { + builder.maxKeys(pagesize); + } + return builder.build(); } - - /** - * Return all the bags associated with the given ID - * @param identifier the AIP identifier for the desired data collection - * @return List, the file names for all bags associated with given ID - * @throws ResourceNotFoundException if there exist no bags with the given identifier - */ @Override public List findBagsFor(String identifier) - throws ResourceNotFoundException, StorageVolumeException - { - // Because of S3 result paging, we need a specialized implementation of this method - - ListObjectsV2Result objectListing = null; - List files = null; - List filenames = new ArrayList(); + throws ResourceNotFoundException, StorageVolumeException { + List filenames = new ArrayList<>(); + ListObjectsV2Request request = createListRequest(identifier + ".", pagesz); - ListObjectsV2Request req = createListRequest(identifier+".", pagesz); - do { - try { - objectListing = s3client.listObjectsV2(req); - files = objectListing.getObjectSummaries(); - } catch (AmazonServiceException ex) { - throw new StorageStateException("Trouble accessing bucket, "+bucket+": "+ex.getMessage(),ex); - } - - for(S3ObjectSummary f : files) { - String name = f.getKey(); - if (! 
name.endsWith(".sha256") && BagUtils.isLegalBagName(name)) - filenames.add(name); - } - - req.setContinuationToken(objectListing.getNextContinuationToken()); + try { + ListObjectsV2Response response; + do { + response = s3client.listObjectsV2(request); + for (S3Object obj : response.contents()) { + String name = obj.key(); + if (!name.endsWith(".sha256") && BagUtils.isLegalBagName(name)) { + filenames.add(name); + } + } + request = request.toBuilder().continuationToken(response.nextContinuationToken()).build(); + } while (response.isTruncated()); + } catch (S3Exception ex) { + throw new StorageStateException("Error accessing bucket: " + bucket, ex); } - while (objectListing.isTruncated()); // are there more pages to fetch? - if (filenames.size() == 0) + if (filenames.isEmpty()) { throw ResourceNotFoundException.forID(identifier); + } return filenames; } - /** - * Return the head bag associated with the given ID - * @param identifier the AIP identifier for the desired data collection - * @return String, the head bag's file name - * @throws ResourceNotFoundException if there exist no bags with the given identifier - */ - @Override - public String findHeadBagFor(String identifier) - throws ResourceNotFoundException, StorageStateException - { - return findHeadBagFor(identifier, null); - } - - /** - * Return the name of the head bag for the identifier for given version - * @param identifier the AIP identifier for the desired data collection - * @param version the desired version of the AIP; if null, assume the latest version. - * If the version is an empty string, the head bag for bags without a - * version designation will be selected. 
-     * @return String, the head bag's file name, or null if version is not found
-     * @throws ResourceNotFoundException   if there exist no bags with the given identifier or version
-     */
     @Override
     public String findHeadBagFor(String identifier, String version)
-        throws ResourceNotFoundException, StorageStateException
-    {
-        // Because of S3 result paging, we need a specialized implementation of this method
-
-        // Be efficient in selecting files via a key; if possible include a version designator
-        String prefix = identifier+".";
+        throws ResourceNotFoundException, StorageStateException {
+        String prefix = identifier + ".";
         if (version != null) {
-            version = Pattern.compile("\\.").matcher(version).replaceAll("_");
-            if (! Pattern.compile("^[01](_0)*$").matcher(version).find())
-                prefix = prefix + Pattern.compile("(_0)+$").matcher(version).replaceAll("");
+            version = version.replace(".", "_");
+            if (!version.matches("[01](_0)*")) prefix += version.replaceAll("(_0)+$", "");
         }
 
         String selected = null;
-        int maxseq = -1;
-        ListObjectsV2Result objectListing = null;
-        List files = null;
+        int maxSeq = -1;
+        ListObjectsV2Request request = createListRequest(prefix, pagesz);
 
-        ListObjectsV2Request req = createListRequest(prefix, pagesz);
-        do {
-            try {
-                objectListing = s3client.listObjectsV2(req);
-                files = objectListing.getObjectSummaries();
-            } catch (AmazonServiceException ex) {
-                throw new StorageStateException("Trouble accessing bucket, "+bucket+": "+ex.getMessage(),ex);
-            }
-
-            if (! files.isEmpty()) {
-                int seq = -1;
-                for(S3ObjectSummary f : files) {
-                    String name = f.getKey();
-                    if (! name.endsWith(".sha256") && BagUtils.isLegalBagName(name)) {
-                        if (version != null && ! 
 BagUtils.matchesVersion(name, version))
-                            continue;
-                        seq = BagUtils.sequenceNumberIn(name);
-                        if (seq > maxseq) {
-                            maxseq = seq;
+        try {
+            ListObjectsV2Response response;
+            do {
+                response = s3client.listObjectsV2(request);
+                for (S3Object obj : response.contents()) {
+                    String name = obj.key();
+                    if (!name.endsWith(".sha256") && BagUtils.isLegalBagName(name) && (version == null || BagUtils.matchesVersion(name, version))) {
+                        int seq = BagUtils.sequenceNumberIn(name);
+                        if (seq > maxSeq) {
+                            maxSeq = seq;
 selected = name;
                         }
                     }
                 }
-            }
-
-            req.setContinuationToken(objectListing.getNextContinuationToken());
+                request = request.toBuilder().continuationToken(response.nextContinuationToken()).build();
+            } while (response.isTruncated());
+        } catch (S3Exception ex) {
+            throw new StorageStateException("Error accessing bucket: " + bucket, ex);
         }
-        while (objectListing.isTruncated()); // are there more pages to fetch?
 
-        if (selected == null)
+        if (selected == null) {
             throw ResourceNotFoundException.forID(identifier, version);
-
-        return selected;
-    }
-
-    /*
-     * AWS urges that opened S3 objects be fully streamed.
-     */
-    static class DrainingInputStream extends FilterInputStream implements Runnable {
-        private Logger logger = null;
-        private String name = null;
-        private S3Object s3o = null;
-
-        public DrainingInputStream(S3Object s3object, Logger log, String name) {
-            super(s3object.getObjectContent());
-            s3o = s3object;
-            logger = log;
-            this.name = name;
-        }
-        public DrainingInputStream(S3Object s3object, Logger log) {
-            this(s3object, log, null);
-        }
-
-        public void close() {
-            /*
-             * does not work under heavy load
-             *
-            Thread t = new Thread(this, "S3Object closer");
-            t.start();
-            */
-            runClose();
+        }
 
-        public void run() { runClose(); }
-
-        void runClose() {
-            long start = System.currentTimeMillis();
-            String what = (name == null) ? 
"" : name+" "; - try { - byte[] buf = new byte[100000]; - int len = 0; - logger.debug("Draining {}S3 Object stream ({})", what, in.toString()); - while ((len = read(buf)) != -1) { /* fugetaboutit */ } - if (logger.isInfoEnabled()) { - String[] flds = in.toString().split("\\."); - logger.info("Drained {}S3 object stream ({}) in {} millseconds", what, - flds[flds.length-1], (System.currentTimeMillis() - start)); - } - } - catch (IOException ex) { - logger.warn("Trouble draining {}S3 object stream ({}): {}", - what, in.toString(), ex.getMessage()); - } - finally { - try { super.close(); } - catch (IOException ex) { - logger.warn("Trouble closing {}S3 object stream ({}): {}", - what, in.toString(), ex.getMessage()); - } - try { s3o.close(); } - catch (IOException ex) { - logger.warn("Trouble closing S3Object {}(double close?): "+ex.getMessage()); - } - } - } + return selected; } -} +} \ No newline at end of file diff --git a/src/main/java/gov/nist/oar/distrib/web/CacheManagerProvider.java b/src/main/java/gov/nist/oar/distrib/web/CacheManagerProvider.java index 53ab42a2..d3357797 100644 --- a/src/main/java/gov/nist/oar/distrib/web/CacheManagerProvider.java +++ b/src/main/java/gov/nist/oar/distrib/web/CacheManagerProvider.java @@ -16,6 +16,7 @@ import gov.nist.oar.distrib.service.CacheEnabledFileDownloadService; import gov.nist.oar.distrib.service.NerdmDrivenFromBagFileDownloadService; import gov.nist.oar.distrib.service.PreservationBagService; +import software.amazon.awssdk.services.s3.S3Client; import gov.nist.oar.distrib.cachemgr.CacheManagementException; import gov.nist.oar.distrib.cachemgr.BasicCache; import gov.nist.oar.distrib.cachemgr.pdr.PDRDatasetRestorer; @@ -28,7 +29,6 @@ import org.springframework.lang.Nullable; import org.slf4j.LoggerFactory; import org.slf4j.Logger; -import com.amazonaws.services.s3.AmazonS3; /** * A factory for creating the PDR's CacheManager that can work with the Spring framework's configuration @@ -44,14 +44,14 @@ public class 
CacheManagerProvider { private BagStorage bagstore = null; private HeadBagCacheManager hbcmgr = null; private PDRCacheManager cmgr = null; - private AmazonS3 s3client = null; + private S3Client s3client = null; /** * create the factory. * @param config the cache configuration data * @param bagstorage the long-term bag storage */ - public CacheManagerProvider(NISTCacheManagerConfig config, BagStorage bagstorage, AmazonS3 s3c) { + public CacheManagerProvider(NISTCacheManagerConfig config, BagStorage bagstorage, S3Client s3c) { cfg = config; bagstore = bagstorage; s3client = s3c; diff --git a/src/main/java/gov/nist/oar/distrib/web/NISTCacheManagerConfig.java b/src/main/java/gov/nist/oar/distrib/web/NISTCacheManagerConfig.java index 4684f5ae..36ffb375 100644 --- a/src/main/java/gov/nist/oar/distrib/web/NISTCacheManagerConfig.java +++ b/src/main/java/gov/nist/oar/distrib/web/NISTCacheManagerConfig.java @@ -19,6 +19,7 @@ import gov.nist.oar.distrib.cachemgr.StorageInventoryDB; import gov.nist.oar.distrib.cachemgr.storage.AWSS3CacheVolume; import gov.nist.oar.distrib.cachemgr.storage.FilesystemCacheVolume; +import software.amazon.awssdk.services.s3.S3Client; import gov.nist.oar.distrib.cachemgr.VolumeStatus; import gov.nist.oar.distrib.cachemgr.VolumeConfig; import gov.nist.oar.distrib.cachemgr.CacheObjectCheck; @@ -55,7 +56,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.amazonaws.services.s3.AmazonS3; /** * The configuration for an application's use of a data cache. 
@@ -313,7 +313,7 @@ public DeletionStrategy createDeletionStrategy() throws ConfigurationException { /** * create a CacheVolume as prescribed by this configuration */ - public CacheVolume createCacheVolume(NISTCacheManagerConfig mgrcfg, AmazonS3 s3client) + public CacheVolume createCacheVolume(NISTCacheManagerConfig mgrcfg, S3Client s3client) throws ConfigurationException, FileNotFoundException, MalformedURLException, CacheManagementException { if (location == null || location.length() == 0) @@ -371,7 +371,7 @@ else if (m.group(1).equals("file")) { } } - public BasicCache getCache(AmazonS3 s3) + public BasicCache getCache(S3Client s3) throws ConfigurationException, IOException, CacheManagementException { if (theCache == null) @@ -379,7 +379,7 @@ public BasicCache getCache(AmazonS3 s3) return theCache; } - public BasicCache createDefaultCache(AmazonS3 s3) + public BasicCache createDefaultCache(S3Client s3) throws ConfigurationException, IOException, CacheManagementException { // establish the base directory diff --git a/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java b/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java index bdca6877..b9a49a9d 100644 --- a/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java +++ b/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java @@ -11,46 +11,47 @@ */ package gov.nist.oar.distrib.web; -import gov.nist.oar.distrib.BagStorage; -import gov.nist.oar.distrib.service.DataPackagingService; -import gov.nist.oar.distrib.service.DefaultDataPackagingService; -import gov.nist.oar.distrib.service.DefaultPreservationBagService; -import gov.nist.oar.distrib.service.FileDownloadService; -import gov.nist.oar.distrib.service.PreservationBagService; -import gov.nist.oar.distrib.storage.AWSS3LongTermStorage; -import gov.nist.oar.distrib.storage.FilesystemLongTermStorage; -import io.swagger.v3.oas.models.Components; -import io.swagger.v3.oas.models.OpenAPI; -import 
io.swagger.v3.oas.models.info.Info; -import io.swagger.v3.oas.models.info.License; -import io.swagger.v3.oas.models.servers.Server; - +import java.io.BufferedReader; +import java.io.FileNotFoundException; import java.io.InputStream; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.List; -import java.io.BufferedReader; -import java.io.FileNotFoundException; + import javax.activation.MimetypesFileTypeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.annotation.Bean; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Value; import org.springframework.web.servlet.config.annotation.CorsRegistry; +import org.springframework.web.servlet.config.annotation.PathMatchConfigurer; import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter; -import org.springframework.web.servlet.config.annotation.PathMatchConfigurer; import org.springframework.web.util.UrlPathHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import gov.nist.oar.distrib.BagStorage; +import gov.nist.oar.distrib.StorageVolumeException; +import gov.nist.oar.distrib.service.DataPackagingService; +import gov.nist.oar.distrib.service.DefaultDataPackagingService; +import gov.nist.oar.distrib.service.DefaultPreservationBagService; +import gov.nist.oar.distrib.service.FileDownloadService; +import gov.nist.oar.distrib.service.PreservationBagService; +import gov.nist.oar.distrib.storage.AWSS3LongTermStorage; +import 
gov.nist.oar.distrib.storage.FilesystemLongTermStorage; +import io.swagger.v3.oas.models.Components; +import io.swagger.v3.oas.models.OpenAPI; +import io.swagger.v3.oas.models.info.Info; +import io.swagger.v3.oas.models.info.License; +import io.swagger.v3.oas.models.servers.Server; +import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.auth.InstanceProfileCredentialsProvider; /** @@ -172,7 +173,7 @@ public class NISTDistribServiceConfig { @Value("${distrib.packaging.allowedRedirects:1}") int allowedRedirects; - @Autowired AmazonS3 s3client; // set via getter below + @Autowired S3Client s3client; // set via getter below @Autowired BagStorage lts; // set via getter below @Autowired MimetypesFileTypeMap mimemap; // set via getter below @Autowired CacheManagerProvider cmgrprov; // set via getter below @@ -183,17 +184,20 @@ public class NISTDistribServiceConfig { @Bean public BagStorage getLongTermStorage() throws ConfigurationException { try { - if (mode.equals("aws") || mode.equals("remote")) + if (mode.equals("aws") || mode.equals("remote")) { return new AWSS3LongTermStorage(bagstore, s3client); - else if (mode.equals("local")) + } else if (mode.equals("local")) { return new FilesystemLongTermStorage(bagstore); - else + } else { throw new ConfigurationException("distrib.bagstore.mode", - "Unsupported storage mode: "+ mode); - } - catch (FileNotFoundException ex) { + "Unsupported storage mode: " + mode); + } + } catch (FileNotFoundException ex) { throw new ConfigurationException("distrib.bagstore.location", - "Storage Location not found: "+ex.getMessage(), ex); + "Storage Location not found: " + ex.getMessage(), ex); + } catch (StorageVolumeException ex) { + throw new ConfigurationException("distrib.bagstore.aws", + "Storage 
volume exception: " + ex.getMessage(), ex); } } @@ -202,23 +206,30 @@ else if (mode.equals("local")) * the client for access S3 storage */ @Bean - public AmazonS3 getAmazonS3() throws ConfigurationException { + public S3Client getAmazonS3() throws ConfigurationException { logger.info("Creating S3 client"); - if (mode.equals("remote")) + // Check if "remote" mode is supported + if ("remote".equalsIgnoreCase(mode)) { throw new ConfigurationException("Remote credentials not supported yet"); + } - // import credentials from the EC2 machine we are running on - InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.getInstance(); - - AmazonS3 client = AmazonS3Client.builder() - .standard() - .withCredentials(provider) - .withRegion(region) - .build(); - return client; + try { + // Use default credential provider chain (supports EC2 instance profiles) + S3Client client = S3Client.builder() + .credentialsProvider(InstanceProfileCredentialsProvider.create()) + .region(Region.of(region)) + .build(); + + logger.info("S3 client created successfully for region: {}", region); + return client; + } catch (Exception e) { + logger.error("Failed to create S3 client: {}", e.getMessage(), e); + throw new ConfigurationException("Error creating S3 client: " + e.getMessage(), e); + } } + /** * the MIME type assignments to use when setting content types */ @@ -274,7 +285,7 @@ public NISTCacheManagerConfig getCacheManagerConfig() throws ConfigurationExcept */ @Bean public CacheManagerProvider getCacheManagerProvider(NISTCacheManagerConfig config, - BagStorage bagstor, AmazonS3 s3client) + BagStorage bagstor, S3Client s3client) { return new CacheManagerProvider(config, bagstor, s3client); } @@ -296,7 +307,7 @@ public RPAConfiguration getRPAConfiguration() throws ConfigurationException { @Bean public RPACachingServiceProvider getRPACachingServiceProvider(NISTCacheManagerConfig cmConfig, RPAConfiguration rpaConfig, - BagStorage bagstor, AmazonS3 s3client) + BagStorage 
bagstor, S3Client s3client) { return new RPACachingServiceProvider(cmConfig, rpaConfig, bagstor, s3client); } diff --git a/src/main/java/gov/nist/oar/distrib/web/RPACachingServiceProvider.java b/src/main/java/gov/nist/oar/distrib/web/RPACachingServiceProvider.java index a59e81e3..d12174dc 100644 --- a/src/main/java/gov/nist/oar/distrib/web/RPACachingServiceProvider.java +++ b/src/main/java/gov/nist/oar/distrib/web/RPACachingServiceProvider.java @@ -12,13 +12,13 @@ package gov.nist.oar.distrib.web; import gov.nist.oar.distrib.BagStorage; +import gov.nist.oar.distrib.StorageVolumeException; import gov.nist.oar.distrib.storage.AWSS3LongTermStorage; import gov.nist.oar.distrib.storage.FilesystemLongTermStorage; +import software.amazon.awssdk.services.s3.S3Client; import gov.nist.oar.distrib.service.RPACachingService; import gov.nist.oar.distrib.cachemgr.BasicCache; import gov.nist.oar.distrib.cachemgr.ConfigurableCache; -import gov.nist.oar.distrib.cachemgr.CacheManager; -import gov.nist.oar.distrib.cachemgr.simple.SimpleCacheManager; import gov.nist.oar.distrib.cachemgr.CacheManagementException; import gov.nist.oar.distrib.cachemgr.pdr.RestrictedDatasetRestorer; import gov.nist.oar.distrib.cachemgr.pdr.HeadBagCacheManager; @@ -33,7 +33,6 @@ import org.slf4j.LoggerFactory; import org.slf4j.Logger; -import com.amazonaws.services.s3.AmazonS3; /** * A factory for creating the {@link gov.nist.oar.distrib.service.RPACachingService} and it @@ -44,7 +43,7 @@ public class RPACachingServiceProvider { RPAConfiguration rpacfg = null; NISTCacheManagerConfig cmcfg = null; BagStorage pubstore = null; - AmazonS3 s3client = null; + S3Client s3client = null; BagStorage rpastore = null; HeadBagCacheManager hbcmgr = null; @@ -53,7 +52,7 @@ public class RPACachingServiceProvider { public RPACachingServiceProvider(NISTCacheManagerConfig cmConfig, RPAConfiguration rpaConfig, BagStorage publicBagStore, - AmazonS3 s3c) + S3Client s3c) { rpacfg = rpaConfig; cmcfg = cmConfig; @@ -152,22 +151,26 
@@ public BagStorage createRPBagStorage() throws ConfigurationException { String storeloc = rpacfg.getBagstoreLocation(); if (storeloc == null) throw new ConfigurationException("Missing config parameter: distrib.rpa.bagstore-location"); + String mode = rpacfg.getBagstoreMode(); if (mode == null || mode.length() == 0) throw new ConfigurationException("Missing config parameter: distrib.rpa.bagstore-mode"); try { - if (mode.equals("aws") || mode.equals("remote")) + if (mode.equals("aws") || mode.equals("remote")) { return new AWSS3LongTermStorage(storeloc, s3client); - else if (mode.equals("local")) + } else if (mode.equals("local")) { return new FilesystemLongTermStorage(storeloc); - else + } else { throw new ConfigurationException("distrib.rpa.bagstore-mode", - "Unsupported storage mode: "+ mode); - } - catch (FileNotFoundException ex) { + "Unsupported storage mode: " + mode); + } + } catch (FileNotFoundException ex) { throw new ConfigurationException("distrib.rpa.bagstore-location", - "RP Storage Location not found: "+ex.getMessage(), ex); + "RP Storage Location not found: " + ex.getMessage(), ex); + } catch (StorageVolumeException ex) { + throw new ConfigurationException("distrib.rpa.bagstore", + "Failed to initialize AWS S3 storage: " + ex.getMessage(), ex); } } @@ -200,7 +203,7 @@ public PDRDatasetCacheManager createRPACacheManager(BasicCache cache) * @param s3 an AmazonS3 interface for accessing S3 buckets for storage (as specified in * the configuration) */ - protected RPACachingService createRPACachingService(AmazonS3 s3) + protected RPACachingService createRPACachingService(S3Client s3) throws ConfigurationException, IOException, CacheManagementException { return new RPACachingService(createRPACacheManager(cmcfg.getCache(s3)), rpacfg); @@ -212,7 +215,7 @@ protected RPACachingService createRPACachingService(AmazonS3 s3) * @param s3 an AmazonS3 interface for accessing S3 buckets for storage (as specified in * the configuration); ignored if the service has 
already been created. */ - public RPACachingService getRPACachingService(AmazonS3 s3) + public RPACachingService getRPACachingService(S3Client s3) throws ConfigurationException, IOException, CacheManagementException { if (cacher == null && canCreateService()) diff --git a/src/main/java/gov/nist/oar/distrib/web/RPADataCachingController.java b/src/main/java/gov/nist/oar/distrib/web/RPADataCachingController.java index e9418f2b..8729ef90 100644 --- a/src/main/java/gov/nist/oar/distrib/web/RPADataCachingController.java +++ b/src/main/java/gov/nist/oar/distrib/web/RPADataCachingController.java @@ -8,6 +8,8 @@ import gov.nist.oar.distrib.service.rpa.exceptions.RequestProcessingException; import io.swagger.v3.oas.annotations.Parameter; import io.swagger.v3.oas.annotations.tags.Tag; +import software.amazon.awssdk.services.s3.S3Client; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -22,7 +24,6 @@ import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; -import com.amazonaws.services.s3.AmazonS3; import javax.servlet.http.HttpServletRequest; import java.util.Map; @@ -50,7 +51,7 @@ public class RPADataCachingController { RPACachingService restrictedSrvc = null; @Autowired - public RPADataCachingController(RPACachingServiceProvider provider, AmazonS3 s3) + public RPADataCachingController(RPACachingServiceProvider provider, S3Client s3) throws ConfigurationException, IOException, CacheManagementException { if (provider != null && provider.canCreateService()) diff --git a/src/main/java/gov/nist/oar/distrib/web/RPARequestHandlerController.java b/src/main/java/gov/nist/oar/distrib/web/RPARequestHandlerController.java index d61175c0..d70e2b0f 100644 --- a/src/main/java/gov/nist/oar/distrib/web/RPARequestHandlerController.java +++ b/src/main/java/gov/nist/oar/distrib/web/RPARequestHandlerController.java @@ -17,6 +17,8 @@ import 
io.jsonwebtoken.JwtException; import io.swagger.v3.oas.annotations.tags.Tag; import lombok.RequiredArgsConstructor; +import software.amazon.awssdk.services.s3.S3Client; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -35,8 +37,6 @@ import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; -import com.amazonaws.services.s3.AmazonS3; - import java.util.Map; import java.io.IOException; import javax.servlet.http.HttpServletRequest; @@ -95,14 +95,14 @@ public class RPARequestHandlerController { @Autowired public RPARequestHandlerController(RPAServiceProvider rpaServiceProvider, RPACachingServiceProvider cachingProvider, - AmazonS3 s3) + S3Client s3) throws ConfigurationException, IOException, CacheManagementException { this(rpaServiceProvider, getCachingServiceFromProvider(cachingProvider, s3)); } protected static RPACachingService getCachingServiceFromProvider(RPACachingServiceProvider cachingProvider, - AmazonS3 s3) + S3Client s3) throws ConfigurationException, IOException, CacheManagementException { if (cachingProvider == null || ! 
cachingProvider.canCreateService()) diff --git a/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java b/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java index 56457c0f..120c54de 100644 --- a/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java +++ b/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java @@ -13,55 +13,56 @@ package gov.nist.oar.distrib.cachemgr.storage; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.io.ByteArrayInputStream; -import java.io.Reader; -import java.io.BufferedReader; import java.io.InputStreamReader; -import java.util.ArrayList; -import java.util.List; -import java.net.URL; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; +import java.util.List; +import java.util.stream.Collectors; import org.json.JSONObject; -import org.json.JSONException; - -import org.junit.Before; import org.junit.After; +import org.junit.Before; import org.junit.BeforeClass; -import org.junit.AfterClass; import org.junit.ClassRule; import org.junit.Test; -import org.junit.runner.RunWith; -import static org.junit.Assert.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import 
com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; - -import gov.nist.oar.distrib.Checksum; -import gov.nist.oar.distrib.DistributionException; -import gov.nist.oar.distrib.ResourceNotFoundException; -import gov.nist.oar.distrib.StorageVolumeException; -import gov.nist.oar.distrib.StorageStateException; +// import org.slf4j.Logger; +// import org.slf4j.LoggerFactory; + import gov.nist.oar.distrib.ObjectNotFoundException; +import gov.nist.oar.distrib.StorageVolumeException; import gov.nist.oar.distrib.cachemgr.CacheObject; - // import io.findify.s3mock.S3Mock; import gov.nist.oar.distrib.storage.S3MockTestRule; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.Delete; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.ObjectIdentifier; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; +import 
software.amazon.awssdk.services.s3.model.S3Object; public class AWSS3CacheVolumeTest { @@ -69,96 +70,137 @@ public class AWSS3CacheVolumeTest { @ClassRule public static S3MockTestRule siterule = new S3MockTestRule(); - private static Logger logger = LoggerFactory.getLogger(AWSS3CacheVolumeTest.class); + // private static Logger logger = LoggerFactory.getLogger(AWSS3CacheVolumeTest.class); static int port = 9001; static final String bucket = "oar-cv-test"; static final String folder = "cach"; static String hash = "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9"; - static AmazonS3 s3client = null; + static S3Client s3client = null; AWSS3CacheVolume s3cv = null; @BeforeClass - public static void setUpClass() throws IOException { + public static void setUpClass() { s3client = createS3Client(); - - if (s3client.doesBucketExistV2(bucket)) + + // Check if bucket exists and destroy it if necessary + if (bucketExists(bucket)) { destroyBucket(); - s3client.createBucket(bucket); + } - // create folder - ObjectMetadata md = new ObjectMetadata(); - md.setContentLength(0); - InputStream mt = new ByteArrayInputStream(new byte[0]); - try { - s3client.putObject(bucket, folder+"/", mt, md); - } finally { - try { mt.close(); } catch (IOException ex) { } + // Create the bucket + s3client.createBucket(CreateBucketRequest.builder() + .bucket(bucket) + .build()); + + // Create folder (zero-length object with a trailing slash) + String folderKey = folder + "/"; + try (InputStream emptyContent = new ByteArrayInputStream(new byte[0])) { + s3client.putObject(PutObjectRequest.builder() + .bucket(bucket) + .key(folderKey) + .contentLength(0L) + .contentType("application/x-directory") + .build(), + software.amazon.awssdk.core.sync.RequestBody.fromInputStream(emptyContent, 0L)); + } catch (IOException ex) { + throw new RuntimeException("Failed to create folder in bucket", ex); } } - public static AmazonS3 createS3Client() { - // import credentials from the EC2 machine we are 
running on - final BasicAWSCredentials credentials = new BasicAWSCredentials("foo", "bar"); + public static S3Client createS3Client() { + final AwsBasicCredentials credentials = AwsBasicCredentials.create("foo", "bar"); final String endpoint = "http://localhost:9090/"; - final String region = "us-east-1"; - EndpointConfiguration epconfig = new EndpointConfiguration(endpoint, region); - - AmazonS3 client = AmazonS3Client.builder() - .withCredentials(new AWSStaticCredentialsProvider(credentials)) - .withEndpointConfiguration(epconfig) - .enablePathStyleAccess() - .build(); - return client; + return S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(credentials)) + .region(Region.of("us-east-1")) + .endpointOverride(URI.create(endpoint)) + .forcePathStyle(true) // Required for S3Mock + .build(); + } + + private static boolean bucketExists(String bucketName) { + try { + s3client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build()); + return true; + } catch (NoSuchBucketException e) { + return false; + } + } + + private static void destroyBucket() { + ListObjectsV2Response listResponse = s3client.listObjectsV2(ListObjectsV2Request.builder() + .bucket(bucket) + .build()); + + // Delete all objects + List keys = listResponse.contents().stream() + .map(S3Object::key) + .collect(Collectors.toList()); + for (String key : keys) { + s3client.deleteObject(DeleteObjectRequest.builder() + .bucket(bucket) + .key(key) + .build()); + } + + // Delete the bucket + s3client.deleteBucket(DeleteBucketRequest.builder() + .bucket(bucket) + .build()); } @Before - public void setUp() throws IOException { - // confirm that our bucket folder exists - // - // Note that adobe/s3mock behaves differently than the real AWS service when listing objects: - // in the latter, a prefix with a trailing slash will match an AWS "folder" with whose - // name matches the prefix. 
- // - String prefix = folder; - for (S3ObjectSummary os : s3client.listObjectsV2(bucket, prefix).getObjectSummaries()) - if (os.getKey().equals(prefix+"/")) - prefix = null; // we found the folder - assertNull(prefix); - - s3cv = new AWSS3CacheVolume(bucket, "cach", s3client); + public void setUp() { + // Verify folder exists in the bucket + String folderKey = folder + "/"; + ListObjectsV2Response response = s3client.listObjectsV2(ListObjectsV2Request.builder() + .bucket(bucket) + .prefix(folderKey) + .build()); + + boolean folderExists = response.contents().stream() + .anyMatch(object -> object.key().equals(folderKey)); + assertTrue("Folder does not exist: " + folder, folderExists); + + // Initialize AWSS3CacheVolume + try { + s3cv = new AWSS3CacheVolume(bucket, folder, s3client); + } catch (Exception e) { + throw new RuntimeException("Failed to initialize AWSS3CacheVolume", e); + } } @After public void tearDown() { s3cv = null; - // delete contents of bucket depopulateFolder(); } - @AfterClass - public static void tearDownClass() { - destroyBucket(); - // api.shutdown(); - } - - public static void destroyBucket() { - List files = s3client.listObjects(bucket).getObjectSummaries(); - for (S3ObjectSummary f : files) - s3client.deleteObject(bucket, f.getKey()); - s3client.deleteBucket(bucket); - } - - public void depopulateFolder() throws AmazonServiceException { - List keys = new ArrayList(); - String prefix = folder+"/"; - for (S3ObjectSummary os : s3client.listObjectsV2(bucket, prefix).getObjectSummaries()) { - if (!
os.getKey().equals(prefix)) - keys.add(new DeleteObjectsRequest.KeyVersion(os.getKey())); + private void depopulateFolder() { + String folderKey = folder + "/"; + List keysToDelete = s3client.listObjectsV2(ListObjectsV2Request.builder() + .bucket(bucket) + .prefix(folderKey) + .build()) + .contents() + .stream() + .filter(obj -> !obj.key().equals(folderKey)) // Skip the folder itself + .map(S3Object::key) + .collect(Collectors.toList()); + + if (!keysToDelete.isEmpty()) { + DeleteObjectsRequest deleteRequest = DeleteObjectsRequest.builder() + .bucket(bucket) + .delete(Delete.builder() + .objects(keysToDelete.stream() + .map(key -> ObjectIdentifier.builder().key(key).build()) + .collect(Collectors.toList())) + .build()) + .build(); + + s3client.deleteObjects(deleteRequest); } - DeleteObjectsRequest dor = new DeleteObjectsRequest(bucket).withKeys(keys); - if (dor.getKeys().size() > 0) - s3client.deleteObjects(dor); } @Test @@ -178,76 +220,113 @@ public void testCtor() throws FileNotFoundException { } @Test - public void testEnsureFolder() throws AmazonServiceException { - String subdir = folder+"/goob"; - assertTrue(! 
s3client.doesObjectExist(bucket, subdir+"/")); + public void testEnsureFolder() { + String subdir = folder + "/goob"; + String folderKey = subdir + "/"; + + // Ensure the folder doesn't exist initially + assertTrue(!folderExists(bucket, folderKey)); assertTrue(AWSS3CacheVolume.ensureBucketFolder(s3client, bucket, subdir)); - assertTrue(s3client.doesObjectExist(bucket, subdir+"/")); + assertTrue(folderExists(bucket, folderKey)); - String subobj = subdir+"/gurn"; - ObjectMetadata md = new ObjectMetadata(); + // Add an object to the folder + String subobj = subdir + "/gurn"; byte[] obj = "1".getBytes(); - md.setContentLength(obj.length); - InputStream is = new ByteArrayInputStream(obj); - try { - s3client.putObject(bucket, subobj, is, md); - } finally { - try { is.close(); } catch (IOException ex) { } + + try (InputStream is = new ByteArrayInputStream(obj)) { + s3client.putObject(PutObjectRequest.builder() + .bucket(bucket) + .key(subobj) + .contentLength((long) obj.length) + .build(), + RequestBody.fromInputStream(is, obj.length)); + } catch (IOException ex) { + throw new RuntimeException("Failed to upload object", ex); } - assertTrue(s3client.doesObjectExist(bucket, subobj)); - assertTrue(! AWSS3CacheVolume.ensureBucketFolder(s3client, bucket, subdir)); - assertTrue(s3client.doesObjectExist(bucket, subdir+"/")); - assertTrue(s3client.doesObjectExist(bucket, subobj)); + assertTrue(objectExists(bucket, subobj)); + + // Ensuring the folder again should return false + assertTrue(!AWSS3CacheVolume.ensureBucketFolder(s3client, bucket, subdir)); + assertTrue(folderExists(bucket, folderKey)); + assertTrue(objectExists(bucket, subobj)); } @Test public void testExists() throws StorageVolumeException { String objname = String.format("%s/goob", folder); - assertTrue(! 
s3cv.exists("goob")); - - ObjectMetadata md = new ObjectMetadata(); + assertTrue(!s3cv.exists("goob")); + byte[] obj = "1".getBytes(); - md.setContentLength(obj.length); - InputStream is = new ByteArrayInputStream(obj); - try { - s3client.putObject(bucket, objname, is, md); - } finally { - try { is.close(); } catch (IOException ex) { } + try (InputStream is = new ByteArrayInputStream(obj)) { + s3client.putObject(PutObjectRequest.builder() + .bucket(bucket) + .key(objname) + .contentLength((long) obj.length) + .build(), + RequestBody.fromInputStream(is, obj.length)); + } catch (IOException ex) { + throw new RuntimeException("Failed to upload object", ex); } - assertTrue(s3client.doesObjectExist(bucket, objname)); + + assertTrue(objectExists(bucket, objname)); assertTrue(s3cv.exists("goob")); + // Remove the object s3cv.remove("goob"); - assertTrue(! s3client.doesObjectExist(bucket, objname)); - assertTrue(! s3cv.exists("goob")); + assertTrue(!objectExists(bucket, objname)); + assertTrue(!s3cv.exists("goob")); } @Test public void testSaveAs() throws StorageVolumeException { String objname = folder + "/test.txt"; - assertTrue(! s3client.doesObjectExist(bucket, objname)); - assertTrue(! 
s3cv.exists("test.txt")); + assertTrue(!objectExists(bucket, objname)); + assertTrue(!s3cv.exists("test.txt")); byte[] obj = "hello world.\n".getBytes(); JSONObject md = new JSONObject(); md.put("size", obj.length); md.put("contentType", "text/plain"); - InputStream is = new ByteArrayInputStream(obj); - try { + try (InputStream is = new ByteArrayInputStream(obj)) { s3cv.saveAs(is, "test.txt", md); - } finally { - try { is.close(); } catch (IOException ex) { } + } catch (IOException ex) { + throw new RuntimeException("Failed to upload object", ex); } - assertTrue(s3client.doesObjectExist(bucket, objname)); + + assertTrue(objectExists(bucket, objname)); assertTrue(s3cv.exists("test.txt")); assertTrue("metadata not updated with 'modified'", md.has("modified")); + long mod = md.getLong("modified"); - assertTrue("Bad mod date: "+Long.toString(mod), mod > 0L); + assertTrue("Bad mod date: " + Long.toString(mod), mod > 0L); String vcs = md.getString("volumeChecksum"); - assertTrue("Bad volume checksum: "+vcs, - vcs.startsWith("etag ") && vcs.length() > 36); + assertTrue("Bad volume checksum: " + vcs, + vcs.startsWith("etag ") && vcs.length() > 36); + } + + // Helper method to check if an object exists + private boolean objectExists(String bucket, String key) { + try { + s3client.headObject(HeadObjectRequest.builder() + .bucket(bucket) + .key(key) + .build()); + return true; + } catch (S3Exception e) { + return false; + } + } + + // Helper method to check if a folder exists + private boolean folderExists(String bucket, String folderKey) { + ListObjectsV2Response response = s3client.listObjectsV2(ListObjectsV2Request.builder() + .bucket(bucket) + .prefix(folderKey) + .build()); + return response.contents().stream() + .anyMatch(object -> object.key().equals(folderKey)); } @Test @@ -264,8 +343,8 @@ public void testGet() throws StorageVolumeException { @Test public void testSaveAsWithMD5() throws StorageVolumeException { String objname = folder + "/test.txt"; - assertTrue(! 
s3client.doesObjectExist(bucket, objname)); - assertTrue(! s3cv.exists("test.txt")); + assertTrue(!objectExists(bucket, objname)); + assertTrue(!s3cv.exists("test.txt")); byte[] obj = "hello world.\n".getBytes(); JSONObject md = new JSONObject(); @@ -279,7 +358,7 @@ public void testSaveAsWithMD5() throws StorageVolumeException { } finally { try { is.close(); } catch (IOException ex) { } } - assertTrue(s3client.doesObjectExist(bucket, objname)); + assertTrue(objectExists(bucket, objname)); assertTrue(s3cv.exists("test.txt")); assertEquals(md.getString("contentMD5"), "JjJWGp65Tg0F4+AyzFre7Q=="); @@ -326,8 +405,8 @@ public void testSaveAsWithBadSize() throws StorageVolumeException { @Test public void testSaveAsWithBadMD5() throws StorageVolumeException { String objname = folder + "/test.txt"; - assertTrue(! s3client.doesObjectExist(bucket, objname)); - assertTrue(! s3cv.exists("test.txt")); + assertTrue(!objectExists(bucket, objname)); + assertTrue(!s3cv.exists("test.txt")); byte[] obj = "hello world.\n".getBytes(); JSONObject md = new JSONObject(); @@ -347,15 +426,15 @@ public void testSaveAsWithBadMD5() throws StorageVolumeException { try { is.close(); } catch (IOException ex) { } } assertTrue("Failed transfered object not deleted from bucket", - ! s3client.doesObjectExist(bucket, objname)); + !objectExists(bucket, objname)); assertTrue("Failed transfered object not deleted from volume", - ! s3cv.exists("test.txt")); + !s3cv.exists("test.txt")); } @Test public void testGetStream() throws StorageVolumeException, IOException { String objname = folder + "/test.txt"; - assertTrue(! s3client.doesObjectExist(bucket, objname)); + assertTrue(!objectExists(bucket, objname)); try { s3cv.getStream("test.txt"); @@ -375,16 +454,16 @@ public void testGetStream() throws StorageVolumeException, IOException { } s3cv.remove("test.txt"); - assertTrue(! s3client.doesObjectExist(bucket, objname)); - assertTrue(! 
s3cv.exists("test.txt")); + assertTrue(!objectExists(bucket, objname)); + assertTrue(!s3cv.exists("test.txt")); } @Test public void getSaveObject() throws StorageVolumeException { String objname1 = folder + "/test.txt"; String objname2 = folder + "/gurn.txt"; - assertTrue(! s3client.doesObjectExist(bucket, objname1)); - assertTrue(! s3client.doesObjectExist(bucket, objname2)); + assertTrue(!objectExists(bucket, objname1)); + assertTrue(!objectExists(bucket, objname2)); try { s3cv.get("test.txt"); @@ -402,8 +481,8 @@ public void getSaveObject() throws StorageVolumeException { assertEquals(co.score, 0.0, 0.0); s3cv.saveAs(co, "gurn.txt"); - assertTrue(s3client.doesObjectExist(bucket, objname1)); - assertTrue(s3client.doesObjectExist(bucket, objname2)); + assertTrue(objectExists(bucket, objname1)); + assertTrue(objectExists(bucket, objname2)); } @Test(expected = UnsupportedOperationException.class) diff --git a/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java b/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java index 951a76db..33aded6c 100644 --- a/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java +++ b/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java @@ -12,136 +12,162 @@ */ package gov.nist.oar.distrib.storage; -import java.io.IOException; -import java.util.List; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; -import static org.junit.Assert.*; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.AfterClass; import org.junit.Before; -import org.junit.After; import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.ClassRule; import org.junit.Test; -import org.junit.runner.RunWith; -import static org.junit.Assert.*; 
-import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.services.s3.model.S3ObjectSummary; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.S3Object; public class AWSS3ClientProviderTest { - // static S3MockApplication mockServer = null; - @ClassRule - public static S3MockTestRule siterule = new S3MockTestRule(); + private static final String bucket = "oar-lts-test"; + private AWSS3ClientProvider s3Provider = null; - AWSS3ClientProvider s3 = null; - static final String bucket = "oar-lts-test"; - @BeforeClass - public static void setUpClass() throws IOException { - // mockServer = S3MockApplication.start(); // http: port=9090 - AWSS3ClientProvider s3 = createS3Provider(); - - AmazonS3 s3client = s3.client(); - if (s3client.doesBucketExistV2(bucket)) - destroyBucket(); - s3client.createBucket(bucket); - // populateBucket(s3client); + public static void setUpClass() { + S3Client s3client = createS3Provider().client(); + + if (bucketExists(s3client, bucket)) { + destroyBucket(s3client); + } + + // Create bucket + s3client.createBucket(CreateBucketRequest.builder().bucket(bucket).build()); } public static 
AWSS3ClientProvider createS3Provider() { - // import credentials from the EC2 machine we are running on - final BasicAWSCredentials credentials = new BasicAWSCredentials("foo", "bar"); + final AwsBasicCredentials credentials = AwsBasicCredentials.create("foo", "bar"); final String endpoint = "http://localhost:9090/"; final String region = "us-east-1"; - return new AWSS3ClientProvider(new AWSStaticCredentialsProvider(credentials), region, 2, endpoint); + return new AWSS3ClientProvider(StaticCredentialsProvider.create(credentials), region, 2, endpoint); } - @AfterClass - public static void tearDownClass() { - destroyBucket(); - // mockServer.stop(); + private static boolean bucketExists(S3Client s3client, String bucketName) { + try { + s3client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build()); + return true; + } catch (NoSuchBucketException e) { + return false; + } + } + + private static void destroyBucket(S3Client s3client) { + ListObjectsV2Response listResponse = s3client.listObjectsV2(ListObjectsV2Request.builder() + .bucket(bucket) + .build()); + + // Delete all objects + List keys = listResponse.contents().stream() + .map(S3Object::key) + .collect(Collectors.toList()); + for (String key : keys) { + s3client.deleteObject(DeleteObjectRequest.builder() + .bucket(bucket) + .key(key) + .build()); + } + + // Delete the bucket + s3client.deleteBucket(DeleteBucketRequest.builder() + .bucket(bucket) + .build()); } - public static void destroyBucket() { - AWSS3ClientProvider s3 = createS3Provider(); - AmazonS3 s3client = s3.client(); - List files = s3client.listObjects(bucket).getObjectSummaries(); - for (S3ObjectSummary f : files) - s3client.deleteObject(bucket, f.getKey()); - s3client.deleteBucket(bucket); + @AfterClass + public static void tearDownClass() { + S3Client s3client = createS3Provider().client(); + destroyBucket(s3client); } @Before public void setUp() { - s3 = createS3Provider(); + s3Provider = createS3Provider(); } @Test public void 
testClient() { - assertNotNull(s3); - assertEquals(2, s3.accessesLeft()); - - AmazonS3 cli = s3.client(); - assertNotNull(cli); - assertEquals(1, s3.accessesLeft()); - - AmazonS3 cli2 = s3.client(); - assertNotNull(cli2); - assertEquals(cli, cli2); - assertEquals(0, s3.accessesLeft()); - - cli2 = s3.client(); - assertNotNull(cli2); - assertNotEquals(cli, cli2); - assertEquals(1, s3.accessesLeft()); - - // make sure the original is still usable - assertTrue(cli.doesBucketExistV2(bucket)); + assertNotNull(s3Provider); + assertEquals(2, s3Provider.accessesLeft()); + + S3Client client1 = s3Provider.client(); + assertNotNull(client1); + assertEquals(1, s3Provider.accessesLeft()); + + S3Client client2 = s3Provider.client(); + assertNotNull(client2); + assertSame(client1, client2); + assertEquals(0, s3Provider.accessesLeft()); + + // Client should reset after limit is exceeded + S3Client client3 = s3Provider.client(); + assertNotNull(client3); + assertNotSame(client1, client3); + assertEquals(1, s3Provider.accessesLeft()); + + // Validate bucket existence + assertTrue(bucketExists(client3, bucket)); } @Test public void testShutdown() { - AmazonS3 cli = s3.client(); - assertNotNull(cli); - assertEquals(1, s3.accessesLeft()); + S3Client client = s3Provider.client(); + assertNotNull(client); + assertEquals(1, s3Provider.accessesLeft()); - s3.shutdown(); - assertEquals(0, s3.accessesLeft()); + s3Provider.shutdown(); + assertEquals(0, s3Provider.accessesLeft()); try { - cli.doesBucketExistV2(bucket); - fail("Failed to fail on disabled client"); - } catch (IllegalStateException ex) { - // okay! 
+ client.listBuckets(); + fail("Expected IllegalStateException after shutdown"); + } catch (IllegalStateException e) { + // Expected } - cli = s3.client(); - assertEquals(1, s3.accessesLeft()); - assertTrue(cli.doesBucketExistV2(bucket)); + // Create a new client after shutdown + S3Client newClient = s3Provider.client(); + assertNotNull(newClient); + assertTrue(bucketExists(newClient, bucket)); } @Test public void testClone() { - assertNotNull(s3); - assertEquals(2, s3.accessesLeft()); - - AmazonS3 cli = s3.client(); - assertNotNull(cli); - assertEquals(1, s3.accessesLeft()); - - AWSS3ClientProvider s32 = s3.cloneMe(); - assertNotEquals(s3, s32); - assertEquals(2, s32.accessesLeft()); - AmazonS3 cli2 = s32.client(); - assertNotNull(cli2); - assertNotEquals(cli, cli2); - assertEquals(1, s3.accessesLeft()); + assertNotNull(s3Provider); + assertEquals(2, s3Provider.accessesLeft()); + + S3Client client1 = s3Provider.client(); + assertNotNull(client1); + assertEquals(1, s3Provider.accessesLeft()); + + AWSS3ClientProvider clonedProvider = s3Provider.cloneMe(); + assertNotSame(s3Provider, clonedProvider); + assertEquals(2, clonedProvider.accessesLeft()); + + S3Client client2 = clonedProvider.client(); + assertNotNull(client2); + assertNotSame(client1, client2); + assertEquals(1, clonedProvider.accessesLeft()); } -} +} \ No newline at end of file diff --git a/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java b/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java index e4f3ce85..7e9fd035 100644 --- a/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java +++ b/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java @@ -12,11 +12,10 @@ */ package gov.nist.oar.distrib.storage; -import static org.junit.Assert.assertEquals; - import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.net.URI; import java.io.ByteArrayInputStream; import 
java.util.ArrayList; import java.util.List; @@ -27,26 +26,23 @@ import org.junit.AfterClass; import org.junit.ClassRule; import org.junit.Test; -import org.junit.runner.RunWith; import static org.junit.Assert.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; - -import gov.nist.oar.distrib.Checksum; -import gov.nist.oar.distrib.BagStorage; + import gov.nist.oar.distrib.DistributionException; import gov.nist.oar.distrib.ResourceNotFoundException; -import gov.nist.oar.bags.preservation.BagUtils; +import gov.nist.oar.distrib.StorageVolumeException; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.NoSuchKeyException; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; // import com.adobe.testing.s3mock.S3MockApplication; // import gov.nist.oar.RequireWebSite; @@ -63,7 +59,7 @@ public class AWSS3LongTermStorageTest { 
@ClassRule public static S3MockTestRule siterule = new S3MockTestRule(); - static AmazonS3 s3client = null; + static S3Client s3client = null; // private static Logger logger = LoggerFactory.getLogger(AWSS3LongTermStorageTest.class); @@ -74,33 +70,52 @@ public class AWSS3LongTermStorageTest { @BeforeClass public static void setUpClass() throws IOException { - // mockServer = S3MockApplication.start(); // http: port=9090 + // Start S3Mock and initialize the S3 client s3client = createS3Client(); - - if (s3client.doesBucketExistV2(bucket)) + + // Destroy the bucket if it already exists + if (bucketExists(bucket)) { destroyBucket(); - s3client.createBucket(bucket); + } + + // Create the bucket + s3client.createBucket(CreateBucketRequest.builder().bucket(bucket).build()); + + // Populate the bucket populateBucket(); } - public static AmazonS3 createS3Client() { - // import credentials from the EC2 machine we are running on - final BasicAWSCredentials credentials = new BasicAWSCredentials("foo", "bar"); - final String endpoint = "http://localhost:9090/"; - final String region = "us-east-1"; - EndpointConfiguration epconfig = new EndpointConfiguration(endpoint, region); - - AmazonS3 client = AmazonS3Client.builder() - .withCredentials(new AWSStaticCredentialsProvider(credentials)) - .withEndpointConfiguration(epconfig) - .enablePathStyleAccess() - .build(); - return client; + public static boolean bucketExists(String bucketName) { + try { + s3client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build()); + return true; + } catch (NoSuchBucketException e) { + return false; // Bucket does not exist + } catch (S3Exception e) { + throw new RuntimeException("Failed to check bucket existence: " + e.getMessage(), e); + } + } + + public static S3Client createS3Client() { + // Static credentials and mock endpoint configuration + AwsBasicCredentials credentials = AwsBasicCredentials.create("foo", "bar"); + String endpoint = "http://localhost:9090/"; + + return 
S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(credentials)) + .region(Region.US_EAST_1) + .endpointOverride(URI.create(endpoint)) // Override to point to mock server + .forcePathStyle(true) // Enable path-style access + .build(); } @Before public void setUp() throws IOException { - s3Storage = new AWSS3LongTermStorage(bucket, s3client); + try { + s3Storage = new AWSS3LongTermStorage(bucket, s3client); + } catch (FileNotFoundException | StorageVolumeException ex) { + throw new IllegalStateException("Failed to initialize AWSS3LongTermStorage for test setup: " + ex.getMessage(), ex); + } } @After @@ -115,13 +130,13 @@ public static void tearDownClass() { } public static void destroyBucket() { - List files = s3client.listObjects(bucket).getObjectSummaries(); - for (S3ObjectSummary f : files) - s3client.deleteObject(bucket, f.getKey()); - s3client.deleteBucket(bucket); + // List and delete all objects in the bucket, then delete the bucket + s3client.listObjectsV2(builder -> builder.bucket(bucket).build()) + .contents() + .forEach(obj -> s3client.deleteObject(builder -> builder.bucket(bucket).key(obj.key()).build())); } - public static void populateBucket() throws IOException { + public static void populateBucket() { String[] bases = { "mds013u4g.1_0_0.mbag0_4-", "mds013u4g.1_0_1.mbag0_4-", "mds013u4g.1_1.mbag0_4-", "mds088kd2.mbag0_3-", "mds088kd2.mbag0_3-", "mds088kd2.1_0_1.mbag0_4-" @@ -129,33 +144,63 @@ public static void populateBucket() throws IOException { int j = 0; for (String base : bases) { - for(int i=0; i < 3; i++) { - String bag = base + Integer.toString(j++) + ((i > 1) ? ".7z" : ".zip"); - String baghash = hash+" "+bag; - - if (! s3client.doesObjectExist(bucket, bag)) { - ObjectMetadata md = new ObjectMetadata(); - md.setContentLength(1); - // md.setContentMD5("1B2M2Y8AsgTpgAmY7PhCfg=="); - md.setContentType("text/plain"); + for (int i = 0; i < 3; i++) { + String bag = base + j++ + ((i > 1) ? 
".7z" : ".zip"); + String baghash = hash + " " + bag; + + // Check if the object already exists + if (!objectExists(bucket, bag)) { + // Upload the empty "bag" file try (InputStream ds = new ByteArrayInputStream("0".getBytes())) { - s3client.putObject(bucket, bag, ds, md); + s3client.putObject(PutObjectRequest.builder() + .bucket(bucket) + .key(bag) + .contentType("text/plain") + .contentLength(1L) + .build(), RequestBody.fromInputStream(ds, 1L)); + } catch (Exception e) { + throw new RuntimeException("Failed to upload file: " + bag, e); } - md.setContentLength(baghash.length()); - // md.setContentMD5(null); + // Upload the "baghash" file try (InputStream ds = new ByteArrayInputStream(baghash.getBytes())) { - s3client.putObject(bucket, bag+".sha256", ds, md); + s3client.putObject(PutObjectRequest.builder() + .bucket(bucket) + .key(bag + ".sha256") + .contentType("text/plain") + .contentLength((long) baghash.length()) + .build(), RequestBody.fromInputStream(ds, baghash.length())); + } catch (Exception e) { + throw new RuntimeException("Failed to upload hash file: " + bag + ".sha256", e); } } } } - + } + + private static boolean objectExists(String bucket, String key) { + try { + s3client.headObject(HeadObjectRequest.builder() + .bucket(bucket) + .key(key) + .build()); + return true; + } catch (S3Exception e) { + if (e instanceof NoSuchKeyException || e.statusCode() == 404) { + return false; // Object does not exist + } + throw new RuntimeException("Error checking object existence: " + key, e); + } } @Test - public void testCtor() throws FileNotFoundException { - assert(s3client.doesBucketExistV2(bucket)); + public void testCtor() { + try { + s3client.headBucket(HeadBucketRequest.builder().bucket(bucket).build()); + assertTrue(true); + } catch (NoSuchBucketException e) { + fail("Bucket does not exist: " + bucket); + } } @Test diff --git a/src/test/java/gov/nist/oar/distrib/web/CacheVolumeConfigTest.java 
b/src/test/java/gov/nist/oar/distrib/web/CacheVolumeConfigTest.java index 38ec5c1a..d8d51ae4 100644 --- a/src/test/java/gov/nist/oar/distrib/web/CacheVolumeConfigTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/CacheVolumeConfigTest.java @@ -15,12 +15,10 @@ import org.junit.Test; import org.junit.Before; -import org.junit.After; import org.junit.Rule; import org.junit.rules.TemporaryFolder; import static org.junit.Assert.*; -import gov.nist.oar.distrib.BagStorage; import gov.nist.oar.distrib.cachemgr.VolumeStatus; import gov.nist.oar.distrib.cachemgr.CacheVolume; import gov.nist.oar.distrib.cachemgr.CacheManagementException; @@ -29,7 +27,6 @@ import gov.nist.oar.distrib.cachemgr.inventory.OldSelectionStrategy; import gov.nist.oar.distrib.cachemgr.inventory.BigOldSelectionStrategy; import gov.nist.oar.distrib.cachemgr.inventory.BySizeSelectionStrategy; -import gov.nist.oar.distrib.cachemgr.pdr.PDRCacheRoles; import java.util.Map; import java.util.HashMap; @@ -39,7 +36,6 @@ import java.io.FileNotFoundException; import java.io.File; import java.net.MalformedURLException; -import com.amazonaws.services.s3.AmazonS3; public class CacheVolumeConfigTest { From c7cf83fdc33f567add2916c84568711ab8cb7e73 Mon Sep 17 00:00:00 2001 From: elmiomar Date: Wed, 18 Dec 2024 15:43:47 -0500 Subject: [PATCH 3/8] add s3 template file for unit tests --- .../distrib/storage/AWSS3TemplateTest.java | 160 ++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 src/test/java/gov/nist/oar/distrib/storage/AWSS3TemplateTest.java diff --git a/src/test/java/gov/nist/oar/distrib/storage/AWSS3TemplateTest.java b/src/test/java/gov/nist/oar/distrib/storage/AWSS3TemplateTest.java new file mode 100644 index 00000000..ea0e6e19 --- /dev/null +++ b/src/test/java/gov/nist/oar/distrib/storage/AWSS3TemplateTest.java @@ -0,0 +1,160 @@ +package gov.nist.oar.distrib.storage; + +import java.net.URI; +import java.util.List; + +import org.junit.AfterClass; +import org.junit.Assert; +import 
org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Object; + +/** + * Template for AWS S3 unit tests using a mock server. + * This provides a foundation to create new S3 unit tests. + * + * This class shows how to: + * - Set up an S3 mock server. + * - Initialize an S3Client for tests. + * - Create, list, and clean up S3 resources during tests. + */ +public class AWSS3TemplateTest { + + @ClassRule + public static S3MockTestRule s3MockRule = new S3MockTestRule(); + + private static S3Client s3Client; + private static final String BUCKET_NAME = "test-bucket"; + + /** + * Set up the S3 client and create a test bucket.
+ */ + @BeforeClass + public static void setUpClass() { + // Initialize S3 client + s3Client = S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("test-key", "test-secret"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:9090/")) // Use mock server endpoint + .forcePathStyle(true) // Path-style access for mock compatibility + .build(); + + // Create test bucket + s3Client.createBucket(CreateBucketRequest.builder().bucket(BUCKET_NAME).build()); + } + + /** + * Clean up the bucket and close the S3 client. + */ + @AfterClass + public static void tearDownClass() { + // Delete all objects in the bucket + s3Client.listObjectsV2(ListObjectsV2Request.builder().bucket(BUCKET_NAME).build()) + .contents() + .forEach(obj -> s3Client.deleteObject(DeleteObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(obj.key()) + .build())); + + // Delete the bucket + s3Client.deleteBucket(DeleteBucketRequest.builder().bucket(BUCKET_NAME).build()); + + // Close the S3 client + s3Client.close(); + } + + /** + * Example test for uploading and verifying an object in S3. + */ + @Test + public void testUploadAndRetrieveObject() { + String key = "example.txt"; + String content = "Hello, S3!"; + + // Upload an object + s3Client.putObject(PutObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(key) + .contentType("text/plain") + .build(), + RequestBody.fromBytes(content.getBytes())); + + // Retrieve the object + ResponseBytes response = s3Client.getObjectAsBytes(GetObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(key) + .build()); + + // Assert content matches + Assert.assertEquals("Uploaded content should match retrieved content", content, response.asUtf8String()); + } + + /** + * Example test for listing objects in a bucket. 
+ */ + @Test + public void testListObjects() { + // Upload some objects + String[] keys = {"file1.txt", "file2.txt", "file3.txt"}; + for (String key : keys) { + s3Client.putObject(PutObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(key) + .contentType("text/plain") + .build(), + RequestBody.fromBytes("dummy content".getBytes())); + } + + // List objects + List objects = s3Client.listObjectsV2(ListObjectsV2Request.builder() + .bucket(BUCKET_NAME) + .build()).contents(); + + // Assert the correct number of objects are listed + Assert.assertEquals("All uploaded objects should be listed", keys.length, objects.size()); + } + + /** + * Example test for deleting an object from S3. + */ + @Test + public void testDeleteObject() { + String key = "delete-me.txt"; + + // Upload an object + s3Client.putObject(PutObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(key) + .contentType("text/plain") + .build(), + RequestBody.fromBytes("temporary content".getBytes())); + + // Delete the object + s3Client.deleteObject(DeleteObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(key) + .build()); + + // Verify the object is deleted + List objects = s3Client.listObjectsV2(ListObjectsV2Request.builder() + .bucket(BUCKET_NAME) + .build()).contents(); + Assert.assertTrue("Deleted object should not exist in the bucket", + objects.stream().noneMatch(obj -> obj.key().equals(key))); + } +} From e694ced2bb209dbe95caa3619aa6439187e31e61 Mon Sep 17 00:00:00 2001 From: elmiomar Date: Wed, 18 Dec 2024 15:44:49 -0500 Subject: [PATCH 4/8] migrate from AWS JAVA SDK V1 to V2, including unit tests --- .../cachemgr/storage/AWSS3CacheVolume.java | 480 +++++++++++------- .../distrib/storage/AWSS3LongTermStorage.java | 174 ++++++- .../storage/AWSS3CacheVolumeTest.java | 214 ++++---- .../storage/AWSS3ClientProviderTest.java | 4 + .../storage/AWSS3LongTermStorageTest.java | 258 ++++++---- .../oar/distrib/storage/S3MockTestRule.java | 33 +- .../distrib/web/AIPAccessControllerTest.java | 3 +- 
.../web/BundleDownloadPlanControllerTest.java | 3 +- .../web/CacheManagementControllerTest.java | 3 +- .../web/DataBundleAccessControllerTest.java | 3 +- .../web/DatasetAccessControllerTest.java | 1 + .../DatasetAccessControllerWithCacheTest.java | 3 +- .../web/NISTDistribServiceConfigTest.java | 4 +- .../web/NoCacheManagementControllerTest.java | 3 +- 14 files changed, 759 insertions(+), 427 deletions(-) diff --git a/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java b/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java index 6e5e9974..0c811c55 100644 --- a/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java +++ b/src/main/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolume.java @@ -13,6 +13,25 @@ */ package gov.nist.oar.distrib.cachemgr.storage; +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.MessageDigest; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +import org.json.JSONException; +import org.json.JSONObject; + +import gov.nist.oar.distrib.ObjectNotFoundException; +import gov.nist.oar.distrib.StorageStateException; +import gov.nist.oar.distrib.StorageVolumeException; +import gov.nist.oar.distrib.cachemgr.CacheObject; import gov.nist.oar.distrib.cachemgr.CacheVolume; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.exception.SdkServiceException; @@ -21,36 +40,23 @@ import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.GetUrlRequest; import software.amazon.awssdk.services.s3.model.HeadBucketRequest; import 
software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Exception; -import software.amazon.awssdk.services.s3.presigner.S3Presigner; -import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; -import gov.nist.oar.distrib.cachemgr.CacheObject; -import gov.nist.oar.distrib.StorageVolumeException; -import gov.nist.oar.distrib.StorageStateException; -import gov.nist.oar.distrib.ObjectNotFoundException; - -import java.io.InputStream; -import java.io.IOException; -import java.io.ByteArrayInputStream; -import java.io.FileNotFoundException; -import java.net.URL; -import java.time.Duration; -import java.net.MalformedURLException; - -import org.json.JSONObject; /** - * an implementation of the CacheVolume interface that stores its data - * in a folder of an Amazon Web Services S3 bucket. + * an implementation of the CacheVolume interface that stores its data + * in a folder of an Amazon Web Services S3 bucket. * - * The storage model has all data stored under a single folder within the bucket. Within that - * folder, objects are stored with paths matching the name as given via addObject(). - * When that name includes a slash, the object file is stored in a subdirectory - * consistent with directory path implied by the name. + * The storage model has all data stored under a single folder within the + * bucket. Within that + * folder, objects are stored with paths matching the name as given via + * addObject(). + * When that name includes a slash, the object file is stored in a subdirectory + * consistent with directory path implied by the name. 
*/ public class AWSS3CacheVolume implements CacheVolume { @@ -62,56 +68,80 @@ public class AWSS3CacheVolume implements CacheVolume { /** * create the storage instance - * @param bucketname the name of the S3 bucket that provides the storage for this interface - * @param folder the name of the folder within the bucket where objects will be stored. If null - * or an empty string, it will be assumed that the objects should reside at the - * root of the bucket. - * @param s3 the AmazonS3 client instance to use to access the bucket - * @param redirectBaseURL a base URL to use to form redirect URLs based on object names - * when {@link #getRedirectFor(String)} is called. This - * implementation will form the URL by appending the object - * name to this base URL. Note that a delimiting slash will - * not be automatically inserted; if a slash is needed, - * it should be included as part of this base URL. - * @throws FileNotFoundException if the specified bucket does not exist - * @throws SdkServiceException if there is a problem accessing the S3 service. While - * this is a runtime exception that does not have to be caught - * by the caller, catching it is recommended to address - * connection problems early. - * @throws MalformedURLException if the given redirectBaseURL cannot be used to form - * legal URLs + * + * @param bucketname the name of the S3 bucket that provides the storage + * for this interface + * @param folder the name of the folder within the bucket where objects + * will be stored. If null + * or an empty string, it will be assumed that the + * objects should reside at the + * root of the bucket. + * @param s3 the AmazonS3 client instance to use to access the + * bucket + * @param redirectBaseURL a base URL to use to form redirect URLs based on + * object names + * when {@link #getRedirectFor(String)} is called. This + * implementation will form the URL by appending the + * object + * name to this base URL. 
Note that a delimiting slash + * will + * not be automatically inserted; if a slash is + * needed, + * it should be included as part of this base URL. + * @throws FileNotFoundException if the specified bucket does not exist + * @throws SdkServiceException if there is a problem accessing the S3 service. + * While + * this is a runtime exception that does not have + * to be caught + * by the caller, catching it is recommended to + * address + * connection problems early. + * @throws MalformedURLException if the given redirectBaseURL + * cannot be used to form + * legal URLs */ public AWSS3CacheVolume(String bucketname, String folder, S3Client s3, String redirectBaseURL) - throws FileNotFoundException, SdkServiceException, MalformedURLException - { + throws FileNotFoundException, SdkServiceException, MalformedURLException { this(bucketname, folder, null, s3, redirectBaseURL); } /** * create the storage instance - * @param bucketname the name of the S3 bucket that provides the storage for this interface - * @param folder the name of the folder within the bucket where objects will be stored. If null - * or an empty string, it will be assumed that the objects should reside at the - * root of the bucket. - * @param name a name to refer to this volume by - * @param s3 the AmazonS3 client instance to use to access the bucket - * @param redirectBaseURL a base URL to use to form redirect URLs based on object names - * when {@link #getRedirectFor(String)} is called. This - * implementation will form the URL by appending the object - * name to this base URL. Note that a delimiting slash will - * not be automatically inserted; if a slash is needed, - * it should be included as part of this base URL. - * @throws FileNotFoundException if the specified bucket does not exist - * @throws SdkServiceException if there is a problem accessing the S3 service. 
While - * this is a runtime exception that does not have to be caught - * by the caller, catching it is recommended to address - * connection problems early. - * @throws MalformedURLException if the given redirectBaseURL cannot be used to form - * legal URLs + * + * @param bucketname the name of the S3 bucket that provides the storage + * for this interface + * @param folder the name of the folder within the bucket where objects + * will be stored. If null + * or an empty string, it will be assumed that the + * objects should reside at the + * root of the bucket. + * @param name a name to refer to this volume by + * @param s3 the AmazonS3 client instance to use to access the + * bucket + * @param redirectBaseURL a base URL to use to form redirect URLs based on + * object names + * when {@link #getRedirectFor(String)} is called. This + * implementation will form the URL by appending the + * object + * name to this base URL. Note that a delimiting slash + * will + * not be automatically inserted; if a slash is + * needed, + * it should be included as part of this base URL. + * @throws FileNotFoundException if the specified bucket does not exist + * @throws SdkServiceException if there is a problem accessing the S3 service. + * While + * this is a runtime exception that does not have + * to be caught + * by the caller, catching it is recommended to + * address + * connection problems early. 
+ * @throws MalformedURLException if the given redirectBaseURL + * cannot be used to form + * legal URLs */ public AWSS3CacheVolume(String bucketname, String folder, String name, S3Client s3, String redirectBaseURL) - throws FileNotFoundException, S3Exception, MalformedURLException - { + throws FileNotFoundException, S3Exception, MalformedURLException { this(bucketname, folder, name, s3); baseurl = redirectBaseURL; @@ -120,43 +150,54 @@ public AWSS3CacheVolume(String bucketname, String folder, String name, S3Client new URL(baseurl + "test"); } - /** * create the storage instance - * @param bucketname the name of the S3 bucket that provides the storage for this interface - * @param folder the name of the folder within the bucket where objects will be stored. If null - * or an empty string, it will be assumed that the objects should reside at the - * root of the bucket. - * @param s3 the AmazonS3 client instance to use to access the bucket - * @throws FileNotFoundException if the specified bucket does not exist - * @throws SdkServiceException if there is a problem accessing the S3 service. While - * this is a runtime exception that does not have to be caught - * by the caller, catching it is recommended to address - * connection problems early. + * + * @param bucketname the name of the S3 bucket that provides the storage for + * this interface + * @param folder the name of the folder within the bucket where objects will + * be stored. If null + * or an empty string, it will be assumed that the objects + * should reside at the + * root of the bucket. + * @param s3 the AmazonS3 client instance to use to access the bucket + * @throws FileNotFoundException if the specified bucket does not exist + * @throws SdkServiceException if there is a problem accessing the S3 service. + * While + * this is a runtime exception that does not have + * to be caught + * by the caller, catching it is recommended to + * address + * connection problems early. 
*/ public AWSS3CacheVolume(String bucketname, String folder, S3Client s3) - throws FileNotFoundException, SdkServiceException - { + throws FileNotFoundException, SdkServiceException { this(bucketname, folder, null, s3); } - /** * create the storage instance - * @param bucketname the name of the S3 bucket that provides the storage for this interface - * @param folder the name of the folder within the bucket where objects will be stored. If null - * or an empty string, it will be assumed that the objects should reside at the - * root of the bucket. - * @param name a name to refer to this volume by - * @param s3 the AmazonS3 client instance to use to access the bucket - * @throws FileNotFoundException if the specified bucket does not exist - * @throws SdkServiceException if there is a problem accessing the S3 service. While - * this is a runtime exception that does not have to be caught - * by the caller, catching it is recommended to address - * connection problems early. + * + * @param bucketname the name of the S3 bucket that provides the storage for + * this interface + * @param folder the name of the folder within the bucket where objects will + * be stored. If null + * or an empty string, it will be assumed that the objects + * should reside at the + * root of the bucket. + * @param name a name to refer to this volume by + * @param s3 the AmazonS3 client instance to use to access the bucket + * @throws FileNotFoundException if the specified bucket does not exist + * @throws SdkServiceException if there is a problem accessing the S3 service. + * While + * this is a runtime exception that does not have + * to be caught + * by the caller, catching it is recommended to + * address + * connection problems early. 
*/ public AWSS3CacheVolume(String bucketname, String folder, String name, S3Client s3) - throws FileNotFoundException { + throws FileNotFoundException { bucket = bucketname; if (folder != null && folder.length() == 0) { @@ -202,21 +243,25 @@ public AWSS3CacheVolume(String bucketname, String folder, String name, S3Client } /** - * return the identifier or name assigned to this volume. If null is returned, + * return the identifier or name assigned to this volume. If null is returned, * the name is not known. */ - public String getName() { return name; } + public String getName() { + return name; + } private String s3name(String name) { if (folder == null) return name; - return folder+"/"+name; + return folder + "/" + name; } /** * return True if an object with a given name exists in this storage volume - * @param name the name of the object - * @throws StorageVolumeException if there is an error accessing the underlying storage system. + * + * @param name the name of the object + * @throws StorageVolumeException if there is an error accessing the underlying + * storage system. */ public boolean exists(String name) throws StorageVolumeException { try { @@ -237,39 +282,46 @@ public boolean exists(String name) throws StorageVolumeException { } /** - * save a copy of the named object to this storage volume. If an object - * already exists in the volume with this name, it will be replaced. + * save a copy of the named object to this storage volume. If an object + * already exists in the volume with this name, it will be replaced. *

- * This implementation will look for three metadata properties that will be incorporated into + * This implementation will look for three metadata properties that will be + * incorporated into * the S3 transfer request for robustness: *

    - *
  • size -- this will be set as the content-length header property for the file stream; - * if this number of bytes is not transfered successfully, an exception will - * occur.
  • - *
  • contentMD5 -- a base-64 encoding of the MD5 hash of the file which will be checked - * against the server-side value calculated by the AWS server; a mismatch will - * result in an error. Note that if this is not provided the AWS SDK will - * calculate and verify a value automatically; thus, it should not be necessary - * to set this.
  • - *
  • contentType -- the MIME-type to associate with this file. This is stored as - * associated AWS object metadata and will be used if the file is downloaded - * via an AWS public GET URL (and perhaps other download frontends).
  • + *
  • size -- this will be set as the content-length header + * property for the file stream; + * if this number of bytes is not transfered successfully, an exception will + * occur.
  • + *
  • contentMD5 -- a base-64 encoding of the MD5 hash of the file + * which will be checked + * against the server-side value calculated by the AWS server; a mismatch will + * result in an error. Note that if this is not provided the AWS SDK will + * calculate and verify a value automatically; thus, it should not be necessary + * to set this.
  • + *
  • contentType -- the MIME-type to associate with this file. + * This is stored as + * associated AWS object metadata and will be used if the file is downloaded + * via an AWS public GET URL (and perhaps other download frontends).
  • *
- * @param from an InputStream that contains the bytes the make up object to save - * @param name the name to assign to the object within the storage. - * @param md the metadata to be associated with that object. This parameter cannot be null - * and must include the object size. - * @throws StorageVolumeException if the method fails to save the object correctly. + * + * @param from an InputStream that contains the bytes the make up object to save + * @param name the name to assign to the object within the storage. + * @param md the metadata to be associated with that object. This parameter + * cannot be null + * and must include the object size. + * @throws StorageVolumeException if the method fails to save the object + * correctly. */ public void saveAs(InputStream from, String name, JSONObject md) throws StorageVolumeException { if (name == null || name.isEmpty()) { throw new IllegalArgumentException("AWSS3CacheVolume.saveAs(): must provide name"); } - + long size = -1L; String contentType = null; String contentMD5 = null; - + // Extract metadata if (md != null) { try { @@ -280,35 +332,47 @@ public void saveAs(InputStream from, String name, JSONObject md) throws StorageV contentType = md.optString("contentType", null); contentMD5 = md.optString("contentMD5", null); } - + if (size <= 0) { throw new IllegalArgumentException("AWSS3CacheVolume.saveAs(): metadata must include size property"); } - + try { + // Validate MD5 checksum if provided + if (contentMD5 != null) { + InputStream markableInputStream = from.markSupported() ? 
from : new BufferedInputStream(from); + markableInputStream.mark((int) size); // Mark the stream for reset + String calculatedMD5 = calculateMD5(markableInputStream, size); + if (!calculatedMD5.equals(contentMD5)) { + throw new StorageVolumeException("MD5 checksum mismatch for object: " + s3name(name)); + } + markableInputStream.reset(); // Reset the stream for the actual upload + from = markableInputStream; // Ensure the validated stream is used + } + // Prepare the PutObjectRequest PutObjectRequest.Builder putRequestBuilder = PutObjectRequest.builder() .bucket(bucket) .key(s3name(name)) .contentLength(size); - + if (contentType != null) { putRequestBuilder.contentType(contentType); } if (contentMD5 != null) { putRequestBuilder.contentMD5(contentMD5); } - + // Add Content-Disposition header (e.g., file name for web servers) if (name.endsWith("/")) { name = name.substring(0, name.length() - 1); } String[] nameFields = name.split("/"); putRequestBuilder.contentDisposition(nameFields[nameFields.length - 1]); - + // Perform the upload s3client.putObject(putRequestBuilder.build(), RequestBody.fromInputStream(from, size)); - + // Update metadata if provided if (md != null) { CacheObject co = get(name); @@ -324,53 +388,81 @@ public void saveAs(InputStream from, String name, JSONObject md) throws StorageV if (e.awsErrorDetails() != null && e.awsErrorDetails().errorCode().equals("InvalidDigest")) { throw new StorageVolumeException("MD5 checksum mismatch for object: " + s3name(name), e); } - throw new StorageVolumeException("Failed to upload object: " + s3name(name) + " (" + e.getMessage() + ")", e); + throw new StorageVolumeException("Failed to upload object: " + s3name(name) + " (" + e.getMessage() + ")", + e); } catch (Exception e) { - throw new StorageVolumeException("Unexpected error saving object " + s3name(name) + ": " + e.getMessage(), e); + throw new StorageVolumeException("Unexpected error saving object " + s3name(name) + ": " + e.getMessage(), + e); + } + } + + 
// Helper method to calculate MD5 checksum + private String calculateMD5(InputStream is, long size) throws Exception { + MessageDigest md = MessageDigest.getInstance("MD5"); + byte[] buffer = new byte[8192]; + int bytesRead; + long totalRead = 0; + + while ((bytesRead = is.read(buffer)) != -1) { + md.update(buffer, 0, bytesRead); + totalRead += bytesRead; + if (totalRead > size) { + throw new IllegalArgumentException("InputStream size exceeds specified size"); + } + } + + if (totalRead != size) { + throw new IllegalArgumentException("InputStream size does not match specified size"); } + + byte[] digest = md.digest(); + return Base64.getEncoder().encodeToString(digest); } - + /** - * save a copy of an object currently stored in another volume. If an object - * already exists in the volume with this name, it will be replaced. This - * allows for an implementation to invoke special optimizations for certain - * kinds of copies (e.g. S3 to S3). - * @param obj an object in another storage volume. - * @param name the name to assign to the object within the storage. - * @throws ObjectNotFoundException if the object does not exist in specified - * volume - * @throws StorageVolumeException if method fails to save the object correctly - * or if the request calls for copying an object to itself or - * if the given CacheObject is not sufficiently specified. + * save a copy of an object currently stored in another volume. If an object + * already exists in the volume with this name, it will be replaced. This + * allows for an implementation to invoke special optimizations for certain + * kinds of copies (e.g. S3 to S3). + * + * @param obj an object in another storage volume. + * @param name the name to assign to the object within the storage. 
+ * @throws ObjectNotFoundException if the object does not exist in specified + * volume + * @throws StorageVolumeException if method fails to save the object correctly + * or if the request calls for copying an object + * to itself or + * if the given CacheObject is not sufficiently + * specified. */ public synchronized void saveAs(CacheObject obj, String name) throws StorageVolumeException { if (obj.name == null) - throw new StorageVolumeException("name for cache object (in volume, "+obj.volname+ - ") not set."); + throw new StorageVolumeException("name for cache object (in volume, " + obj.volname + + ") not set."); if (obj.volume == null) - throw new StorageVolumeException("Unable to locate volume, "+obj.volname+ - ", for cache object, "+obj.name); + throw new StorageVolumeException("Unable to locate volume, " + obj.volname + + ", for cache object, " + obj.name); if (this.name.equals(obj.volname) && name.equals(obj.name)) - throw new StorageVolumeException("Request to copy "+obj.volname+":"+obj.name+ - " onto itself"); - if (! 
obj.volume.exists(obj.name)) + throw new StorageVolumeException("Request to copy " + obj.volname + ":" + obj.name + + " onto itself"); + if (!obj.volume.exists(obj.name)) throw new ObjectNotFoundException(obj.name, obj.volname); try (InputStream is = obj.volume.getStream(obj.name)) { this.saveAs(is, name, obj.exportMetadata()); - } - catch (IOException ex) { - throw new StorageVolumeException("Trouble closing source stream while reading object "+obj.name); + } catch (IOException ex) { + throw new StorageVolumeException("Trouble closing source stream while reading object " + obj.name); } } /** * return an open InputStream to the object with the given name - * @param name the name of the object to get - * @throws ObjectNotFoundException if the named object does not exist in this - * volume - * @throws StorageVolumeException if there is any other problem opening the - * named object + * + * @param name the name of the object to get + * @throws ObjectNotFoundException if the named object does not exist in this + * volume + * @throws StorageVolumeException if there is any other problem opening the + * named object */ public InputStream getStream(String name) throws StorageVolumeException { String key = s3name(name); @@ -397,9 +489,10 @@ public InputStream getStream(String name) throws StorageVolumeException { /** * return a reference to an object in the volume given its name - * @param name the name of the object to get - * @throws ObjectNotFoundException if the named object does not exist in this - * volume + * + * @param name the name of the object to get + * @throws ObjectNotFoundException if the named object does not exist in this + * volume */ public CacheObject get(String name) throws StorageVolumeException { String key = s3name(name); @@ -431,14 +524,14 @@ public CacheObject get(String name) throws StorageVolumeException { } } - - /** + /** * remove the object with the give name from this storage volume - * @param name the name of the object to get - * @return 
boolean True if the object existed in the volume; false if it was - * not found in this volume - * @throws StorageVolumeException if there is an internal error while trying to - * remove the Object + * + * @param name the name of the object to get + * @return boolean True if the object existed in the volume; false if it was + * not found in this volume + * @throws StorageVolumeException if there is an internal error while trying to + * remove the Object */ public boolean remove(String name) throws StorageVolumeException { String key = s3name(name); @@ -448,7 +541,7 @@ public boolean remove(String name) throws StorageVolumeException { .bucket(bucket) .key(key) .build(); - + // Delete the object s3client.deleteObject(deleteRequest); return true; // If no exception, the object was successfully deleted @@ -463,19 +556,21 @@ public boolean remove(String name) throws StorageVolumeException { } /** - * return a URL that th eobject with the given name can be alternatively - * read from. This allows for a potentially faster way to deliver a file - * to web clients than via a Java stream copy. Not all implementations may - * support this. + * return a URL that th eobject with the given name can be alternatively + * read from. This allows for a potentially faster way to deliver a file + * to web clients than via a Java stream copy. Not all implementations may + * support this. * - * This implementation throws an UnsupportedOperationException if - * {@linkplain #AWSS3CacheVolume(String,String,AmazonS3,String) the constructor} + * This implementation throws an UnsupportedOperationException if + * {@linkplain #AWSS3CacheVolume(String,String,AmazonS3,String) the constructor} * was not provided with a redirectBaseURL argument. 
* - * @param name the name of the object to get - * @return URL a URL where the object can be streamed from - * @throws UnsupportedOperationException always as this function is not supported + * @param name the name of the object to get + * @return URL a URL where the object can be streamed from + * @throws UnsupportedOperationException always as this function is not + * supported */ + @Override public URL getRedirectFor(String name) throws StorageVolumeException, UnsupportedOperationException { if (baseurl == null) { throw new UnsupportedOperationException("AWSS3CacheVolume: getRedirectFor not supported"); @@ -483,25 +578,16 @@ public URL getRedirectFor(String name) throws StorageVolumeException, Unsupporte if (exists(name)) { try { - // Generate a presigned URL using S3Presigner - // S3Presigner replaces getUrl for presigned URL generation - try (S3Presigner presigner = S3Presigner.create()) { - GetObjectRequest getObjectRequest = GetObjectRequest.builder() - .bucket(bucket) - .key(s3name(name)) - .build(); - - GetObjectPresignRequest presignRequest = GetObjectPresignRequest.builder() - .signatureDuration(Duration.ofMinutes(15)) // URL validity duration - .getObjectRequest(getObjectRequest) - .build(); - - URL presignedUrl = presigner.presignGetObject(presignRequest).url(); - return presignedUrl; - } + // New way is to use S3Utilities to get the object URL + GetUrlRequest request = GetUrlRequest.builder() + .bucket(bucket) + .key(s3name(name)) + .build(); + + return s3client.utilities().getUrl(request); } catch (S3Exception ex) { throw new StorageVolumeException("Failed to determine redirect URL for name=" + name + ": " + - ex.getMessage(), ex); + ex.awsErrorDetails().errorMessage(), ex); } } else { try { @@ -512,19 +598,19 @@ public URL getRedirectFor(String name) throws StorageVolumeException, Unsupporte } } - /** * create a folder/subdirectory in a bucket if it already doesn't exist * - * @param bucketname the name of the bucket where the folder should exist - 
* @param folder the name of the folder to ensure exists - * @param s3 the authenticated AmazonS3 client to use to access the bucket + * @param bucketname the name of the bucket where the folder should exist + * @param folder the name of the folder to ensure exists + * @param s3 the authenticated AmazonS3 client to use to + * access the bucket */ public static boolean ensureBucketFolder(S3Client s3, String bucketname, String folder) throws S3Exception { if (!folder.endsWith("/")) { folder += "/"; } - + try { // Check if the folder exists by calling headObject s3.headObject(HeadObjectRequest.builder() @@ -537,14 +623,14 @@ public static boolean ensureBucketFolder(S3Client s3, String bucketname, String throw ex; // Re-throw exception if it's not a 404 (Not Found) error } } - + // Folder does not exist, create it as a zero-byte object try (InputStream emptyContent = new ByteArrayInputStream(new byte[0])) { s3.putObject(PutObjectRequest.builder() - .bucket(bucketname) - .key(folder) - .contentLength(0L) - .build(), + .bucket(bucketname) + .key(folder) + .contentLength(0L) + .build(), RequestBody.fromInputStream(emptyContent, 0)); return true; // Folder created successfully } catch (Exception e) { diff --git a/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java b/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java index d726b530..57d3958e 100644 --- a/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java +++ b/src/main/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorage.java @@ -19,6 +19,7 @@ import java.io.InputStreamReader; import java.util.ArrayList; import java.util.List; +import java.util.regex.Pattern; import gov.nist.oar.bags.preservation.BagUtils; import gov.nist.oar.distrib.Checksum; @@ -39,9 +40,9 @@ import software.amazon.awssdk.services.s3.model.S3Exception; import software.amazon.awssdk.services.s3.model.S3Object; - /** - * An implementation of the LongTermStorage interface for accessing files from an 
AWS-S3 storage bucket. + * An implementation of the LongTermStorage interface for accessing files from + * an AWS-S3 storage bucket. * * @author Deoyani Nandrekar-Heinis */ @@ -51,13 +52,33 @@ public class AWSS3LongTermStorage extends PDRBagStorageBase { public final String bucket; protected S3Client s3client; - protected Integer pagesz = null; // null means use default page size + protected Integer pagesz = null; // null means use default page size private long checksumSizeLim = defaultChecksumSizeLimit; + /** + * set the number of objects returned in a page of listing results. This can be + * used for testing. + * A null value means use the AWS default. + */ public void setPageSize(Integer sz) { - this.pagesz = sz; + pagesz = sz; } + /** + * create the storage instance + * + * @param bucketname the name of the S3 bucket that provides the storage for + * this interface + * @param s3 the AmazonS3 client instance to use to access the bucket + * @throws FileNotFoundException if the specified bucket does not exist + * @throws AmazonServiceException if there is a problem accessing the S3 + * service. While + * this is a runtime exception that does not have + * to be caught + * by the caller, catching it is recommended to + * address + * connection problems early. + */ public AWSS3LongTermStorage(String bucketname, S3Client s3Client) throws FileNotFoundException, StorageVolumeException { super(bucketname); @@ -75,6 +96,14 @@ public AWSS3LongTermStorage(String bucketname, S3Client s3Client) logger.info("Initialized AWSS3LongTermStorage for bucket: {}", bucket); } + /** + * return true if a file with the given name exists in the storage + * + * @param filename The name of the desired file. Note that this does not refer + * to files that + * may reside inside a serialized bag or other archive (e.g. + * zip) file. 
+ */ @Override public boolean exists(String filename) throws StorageVolumeException { try { @@ -87,6 +116,18 @@ public boolean exists(String filename) throws StorageVolumeException { } } + /** + * Given an exact file name in the storage, return an InputStream open at the + * start of the file + * + * @param filename The name of the desired file. Note that this does not refer + * to files that + * may reside inside a serialized bag or other archive (e.g. + * zip) file. + * @return InputStream - open at the start of the file + * @throws FileNotFoundException if the file with the given filename does not + * exist + */ @Override public InputStream openFile(String filename) throws FileNotFoundException, StorageVolumeException { try { @@ -100,27 +141,76 @@ public InputStream openFile(String filename) throws FileNotFoundException, Stora } } + /** + * return the checksum for the given file + * + * @param filename The name of the desired file. Note that this does not refer + * to files that + * may reside inside a serialized bag or other archive (e.g. + * zip) file. 
+ * @return Checksum, a container for the checksum value + * @throws FileNotFoundException if the file with the given filename does not + * exist + */ @Override public Checksum getChecksum(String filename) throws FileNotFoundException, StorageVolumeException { String checksumKey = filename + ".sha256"; - try (InputStream is = openFile(checksumKey); - BufferedReader reader = new BufferedReader(new InputStreamReader(is))) { - String checksumValue = reader.readLine(); - return Checksum.sha256(checksumValue); - } catch (FileNotFoundException ex) { + ResponseInputStream s3ObjectStream = null; + + try { + // Try to retrieve the checksum file from S3 + s3ObjectStream = s3client.getObject(GetObjectRequest.builder() + .bucket(bucket) + .key(checksumKey) + .build()); + + try (InputStreamReader reader = new InputStreamReader(s3ObjectStream)) { + // Read and return the checksum from the file + return Checksum.sha256(readHash(reader)); + } catch (IOException e) { + throw new StorageStateException("Failed to read cached checksum value from " + checksumKey, e); + } + } catch (NoSuchKeyException e) { + // Handle missing checksum file + if (!filename.endsWith(".sha256")) { + logger.warn("No cached checksum available for " + filename); + } + if (getSize(filename) > checksumSizeLim) { throw new StorageStateException("No cached checksum for large file: " + filename); } + + // Calculate checksum on the fly for small files try (InputStream fileStream = openFile(filename)) { return Checksum.calcSHA256(fileStream); - } catch (IOException e) { - throw new StorageStateException("Unable to calculate checksum: " + filename, e); + } catch (IOException ex) { + throw new StorageStateException("Unable to calculate checksum for small file: " + filename, ex); + } + } catch (S3Exception ex) { + throw new StorageStateException( + "Trouble accessing " + checksumKey + ": " + ex.awsErrorDetails().errorMessage(), ex); + } finally { + if (s3ObjectStream != null) { + try { + s3ObjectStream.close(); + } 
catch (IOException e) { + logger.warn("Trouble closing S3Object stream: " + e.getMessage()); + } } - } catch (IOException ex) { - throw new StorageStateException("Error reading checksum for " + filename, ex); } } + /** + * Return the size of the named file in bytes + * + * @param filename The name of the desired file. Note that this does not refer + * to files that + * may reside inside a serialized bag or other archive (e.g. + * zip) file. + * @return long, the size of the file in bytes + * @throws FileNotFoundException if the file with the given filename does not + * exist + */ @Override public long getSize(String filename) throws FileNotFoundException, StorageVolumeException { try { @@ -144,6 +234,14 @@ protected ListObjectsV2Request createListRequest(String keyprefix, Integer pages return builder.build(); } + /** + * Return all the bags associated with the given ID + * + * @param identifier the AIP identifier for the desired data collection + * @return List, the file names for all bags associated with given ID + * @throws ResourceNotFoundException if there exist no bags with the given + * identifier + */ @Override public List findBagsFor(String identifier) throws ResourceNotFoundException, StorageVolumeException { @@ -163,6 +261,7 @@ public List findBagsFor(String identifier) request = request.toBuilder().continuationToken(response.nextContinuationToken()).build(); } while (response.isTruncated()); } catch (S3Exception ex) { + logger.error("Error accessing bucket {}: {}", bucket, ex.getMessage(), ex); throw new StorageStateException("Error accessing bucket: " + bucket, ex); } @@ -173,13 +272,46 @@ public List findBagsFor(String identifier) return filenames; } + /** + * Return the head bag associated with the given ID + * + * @param identifier the AIP identifier for the desired data collection + * @return String, the head bag's file name + * @throws ResourceNotFoundException if there exist no bags with the given + * identifier + */ + @Override + public String 
findHeadBagFor(String identifier) + throws ResourceNotFoundException, StorageStateException { + return findHeadBagFor(identifier, null); + } + + /** + * Return the name of the head bag for the identifier for given version + * + * @param identifier the AIP identifier for the desired data collection + * @param version the desired version of the AIP; if null, assume the latest + * version. + * If the version is an empty string, the head bag for bags + * without a + * version designation will be selected. + * @return String, the head bag's file name, or null if version is not found + * @throws ResourceNotFoundException if there exist no bags with the given + * identifier or version + */ @Override public String findHeadBagFor(String identifier, String version) throws ResourceNotFoundException, StorageStateException { + // Prefix handling with pattern matching for version String prefix = identifier + "."; if (version != null) { - version = version.replace(".", "_"); - prefix += version.replaceAll("(_0)+$", ""); + // Replace dots in version with underscores + version = Pattern.compile("\\.").matcher(version).replaceAll("_"); + + // Remove trailing "_0" for efficiency + if (!Pattern.compile("^[01](_0)*$").matcher(version).find()) { + prefix += Pattern.compile("(_0)+$").matcher(version).replaceAll(""); + } } String selected = null; @@ -192,7 +324,15 @@ public String findHeadBagFor(String identifier, String version) response = s3client.listObjectsV2(request); for (S3Object obj : response.contents()) { String name = obj.key(); + + // Filter out ".sha256" files and ensure legal bag names if (!name.endsWith(".sha256") && BagUtils.isLegalBagName(name)) { + // Check version match if provided + if (version != null && !BagUtils.matchesVersion(name, version)) { + continue; + } + + // Determine sequence number and update selected file int seq = BagUtils.sequenceNumberIn(name); if (seq > maxSeq) { maxSeq = seq; @@ -200,16 +340,20 @@ public String findHeadBagFor(String identifier, 
String version) } } } + + // Update continuation token for the next page request = request.toBuilder().continuationToken(response.nextContinuationToken()).build(); } while (response.isTruncated()); } catch (S3Exception ex) { throw new StorageStateException("Error accessing bucket: " + bucket, ex); } + // Handle case where no matching file is found if (selected == null) { throw ResourceNotFoundException.forID(identifier, version); } return selected; } + } \ No newline at end of file diff --git a/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java b/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java index 120c54de..925d29ce 100644 --- a/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java +++ b/src/test/java/gov/nist/oar/distrib/cachemgr/storage/AWSS3CacheVolumeTest.java @@ -47,6 +47,7 @@ import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.waiters.WaiterResponse; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.CreateBucketRequest; @@ -56,6 +57,7 @@ import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.HeadBucketRequest; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchBucketException; @@ -63,22 +65,21 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Exception; import 
software.amazon.awssdk.services.s3.model.S3Object; +import software.amazon.awssdk.services.s3.waiters.S3Waiter; public class AWSS3CacheVolumeTest { - // static S3Mock api = new S3Mock.Builder().withPort(port).withInMemoryBackend().build(); + // static S3Mock api = new + // S3Mock.Builder().withPort(port).withInMemoryBackend().build(); @ClassRule public static S3MockTestRule siterule = new S3MockTestRule(); - // private static Logger logger = LoggerFactory.getLogger(AWSS3CacheVolumeTest.class); - - static int port = 9001; static final String bucket = "oar-cv-test"; static final String folder = "cach"; static String hash = "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9"; static S3Client s3client = null; AWSS3CacheVolume s3cv = null; - + @BeforeClass public static void setUpClass() { s3client = createS3Client(); @@ -93,19 +94,33 @@ public static void setUpClass() { .bucket(bucket) .build()); - // Create folder (zero-length object with a trailing slash) - String folderKey = folder + "/"; - try (InputStream emptyContent = new ByteArrayInputStream(new byte[0])) { - s3client.putObject(PutObjectRequest.builder() - .bucket(bucket) - .key(folderKey) - .contentLength(0L) - .contentType("application/x-directory") - .build(), - software.amazon.awssdk.core.sync.RequestBody.fromInputStream(emptyContent, 0L)); - } catch (IOException ex) { - throw new RuntimeException("Failed to create folder in bucket", ex); - } + // Create the folder using the updated logic + createFolder(bucket, folder, s3client); + } + + public static void createFolder(String bucketName, String folderName, S3Client client) { + // Ensure folder name ends with a trailing slash + folderName = folderName.endsWith("/") ? 
folderName : folderName + "/"; + + // Create the folder as an empty object + PutObjectRequest putRequest = PutObjectRequest.builder() + .bucket(bucketName) + .key(folderName) + .build(); + client.putObject(putRequest, RequestBody.empty()); + + // Wait for the folder to exist + S3Waiter waiter = client.waiter(); + HeadObjectRequest headRequest = HeadObjectRequest.builder() + .bucket(bucketName) + .key(folderName) + .build(); + + WaiterResponse waiterResponse = waiter.waitUntilObjectExists(headRequest); + waiterResponse.matched().response() + .ifPresent(response -> System.out.println("Folder creation confirmed: " + response)); + + System.out.println("Folder " + folderName + " is ready."); } public static S3Client createS3Client() { @@ -137,6 +152,7 @@ private static void destroyBucket() { List keys = listResponse.contents().stream() .map(S3Object::key) .collect(Collectors.toList()); + for (String key : keys) { s3client.deleteObject(DeleteObjectRequest.builder() .bucket(bucket) @@ -154,14 +170,7 @@ private static void destroyBucket() { public void setUp() { // Verify folder exists in the bucket String folderKey = folder + "/"; - ListObjectsV2Response response = s3client.listObjectsV2(ListObjectsV2Request.builder() - .bucket(bucket) - .prefix(folderKey) - .build()); - - boolean folderExists = response.contents().stream() - .anyMatch(object -> object.key().equals(folderKey)); - assertNull("Folder does not exist: " + folder, folderExists ? 
null : folderKey); + assertTrue("Folder does not exist: " + folder, folderExists(bucket, folderKey)); // Initialize AWSS3CacheVolume try { @@ -180,9 +189,9 @@ public void tearDown() { private void depopulateFolder() { String folderKey = folder + "/"; List keysToDelete = s3client.listObjectsV2(ListObjectsV2Request.builder() - .bucket(bucket) - .prefix(folderKey) - .build()) + .bucket(bucket) + .prefix(folderKey) + .build()) .contents() .stream() .filter(obj -> !obj.key().equals(folderKey)) // Skip the folder itself @@ -235,10 +244,10 @@ public void testEnsureFolder() { try (InputStream is = new ByteArrayInputStream(obj)) { s3client.putObject(PutObjectRequest.builder() - .bucket(bucket) - .key(subobj) - .contentLength((long) obj.length) - .build(), + .bucket(bucket) + .key(subobj) + .contentLength((long) obj.length) + .build(), RequestBody.fromInputStream(is, obj.length)); } catch (IOException ex) { throw new RuntimeException("Failed to upload object", ex); @@ -260,10 +269,10 @@ public void testExists() throws StorageVolumeException { byte[] obj = "1".getBytes(); try (InputStream is = new ByteArrayInputStream(obj)) { s3client.putObject(PutObjectRequest.builder() - .bucket(bucket) - .key(objname) - .contentLength((long) obj.length) - .build(), + .bucket(bucket) + .key(objname) + .contentLength((long) obj.length) + .build(), RequestBody.fromInputStream(is, obj.length)); } catch (IOException ex) { throw new RuntimeException("Failed to upload object", ex); @@ -320,13 +329,21 @@ private boolean objectExists(String bucket, String key) { } // Helper method to check if a folder exists - private boolean folderExists(String bucket, String folderKey) { - ListObjectsV2Response response = s3client.listObjectsV2(ListObjectsV2Request.builder() - .bucket(bucket) - .prefix(folderKey) - .build()); - return response.contents().stream() - .anyMatch(object -> object.key().equals(folderKey)); + public boolean folderExists(String bucketName, String folderName) { + try { + HeadObjectRequest 
request = HeadObjectRequest.builder() + .bucket(bucketName) + .key(folderName) + .build(); + + s3client.headObject(request); + return true; + } catch (S3Exception e) { + if (e.statusCode() == 404) { + return false; + } + throw e; + } } @Test @@ -336,8 +353,8 @@ public void testGet() throws StorageVolumeException { CacheObject co = s3cv.get("test.txt"); assertEquals(co.getSize(), obj.length); long mod = co.getLastModified(); - assertTrue("Bad mod time: "+Long.toString(mod), mod > 0L); - assertEquals(co.getMetadatumString("contentType",""), "text/plain"); + assertTrue("Bad mod time: " + Long.toString(mod), mod > 0L); + assertEquals(co.getMetadatumString("contentType", ""), "text/plain"); } @Test @@ -356,47 +373,52 @@ public void testSaveAsWithMD5() throws StorageVolumeException { try { s3cv.saveAs(is, "test.txt", md); } finally { - try { is.close(); } catch (IOException ex) { } + try { + is.close(); + } catch (IOException ex) { + } } assertTrue(objectExists(bucket, objname)); assertTrue(s3cv.exists("test.txt")); assertEquals(md.getString("contentMD5"), "JjJWGp65Tg0F4+AyzFre7Q=="); // the etag should be an MD5 sum, but for some reason it is not - // assertEquals(md.getString("volumeChecksum"), "etag JjJWGp65Tg0F4+AyzFre7Q=="); + // assertEquals(md.getString("volumeChecksum"), "etag + // JjJWGp65Tg0F4+AyzFre7Q=="); } /* * this test is unreliable with S3Mock * - @Test - public void testSaveAsWithBadSize() throws StorageVolumeException { - String objname = folder + "/test.txt"; - assertTrue(! s3client.doesObjectExist(bucket, objname)); - assertTrue(! s3cv.exists("test.txt")); - - byte[] obj = "hello world.\n".getBytes(); - JSONObject md = new JSONObject(); - md.put("size", 5); - md.put("contentType", "text/plain"); - InputStream is = new ByteArrayInputStream(obj); - - try { - s3cv.saveAs(is, "test.txt", md); - fail("Failed to detect bad size"); - } catch (StorageVolumeException ex) { - // Expected! 
- assertTrue("Failed for the wrong reason: "+ex.getMessage(), - ex.getMessage().contains("correct number of bytes")); - } finally { - try { is.close(); } catch (IOException ex) { } - } - assertTrue(! s3client.doesObjectExist(bucket, objname)); - // - // NOTE! That this assert sometimes fails is believed to be an issue with S3Mock - // assertTrue(! s3cv.exists("test.txt")); - } - */ + * @Test + * public void testSaveAsWithBadSize() throws StorageVolumeException { + * String objname = folder + "/test.txt"; + * assertTrue(! s3client.doesObjectExist(bucket, objname)); + * assertTrue(! s3cv.exists("test.txt")); + * + * byte[] obj = "hello world.\n".getBytes(); + * JSONObject md = new JSONObject(); + * md.put("size", 5); + * md.put("contentType", "text/plain"); + * InputStream is = new ByteArrayInputStream(obj); + * + * try { + * s3cv.saveAs(is, "test.txt", md); + * fail("Failed to detect bad size"); + * } catch (StorageVolumeException ex) { + * // Expected! + * assertTrue("Failed for the wrong reason: "+ex.getMessage(), + * ex.getMessage().contains("correct number of bytes")); + * } finally { + * try { is.close(); } catch (IOException ex) { } + * } + * assertTrue(! s3client.doesObjectExist(bucket, objname)); + * // + * // NOTE! That this assert sometimes fails is believed to be an issue with + * S3Mock + * // assertTrue(! s3cv.exists("test.txt")); + * } + */ /* * S3Mock apparently does not check contentMD5 values @@ -420,15 +442,18 @@ public void testSaveAsWithBadMD5() throws StorageVolumeException { fail("Failed to detect bad MD5 sum"); } catch (StorageVolumeException ex) { // Expected! 
- assertTrue("Failed for the wrong reason: "+ex.getMessage(), - ex.getMessage().contains("md5 transfer")); + assertTrue("Failed for the wrong reason: " + ex.getMessage(), + ex.getMessage().contains("MD5 checksum mismatch for object")); } finally { - try { is.close(); } catch (IOException ex) { } + try { + is.close(); + } catch (IOException ex) { + } } assertTrue("Failed transfered object not deleted from bucket", - !objectExists(bucket, objname)); + !objectExists(bucket, objname)); assertTrue("Failed transfered object not deleted from volume", - !s3cv.exists("test.txt")); + !s3cv.exists("test.txt")); } @Test @@ -439,7 +464,8 @@ public void testGetStream() throws StorageVolumeException, IOException { try { s3cv.getStream("test.txt"); fail("Missing object did not throw ObjectNotFoundException"); - } catch (ObjectNotFoundException ex) { } + } catch (ObjectNotFoundException ex) { + } testSaveAs(); InputStream is = s3cv.getStream("test.txt"); @@ -448,9 +474,11 @@ public void testGetStream() throws StorageVolumeException, IOException { try { assertEquals(rdr.readLine(), "hello world."); assertNull(rdr.readLine()); - } - finally { - try { rdr.close(); } catch (IOException ex) { } + } finally { + try { + rdr.close(); + } catch (IOException ex) { + } } s3cv.remove("test.txt"); @@ -468,7 +496,8 @@ public void getSaveObject() throws StorageVolumeException { try { s3cv.get("test.txt"); fail("Missing object did not throw ObjectNotFoundException"); - } catch (ObjectNotFoundException ex) { } + } catch (ObjectNotFoundException ex) { + } testSaveAs(); CacheObject co = s3cv.get("test.txt"); @@ -487,15 +516,13 @@ public void getSaveObject() throws StorageVolumeException { @Test(expected = UnsupportedOperationException.class) public void testRedirectForUnsupported() - throws StorageVolumeException, UnsupportedOperationException, IOException - { + throws StorageVolumeException, UnsupportedOperationException, IOException { s3cv.getRedirectFor("goober"); } @Test public void 
testRedirectFor() - throws StorageVolumeException, UnsupportedOperationException, IOException, MalformedURLException - { + throws StorageVolumeException, UnsupportedOperationException, IOException, MalformedURLException { s3cv = new AWSS3CacheVolume(bucket, "cach", s3client, "https://ex.org/"); assertEquals(new URL("https://ex.org/goober"), s3cv.getRedirectFor("goober")); assertEquals(new URL("https://ex.org/i%20a/m%20groot"), s3cv.getRedirectFor("i a/m groot")); @@ -503,11 +530,10 @@ public void testRedirectFor() @Test public void testRedirectFor2() - throws StorageVolumeException, UnsupportedOperationException, IOException, MalformedURLException - { + throws StorageVolumeException, UnsupportedOperationException, IOException, MalformedURLException { s3cv = new AWSS3CacheVolume(bucket, "cach", s3client, "https://ex.org/"); testSaveAs(); - String burl = "http://localhost:9090//"+bucket+"/"+folder+"/"; - assertEquals(new URL(burl+"test.txt"), s3cv.getRedirectFor("test.txt")); + String burl = "http://localhost:9090/" + bucket + "/" + folder + "/"; + assertEquals(new URL(burl + "test.txt"), s3cv.getRedirectFor("test.txt")); } } diff --git a/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java b/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java index 33aded6c..3cf1e707 100644 --- a/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java +++ b/src/test/java/gov/nist/oar/distrib/storage/AWSS3ClientProviderTest.java @@ -25,6 +25,7 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; @@ -41,6 +42,9 @@ public class AWSS3ClientProviderTest { + @ClassRule + public static S3MockTestRule siterule = new S3MockTestRule(); + private static final String bucket = "oar-lts-test"; private AWSS3ClientProvider s3Provider = null; diff --git 
a/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java b/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java index 7e9fd035..63c86318 100644 --- a/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java +++ b/src/test/java/gov/nist/oar/distrib/storage/AWSS3LongTermStorageTest.java @@ -12,21 +12,27 @@ */ package gov.nist.oar.distrib.storage; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.ByteArrayInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.net.URI; -import java.io.ByteArrayInputStream; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; -import org.junit.Before; import org.junit.After; -import org.junit.BeforeClass; import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; -import static org.junit.Assert.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import gov.nist.oar.distrib.DistributionException; import gov.nist.oar.distrib.ResourceNotFoundException; @@ -37,15 +43,17 @@ import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.HeadBucketRequest; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchBucketException; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import 
software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Exception; - -// import com.adobe.testing.s3mock.S3MockApplication; -// import gov.nist.oar.RequireWebSite; +import software.amazon.awssdk.services.s3.model.S3Object; /** * This is test class is used to connect to long term storage on AWS S3 @@ -55,66 +63,36 @@ */ public class AWSS3LongTermStorageTest { - // static S3MockApplication mockServer = null; @ClassRule public static S3MockTestRule siterule = new S3MockTestRule(); - - static S3Client s3client = null; - - // private static Logger logger = LoggerFactory.getLogger(AWSS3LongTermStorageTest.class); - - // static int port = 9001; - static final String bucket = "oar-lts-test"; - static String hash = "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9"; + + private static Logger logger = LoggerFactory.getLogger(AWSS3LongTermStorageTest.class); + + private static final String BUCKET_NAME = "oar-lts-test"; + private static final String HASH = "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9"; + private static S3Client s3client; AWSS3LongTermStorage s3Storage = null; - + @BeforeClass - public static void setUpClass() throws IOException { - // Start S3Mock and initialize the S3 client + public static void setUpClass() { + // Initialize S3 Client s3client = createS3Client(); - // Destroy the bucket if it already exists - if (bucketExists(bucket)) { + // Ensure bucket is clean before tests + if (bucketExists(BUCKET_NAME)) { destroyBucket(); } - - // Create the bucket - s3client.createBucket(CreateBucketRequest.builder().bucket(bucket).build()); - - // Populate the bucket + s3client.createBucket(CreateBucketRequest.builder().bucket(BUCKET_NAME).build()); populateBucket(); } - public static boolean bucketExists(String bucketName) { - try { - s3client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build()); - return true; - } catch (NoSuchBucketException e) { - return false; 
// Bucket does not exist - } catch (S3Exception e) { - throw new RuntimeException("Failed to check bucket existence: " + e.getMessage(), e); - } - } - - public static S3Client createS3Client() { - // Static credentials and mock endpoint configuration - AwsBasicCredentials credentials = AwsBasicCredentials.create("foo", "bar"); - String endpoint = "http://localhost:9090/"; - - return S3Client.builder() - .credentialsProvider(StaticCredentialsProvider.create(credentials)) - .region(Region.US_EAST_1) - .endpointOverride(URI.create(endpoint)) // Override to point to mock server - .forcePathStyle(true) // Enable path-style access - .build(); - } - @Before public void setUp() throws IOException { try { - s3Storage = new AWSS3LongTermStorage(bucket, s3client); + s3Storage = new AWSS3LongTermStorage(BUCKET_NAME, s3client); } catch (FileNotFoundException | StorageVolumeException ex) { - throw new IllegalStateException("Failed to initialize AWSS3LongTermStorage for test setup: " + ex.getMessage(), ex); + throw new IllegalStateException( + "Failed to initialize AWSS3LongTermStorage for test setup: " + ex.getMessage(), ex); } } @@ -125,35 +103,72 @@ public void tearDown() { @AfterClass public static void tearDownClass() { - destroyBucket(); - // mockServer.stop(); + try { + if (s3client != null) { + destroyBucket(); + } + } catch (Exception e) { + logger.warn("Failed to destroy bucket during teardown: " + e.getMessage()); + } + siterule.stopServer(); + } + + public static S3Client createS3Client() { + return S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("foo", "bar"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:9090/")) // Mock server endpoint + .forcePathStyle(true) + .build(); + } + + public static boolean bucketExists(String bucketName) { + try { + s3client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build()); + return true; + } catch (NoSuchBucketException e) { + return 
false; + } } public static void destroyBucket() { - // List and delete all objects in the bucket, then delete the bucket - s3client.listObjectsV2(builder -> builder.bucket(bucket).build()) + s3client.listObjectsV2(ListObjectsV2Request.builder().bucket(BUCKET_NAME).build()) .contents() - .forEach(obj -> s3client.deleteObject(builder -> builder.bucket(bucket).key(obj.key()).build())); + .forEach(object -> s3client.deleteObject(DeleteObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(object.key()) + .build())); + s3client.deleteBucket(DeleteBucketRequest.builder().bucket(BUCKET_NAME).build()); + } + + @Test + public void testBucketCreation() { + try { + s3client.headBucket(HeadBucketRequest.builder().bucket(BUCKET_NAME).build()); + assertTrue("Bucket exists and is accessible", true); + } catch (Exception e) { + fail("Bucket creation or access failed: " + e.getMessage()); + } } public static void populateBucket() { String[] bases = { - "mds013u4g.1_0_0.mbag0_4-", "mds013u4g.1_0_1.mbag0_4-", "mds013u4g.1_1.mbag0_4-", - "mds088kd2.mbag0_3-", "mds088kd2.mbag0_3-", "mds088kd2.1_0_1.mbag0_4-" + "mds013u4g.1_0_0.mbag0_4-", "mds013u4g.1_0_1.mbag0_4-", "mds013u4g.1_1.mbag0_4-", + "mds088kd2.mbag0_3-", "mds088kd2.mbag0_3-", "mds088kd2.1_0_1.mbag0_4-" }; int j = 0; for (String base : bases) { for (int i = 0; i < 3; i++) { String bag = base + j++ + ((i > 1) ? 
".7z" : ".zip"); - String baghash = hash + " " + bag; + String baghash = HASH + " " + bag; // Check if the object already exists - if (!objectExists(bucket, bag)) { + if (!objectExists(BUCKET_NAME, bag)) { // Upload the empty "bag" file try (InputStream ds = new ByteArrayInputStream("0".getBytes())) { s3client.putObject(PutObjectRequest.builder() - .bucket(bucket) + .bucket(BUCKET_NAME) .key(bag) .contentType("text/plain") .contentLength(1L) @@ -165,7 +180,7 @@ public static void populateBucket() { // Upload the "baghash" file try (InputStream ds = new ByteArrayInputStream(baghash.getBytes())) { s3client.putObject(PutObjectRequest.builder() - .bucket(bucket) + .bucket(BUCKET_NAME) .key(bag + ".sha256") .contentType("text/plain") .contentLength((long) baghash.length()) @@ -194,12 +209,42 @@ private static boolean objectExists(String bucket, String key) { } @Test - public void testCtor() { + public void testBucketContents() { + ListObjectsV2Response response = s3client.listObjectsV2(ListObjectsV2Request.builder().bucket(BUCKET_NAME).build()); + List keys = response.contents().stream().map(S3Object::key).collect(Collectors.toList()); + assertTrue(keys.contains("mds013u4g.1_0_0.mbag0_4-0.zip")); + assertTrue(keys.contains("mds013u4g.1_0_0.mbag0_4-2.7z")); + } + + @Test + public void testPopulateBucket() { try { - s3client.headBucket(HeadBucketRequest.builder().bucket(bucket).build()); - assertTrue(true); - } catch (NoSuchBucketException e) { - fail("Bucket does not exist: " + bucket); + // Call for populateBucket was done in class setup + + // Verify that all expected objects are present in the bucket + String[] expectedKeys = { + "mds013u4g.1_0_0.mbag0_4-0.zip", "mds013u4g.1_0_0.mbag0_4-1.zip", "mds013u4g.1_0_0.mbag0_4-2.7z", + "mds013u4g.1_0_1.mbag0_4-3.zip", "mds013u4g.1_0_1.mbag0_4-4.zip", "mds013u4g.1_0_1.mbag0_4-5.7z", + "mds013u4g.1_1.mbag0_4-6.zip", "mds013u4g.1_1.mbag0_4-7.zip", "mds013u4g.1_1.mbag0_4-8.7z", + "mds088kd2.mbag0_3-9.zip", 
"mds088kd2.mbag0_3-10.zip", "mds088kd2.mbag0_3-11.7z", + "mds088kd2.mbag0_3-12.zip", "mds088kd2.mbag0_3-13.zip", "mds088kd2.mbag0_3-14.7z", + "mds088kd2.1_0_1.mbag0_4-15.zip", "mds088kd2.1_0_1.mbag0_4-16.zip", "mds088kd2.1_0_1.mbag0_4-17.7z" + }; + + // Verify each object exists in the bucket + for (String key : expectedKeys) { + assertTrue("Object " + key + " should exist in the bucket", + objectExists(BUCKET_NAME, key)); + } + + // Verify corresponding .sha256 files exist + for (String key : expectedKeys) { + String hashKey = key + ".sha256"; + assertTrue("Hash file " + hashKey + " should exist in the bucket", + objectExists(BUCKET_NAME, hashKey)); + } + } catch (Exception e) { + fail("populateBucket test failed: " + e.getMessage()); } } @@ -215,15 +260,16 @@ public void testFindBagsFor() throws DistributionException, FileNotFoundExceptio filenames.add("mds013u4g.1_1.mbag0_4-6.zip"); filenames.add("mds013u4g.1_1.mbag0_4-7.zip"); filenames.add("mds013u4g.1_1.mbag0_4-8.7z"); - + assertEquals(filenames, s3Storage.findBagsFor("mds013u4g")); try { filenames = s3Storage.findBagsFor("mds013u4g9"); - fail("Failed to raise ResourceNotFoundException; returned "+filenames.toString()); - } catch (ResourceNotFoundException ex) { } + fail("Failed to raise ResourceNotFoundException; returned " + filenames.toString()); + } catch (ResourceNotFoundException ex) { + } - filenames = new ArrayList(); + filenames = new ArrayList(); filenames.add("mds088kd2.mbag0_3-9.zip"); filenames.add("mds088kd2.mbag0_3-10.zip"); filenames.add("mds088kd2.mbag0_3-11.7z"); @@ -237,10 +283,10 @@ public void testFindBagsFor() throws DistributionException, FileNotFoundExceptio List found = s3Storage.findBagsFor("mds088kd2"); found.sort(null); - + assertEquals(filenames, found); } - + @Test public void testFindBagsForByPage() throws DistributionException, FileNotFoundException { s3Storage.setPageSize(4); @@ -254,14 +300,14 @@ public void testFindBagsForByPage() throws DistributionException, FileNotFoundEx 
filenames.add("mds013u4g.1_1.mbag0_4-6.zip"); filenames.add("mds013u4g.1_1.mbag0_4-7.zip"); filenames.add("mds013u4g.1_1.mbag0_4-8.7z"); - + assertEquals(filenames, s3Storage.findBagsFor("mds013u4g")); } @Test - public void testFileChecksum() throws FileNotFoundException, DistributionException { + public void testFileChecksum() throws FileNotFoundException, DistributionException { String getChecksumHash = s3Storage.getChecksum("mds088kd2.mbag0_3-10.zip").hash; - assertEquals(getChecksumHash.trim(), hash.trim()); + assertEquals(getChecksumHash.trim(), HASH.trim()); String h = "6f6173bf926eef7978d86a98f19ebc54b14ce3f8acaa2ce7dc8d199ae65adcb7"; getChecksumHash = s3Storage.getChecksum("mds088kd2.mbag0_3-10.zip.sha256").hash; @@ -269,21 +315,21 @@ public void testFileChecksum() throws FileNotFoundException, DistributionExcepti } @Test - public void testFileSize() throws FileNotFoundException, DistributionException { + public void testFileSize() throws FileNotFoundException, DistributionException { long filelength = s3Storage.getSize("mds088kd2.1_0_1.mbag0_4-17.7z"); assertEquals(1, filelength); filelength = s3Storage.getSize("mds088kd2.1_0_1.mbag0_4-17.7z.sha256"); assertEquals(94, filelength); - } + } - //Need to update deatils to compare two file streams + // Need to update deatils to compare two file streams @Test - public void testFileStream() throws FileNotFoundException, DistributionException, IOException { + public void testFileStream() throws FileNotFoundException, DistributionException, IOException { InputStream is = s3Storage.openFile("mds088kd2.1_0_1.mbag0_4-17.7z"); byte[] buf = new byte[100]; int n = is.read(buf); - assertEquals("Unexpected output: "+(new String(buf, 0, n)), 1, n); + assertEquals("Unexpected output: " + (new String(buf, 0, n)), 1, n); assertEquals("0", new String(buf, 0, 1)); assertEquals(-1, is.read()); is.close(); @@ -297,47 +343,51 @@ public void testFileStream() throws FileNotFoundException, DistributionException is = 
s3Storage.openFile("goober-17.7z"); fail("Failed to barf on missing file"); is.close(); - } catch (FileNotFoundException ex) { } - } + } catch (FileNotFoundException ex) { + } + } @Test public void testFileHeadbag() throws FileNotFoundException, DistributionException { - assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2")); - assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g")); + assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2")); + assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g")); - assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g", "1.1")); - assertEquals("mds013u4g.1_0_1.mbag0_4-5.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.1")); - assertEquals("mds013u4g.1_0_0.mbag0_4-2.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.0")); + assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g", "1.1")); + assertEquals("mds013u4g.1_0_1.mbag0_4-5.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.1")); + assertEquals("mds013u4g.1_0_0.mbag0_4-2.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.0")); - assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2", "1.0.1")); - assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "0")); - assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "1")); + assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2", "1.0.1")); + assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "0")); + assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "1")); try { String bagname = s3Storage.findHeadBagFor("mds013u4g9"); - fail("Failed to raise ResourceNotFoundException; returned "+bagname.toString()); - } catch (ResourceNotFoundException ex) { } + fail("Failed to raise ResourceNotFoundException; returned " + 
bagname.toString()); + } catch (ResourceNotFoundException ex) { + } } @Test public void testFindHeadbagByPage() throws FileNotFoundException, DistributionException { s3Storage.setPageSize(2); - assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2")); - assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g")); + assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2")); + assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g")); - assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g", "1.1")); - assertEquals("mds013u4g.1_0_1.mbag0_4-5.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.1")); - assertEquals("mds013u4g.1_0_0.mbag0_4-2.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.0")); + assertEquals("mds013u4g.1_1.mbag0_4-8.7z", s3Storage.findHeadBagFor("mds013u4g", "1.1")); + assertEquals("mds013u4g.1_0_1.mbag0_4-5.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.1")); + assertEquals("mds013u4g.1_0_0.mbag0_4-2.7z", s3Storage.findHeadBagFor("mds013u4g", "1.0.0")); - assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2", "1.0.1")); - assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "0")); - assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "1")); + assertEquals("mds088kd2.1_0_1.mbag0_4-17.7z", s3Storage.findHeadBagFor("mds088kd2", "1.0.1")); + assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "0")); + assertEquals("mds088kd2.mbag0_3-14.7z", s3Storage.findHeadBagFor("mds088kd2", "1")); try { String bagname = s3Storage.findHeadBagFor("mds013u4g9"); - fail("Failed to raise ResourceNotFoundException; returned "+bagname.toString()); - } catch (ResourceNotFoundException ex) { } + fail("Failed to raise ResourceNotFoundException; returned " + bagname.toString()); + } catch (ResourceNotFoundException ex) { + } } + } diff --git 
a/src/test/java/gov/nist/oar/distrib/storage/S3MockTestRule.java b/src/test/java/gov/nist/oar/distrib/storage/S3MockTestRule.java index 6ffadd9b..f499ce8d 100644 --- a/src/test/java/gov/nist/oar/distrib/storage/S3MockTestRule.java +++ b/src/test/java/gov/nist/oar/distrib/storage/S3MockTestRule.java @@ -20,11 +20,11 @@ import org.slf4j.LoggerFactory; import java.net.URL; +import java.util.concurrent.TimeUnit; import java.net.MalformedURLException; import java.net.HttpURLConnection; import java.io.IOException; import java.io.File; -import java.io.FileNotFoundException; import java.lang.ProcessBuilder.Redirect; /** @@ -59,7 +59,14 @@ public void startServer() throws IOException { public void stopServer() { log.info("Shutting down S3Mock server"); - server.destroy(); + if (server != null && server.isAlive()) { + server.destroy(); + try { + server.waitFor(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + log.warn("Interrupted while waiting for server shutdown."); + } + } } public boolean checkAvailable() throws MalformedURLException { @@ -86,17 +93,23 @@ public boolean checkAvailable() throws MalformedURLException { } public boolean waitForServer(long timeoutms) throws InterruptedException, MalformedURLException { - long starttime = System.currentTimeMillis(); + long startTime = System.currentTimeMillis(); + long remainingTime = timeoutms; + - while (System.currentTimeMillis() - starttime < timeoutms) { - if (! 
server.isAlive()) - throw new IllegalStateException("Server exited prematurely; status="+ - Integer.toString(server.exitValue())); - if (checkAvailable()) + while (remainingTime > 0) { + if (!server.isAlive()) { + throw new IllegalStateException("S3Mock server exited prematurely; status=" + + server.exitValue()); + } + if (checkAvailable()) { + log.info("S3Mock server is available."); return true; - Thread.sleep(2000); + } + Thread.sleep(3000); // Delay before checking server status + remainingTime = timeoutms - (System.currentTimeMillis() - startTime); + log.warn("Waiting for S3Mock server, remaining time: " + remainingTime + "ms"); } - return false; } diff --git a/src/test/java/gov/nist/oar/distrib/web/AIPAccessControllerTest.java b/src/test/java/gov/nist/oar/distrib/web/AIPAccessControllerTest.java index 54704ac7..11ca4110 100644 --- a/src/test/java/gov/nist/oar/distrib/web/AIPAccessControllerTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/AIPAccessControllerTest.java @@ -49,7 +49,8 @@ "distrib.bagstore.mode=local", "distrib.bagstore.location=${basedir}/src/test/resources", "distrib.baseurl=http://localhost/oar-distrb-service", - "logging.path=${basedir}/target/surefire-reports" + "logging.path=${basedir}/target/surefire-reports", + "cloud.aws.region=us-east-1" }) public class AIPAccessControllerTest { diff --git a/src/test/java/gov/nist/oar/distrib/web/BundleDownloadPlanControllerTest.java b/src/test/java/gov/nist/oar/distrib/web/BundleDownloadPlanControllerTest.java index 3dc03d1e..d88de815 100644 --- a/src/test/java/gov/nist/oar/distrib/web/BundleDownloadPlanControllerTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/BundleDownloadPlanControllerTest.java @@ -57,7 +57,8 @@ "logging.path=${basedir}/target/surefire-reports", "distrib.packaging.maxpackagesize = 2000000", "distrib.packaging.maxfilecount = 2", - "distrib.packaging.allowedurls = nist.gov|s3.amazonaws.com/nist-midas|httpstat.us" }) + "distrib.packaging.allowedurls = 
nist.gov|s3.amazonaws.com/nist-midas|httpstat.us", + "cloud.aws.region=us-east-1" }) public class BundleDownloadPlanControllerTest { RequireWebSite required = new RequireWebSite("https://s3.amazonaws.com/nist-midas/1894/license.pdf"); Logger logger = LoggerFactory.getLogger(BundleDownloadPlanControllerTest.class); diff --git a/src/test/java/gov/nist/oar/distrib/web/CacheManagementControllerTest.java b/src/test/java/gov/nist/oar/distrib/web/CacheManagementControllerTest.java index 4a67db6e..fc0f262d 100644 --- a/src/test/java/gov/nist/oar/distrib/web/CacheManagementControllerTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/CacheManagementControllerTest.java @@ -77,7 +77,8 @@ "distrib.cachemgr.volumes[0].roles[1]=fast", "distrib.cachemgr.volumes[1].roles[0]=large", "distrib.cachemgr.volumes[1].roles[1]=general", - "distrib.cachemgr.restapi.accesstoken=SECRET" + "distrib.cachemgr.restapi.accesstoken=SECRET", + "cloud.aws.region=us-east-1" }) public class CacheManagementControllerTest { diff --git a/src/test/java/gov/nist/oar/distrib/web/DataBundleAccessControllerTest.java b/src/test/java/gov/nist/oar/distrib/web/DataBundleAccessControllerTest.java index ff0b165c..bf86a6e0 100644 --- a/src/test/java/gov/nist/oar/distrib/web/DataBundleAccessControllerTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/DataBundleAccessControllerTest.java @@ -57,7 +57,8 @@ @TestPropertySource(properties = { "distrib.bagstore.mode=local", "distrib.bagstore.location=./src/test/resources", "distrib.baseurl=http://localhost/od/ds", "logging.path=./target/surefire-reports", "distrib.packaging.maxpackagesize = 100000", - "distrib.packaging.maxfilecount = 2", "distrib.packaging.allowedurls = nist.gov|s3.amazonaws.com/nist-midas" + "distrib.packaging.maxfilecount = 2", "distrib.packaging.allowedurls = nist.gov|s3.amazonaws.com/nist-midas", + "cloud.aws.region=us-east-1" // "logging.level.org.springframework.web=DEBUG" }) public class DataBundleAccessControllerTest { diff --git 
a/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerTest.java b/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerTest.java index c7776c68..2f819be9 100644 --- a/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerTest.java @@ -50,6 +50,7 @@ "distrib.bagstore.location=${basedir}/src/test/resources", "distrib.baseurl=http://localhost/oar-distrb-service", "logging.path=${basedir}/target/surefire-reports", + "cloud.aws.region=us-east-1" // "logging.level.org.springframework.web=DEBUG" // "logging.level.gov.nist.oar.distrib=DEBUG" }) diff --git a/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerWithCacheTest.java b/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerWithCacheTest.java index 12ed0067..610730ee 100644 --- a/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerWithCacheTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/DatasetAccessControllerWithCacheTest.java @@ -83,7 +83,8 @@ // "logging.level.org.springframework.web=DEBUG" "logging.level.gov.nist.oar.distrib=DEBUG", "logging.path=${basedir}/target", - "logging.file=tst.log" + "logging.file=tst.log", + "cloud.aws.region=us-east-1" }) public class DatasetAccessControllerWithCacheTest { diff --git a/src/test/java/gov/nist/oar/distrib/web/NISTDistribServiceConfigTest.java b/src/test/java/gov/nist/oar/distrib/web/NISTDistribServiceConfigTest.java index e8e00c47..0111e351 100644 --- a/src/test/java/gov/nist/oar/distrib/web/NISTDistribServiceConfigTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/NISTDistribServiceConfigTest.java @@ -59,7 +59,9 @@ "distrib.rpa.authorized[0]=AUTHORIZED.1", "distrib.rpa.authorized[1]=AUTHORIZED.2", "distrib.rpa.bagstore-location=${basedir}/src/test/resources/restricted", - "distrib.rpa.bagstore-mode=local" + "distrib.rpa.bagstore-mode=local", + "cloud.aws.region=us-east-1" + }) public class NISTDistribServiceConfigTest { diff 
--git a/src/test/java/gov/nist/oar/distrib/web/NoCacheManagementControllerTest.java b/src/test/java/gov/nist/oar/distrib/web/NoCacheManagementControllerTest.java index 61978c47..2e057da7 100644 --- a/src/test/java/gov/nist/oar/distrib/web/NoCacheManagementControllerTest.java +++ b/src/test/java/gov/nist/oar/distrib/web/NoCacheManagementControllerTest.java @@ -46,7 +46,8 @@ "distrib.bagstore.location=${basedir}/src/test/resources", "distrib.baseurl=http://localhost/oar-distrb-service", "distrib.cachemgr.restapi.accesstoken=SECRET", - "logging.path=${basedir}/target/surefire-reports" + "logging.path=${basedir}/target/surefire-reports", + "cloud.aws.region=us-east-1" }) public class NoCacheManagementControllerTest { From 44e624c679004991430e2a26f2f0978a81bec5e6 Mon Sep 17 00:00:00 2001 From: elmiomar Date: Wed, 18 Dec 2024 15:45:25 -0500 Subject: [PATCH 5/8] update dependencies --- pom.xml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/pom.xml b/pom.xml index 9ffb1efa..ed8b2a2a 100644 --- a/pom.xml +++ b/pom.xml @@ -50,19 +50,6 @@ org.springframework.boot spring-boot-starter-web - - - - software.amazon.awssdk - s3 - 2.25.16 - - software.amazon.awssdk From 7731249028eac6df96cc04730c62959c43b3a1e1 Mon Sep 17 00:00:00 2001 From: RayPlante Date: Mon, 20 Jan 2025 13:58:57 -0500 Subject: [PATCH 6/8] GA source.yml: update deprecated cache action --- .github/workflows/source.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index b2391667..d0ed0a0f 100644 --- a/.github/workflows/source.yml +++ b/.github/workflows/source.yml @@ -18,7 +18,7 @@ jobs: uses: actions/checkout@v4 - name: Cache Maven packages - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.m2 key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml') }} From f7eec7bd18633f65165404c3894cde2ca90e6764 Mon Sep 17 00:00:00 2001 From: elmiomar Date: Thu, 23 Jan 2025 21:46:46 -0500 Subject: [PATCH 7/8] add region 
configuration to S3 client setup --- .../gov/nist/oar/distrib/web/NISTDistribServiceConfig.java | 2 +- src/main/resources/application.yml | 4 ++++ src/test/resources/application.yml | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java b/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java index 3cd46504..a3270421 100644 --- a/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java +++ b/src/main/java/gov/nist/oar/distrib/web/NISTDistribServiceConfig.java @@ -141,7 +141,7 @@ public class NISTDistribServiceConfig { /** * the AWS region the service should operate in; this is ignored if mode=local. */ - @Value("${cloud.aws.region:@null}") + @Value("${cloud.aws.region:us-east-1}") String region; /** diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index b462a7f2..305bed65 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -27,6 +27,10 @@ server: max-threads: 200 min-spare-threads: 10 +cloud: + aws: + region: us-east-1 + logging: file: distservice.log path: /var/log/dist-service diff --git a/src/test/resources/application.yml b/src/test/resources/application.yml index 94f7204d..944ec912 100644 --- a/src/test/resources/application.yml +++ b/src/test/resources/application.yml @@ -4,7 +4,7 @@ spring: enabled: false server: - port: 8083 + port: 0 # Avoid conflict with actual service running port 8083 distrib: bagstore: From 30c248c8a3ca7b7d70a21964ce1a26c9547c0e42 Mon Sep 17 00:00:00 2001 From: elmiomar Date: Fri, 24 Jan 2025 14:48:26 -0500 Subject: [PATCH 8/8] fix config server location --- src/main/resources/application.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index 305bed65..aab79767 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -4,7 +4,8 
@@ spring: config: import: "optional:configserver:" cloud: - uri: http://localhost:8087 + config: + uri: http://localhost:8087 server: port: 8083