Upload or download large files to and from Amazon S3 using an AWS SDK
The following code examples show how to upload large files to and download large files from Amazon S3.
For more information, see Uploading an object using multipart upload.
- .NET (AWS SDK for .NET)
Note

There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.

Call functions that transfer files to and from an S3 bucket using the Amazon S3 TransferUtility.
global using System.Text; global using Amazon.S3; global using Amazon.S3.Model; global using Amazon.S3.Transfer; global using TransferUtilityBasics; // This Amazon S3 client uses the default user credentials // defined for this computer. using Microsoft.Extensions.Configuration; IAmazonS3 client = new AmazonS3Client(); var transferUtil = new TransferUtility(client); IConfiguration _configuration; _configuration = new ConfigurationBuilder() .SetBasePath(Directory.GetCurrentDirectory()) .AddJsonFile("settings.json") // Load test settings from JSON file. .AddJsonFile("settings.local.json", true) // Optionally load local settings. .Build(); // Edit the values in settings.json to use an S3 bucket and files that // exist on your AWS account and on the local computer where you // run this scenario. var bucketName = _configuration["BucketName"]; var localPath = $"{Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData)}\\TransferFolder"; DisplayInstructions(); PressEnter(); Console.WriteLine(); // Upload a single file to an S3 bucket. DisplayTitle("Upload a single file"); var fileToUpload = _configuration["FileToUpload"]; Console.WriteLine($"Uploading {fileToUpload} to the S3 bucket, {bucketName}."); var success = await TransferMethods.UploadSingleFileAsync(transferUtil, bucketName, fileToUpload, localPath); if (success) { Console.WriteLine($"Successfully uploaded the file, {fileToUpload} to {bucketName}."); } PressEnter(); // Upload a local directory to an S3 bucket. DisplayTitle("Upload all files from a local directory"); Console.WriteLine("Upload all the files in a local folder to an S3 bucket."); const string keyPrefix = "UploadFolder"; var uploadPath = $"{localPath}\\UploadFolder"; Console.WriteLine($"Uploading the files in {uploadPath} to {bucketName}"); DisplayTitle($"{uploadPath} files"); DisplayLocalFiles(uploadPath); Console.WriteLine(); PressEnter(); success = await TransferMethods.UploadFullDirectoryAsync(transferUtil, bucketName, keyPrefix, uploadPath); if (success) { Console.WriteLine($"Successfully uploaded the files in {uploadPath} to {bucketName}."); Console.WriteLine($"{bucketName} currently contains the following files:"); await DisplayBucketFiles(client, bucketName, keyPrefix); Console.WriteLine(); } PressEnter(); // Download a single file from an S3 bucket. DisplayTitle("Download a single file"); Console.WriteLine("Now we will download a single file from an S3 bucket."); var keyName = _configuration["FileToDownload"]; Console.WriteLine($"Downloading {keyName} from {bucketName}."); success = await TransferMethods.DownloadSingleFileAsync(transferUtil, bucketName, keyName, localPath); if (success) { Console.WriteLine($"Successfully downloaded the file, {keyName} from {bucketName}."); } PressEnter(); // Download the contents of a directory from an S3 bucket. 
DisplayTitle("Download the contents of an S3 bucket"); var s3Path = _configuration["S3Path"]; var downloadPath = $"{localPath}\\{s3Path}"; Console.WriteLine($"Downloading the contents of {bucketName}\\{s3Path}"); Console.WriteLine($"{bucketName}\\{s3Path} contains the following files:"); await DisplayBucketFiles(client, bucketName, s3Path); Console.WriteLine(); success = await TransferMethods.DownloadS3DirectoryAsync(transferUtil, bucketName, s3Path, downloadPath); if (success) { Console.WriteLine($"Downloaded the files in {bucketName} to {downloadPath}."); Console.WriteLine($"{downloadPath} now contains the following files:"); DisplayLocalFiles(downloadPath); } Console.WriteLine("\nThe TransferUtility Basics application has completed."); PressEnter(); // Displays the title for a section of the scenario. static void DisplayTitle(string titleText) { var sepBar = new string('-', Console.WindowWidth); Console.WriteLine(sepBar); Console.WriteLine(CenterText(titleText)); Console.WriteLine(sepBar); } // Displays a description of the actions to be performed by the scenario. static void DisplayInstructions() { var sepBar = new string('-', Console.WindowWidth); DisplayTitle("Amazon S3 Transfer Utility Basics"); Console.WriteLine("This program shows how to use the Amazon S3 Transfer Utility."); Console.WriteLine("It performs the following actions:"); Console.WriteLine("\t1. Upload a single object to an S3 bucket."); Console.WriteLine("\t2. Upload an entire directory from the local computer to an\n\t S3 bucket."); Console.WriteLine("\t3. Download a single object from an S3 bucket."); Console.WriteLine("\t4. Download the objects in an S3 bucket to a local directory."); Console.WriteLine($"\n{sepBar}"); } // Pauses the scenario. static void PressEnter() { Console.WriteLine("Press <Enter> to continue."); _ = Console.ReadLine(); Console.WriteLine("\n"); } // Returns the string textToCenter, padded on the left with spaces // that center the text on the console display. static string CenterText(string textToCenter) { var centeredText = new StringBuilder(); var screenWidth = Console.WindowWidth; centeredText.Append(new string(' ', (int)(screenWidth - textToCenter.Length) / 2)); centeredText.Append(textToCenter); return centeredText.ToString(); } // Displays a list of file names included in the specified path. static void DisplayLocalFiles(string localPath) { var fileList = Directory.GetFiles(localPath); if (fileList.Length > 0) { foreach (var fileName in fileList) { Console.WriteLine(fileName); } } } // Displays a list of the files in the specified S3 bucket and prefix. static async Task DisplayBucketFiles(IAmazonS3 client, string bucketName, string s3Path) { ListObjectsV2Request request = new() { BucketName = bucketName, Prefix = s3Path, MaxKeys = 5, }; var response = new ListObjectsV2Response(); do { response = await client.ListObjectsV2Async(request); response.S3Objects .ForEach(obj => Console.WriteLine($"{obj.Key}")); // If the response is truncated, set the request ContinuationToken // from the NextContinuationToken property of the response. request.ContinuationToken = response.NextContinuationToken; } while (response.IsTruncated); }
Upload a single file.
/// <summary> /// Uploads a single file from the local computer to an S3 bucket. /// </summary> /// <param name="transferUtil">The transfer initialized TransferUtility /// object.</param> /// <param name="bucketName">The name of the S3 bucket where the file /// will be stored.</param> /// <param name="fileName">The name of the file to upload.</param> /// <param name="localPath">The local path where the file is stored.</param> /// <returns>A boolean value indicating the success of the action.</returns> public static async Task<bool> UploadSingleFileAsync( TransferUtility transferUtil, string bucketName, string fileName, string localPath) { if (File.Exists($"{localPath}\\{fileName}")) { try { await transferUtil.UploadAsync(new TransferUtilityUploadRequest { BucketName = bucketName, Key = fileName, FilePath = $"{localPath}\\{fileName}", }); return true; } catch (AmazonS3Exception s3Ex) { Console.WriteLine($"Could not upload {fileName} from {localPath} because:"); Console.WriteLine(s3Ex.Message); return false; } } else { Console.WriteLine($"{fileName} does not exist in {localPath}"); return false; } }
Upload an entire local directory.
/// <summary> /// Uploads all the files in a local directory to a directory in an S3 /// bucket. /// </summary> /// <param name="transferUtil">The transfer initialized TransferUtility /// object.</param> /// <param name="bucketName">The name of the S3 bucket where the files /// will be stored.</param> /// <param name="keyPrefix">The key prefix is the S3 directory where /// the files will be stored.</param> /// <param name="localPath">The local directory that contains the files /// to be uploaded.</param> /// <returns>A Boolean value representing the success of the action.</returns> public static async Task<bool> UploadFullDirectoryAsync( TransferUtility transferUtil, string bucketName, string keyPrefix, string localPath) { if (Directory.Exists(localPath)) { try { await transferUtil.UploadDirectoryAsync(new TransferUtilityUploadDirectoryRequest { BucketName = bucketName, KeyPrefix = keyPrefix, Directory = localPath, }); return true; } catch (AmazonS3Exception s3Ex) { Console.WriteLine($"Can't upload the contents of {localPath} because:"); Console.WriteLine(s3Ex?.Message); return false; } } else { Console.WriteLine($"The directory {localPath} does not exist."); return false; } }
Download a single file.
/// <summary> /// Download a single file from an S3 bucket to the local computer. /// </summary> /// <param name="transferUtil">The transfer initialized TransferUtility /// object.</param> /// <param name="bucketName">The name of the S3 bucket containing the /// file to download.</param> /// <param name="keyName">The name of the file to download.</param> /// <param name="localPath">The path on the local computer where the /// downloaded file will be saved.</param> /// <returns>A Boolean value indicating the results of the action.</returns> public static async Task<bool> DownloadSingleFileAsync( TransferUtility transferUtil, string bucketName, string keyName, string localPath) { await transferUtil.DownloadAsync(new TransferUtilityDownloadRequest { BucketName = bucketName, Key = keyName, FilePath = $"{localPath}\\{keyName}", }); return (File.Exists($"{localPath}\\{keyName}")); }
Download the contents of an S3 bucket.
/// <summary> /// Downloads the contents of a directory in an S3 bucket to a /// directory on the local computer. /// </summary> /// <param name="transferUtil">The transfer initialized TransferUtility /// object.</param> /// <param name="bucketName">The bucket containing the files to download.</param> /// <param name="s3Path">The S3 directory where the files are located.</param> /// <param name="localPath">The local path to which the files will be /// saved.</param> /// <returns>A Boolean value representing the success of the action.</returns> public static async Task<bool> DownloadS3DirectoryAsync( TransferUtility transferUtil, string bucketName, string s3Path, string localPath) { int fileCount = 0; // If the local directory already exists, count its files so the // result of the download can be verified. If it doesn't exist, // the transfer utility creates it. if (Directory.Exists(localPath)) { var files = Directory.GetFiles(localPath); fileCount = files.Length; } await transferUtil.DownloadDirectoryAsync(new TransferUtilityDownloadDirectoryRequest { BucketName = bucketName, LocalDirectory = localPath, S3Directory = s3Path, }); if (Directory.Exists(localPath)) { var files = Directory.GetFiles(localPath); if (files.Length > fileCount) { return true; } // No change in the number of files. Assume // the download failed. return false; } // The local directory doesn't exist. No files // were downloaded. return false; }
Track the progress of an upload using the TransferUtility.
using System; using System.Threading.Tasks; using Amazon.S3; using Amazon.S3.Transfer; /// <summary> /// This example shows how to track the progress of a multipart upload /// using the Amazon Simple Storage Service (Amazon S3) TransferUtility to /// upload to an Amazon S3 bucket. /// </summary> public class TrackMPUUsingHighLevelAPI { public static async Task Main() { string bucketName = "amzn-s3-demo-bucket"; string keyName = "sample_pic.png"; string path = "filepath/directory/"; string filePath = $"{path}{keyName}"; // If the AWS Region defined for your default user is different // from the Region where your Amazon S3 bucket is located, // pass the Region name to the Amazon S3 client object's constructor. // For example: RegionEndpoint.USWest2 or RegionEndpoint.USEast2. IAmazonS3 client = new AmazonS3Client(); await TrackMPUAsync(client, bucketName, filePath, keyName); } /// <summary> /// Starts an Amazon S3 multipart upload and assigns an event handler to /// track the progress of the upload. /// </summary> /// <param name="client">The initialized Amazon S3 client object used to /// perform the multipart upload.</param> /// <param name="bucketName">The name of the bucket to which to upload /// the file.</param> /// <param name="filePath">The path, including the file name of the /// file to be uploaded to the Amazon S3 bucket.</param> /// <param name="keyName">The file name to be used in the /// destination Amazon S3 bucket.</param> public static async Task TrackMPUAsync( IAmazonS3 client, string bucketName, string filePath, string keyName) { try { var fileTransferUtility = new TransferUtility(client); // Use TransferUtilityUploadRequest to configure options. // In this example we subscribe to an event. var uploadRequest = new TransferUtilityUploadRequest { BucketName = bucketName, FilePath = filePath, Key = keyName, }; uploadRequest.UploadProgressEvent += new EventHandler<UploadProgressArgs>( UploadRequest_UploadPartProgressEvent); await fileTransferUtility.UploadAsync(uploadRequest); Console.WriteLine("Upload completed"); } catch (AmazonS3Exception ex) { Console.WriteLine($"Error:: {ex.Message}"); } } /// <summary> /// Event handler to check the progress of the multipart upload. /// </summary> /// <param name="sender">The object that raised the event.</param> /// <param name="e">The object that contains multipart upload /// information.</param> public static void UploadRequest_UploadPartProgressEvent(object sender, UploadProgressArgs e) { // Process event. Console.WriteLine($"{e.TransferredBytes}/{e.TotalBytes}"); } }
Upload an object with encryption.
using System; using System.Collections.Generic; using System.IO; using System.Security.Cryptography; using System.Threading.Tasks; using Amazon.S3; using Amazon.S3.Model; /// <summary> /// Uses the Amazon Simple Storage Service (Amazon S3) low level API to /// perform a multipart upload to an Amazon S3 bucket. /// </summary> public class SSECLowLevelMPUcopyObject { public static async Task Main() { string existingBucketName = "amzn-s3-demo-bucket"; string sourceKeyName = "sample_file.txt"; string targetKeyName = "sample_file_copy.txt"; string filePath = $"sample\\{targetKeyName}"; // If the AWS Region defined for your default user is different // from the Region where your Amazon S3 bucket is located, // pass the Region name to the Amazon S3 client object's constructor. // For example: RegionEndpoint.USEast1. IAmazonS3 client = new AmazonS3Client(); // Create the encryption key. var base64Key = CreateEncryptionKey(); await CreateSampleObjUsingClientEncryptionKeyAsync( client, existingBucketName, sourceKeyName, filePath, base64Key); } /// <summary> /// Creates the encryption key to use with the multipart upload. /// </summary> /// <returns>A string containing the base64-encoded key for encrypting /// the multipart upload.</returns> public static string CreateEncryptionKey() { Aes aesEncryption = Aes.Create(); aesEncryption.KeySize = 256; aesEncryption.GenerateKey(); string base64Key = Convert.ToBase64String(aesEncryption.Key); return base64Key; } /// <summary> /// Creates and uploads an object using a multipart upload. /// </summary> /// <param name="client">The initialized Amazon S3 object used to /// initialize and perform the multipart upload.</param> /// <param name="existingBucketName">The name of the bucket to which /// the object will be uploaded.</param> /// <param name="sourceKeyName">The source object name.</param> /// <param name="filePath">The location of the source object.</param> /// <param name="base64Key">The encryption key to use with the upload.</param> public static async Task CreateSampleObjUsingClientEncryptionKeyAsync( IAmazonS3 client, string existingBucketName, string sourceKeyName, string filePath, string base64Key) { List<UploadPartResponse> uploadResponses = new List<UploadPartResponse>(); InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest { BucketName = existingBucketName, Key = sourceKeyName, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key, }; InitiateMultipartUploadResponse initResponse = await client.InitiateMultipartUploadAsync(initiateRequest); long contentLength = new FileInfo(filePath).Length; long partSize = 5 * (long)Math.Pow(2, 20); // 5 MB try { long filePosition = 0; for (int i = 1; filePosition < contentLength; i++) { UploadPartRequest uploadRequest = new UploadPartRequest { BucketName = existingBucketName, Key = sourceKeyName, UploadId = initResponse.UploadId, PartNumber = i, PartSize = partSize, FilePosition = filePosition, FilePath = filePath, ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256, ServerSideEncryptionCustomerProvidedKey = base64Key, }; // Upload part and add response to our list. 
uploadResponses.Add(await client.UploadPartAsync(uploadRequest)); filePosition += partSize; } CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest { BucketName = existingBucketName, Key = sourceKeyName, UploadId = initResponse.UploadId, }; completeRequest.AddPartETags(uploadResponses); CompleteMultipartUploadResponse completeUploadResponse = await client.CompleteMultipartUploadAsync(completeRequest); } catch (Exception exception) { Console.WriteLine($"Exception occurred: {exception.Message}"); // If there was an error, abort the multipart upload. AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest { BucketName = existingBucketName, Key = sourceKeyName, UploadId = initResponse.UploadId, }; await client.AbortMultipartUploadAsync(abortMPURequest); } } }
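An object uploaded with customer-provided keys (SSE-C) can only be retrieved by presenting the same key again. The following is a minimal sketch of the matching download call, not part of the original example; it assumes the same usings as the upload code plus System.Threading, and reuses the client, bucket, key name, and base64Key from above:

/// <summary>
/// Downloads the SSE-C protected object created by the upload example.
/// </summary>
public static async Task DownloadObjectWithSSECAsync(
    IAmazonS3 client,
    string bucketName,
    string keyName,
    string base64Key,
    string destinationPath)
{
    var getRequest = new GetObjectRequest
    {
        BucketName = bucketName,
        Key = keyName,
        // Amazon S3 rejects the request unless the same customer key
        // used at upload time is supplied again here.
        ServerSideEncryptionCustomerMethod = ServerSideEncryptionCustomerMethod.AES256,
        ServerSideEncryptionCustomerProvidedKey = base64Key,
    };

    using GetObjectResponse getResponse = await client.GetObjectAsync(getRequest);

    // Save the decrypted object to a local file.
    await getResponse.WriteResponseStreamToFileAsync(
        destinationPath, false, CancellationToken.None);
}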
- Go (AWS SDK for Go V2)
Note

There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.

Upload a large object by using an upload manager to break the data into parts and upload them concurrently.
// BucketBasics encapsulates the Amazon Simple Storage Service (Amazon S3) actions // used in the examples. // It contains S3Client, an Amazon S3 service client that is used to perform bucket // and object actions. type BucketBasics struct { S3Client *s3.Client } // UploadLargeObject uses an upload manager to upload data to an object in a bucket. // The upload manager breaks large data into parts and uploads the parts concurrently. func (basics BucketBasics) UploadLargeObject(ctx context.Context, bucketName string, objectKey string, largeObject []byte) error { largeBuffer := bytes.NewReader(largeObject) var partMiBs int64 = 10 uploader := manager.NewUploader(basics.S3Client, func(u *manager.Uploader) { u.PartSize = partMiBs * 1024 * 1024 }) _, err := uploader.Upload(ctx, &s3.PutObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(objectKey), Body: largeBuffer, }) if err != nil { log.Printf("Couldn't upload large object to %v:%v. Here's why: %v\n", bucketName, objectKey, err) } return err }
Download a large object by using a download manager to get the data in parts and download them concurrently.
// DownloadLargeObject uses a download manager to download an object from a bucket. // The download manager gets the data in parts and writes them to a buffer until all of // the data has been downloaded. func (basics BucketBasics) DownloadLargeObject(ctx context.Context, bucketName string, objectKey string) ([]byte, error) { var partMiBs int64 = 10 downloader := manager.NewDownloader(basics.S3Client, func(d *manager.Downloader) { d.PartSize = partMiBs * 1024 * 1024 }) buffer := manager.NewWriteAtBuffer([]byte{}) _, err := downloader.Download(ctx, buffer, &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(objectKey), }) if err != nil { log.Printf("Couldn't download large object from %v:%v. Here's why: %v\n", bucketName, objectKey, err) } return buffer.Bytes(), err }
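A minimal sketch of how these helpers might be wired together, assuming the BucketBasics type and both methods above live in the same package; the bucket name and file name are hypothetical, and error handling is abbreviated:

package main

import (
	"context"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Load the default credentials and Region configured for this computer.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	basics := BucketBasics{S3Client: s3.NewFromConfig(cfg)}

	// Read a local file into memory and upload it in concurrent parts.
	data, err := os.ReadFile("large-file.bin") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	if err := basics.UploadLargeObject(ctx, "amzn-s3-demo-bucket", "large-file.bin", data); err != nil {
		log.Fatal(err)
	}

	// Download the object back into a byte slice and report its size.
	downloaded, err := basics.DownloadLargeObject(ctx, "amzn-s3-demo-bucket", "large-file.bin")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Downloaded %v bytes", len(downloaded))
}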
- Java (SDK for Java 2.x)
Note

There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.

Call functions that transfer files to and from an S3 bucket using the S3TransferManager.
public Integer downloadObjectsToDirectory(S3TransferManager transferManager, URI destinationPathURI, String bucketName) { DirectoryDownload directoryDownload = transferManager.downloadDirectory(DownloadDirectoryRequest.builder() .destination(Paths.get(destinationPathURI)) .bucket(bucketName) .build()); CompletedDirectoryDownload completedDirectoryDownload = directoryDownload.completionFuture().join(); completedDirectoryDownload.failedTransfers() .forEach(fail -> logger.warn("Object [{}] failed to transfer", fail.toString())); return completedDirectoryDownload.failedTransfers().size(); }
Upload an entire local directory.
public Integer uploadDirectory(S3TransferManager transferManager, URI sourceDirectory, String bucketName) { DirectoryUpload directoryUpload = transferManager.uploadDirectory(UploadDirectoryRequest.builder() .source(Paths.get(sourceDirectory)) .bucket(bucketName) .build()); CompletedDirectoryUpload completedDirectoryUpload = directoryUpload.completionFuture().join(); completedDirectoryUpload.failedTransfers() .forEach(fail -> logger.warn("Object [{}] failed to transfer", fail.toString())); return completedDirectoryUpload.failedTransfers().size(); }
Upload a single file.
public String uploadFile(S3TransferManager transferManager, String bucketName, String key, URI filePathURI) { UploadFileRequest uploadFileRequest = UploadFileRequest.builder() .putObjectRequest(b -> b.bucket(bucketName).key(key)) .source(Paths.get(filePathURI)) .build(); FileUpload fileUpload = transferManager.uploadFile(uploadFileRequest); CompletedFileUpload uploadResult = fileUpload.completionFuture().join(); return uploadResult.response().eTag(); }
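A minimal usage sketch for the uploadFile method above, not part of the original example. It assumes that method lives on a hypothetical wrapper class named TransferExamples, and that default credentials and a Region are configured; the bucket, key, and file URI are placeholders:

import java.net.URI;
import software.amazon.awssdk.transfer.s3.S3TransferManager;

public class UploadFileUsage {
    public static void main(String[] args) {
        // S3TransferManager.create() uses the default credentials and Region.
        S3TransferManager transferManager = S3TransferManager.create();
        TransferExamples examples = new TransferExamples(); // hypothetical wrapper class
        String eTag = examples.uploadFile(
                transferManager,
                "amzn-s3-demo-bucket",                   // hypothetical bucket
                "sample-key",                            // destination object key
                URI.create("file:///tmp/sample.txt"));   // hypothetical local file
        System.out.println("Uploaded object ETag: " + eTag);
        transferManager.close();
    }
}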
The code examples use the following imports.
import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.waiters.S3Waiter; import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.model.FileUpload; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import java.io.IOException; import java.io.RandomAccessFile; import java.net.URISyntaxException; import java.net.URL; import java.nio.ByteBuffer; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.UUID; import java.util.concurrent.CompletableFuture;
Use the S3 Transfer Manager on top of the AWS CRT-based S3 client to transparently perform a multipart upload when the size of the content exceeds a threshold. The default threshold size is 8 MB.
/** * Uploads a file to an Amazon S3 bucket using the S3TransferManager. * * @param filePath the file path of the file to be uploaded */ public void multipartUploadWithTransferManager(String filePath) { S3TransferManager transferManager = S3TransferManager.create(); UploadFileRequest uploadFileRequest = UploadFileRequest.builder() .putObjectRequest(b -> b .bucket(bucketName) .key(key)) .source(Paths.get(filePath)) .build(); FileUpload fileUpload = transferManager.uploadFile(uploadFileRequest); fileUpload.completionFuture().join(); transferManager.close(); }
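S3TransferManager.create() picks a default underlying client. A minimal sketch, assuming the AWS CRT artifact is on the classpath, of building the transfer manager explicitly on an AWS CRT-based asynchronous client so the part size and target throughput can be tuned (the values shown are illustrative):

import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.transfer.s3.S3TransferManager;

public class CrtTransferManagerFactory {
    public static S3TransferManager create() {
        // Build an AWS CRT-based asynchronous S3 client.
        S3AsyncClient crtClient = S3AsyncClient.crtBuilder()
                .targetThroughputInGbps(20.0)               // tune for the host's network
                .minimumPartSizeInBytes(8L * 1024 * 1024)   // 8 MiB parts
                .build();
        // Hand the CRT client to the transfer manager.
        return S3TransferManager.builder()
                .s3Client(crtClient)
                .build();
    }
}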
Use the S3Client API to perform a multipart upload.

/** * Performs a multipart upload to Amazon S3 using the provided S3 client. * * @param filePath the path to the file to be uploaded */ public void multipartUploadWithS3Client(String filePath) { // Initiate the multipart upload. CreateMultipartUploadResponse createMultipartUploadResponse = s3Client.createMultipartUpload(b -> b .bucket(bucketName) .key(key)); String uploadId = createMultipartUploadResponse.uploadId(); // Upload the parts of the file. int partNumber = 1; List<CompletedPart> completedParts = new ArrayList<>(); ByteBuffer bb = ByteBuffer.allocate(1024 * 1024 * 5); // 5 MB byte buffer try (RandomAccessFile file = new RandomAccessFile(filePath, "r")) { long fileSize = file.length(); long position = 0; while (position < fileSize) { file.seek(position); long read = file.getChannel().read(bb); bb.flip(); // Swap position and limit before reading from the buffer. UploadPartRequest uploadPartRequest = UploadPartRequest.builder() .bucket(bucketName) .key(key) .uploadId(uploadId) .partNumber(partNumber) .build(); UploadPartResponse partResponse = s3Client.uploadPart( uploadPartRequest, RequestBody.fromByteBuffer(bb)); CompletedPart part = CompletedPart.builder() .partNumber(partNumber) .eTag(partResponse.eTag()) .build(); completedParts.add(part); bb.clear(); position += read; partNumber++; } } catch (IOException e) { logger.error(e.getMessage()); } // Complete the multipart upload. s3Client.completeMultipartUpload(b -> b .bucket(bucketName) .key(key) .uploadId(uploadId) .multipartUpload(CompletedMultipartUpload.builder().parts(completedParts).build())); }
Use the S3AsyncClient API with multipart support enabled to perform a multipart upload.

/** * Uploads a file to an S3 bucket using the S3AsyncClient and enabling multipart support. * * @param filePath the local file path of the file to be uploaded */ public void multipartUploadWithS3AsyncClient(String filePath) { // Enable multipart support. S3AsyncClient s3AsyncClient = S3AsyncClient.builder() .multipartEnabled(true) .build(); CompletableFuture<PutObjectResponse> response = s3AsyncClient.putObject(b -> b .bucket(bucketName) .key(key), Paths.get(filePath)); response.join(); logger.info("File uploaded in multiple 8 MiB parts using S3AsyncClient."); }
- JavaScript (SDK for JavaScript (v3))
Note

There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.

Upload a large file.
import { S3Client } from "@aws-sdk/client-s3"; import { Upload } from "@aws-sdk/lib-storage"; import { ProgressBar, logger, } from "@aws-doc-sdk-examples/lib/utils/util-log.js"; const twentyFiveMB = 25 * 1024 * 1024; export const createString = (size = twentyFiveMB) => { return "x".repeat(size); }; /** * Create a 25MB file and upload it in parts to the specified * Amazon S3 bucket. * @param {{ bucketName: string, key: string }} */ export const main = async ({ bucketName, key }) => { const str = createString(); const buffer = Buffer.from(str, "utf8"); const progressBar = new ProgressBar({ description: `Uploading "${key}" to "${bucketName}"`, barLength: 30, }); try { const upload = new Upload({ client: new S3Client({}), params: { Bucket: bucketName, Key: key, Body: buffer, }, }); upload.on("httpUploadProgress", ({ loaded, total }) => { progressBar.update({ current: loaded, total }); }); await upload.done(); } catch (caught) { if (caught instanceof Error && caught.name === "AbortError") { logger.error(`Multipart upload was aborted. ${caught.message}`); } else { throw caught; } } };
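The Upload class also accepts tuning options when it is constructed. A minimal sketch, assuming the same @aws-sdk/lib-storage package; the bucket name, key, and sizes here are illustrative:

import { S3Client } from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";

const buffer = Buffer.from("x".repeat(10 * 1024 * 1024)); // 10 MB of sample data

const upload = new Upload({
  client: new S3Client({}),
  params: { Bucket: "amzn-s3-demo-bucket", Key: "sample-key", Body: buffer },
  queueSize: 4, // number of parts uploaded concurrently
  partSize: 5 * 1024 * 1024, // 5 MB is the minimum part size Amazon S3 accepts
});

await upload.done();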
Download a large file.
import { GetObjectCommand, NoSuchKey, S3Client } from "@aws-sdk/client-s3"; import { createWriteStream, rmSync } from "node:fs"; import { fileURLToPath } from "node:url"; const s3Client = new S3Client({}); const oneMB = 1024 * 1024; export const getObjectRange = ({ bucket, key, start, end }) => { const command = new GetObjectCommand({ Bucket: bucket, Key: key, Range: `bytes=${start}-${end}`, }); return s3Client.send(command); }; /** * @param {string | undefined} contentRange */ export const getRangeAndLength = (contentRange) => { const [range, length] = contentRange.split("/"); const [start, end] = range.split("-"); return { start: Number.parseInt(start), end: Number.parseInt(end), length: Number.parseInt(length), }; }; export const isComplete = ({ end, length }) => end === length - 1; const downloadInChunks = async ({ bucket, key }) => { const writeStream = createWriteStream( fileURLToPath(new URL(`./${key}`, import.meta.url)), ).on("error", (err) => console.error(err)); let rangeAndLength = { start: -1, end: -1, length: -1 }; while (!isComplete(rangeAndLength)) { const { end } = rangeAndLength; const nextRange = { start: end + 1, end: end + oneMB }; const { ContentRange, Body } = await getObjectRange({ bucket, key, ...nextRange, }); console.log(`Downloaded bytes ${nextRange.start} to ${nextRange.end}`); writeStream.write(await Body.transformToByteArray()); rangeAndLength = getRangeAndLength(ContentRange); } }; /** * Download a large object from an Amazon S3 bucket. * * When downloading a large file, you might want to break it down into * smaller pieces. Amazon S3 accepts a Range header to specify the start * and end of the byte range to be downloaded. * * @param {{ bucketName: string, key: string }} */ export const main = async ({ bucketName, key }) => { try { await downloadInChunks({ bucket: bucketName, key: key, }); } catch (caught) { if (caught instanceof NoSuchKey) { console.error(`Failed to download object. No such key "${key}".`); rmSync(key); } } };
- Python (SDK for Python (Boto3))
Note

There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.

Create functions that transfer files using several of the available transfer manager settings. Use a callback class to write callback progress during file transfer.
import sys import threading import boto3 from boto3.s3.transfer import TransferConfig MB = 1024 * 1024 s3 = boto3.resource("s3") class TransferCallback: """ Handle callbacks from the transfer manager. The transfer manager periodically calls the __call__ method throughout the upload and download process so that it can take action, such as displaying progress to the user and collecting data about the transfer. """ def __init__(self, target_size): self._target_size = target_size self._total_transferred = 0 self._lock = threading.Lock() self.thread_info = {} def __call__(self, bytes_transferred): """ The callback method that is called by the transfer manager. Display progress during file transfer and collect per-thread transfer data. This method can be called by multiple threads, so shared instance data is protected by a thread lock. """ thread = threading.current_thread() with self._lock: self._total_transferred += bytes_transferred if thread.ident not in self.thread_info.keys(): self.thread_info[thread.ident] = bytes_transferred else: self.thread_info[thread.ident] += bytes_transferred target = self._target_size * MB sys.stdout.write( f"\r{self._total_transferred} of {target} transferred " f"({(self._total_transferred / target) * 100:.2f}%)." ) sys.stdout.flush() def upload_with_default_configuration( local_file_path, bucket_name, object_key, file_size_mb ): """ Upload a file from a local folder to an Amazon S3 bucket, using the default configuration. """ transfer_callback = TransferCallback(file_size_mb) s3.Bucket(bucket_name).upload_file( local_file_path, object_key, Callback=transfer_callback ) return transfer_callback.thread_info def upload_with_chunksize_and_meta( local_file_path, bucket_name, object_key, file_size_mb, metadata=None ): """ Upload a file from a local folder to an Amazon S3 bucket, setting a multipart chunk size and adding metadata to the Amazon S3 object. The multipart chunk size controls the size of the chunks of data that are sent in the request. A smaller chunk size typically results in the transfer manager using more threads for the upload. The metadata is a set of key-value pairs that are stored with the object in Amazon S3. """ transfer_callback = TransferCallback(file_size_mb) config = TransferConfig(multipart_chunksize=1 * MB) extra_args = {"Metadata": metadata} if metadata else None s3.Bucket(bucket_name).upload_file( local_file_path, object_key, Config=config, ExtraArgs=extra_args, Callback=transfer_callback, ) return transfer_callback.thread_info def upload_with_high_threshold(local_file_path, bucket_name, object_key, file_size_mb): """ Upload a file from a local folder to an Amazon S3 bucket, setting a multipart threshold larger than the size of the file. Setting a multipart threshold larger than the size of the file results in the transfer manager sending the file as a standard upload instead of a multipart upload. """ transfer_callback = TransferCallback(file_size_mb) config = TransferConfig(multipart_threshold=file_size_mb * 2 * MB) s3.Bucket(bucket_name).upload_file( local_file_path, object_key, Config=config, Callback=transfer_callback ) return transfer_callback.thread_info def upload_with_sse( local_file_path, bucket_name, object_key, file_size_mb, sse_key=None ): """ Upload a file from a local folder to an Amazon S3 bucket, adding server-side encryption with customer-provided encryption keys to the object. 
When this kind of encryption is specified, Amazon S3 encrypts the object at rest and allows downloads only when the expected encryption key is provided in the download request. """ transfer_callback = TransferCallback(file_size_mb) if sse_key: extra_args = {"SSECustomerAlgorithm": "AES256", "SSECustomerKey": sse_key} else: extra_args = None s3.Bucket(bucket_name).upload_file( local_file_path, object_key, ExtraArgs=extra_args, Callback=transfer_callback ) return transfer_callback.thread_info def download_with_default_configuration( bucket_name, object_key, download_file_path, file_size_mb ): """ Download a file from an Amazon S3 bucket to a local folder, using the default configuration. """ transfer_callback = TransferCallback(file_size_mb) s3.Bucket(bucket_name).Object(object_key).download_file( download_file_path, Callback=transfer_callback ) return transfer_callback.thread_info def download_with_single_thread( bucket_name, object_key, download_file_path, file_size_mb ): """ Download a file from an Amazon S3 bucket to a local folder, using a single thread. """ transfer_callback = TransferCallback(file_size_mb) config = TransferConfig(use_threads=False) s3.Bucket(bucket_name).Object(object_key).download_file( download_file_path, Config=config, Callback=transfer_callback ) return transfer_callback.thread_info def download_with_high_threshold( bucket_name, object_key, download_file_path, file_size_mb ): """ Download a file from an Amazon S3 bucket to a local folder, setting a multipart threshold larger than the size of the file. Setting a multipart threshold larger than the size of the file results in the transfer manager sending the file as a standard download instead of a multipart download. """ transfer_callback = TransferCallback(file_size_mb) config = TransferConfig(multipart_threshold=file_size_mb * 2 * MB) s3.Bucket(bucket_name).Object(object_key).download_file( download_file_path, Config=config, Callback=transfer_callback ) return transfer_callback.thread_info def download_with_sse( bucket_name, object_key, download_file_path, file_size_mb, sse_key ): """ Download a file from an Amazon S3 bucket to a local folder, adding a customer-provided encryption key to the request. When this kind of encryption is specified, Amazon S3 encrypts the object at rest and allows downloads only when the expected encryption key is provided in the download request. """ transfer_callback = TransferCallback(file_size_mb) if sse_key: extra_args = {"SSECustomerAlgorithm": "AES256", "SSECustomerKey": sse_key} else: extra_args = None s3.Bucket(bucket_name).Object(object_key).download_file( download_file_path, ExtraArgs=extra_args, Callback=transfer_callback ) return transfer_callback.thread_info
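A minimal sketch of calling one of the helpers above directly, not part of the original example; the bucket name, object key, and local path are hypothetical, and file_size_mb only drives the progress display:

if __name__ == "__main__":
    # Upload with a 1 MB multipart chunk size and attach metadata.
    thread_info = upload_with_chunksize_and_meta(
        local_file_path="/tmp/large-file.bin",
        bucket_name="amzn-s3-demo-bucket",
        object_key="large-file.bin",
        file_size_mb=30,
        metadata={"purpose": "demo"},
    )
    print(f"\nUpload used {len(thread_info)} threads.")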
Demonstrate the transfer manager functions and report results.
import hashlib import os import platform import shutil import time import boto3 from boto3.s3.transfer import TransferConfig from botocore.exceptions import ClientError from botocore.exceptions import ParamValidationError from botocore.exceptions import NoCredentialsError import file_transfer MB = 1024 * 1024 # These configuration attributes affect both uploads and downloads. CONFIG_ATTRS = ( "multipart_threshold", "multipart_chunksize", "max_concurrency", "use_threads", ) # These configuration attributes affect only downloads. DOWNLOAD_CONFIG_ATTRS = ("max_io_queue", "io_chunksize", "num_download_attempts") class TransferDemoManager: """ Manages the demonstration. Collects user input from a command line, reports transfer results, maintains a list of artifacts created during the demonstration, and cleans them up after the demonstration is completed. """ def __init__(self): self._s3 = boto3.resource("s3") self._chore_list = [] self._create_file_cmd = None self._size_multiplier = 0 self.file_size_mb = 30 self.demo_folder = None self.demo_bucket = None self._setup_platform_specific() self._terminal_width = shutil.get_terminal_size(fallback=(80, 80))[0] def collect_user_info(self): """ Collect local folder and Amazon S3 bucket name from the user. These locations are used to store files during the demonstration. """ while not self.demo_folder: self.demo_folder = input( "Which file folder do you want to use to store " "demonstration files? " ) if not os.path.isdir(self.demo_folder): print(f"{self.demo_folder} isn't a folder!") self.demo_folder = None while not self.demo_bucket: self.demo_bucket = input( "Which Amazon S3 bucket do you want to use to store " "demonstration files? " ) try: self._s3.meta.client.head_bucket(Bucket=self.demo_bucket) except ParamValidationError as err: print(err) self.demo_bucket = None except ClientError as err: print(err) print( f"Either {self.demo_bucket} doesn't exist or you don't " f"have access to it." ) self.demo_bucket = None def demo( self, question, upload_func, download_func, upload_args=None, download_args=None ): """Run a demonstration. Ask the user if they want to run this specific demonstration. If they say yes, create a file on the local path, upload it using the specified upload function, then download it using the specified download function. 
""" if download_args is None: download_args = {} if upload_args is None: upload_args = {} question = question.format(self.file_size_mb) answer = input(f"{question} (y/n)") if answer.lower() == "y": local_file_path, object_key, download_file_path = self._create_demo_file() file_transfer.TransferConfig = self._config_wrapper( TransferConfig, CONFIG_ATTRS ) self._report_transfer_params( "Uploading", local_file_path, object_key, **upload_args ) start_time = time.perf_counter() thread_info = upload_func( local_file_path, self.demo_bucket, object_key, self.file_size_mb, **upload_args, ) end_time = time.perf_counter() self._report_transfer_result(thread_info, end_time - start_time) file_transfer.TransferConfig = self._config_wrapper( TransferConfig, CONFIG_ATTRS + DOWNLOAD_CONFIG_ATTRS ) self._report_transfer_params( "Downloading", object_key, download_file_path, **download_args ) start_time = time.perf_counter() thread_info = download_func( self.demo_bucket, object_key, download_file_path, self.file_size_mb, **download_args, ) end_time = time.perf_counter() self._report_transfer_result(thread_info, end_time - start_time) def last_name_set(self): """Get the name set used for the last demo.""" return self._chore_list[-1] def cleanup(self): """ Remove files from the demo folder, and uploaded objects from the Amazon S3 bucket. """ print("-" * self._terminal_width) for local_file_path, s3_object_key, downloaded_file_path in self._chore_list: print(f"Removing {local_file_path}") try: os.remove(local_file_path) except FileNotFoundError as err: print(err) print(f"Removing {downloaded_file_path}") try: os.remove(downloaded_file_path) except FileNotFoundError as err: print(err) if self.demo_bucket: print(f"Removing {self.demo_bucket}:{s3_object_key}") try: self._s3.Bucket(self.demo_bucket).Object(s3_object_key).delete() except ClientError as err: print(err) def _setup_platform_specific(self): """Set up platform-specific command used to create a large file.""" if platform.system() == "Windows": self._create_file_cmd = "fsutil file createnew {} {}" self._size_multiplier = MB elif platform.system() == "Linux" or platform.system() == "Darwin": self._create_file_cmd = f"dd if=/dev/urandom of={{}} " f"bs={MB} count={{}}" self._size_multiplier = 1 else: raise EnvironmentError( f"Demo of platform {platform.system()} isn't supported." ) def _create_demo_file(self): """ Create a file in the demo folder specified by the user. Store the local path, object name, and download path for later cleanup. Only the local file is created by this method. The Amazon S3 object and download file are created later during the demonstration. Returns: A tuple that contains the local file path, object name, and download file path. 
""" file_name_template = "TestFile{}-{}.demo" local_suffix = "local" object_suffix = "s3object" download_suffix = "downloaded" file_tag = len(self._chore_list) + 1 local_file_path = os.path.join( self.demo_folder, file_name_template.format(file_tag, local_suffix) ) s3_object_key = file_name_template.format(file_tag, object_suffix) downloaded_file_path = os.path.join( self.demo_folder, file_name_template.format(file_tag, download_suffix) ) filled_cmd = self._create_file_cmd.format( local_file_path, self.file_size_mb * self._size_multiplier ) print( f"Creating file of size {self.file_size_mb} MB " f"in {self.demo_folder} by running:" ) print(f"{'':4}{filled_cmd}") os.system(filled_cmd) chore = (local_file_path, s3_object_key, downloaded_file_path) self._chore_list.append(chore) return chore def _report_transfer_params(self, verb, source_name, dest_name, **kwargs): """Report configuration and extra arguments used for a file transfer.""" print("-" * self._terminal_width) print(f"{verb} {source_name} ({self.file_size_mb} MB) to {dest_name}") if kwargs: print("With extra args:") for arg, value in kwargs.items(): print(f'{"":4}{arg:<20}: {value}') @staticmethod def ask_user(question): """ Ask the user a yes or no question. Returns: True when the user answers 'y' or 'Y'; otherwise, False. """ answer = input(f"{question} (y/n) ") return answer.lower() == "y" @staticmethod def _config_wrapper(func, config_attrs): def wrapper(*args, **kwargs): config = func(*args, **kwargs) print("With configuration:") for attr in config_attrs: print(f'{"":4}{attr:<20}: {getattr(config, attr)}') return config return wrapper @staticmethod def _report_transfer_result(thread_info, elapsed): """Report the result of a transfer, including per-thread data.""" print(f"\nUsed {len(thread_info)} threads.") for ident, byte_count in thread_info.items(): print(f"{'':4}Thread {ident} copied {byte_count} bytes.") print(f"Your transfer took {elapsed:.2f} seconds.") def main(): """ Run the demonstration script for s3_file_transfer. """ demo_manager = TransferDemoManager() demo_manager.collect_user_info() # Upload and download with default configuration. Because the file is 30 MB # and the default multipart_threshold is 8 MB, both upload and download are # multipart transfers. demo_manager.demo( "Do you want to upload and download a {} MB file " "using the default configuration?", file_transfer.upload_with_default_configuration, file_transfer.download_with_default_configuration, ) # Upload and download with multipart_threshold set higher than the size of # the file. This causes the transfer manager to use standard transfers # instead of multipart transfers. demo_manager.demo( "Do you want to upload and download a {} MB file " "as a standard (not multipart) transfer?", file_transfer.upload_with_high_threshold, file_transfer.download_with_high_threshold, ) # Upload with specific chunk size and additional metadata. # Download with a single thread. demo_manager.demo( "Do you want to upload a {} MB file with a smaller chunk size and " "then download the same file using a single thread?", file_transfer.upload_with_chunksize_and_meta, file_transfer.download_with_single_thread, upload_args={ "metadata": { "upload_type": "chunky", "favorite_color": "aqua", "size": "medium", } }, ) # Upload using server-side encryption with customer-provided # encryption keys. # Generate a 256-bit key from a passphrase. 
sse_key = hashlib.sha256("demo_passphrase".encode("utf-8")).digest() demo_manager.demo( "Do you want to upload and download a {} MB file using " "server-side encryption?", file_transfer.upload_with_sse, file_transfer.download_with_sse, upload_args={"sse_key": sse_key}, download_args={"sse_key": sse_key}, ) # Download without specifying an encryption key to show that the # encryption key must be included to download an encrypted object. if demo_manager.ask_user( "Do you want to try to download the encrypted " "object without sending the required key?" ): try: _, object_key, download_file_path = demo_manager.last_name_set() file_transfer.download_with_default_configuration( demo_manager.demo_bucket, object_key, download_file_path, demo_manager.file_size_mb, ) except ClientError as err: print( "Got expected error when trying to download an encrypted " "object without specifying encryption info:" ) print(f"{'':4}{err}") # Remove all created and downloaded files, remove all objects from # S3 storage. if demo_manager.ask_user( "Demonstration complete. Do you want to remove local files " "and S3 objects?" ): demo_manager.cleanup() if __name__ == "__main__": try: main() except NoCredentialsError as error: print(error) print( "To run this example, you must have valid credentials in " "a shared credential file or set in environment variables." )
- Rust (SDK for Rust)
Note

There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.

Upload a file using multipart upload, then download the object and verify its size.

use std::fs::File; use std::io::prelude::*; use std::path::Path; use aws_config::meta::region::RegionProviderChain; use aws_sdk_s3::error::DisplayErrorContext; use aws_sdk_s3::operation::{ create_multipart_upload::CreateMultipartUploadOutput, get_object::GetObjectOutput, }; use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart}; use aws_sdk_s3::{config::Region, Client as S3Client}; use aws_smithy_types::byte_stream::{ByteStream, Length}; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use s3_code_examples::error::S3ExampleError; use std::process; use uuid::Uuid; //In bytes, minimum chunk size of 5MB. Increase CHUNK_SIZE to send larger chunks. const CHUNK_SIZE: u64 = 1024 * 1024 * 5; const MAX_CHUNKS: u64 = 10000; #[tokio::main] pub async fn main() { if let Err(err) = run_example().await { eprintln!("Error: {}", DisplayErrorContext(err)); process::exit(1); } } async fn run_example() -> Result<(), S3ExampleError> { let shared_config = aws_config::load_from_env().await; let client = S3Client::new(&shared_config); let bucket_name = format!("amzn-s3-demo-bucket-{}", Uuid::new_v4()); let region_provider = RegionProviderChain::first_try(Region::new("us-west-2")); let region = region_provider.region().await.unwrap(); s3_code_examples::create_bucket(&client, &bucket_name, &region).await?; let key = "sample.txt".to_string(); // Create a multipart upload. Use UploadPart and CompleteMultipartUpload to // upload the file. let multipart_upload_res: CreateMultipartUploadOutput = client .create_multipart_upload() .bucket(&bucket_name) .key(&key) .send() .await?; let upload_id = multipart_upload_res.upload_id().ok_or(S3ExampleError::new( "Missing upload_id after CreateMultipartUpload", ))?; //Create a file of random characters for the upload. let mut file = File::create(&key).expect("Could not create sample file."); // Loop until the file is 5 chunks. while file.metadata().unwrap().len() <= CHUNK_SIZE * 4 { let rand_string: String = thread_rng() .sample_iter(&Alphanumeric) .take(256) .map(char::from) .collect(); let return_string: String = "\n".to_string(); file.write_all(rand_string.as_ref()) .expect("Error writing to file."); file.write_all(return_string.as_ref()) .expect("Error writing to file."); } let path = Path::new(&key); let file_size = tokio::fs::metadata(path) .await .expect("it exists I swear") .len(); let mut chunk_count = (file_size / CHUNK_SIZE) + 1; let mut size_of_last_chunk = file_size % CHUNK_SIZE; if size_of_last_chunk == 0 { size_of_last_chunk = CHUNK_SIZE; chunk_count -= 1; } if file_size == 0 { return Err(S3ExampleError::new("Bad file size.")); } if chunk_count > MAX_CHUNKS { return Err(S3ExampleError::new( "Too many chunks! Try increasing your chunk size.", )); } let mut upload_parts: Vec<aws_sdk_s3::types::CompletedPart> = Vec::new(); for chunk_index in 0..chunk_count { let this_chunk = if chunk_count - 1 == chunk_index { size_of_last_chunk } else { CHUNK_SIZE }; let stream = ByteStream::read_from() .path(path) .offset(chunk_index * CHUNK_SIZE) .length(Length::Exact(this_chunk)) .build() .await .unwrap(); // Chunk index needs to start at 0, but part numbers start at 1. 
let part_number = (chunk_index as i32) + 1; let upload_part_res = client .upload_part() .key(&key) .bucket(&bucket_name) .upload_id(upload_id) .body(stream) .part_number(part_number) .send() .await?; upload_parts.push( CompletedPart::builder() .e_tag(upload_part_res.e_tag.unwrap_or_default()) .part_number(part_number) .build(), ); } // upload_parts: Vec<aws_sdk_s3::types::CompletedPart> let completed_multipart_upload: CompletedMultipartUpload = CompletedMultipartUpload::builder() .set_parts(Some(upload_parts)) .build(); let _complete_multipart_upload_res = client .complete_multipart_upload() .bucket(&bucket_name) .key(&key) .multipart_upload(completed_multipart_upload) .upload_id(upload_id) .send() .await?; let data: GetObjectOutput = s3_code_examples::download_object(&client, &bucket_name, &key).await?; let data_length: u64 = data .content_length() .unwrap_or_default() .try_into() .unwrap(); if file.metadata().unwrap().len() == data_length { println!("Data lengths match."); } else { println!("The data was not the same size!"); } s3_code_examples::clear_bucket(&client, &bucket_name) .await .expect("Error emptying bucket."); s3_code_examples::delete_bucket(&client, &bucket_name) .await .expect("Error deleting bucket."); Ok(()) }
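The crate manifest for this example isn't shown above. The following is an illustrative sketch of the Cargo.toml dependencies inferred from the imports; the version numbers are placeholders to pin against your own SDK release, and s3_code_examples is the example repository's local helper crate, not a crates.io package:

# Illustrative dependency list inferred from the imports above.
[dependencies]
aws-config = "1"
aws-sdk-s3 = "1"
aws-smithy-types = "1"
rand = "0.8"
tokio = { version = "1", features = ["full"] }
uuid = { version = "1", features = ["v4"] }
s3_code_examples = { path = "../s3-code-examples" } # hypothetical local path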