
Invoke multiple foundation models on Amazon Bedrock

The following code examples show how to prepare and send a prompt to a variety of large language models (LLMs) on Amazon Bedrock.
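Each of these examples follows the same underlying pattern: serialize a model-specific JSON request body, call the Bedrock Runtime InvokeModel operation, and deserialize the model-specific JSON response. The sketch below shows that pattern in Go at its most minimal, assuming Anthropic Claude 2 (model ID anthropic.claude-v2) is enabled for your account and Region; it is an illustration, not part of the examples repository.

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
)

// claudeRequest and claudeResponse mirror the Anthropic Claude 2
// text-completion request and response bodies.
type claudeRequest struct {
    Prompt            string `json:"prompt"`
    MaxTokensToSample int    `json:"max_tokens_to_sample"`
}

type claudeResponse struct {
    Completion string `json:"completion"`
}

func main() {
    ctx := context.Background()

    // Load the Region and credentials from the default configuration chain.
    sdkConfig, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := bedrockruntime.NewFromConfig(sdkConfig)

    // Claude 2 requires the Human:/Assistant: prompt wrapping.
    body, err := json.Marshal(claudeRequest{
        Prompt:            "\n\nHuman: In one paragraph, who are you?\n\nAssistant:",
        MaxTokensToSample: 200,
    })
    if err != nil {
        log.Fatal(err)
    }

    output, err := client.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{
        ModelId:     aws.String("anthropic.claude-v2"),
        ContentType: aws.String("application/json"),
        Body:        body,
    })
    if err != nil {
        log.Fatal(err)
    }

    var response claudeResponse
    if err := json.Unmarshal(output.Body, &response); err != nil {
        log.Fatal(err)
    }
    fmt.Println(response.Completion)
}

The wrapper types used in the full examples below encapsulate exactly this marshal/invoke/unmarshal cycle for each model's request and response format.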

Go
SDK for Go V2
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

Invoke multiple foundation models on Amazon Bedrock.

// InvokeModelsScenario demonstrates how to use the Amazon Bedrock Runtime client
// to invoke various foundation models for text and image generation
//
// 1. Generate text with Anthropic Claude 2
// 2. Generate text with AI21 Labs Jurassic-2
// 3. Generate text with Meta Llama 2 Chat
// 4. Generate text and asynchronously process the response stream with Anthropic Claude 2
// 5. Generate an image with the Amazon Titan image generation model
// 6. Generate text with Amazon Titan Text G1 Express model
type InvokeModelsScenario struct {
    sdkConfig             aws.Config
    invokeModelWrapper    actions.InvokeModelWrapper
    responseStreamWrapper actions.InvokeModelWithResponseStreamWrapper
    questioner            demotools.IQuestioner
}

// NewInvokeModelsScenario constructs an InvokeModelsScenario instance from a configuration.
// It uses the specified config to get a Bedrock Runtime client and create wrappers for the
// actions used in the scenario.
func NewInvokeModelsScenario(sdkConfig aws.Config, questioner demotools.IQuestioner) InvokeModelsScenario {
    client := bedrockruntime.NewFromConfig(sdkConfig)
    return InvokeModelsScenario{
        sdkConfig:             sdkConfig,
        invokeModelWrapper:    actions.InvokeModelWrapper{BedrockRuntimeClient: client},
        responseStreamWrapper: actions.InvokeModelWithResponseStreamWrapper{BedrockRuntimeClient: client},
        questioner:            questioner,
    }
}

// Runs the interactive scenario.
func (scenario InvokeModelsScenario) Run(ctx context.Context) {
    defer func() {
        if r := recover(); r != nil {
            log.Printf("Something went wrong with the demo: %v\n", r)
        }
    }()

    log.Println(strings.Repeat("=", 77))
    log.Println("Welcome to the Amazon Bedrock Runtime model invocation demo.")
    log.Println(strings.Repeat("=", 77))

    log.Printf("First, let's invoke a few large-language models using the synchronous client:\n\n")

    text2textPrompt := "In one paragraph, who are you?"

    log.Println(strings.Repeat("-", 77))
    log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt)
    scenario.InvokeClaude(ctx, text2textPrompt)

    log.Println(strings.Repeat("-", 77))
    log.Printf("Invoking Jurassic-2 with prompt: %v\n", text2textPrompt)
    scenario.InvokeJurassic2(ctx, text2textPrompt)

    log.Println(strings.Repeat("-", 77))
    log.Printf("Invoking Llama2 with prompt: %v\n", text2textPrompt)
    scenario.InvokeLlama2(ctx, text2textPrompt)

    log.Println(strings.Repeat("=", 77))
    log.Printf("Now, let's invoke Claude with the asynchronous client and process the response stream:\n\n")

    log.Println(strings.Repeat("-", 77))
    log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt)
    scenario.InvokeWithResponseStream(ctx, text2textPrompt)

    log.Println(strings.Repeat("=", 77))
    log.Printf("Now, let's create an image with the Amazon Titan image generation model:\n\n")

    text2ImagePrompt := "stylized picture of a cute old steampunk robot"
    seed := rand.Int63n(2147483648)

    log.Println(strings.Repeat("-", 77))
    log.Printf("Invoking Amazon Titan with prompt: %v\n", text2ImagePrompt)
    scenario.InvokeTitanImage(ctx, text2ImagePrompt, seed)

    log.Println(strings.Repeat("-", 77))
    log.Printf("Invoking Titan Text Express with prompt: %v\n", text2textPrompt)
    scenario.InvokeTitanText(ctx, text2textPrompt)

    log.Println(strings.Repeat("=", 77))
    log.Println("Thanks for watching!")
    log.Println(strings.Repeat("=", 77))
}

func (scenario InvokeModelsScenario) InvokeClaude(ctx context.Context, prompt string) {
    completion, err := scenario.invokeModelWrapper.InvokeClaude(ctx, prompt)
    if err != nil {
        panic(err)
    }
    log.Printf("\nClaude : %v\n", strings.TrimSpace(completion))
}

func (scenario InvokeModelsScenario) InvokeJurassic2(ctx context.Context, prompt string) {
    completion, err := scenario.invokeModelWrapper.InvokeJurassic2(ctx, prompt)
    if err != nil {
        panic(err)
    }
    log.Printf("\nJurassic-2 : %v\n", strings.TrimSpace(completion))
}

func (scenario InvokeModelsScenario) InvokeLlama2(ctx context.Context, prompt string) {
    completion, err := scenario.invokeModelWrapper.InvokeLlama2(ctx, prompt)
    if err != nil {
        panic(err)
    }
    log.Printf("\nLlama 2 : %v\n\n", strings.TrimSpace(completion))
}

func (scenario InvokeModelsScenario) InvokeWithResponseStream(ctx context.Context, prompt string) {
    log.Println("\nClaude with response stream:")
    _, err := scenario.responseStreamWrapper.InvokeModelWithResponseStream(ctx, prompt)
    if err != nil {
        panic(err)
    }
    log.Println()
}

func (scenario InvokeModelsScenario) InvokeTitanImage(ctx context.Context, prompt string, seed int64) {
    base64ImageData, err := scenario.invokeModelWrapper.InvokeTitanImage(ctx, prompt, seed)
    if err != nil {
        panic(err)
    }
    imagePath := saveImage(base64ImageData, "amazon.titan-image-generator-v1")
    fmt.Printf("The generated image has been saved to %s\n", imagePath)
}

func (scenario InvokeModelsScenario) InvokeTitanText(ctx context.Context, prompt string) {
    completion, err := scenario.invokeModelWrapper.InvokeTitanText(ctx, prompt)
    if err != nil {
        panic(err)
    }
    log.Printf("\nTitan Text Express : %v\n\n", strings.TrimSpace(completion))
}
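The scenario above delegates the actual service calls to wrapper types from the example repository's actions package, whose source is not reproduced on this page. As a hedged sketch of the streaming half, the program below (the helper name invokeClaudeWithStream and the Claude 2 request shape are assumptions for illustration, not the repository's actual code) shows how the SDK for Go V2 exposes the response stream as a channel of events:

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
    "github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types"
)

// invokeClaudeWithStream sends a prompt to Claude 2 and prints the completion
// chunk by chunk as events arrive on the response stream.
func invokeClaudeWithStream(ctx context.Context, client *bedrockruntime.Client, prompt string) error {
    body, err := json.Marshal(map[string]any{
        "prompt":               fmt.Sprintf("\n\nHuman: %s\n\nAssistant:", prompt),
        "max_tokens_to_sample": 200,
    })
    if err != nil {
        return err
    }

    output, err := client.InvokeModelWithResponseStream(ctx, &bedrockruntime.InvokeModelWithResponseStreamInput{
        ModelId:     aws.String("anthropic.claude-v2"),
        ContentType: aws.String("application/json"),
        Body:        body,
    })
    if err != nil {
        return err
    }

    // Each chunk event carries a JSON payload holding a partial completion.
    for event := range output.GetStream().Events() {
        if chunk, ok := event.(*types.ResponseStreamMemberChunk); ok {
            var part struct {
                Completion string `json:"completion"`
            }
            if err := json.Unmarshal(chunk.Value.Bytes, &part); err != nil {
                return err
            }
            fmt.Print(part.Completion)
        }
    }
    fmt.Println()
    return output.GetStream().Close()
}

func main() {
    ctx := context.Background()
    sdkConfig, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := bedrockruntime.NewFromConfig(sdkConfig)
    if err := invokeClaudeWithStream(ctx, client, "In one paragraph, who are you?"); err != nil {
        log.Fatal(err)
    }
}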
JavaScript
SDK for JavaScript (v3)
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

import { fileURLToPath } from "url";

import {
  Scenario,
  ScenarioAction,
  ScenarioInput,
  ScenarioOutput,
} from "@aws-doc-sdk-examples/lib/scenario/index.js";

import { FoundationModels } from "../config/foundation_models.js";

/**
 * @typedef {Object} ModelConfig
 * @property {Function} module
 * @property {Function} invoker
 * @property {string} modelId
 * @property {string} modelName
 */

const greeting = new ScenarioOutput(
  "greeting",
  "Welcome to the Amazon Bedrock Runtime client demo!",
  { header: true },
);

const selectModel = new ScenarioInput("model", "First, select a model:", {
  type: "select",
  choices: Object.values(FoundationModels).map((model) => ({
    name: model.modelName,
    value: model,
  })),
});

const enterPrompt = new ScenarioInput("prompt", "Now, enter your prompt:", {
  type: "input",
});

const printDetails = new ScenarioOutput(
  "print details",
  /**
   * @param {{ model: ModelConfig, prompt: string }} c
   */
  (c) => console.log(`Invoking ${c.model.modelName} with '${c.prompt}'...`),
  { slow: false },
);

const invokeModel = new ScenarioAction(
  "invoke model",
  /**
   * @param {{ model: ModelConfig, prompt: string, response: string }} c
   */
  async (c) => {
    const modelModule = await c.model.module();
    const invoker = c.model.invoker(modelModule);
    c.response = await invoker(c.prompt, c.model.modelId);
  },
);

const printResponse = new ScenarioOutput(
  "print response",
  /**
   * @param {{ response: string }} c
   */
  (c) => c.response,
  { slow: false },
);

const scenario = new Scenario("Amazon Bedrock Runtime Demo", [
  greeting,
  selectModel,
  enterPrompt,
  printDetails,
  invokeModel,
  printResponse,
]);

if (process.argv[1] === fileURLToPath(import.meta.url)) {
  scenario.run();
}
PHP
SDK for PHP
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

Invoke multiple LLMs on Amazon Bedrock.

namespace BedrockRuntime;

class GettingStartedWithBedrockRuntime
{
    protected BedrockRuntimeService $bedrockRuntimeService;

    public function runExample()
    {
        echo "\n";
        echo "---------------------------------------------------------------------\n";
        echo "Welcome to the Amazon Bedrock Runtime getting started demo using PHP!\n";
        echo "---------------------------------------------------------------------\n";

        $clientArgs = [
            'region' => 'us-east-1',
            'version' => 'latest',
            'profile' => 'default',
        ];

        $bedrockRuntimeService = new BedrockRuntimeService($clientArgs);

        $prompt = 'In one paragraph, who are you?';
        echo "\nPrompt: " . $prompt;

        echo "\n\nAnthropic Claude:";
        echo $bedrockRuntimeService->invokeClaude($prompt);

        echo "\n\nAI21 Labs Jurassic-2: ";
        echo $bedrockRuntimeService->invokeJurassic2($prompt);

        echo "\n\nMeta Llama 2 Chat: ";
        echo $bedrockRuntimeService->invokeLlama2($prompt);

        echo "\n---------------------------------------------------------------------\n";

        $image_prompt = 'stylized picture of a cute old steampunk robot';
        echo "\nImage prompt: " . $image_prompt;

        echo "\n\nStability.ai Stable Diffusion XL:\n";
        $diffusionSeed = rand(0, 4294967295);
        $style_preset = 'photographic';
        $base64 = $bedrockRuntimeService->invokeStableDiffusion($image_prompt, $diffusionSeed, $style_preset);
        $image_path = $this->saveImage($base64, 'stability.stable-diffusion-xl');
        echo "The generated images have been saved to $image_path";

        echo "\n\nAmazon Titan Image Generation:\n";
        $titanSeed = rand(0, 2147483647);
        $base64 = $bedrockRuntimeService->invokeTitanImage($image_prompt, $titanSeed);
        $image_path = $this->saveImage($base64, 'amazon.titan-image-generator-v1');
        echo "The generated images have been saved to $image_path";
    }

    private function saveImage($base64_image_data, $model_id): string
    {
        $output_dir = "output";
        if (!file_exists($output_dir)) {
            mkdir($output_dir);
        }

        $i = 1;
        while (file_exists("$output_dir/$model_id" . '_' . "$i.png")) {
            $i++;
        }

        $image_data = base64_decode($base64_image_data);
        $file_path = "$output_dir/$model_id" . '_' . "$i.png";

        $file = fopen($file_path, 'wb');
        fwrite($file, $image_data);
        fclose($file);

        return $file_path;
    }
}

For a complete list of AWS SDK developer guides and code examples, see Using Amazon Bedrock with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions.