Enum AcceleratorName
Specific hardware accelerator models supported by EC2.
Namespace: Amazon.CDK.AWS.EC2
Assembly: Amazon.CDK.Lib.dll
Syntax (csharp)
public enum AcceleratorName
Syntax (vb)
Public Enum AcceleratorName
Remarks
Defines exact accelerator models that can be required or excluded when selecting instance types.
ExampleMetadata: infused
Examples
Vpc vpc;
var securityGroup = new SecurityGroup(this, "SecurityGroup", new SecurityGroupProps {
Vpc = vpc,
Description = "Security group for managed instances"
});
var miCapacityProvider = new ManagedInstancesCapacityProvider(this, "MICapacityProvider", new ManagedInstancesCapacityProviderProps {
Subnets = vpc.PrivateSubnets,
SecurityGroups = new [] { securityGroup },
InstanceRequirements = new InstanceRequirementsConfig {
// Required: CPU and memory constraints
VCpuCountMin = 2,
VCpuCountMax = 8,
MemoryMin = Size.Gibibytes(4),
MemoryMax = Size.Gibibytes(32),
// CPU preferences
CpuManufacturers = new [] { CpuManufacturer.INTEL, CpuManufacturer.AMD },
InstanceGenerations = new [] { InstanceGeneration.CURRENT },
// Instance type filtering
AllowedInstanceTypes = new [] { "m5.*", "c5.*" },
// Performance characteristics
BurstablePerformance = BurstablePerformance.EXCLUDED,
BareMetal = BareMetal.EXCLUDED,
// Accelerator requirements (for ML/AI workloads)
AcceleratorTypes = new [] { AcceleratorType.GPU },
AcceleratorManufacturers = new [] { AcceleratorManufacturer.NVIDIA },
AcceleratorNames = new [] { AcceleratorName.T4, AcceleratorName.V100 },
AcceleratorCountMin = 1,
// Storage requirements
LocalStorage = LocalStorage.REQUIRED,
LocalStorageTypes = new [] { LocalStorageType.SSD },
TotalLocalStorageGBMin = 100,
// Network requirements
NetworkInterfaceCountMin = 2,
NetworkBandwidthGbpsMin = 10,
// Cost optimization
OnDemandMaxPricePercentageOverLowestPrice = 10
}
});
Synopsis
Fields
| Name | Description |
|---|---|
| A100 | NVIDIA A100 GPU. |
| A10G | NVIDIA A10G GPU. |
| GAUDI_HL_205 | Habana Gaudi HL-205 accelerator for deep learning training. |
| H100 | NVIDIA H100 GPU. |
| INFERENTIA | AWS Inferentia chips. |
| INFERENTIA2 | AWS Inferentia2 chips for high-performance ML inference. |
| K520 | NVIDIA GRID K520 GPU. |
| K80 | NVIDIA K80 GPU. |
| L4 | NVIDIA L4 GPU for AI inference and graphics workloads. |
| L40S | NVIDIA L40S GPU for AI inference and graphics workloads. |
| M60 | NVIDIA M60 GPU. |
| RADEON_PRO_V520 | AMD Radeon Pro V520 GPU. |
| T4 | NVIDIA T4 GPU. |
| T4G | NVIDIA T4G GPU. |
| TRAINIUM | AWS Trainium chips for high-performance ML training. |
| TRAINIUM2 | AWS Trainium2 chips for high-performance ML training. |
| U30 | Xilinx U30 media transcoding accelerator for video processing. |
| V100 | NVIDIA V100 GPU. |
| VU9P | Xilinx VU9P FPGA. |
Fields
| Name | Description |
|---|---|
| A100 | NVIDIA A100 GPU. |
| A10G | NVIDIA A10G GPU. |
| GAUDI_HL_205 | Habana Gaudi HL-205 accelerator for deep learning training. |
| H100 | NVIDIA H100 GPU. |
| INFERENTIA | AWS Inferentia chips. |
| INFERENTIA2 | AWS Inferentia2 chips for high-performance ML inference. |
| K520 | NVIDIA GRID K520 GPU. |
| K80 | NVIDIA K80 GPU. |
| L4 | NVIDIA L4 GPU for AI inference and graphics workloads. |
| L40S | NVIDIA L40S GPU for AI inference and graphics workloads. |
| M60 | NVIDIA M60 GPU. |
| RADEON_PRO_V520 | AMD Radeon Pro V520 GPU. |
| T4 | NVIDIA T4 GPU. |
| T4G | NVIDIA T4G GPU. |
| TRAINIUM | AWS Trainium chips for high-performance ML training. |
| TRAINIUM2 | AWS Trainium2 chips for high-performance ML training. |
| U30 | Xilinx U30 media transcoding accelerator for video processing. |
| V100 | NVIDIA V100 GPU. |
| VU9P | Xilinx VU9P FPGA. |