diff --git a/.github/workflows/performance-benchmarks-go.yml b/.github/workflows/performance-benchmarks-go.yml index 887c4ac60..faa2ad02d 100644 --- a/.github/workflows/performance-benchmarks-go.yml +++ b/.github/workflows/performance-benchmarks-go.yml @@ -1,4 +1,4 @@ -# This workflow runs every day 09:00 UTC (1AM PST) +# This workflow runs every day 16:00 UTC (8AM PST) name: Performance Benchmarks on: diff --git a/.github/workflows/performance-benchmarks-rust.yml b/.github/workflows/performance-benchmarks-rust.yml new file mode 100644 index 000000000..d7a0bddbc --- /dev/null +++ b/.github/workflows/performance-benchmarks-rust.yml @@ -0,0 +1,135 @@ +# This workflow runs every day 16:00 UTC (8AM PST) +name: Performance Benchmarks + +on: + workflow_call: + inputs: + dafny: + description: "The Dafny version to run" + required: false + default: "4.9.0" + type: string + regenerate-code: + description: "Regenerate code using smithy-dafny" + required: false + default: false + type: boolean + mpl-version: + description: "MPL version to use" + required: false + type: string + mpl-head: + description: "Running on MPL HEAD" + required: false + default: false + type: boolean + +jobs: + testRust: + strategy: + fail-fast: false + matrix: + library: [DynamoDbEncryption] + benchmark-dir: [db-esdk-performance-testing] + # removed windows-latest because somehow it can't build aws-lc in CI + os: [ubuntu-22.04, macos-15-intel] + runs-on: ${{ matrix.os }} + permissions: + id-token: write + contents: read + env: + RUST_MIN_STACK: 838860800 + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v5 + with: + aws-region: us-west-2 + role-to-assume: arn:aws:iam::370957321024:role/GitHub-CI-DDBEC-Dafny-Role-us-west-2 + role-session-name: DDBEC-Dafny-Rust-Tests + + - name: Setup Docker + if: matrix.os == 'macos-15-intel' && matrix.library == 'TestVectors' + uses: douglascamata/setup-docker-macos-action@v1.0.2 + + - name: Support longpaths on Git checkout + 
run: | + git config --global core.longpaths true + - uses: actions/checkout@v5 + - name: Init Submodules + shell: bash + run: | + git submodule update --init --recursive submodules/smithy-dafny + git submodule update --init --recursive submodules/MaterialProviders + + - name: Setup Rust Toolchain for GitHub CI + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + components: rustfmt + + - name: Setup Dafny + uses: ./submodules/MaterialProviders/.github/actions/setup_dafny/ + with: + dafny-version: ${{ inputs.dafny }} + + - name: Update MPL submodule if using MPL HEAD + if: ${{ inputs.mpl-head == true }} + working-directory: submodules/MaterialProviders + run: | + git checkout main + git pull + git submodule update --init --recursive + git rev-parse HEAD + + - name: Setup Java 17 for codegen + uses: actions/setup-java@v5 + with: + distribution: "corretto" + java-version: "17" + + - name: Install Smithy-Dafny codegen dependencies + uses: ./.github/actions/install_smithy_dafny_codegen_dependencies + + - name: Run make polymorph_rust + shell: bash + working-directory: ./${{ matrix.library }} + run: | + make polymorph_rust + + - name: Compile ${{ matrix.library }} implementation + shell: bash + working-directory: ./${{ matrix.library }} + run: | + # This works because `node` is installed by default on GHA runners + CORES=$(node -e 'console.log(os.cpus().length)') + make transpile_rust TRANSPILE_TESTS_IN_RUST=1 CORES=$CORES + + - name: Run Performance Benchmarks - Quick Mode + shell: bash + working-directory: ./${{matrix.benchmark-dir}}/benchmarks/rust + run: | + cargo run --release -- --quick + + - name: Parse and Format Logs + working-directory: ./${{matrix.benchmark-dir}}/benchmarks/results/raw-data/ + run: | + LOG_FILE="rust_results.json" + UPLOAD_FILE="cloudwatch_logs.json" + TIMESTAMP=$(date +%s%3N) + jq -c --arg ts "$(date +%s)000" '[.results[] as $result | .metadata as $meta | {timestamp: ($ts | tonumber), message: ({metadata: $meta, result: $result} | tostring)}]' 
$LOG_FILE > $UPLOAD_FILE + + - name: Upload logs to CloudWatch + working-directory: ./${{matrix.benchmark-dir}}/benchmarks/results/raw-data/ + run: | + LOG_FILE="cloudwatch_logs.json" + LOG_GROUP="aws-dbesdk-performance-benchmarks" + LOG_STREAM="rust/quick_benchmarks/${{ github.workflow }}" + + # Create log stream (ignore if exists) + aws logs create-log-stream \ + --log-group-name "$LOG_GROUP" \ + --log-stream-name "$LOG_STREAM" 2>/dev/null || true + + aws logs put-log-events \ + --log-group-name "$LOG_GROUP" \ + --log-stream-name "$LOG_STREAM" \ + --log-events file://$LOG_FILE diff --git a/.github/workflows/performance-benchmarks.yml b/.github/workflows/performance-benchmarks.yml index 671250b81..eb82758e5 100644 --- a/.github/workflows/performance-benchmarks.yml +++ b/.github/workflows/performance-benchmarks.yml @@ -1,4 +1,4 @@ -# This workflow runs every day 09:00 UTC (1AM PST) +# This workflow runs every day 16:00 UTC (8AM PST) name: Performance Benchmarks permissions: @@ -21,8 +21,13 @@ jobs: uses: ./.github/workflows/performance-benchmarks-go.yml with: dafny: ${{needs.getVersion.outputs.version}} + performance-benchmarks-rust: + needs: getVersion + uses: ./.github/workflows/performance-benchmarks-rust.yml + with: + dafny: ${{needs.getVersion.outputs.version}} notify: - needs: [getVersion, performance-benchmarks-go] + needs: [getVersion, performance-benchmarks-go, performance-benchmarks-rust] if: ${{ failure() }} uses: aws/aws-cryptographic-material-providers-library/.github/workflows/slack-notification.yml@main with: diff --git a/db-esdk-performance-testing/benchmarks/results/raw-data/rust_results.json b/db-esdk-performance-testing/benchmarks/results/raw-data/rust_results.json new file mode 100644 index 000000000..c553440dd --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/results/raw-data/rust_results.json @@ -0,0 +1,93 @@ +{ + "metadata": { + "language": "rust", + "timestamp": "2026-01-22 21:23:51", + "rust_version": "unknown", + "cpu_count": 14, 
+ "total_memory_gb": 48.0, + "total_tests": 3 + }, + "results": [ + { + "test_name": "throughput", + "language": "rust", + "data_size": 102400, + "concurrency": 1, + "encrypt_latency_ms": 2.0424586666666666, + "decrypt_latency_ms": 1.1346386666666666, + "end_to_end_latency_ms": 3.1774306666666665, + "ops_per_second": 313.69134089559503, + "bytes_per_second": 32121993.307708934, + "peak_memory_mb": 0.0, + "memory_efficiency_ratio": 0.0, + "p50_latency": 3.1489589999999996, + "p95_latency": 3.2460456000000004, + "p99_latency": 3.2546755200000006, + "timestamp": "2026-01-22 21:23:51", + "rust_version": "unknown", + "cpu_count": 14, + "total_memory_gb": 48.0, + "alloc": { + "count_k": 45, + "total_m": 8, + "max_bytes_m": 0, + "net_count": 8, + "net_total": 0 + } + }, + { + "test_name": "memory", + "language": "rust", + "data_size": 102400, + "concurrency": 1, + "encrypt_latency_ms": 0.0, + "decrypt_latency_ms": 0.0, + "end_to_end_latency_ms": 0.0, + "ops_per_second": 0.0, + "bytes_per_second": 0.0, + "peak_memory_mb": 0.4375, + "memory_efficiency_ratio": 0.22321428571428573, + "p50_latency": 0.0, + "p95_latency": 0.0, + "p99_latency": 0.0, + "timestamp": "2026-01-22 21:23:51", + "rust_version": "unknown", + "cpu_count": 14, + "total_memory_gb": 48.0, + "alloc": { + "count_k": 75, + "total_m": 14, + "max_bytes_m": 0, + "net_count": 6, + "net_total": 0 + } + }, + { + "test_name": "concurrent", + "language": "rust", + "data_size": 102400, + "concurrency": 2, + "encrypt_latency_ms": 0.0, + "decrypt_latency_ms": 0.0, + "end_to_end_latency_ms": 3.4039084, + "ops_per_second": 575.1588876427113, + "bytes_per_second": 58896270.09461364, + "peak_memory_mb": 0.0, + "memory_efficiency_ratio": 0.0, + "p50_latency": 3.3777085, + "p95_latency": 3.7358293500000004, + "p99_latency": 3.7699994700000006, + "timestamp": "2026-01-22 21:23:51", + "rust_version": "unknown", + "cpu_count": 14, + "total_memory_gb": 48.0, + "alloc": { + "count_k": 150, + "total_m": 29, + "max_bytes_m": 1, + 
"net_count": 5, + "net_total": 0 + } + } + ] +} diff --git a/db-esdk-performance-testing/benchmarks/rust/.gitignore b/db-esdk-performance-testing/benchmarks/rust/.gitignore new file mode 100644 index 000000000..fa8d85ac5 --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/.gitignore @@ -0,0 +1,2 @@ +Cargo.lock +target diff --git a/db-esdk-performance-testing/benchmarks/rust/Cargo.toml b/db-esdk-performance-testing/benchmarks/rust/Cargo.toml new file mode 100644 index 000000000..912a24429 --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "dbesdk-benchmark" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "dbesdk-benchmark" +path = "src/main.rs" + +[dependencies] +# AWS SDK and DynamoDB Encryption SDK +aws-db-esdk = { path = "../../../DynamoDbEncryption/runtimes/rust" } + +# Async runtime +tokio = { version = "1.47", features = ["full"] } + +# Serialization +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +serde_yaml = "0.9" + +# CLI and progress +clap = { version = "4.5", features = ["derive"] } +indicatif = "0.18" + +# Statistics and benchmarking +hdrhistogram = "7.5" +rand = "0.9" + +# System info +sysinfo = "0.37" + +# Error handling +anyhow = "1.0" +thiserror = "2.0" + +# Logging +log = "0.4" +env_logger = "0.11" + +# Time +chrono = { version = "0.4", features = ["serde"] } + +# Memory profiling +memory-stats = "1.2" +stats_alloc = "0.1" + +# Async utilities +futures = "0.3" + +aws-config = "1.8.6" +aws-sdk-dynamodb = "1.92.0" +aws-sdk-kms = "1.86.0" +aws-sdk-sso = "1.83.0" +aws-sdk-ssooidc = "1.84.0" +aws-sdk-sts = "1.85.0" +aws-smithy-types = "1.3" +cpu-time = "1.0.0" + +[dev-dependencies] +criterion = { version = "0.7", features = ["html_reports"] } diff --git a/db-esdk-performance-testing/benchmarks/rust/README.md b/db-esdk-performance-testing/benchmarks/rust/README.md new file mode 100644 index 000000000..d533fb7ec --- /dev/null +++ 
b/db-esdk-performance-testing/benchmarks/rust/README.md @@ -0,0 +1,71 @@ +# AWS Database Encryption SDK for DynamoDB - Rust Performance Benchmarks + +This directory contains comprehensive performance benchmarks for the AWS Database Encryption SDK for DynamoDB Rust implementation. The benchmarks measure throughput, memory usage, and concurrency performance across various data sizes and system configurations. + +## Features + +- **Throughput Benchmarks**: Measure encryption/decryption operations per second +- **Memory Benchmarks**: Track peak memory usage and efficiency ratios +- **Concurrency Benchmarks**: Test performance under different thread counts +- **Raw AES Keyring**: Local 256-bit AES keys (no KMS dependency) + +## Prerequisites + +- Rust 1.70+ (recommended: latest stable) +- Access to the AWS Database Encryption SDK for DynamoDB Rust runtime + +## Quick Start + +```bash +# If necessary, build the ESDK and return here +cd ../../../DynamoDbEncryption/ +make polymorph_rust transpile_rust +cd ../db-esdk-performance-testing/benchmarks/rust/ + +# Build and run +cargo run --release -- --config ../../config/test-scenarios.yaml + +# Quick test (requires quick_config in YAML) +cargo run --release -- --quick +``` + +## Configuration + +### Command Line + +- `--config`: Path to YAML config file (required) +- `--output`: Results output path (default: `../../results/raw-data/rust_results.json`) +- `--quick`: Quick test mode (requires `quick_config` section in YAML) + +## Logging + +Default: info level. 
Override with `RUST_LOG`: + +```bash +RUST_LOG=debug ./target/release/dbesdk-benchmark --config config.yaml +``` + +## Development + +```bash +# Format and lint +cargo fmt +cargo clippy -- -D warnings + +# Test +cargo test + +# Debug build +cargo build +RUST_LOG=debug ./target/debug/dbesdk-benchmark --config config.yaml +``` + +## Troubleshooting + +- **Build issues**: `rustup update && cargo clean && cargo build --release` +- **Config issues**: Validate YAML syntax and check file permissions +- **Memory issues**: Monitor with `htop` or Activity Monitor + +## License + +Apache License 2.0 diff --git a/db-esdk-performance-testing/benchmarks/rust/src/alloc.rs b/db-esdk-performance-testing/benchmarks/rust/src/alloc.rs new file mode 100644 index 000000000..69f4c45c9 --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/src/alloc.rs @@ -0,0 +1,118 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +use serde::Serialize; +use std::sync::atomic::AtomicIsize; +use std::sync::atomic::Ordering; + +pub struct ResourceTracker { + pub count: isize, + pub total: isize, + pub net_total: isize, + pub net_count: isize, +} + +#[derive(Debug, Default, Clone, Serialize)] +pub struct ResourceResults { + pub count_k: isize, + pub total_m: isize, + pub max_bytes_m: isize, + pub net_count: isize, + pub net_total: isize, +} + +impl ResourceTracker { + pub fn new() -> Self { + clear_max(); + Self { + count: get_counter(), + total: get_total(), + net_total: get_net_total(), + net_count: get_net_counter(), + } + } + + pub fn get_results(&self) -> ResourceResults { + ResourceResults { + count_k: (get_counter() - self.count) / 1000, + total_m: (get_total() - self.total) / 1_000_000, + max_bytes_m: (get_max_total() - self.net_total) / 1_000_000, + net_count: get_net_counter() - self.net_count, + net_total: (get_net_total() - self.net_total) / 1_000_000, + } + } +} + +// total number of allocations made over the life of the 
program +static COUNTER: AtomicIsize = AtomicIsize::new(0); + +// total number of bytes allocated over the life of the program +static TOTAL: AtomicIsize = AtomicIsize::new(0); + +// number allocations not yet deallocated +static NET_COUNTER: AtomicIsize = AtomicIsize::new(0); + +// number bytes not yet deallocated +static NET_TOTAL: AtomicIsize = AtomicIsize::new(0); + +// the peak value reached by NET_TOTAL +// This is reset whenever a ResourceTracker is created +// so it gives the right answer for a single operation +// but it does not handle nested ResourceTrackers correctly. +static MAX_NET_TOTAL: AtomicIsize = AtomicIsize::new(0); + +fn add_to_counter(inc: isize) { + COUNTER.fetch_add(1, Ordering::SeqCst); + TOTAL.fetch_add(inc, Ordering::SeqCst); + NET_COUNTER.fetch_add(1, Ordering::SeqCst); + NET_TOTAL.fetch_add(inc, Ordering::SeqCst); + MAX_NET_TOTAL.fetch_max(NET_TOTAL.load(Ordering::SeqCst), Ordering::SeqCst); +} + +fn subtract_from_counter(inc: isize) { + NET_COUNTER.fetch_sub(1, Ordering::SeqCst); + NET_TOTAL.fetch_sub(inc, Ordering::SeqCst); +} + +fn get_counter() -> isize { + COUNTER.load(Ordering::SeqCst) +} + +fn get_total() -> isize { + TOTAL.load(Ordering::SeqCst) +} + +fn get_net_counter() -> isize { + NET_COUNTER.load(Ordering::SeqCst) +} + +fn get_net_total() -> isize { + NET_TOTAL.load(Ordering::SeqCst) +} + +fn clear_max() { + MAX_NET_TOTAL.store(0, Ordering::SeqCst) +} + +fn get_max_total() -> isize { + MAX_NET_TOTAL.load(Ordering::SeqCst) +} + +use std::alloc::{GlobalAlloc, Layout, System}; + +struct MyAllocator; + +unsafe impl GlobalAlloc for MyAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + add_to_counter(layout.size() as isize); + unsafe { System.alloc(layout) } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + subtract_from_counter(layout.size() as isize); + unsafe { System.dealloc(ptr, layout) } + } +} + +#[global_allocator] +static GLOBAL: MyAllocator = MyAllocator; diff --git 
a/db-esdk-performance-testing/benchmarks/rust/src/benchmark.rs b/db-esdk-performance-testing/benchmarks/rust/src/benchmark.rs new file mode 100644 index 000000000..8d1281507 --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/src/benchmark.rs @@ -0,0 +1,114 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::{anyhow, Result}; +use aws_db_esdk::{ + item_encryptor::{ + client::Client as ItemEncryptorClient, + types::DynamoDbItemEncryptorConfig, + }, + material_providers::{ + client::Client as MaterialProvidersClient, + types::{MaterialProvidersConfig, AesWrappingAlg, DbeAlgorithmSuiteId}, + }, +}; +use log::info; +use rand::Rng; +use sysinfo::System; + +use crate::config::{TestConfig, load_config}; +use crate::results::BenchmarkResult; +use crate::tests::create_attribute_actions; + +// Constants for memory testing +pub const MEMORY_TEST_ITERATIONS: usize = 5; + +pub struct DbeSDKBenchmark { + pub item_encryptor: ItemEncryptorClient, + pub config: TestConfig, + pub results: Vec, + pub cpu_count: usize, + pub total_memory_gb: f64, +} + +impl DbeSDKBenchmark { + pub async fn new(config_path: &str) -> Result { + // Get system info + let mut system = System::new_all(); + system.refresh_all(); + let cpu_count = system.cpus().len(); + let total_memory_gb = system.total_memory() as f64 / (1024.0 * 1024.0 * 1024.0); + + // Load configuration + let config = load_config(config_path)?; + + // Setup item encryptor + let item_encryptor = Self::setup_item_encryptor(&config).await?; + + info!( + "Initialized DB-ESDK Benchmark - CPU cores: {}, Memory: {:.1}GB", + cpu_count, total_memory_gb + ); + + Ok(Self { + item_encryptor, + config, + results: Vec::new(), + cpu_count, + total_memory_gb, + }) + } + + async fn setup_item_encryptor(config: &TestConfig) -> Result { + // Initialize material providers client + let mpl_config = MaterialProvidersConfig::builder().build()?; + let mpl_client = 
MaterialProvidersClient::from_conf(mpl_config)?; + + // Create Raw AES keyring + let mut key = [0u8; 32]; // 256-bit key + rand::rng().fill(&mut key); + + let raw_keyring = mpl_client + .create_raw_aes_keyring() + .key_name("test-aes-256-key") + .key_namespace("dbesdk-performance-test") + .wrapping_key(key.to_vec()) + .wrapping_alg(AesWrappingAlg::AlgAes256GcmIv12Tag16) + .send() + .await?; + + // Create item encryptor configuration + let attribute_actions = create_attribute_actions(); + let allowed_unsigned_attribute_prefix = ":".to_string(); + let partition_key_name = "partition_key".to_string(); + let sort_key_name = "sort_key".to_string(); + let algorithm_suite_id = DbeAlgorithmSuiteId::AlgAes256GcmHkdfSha512CommitKeyEcdsaP384SymsigHmacSha384; + + let encryptor_config = DynamoDbItemEncryptorConfig::builder() + .logical_table_name(config.table_name.clone()) + .partition_key_name(partition_key_name) + .sort_key_name(sort_key_name) + .attribute_actions_on_encrypt(attribute_actions) + .keyring(raw_keyring.clone()) + .allowed_unsigned_attribute_prefix(allowed_unsigned_attribute_prefix) + .algorithm_suite_id(algorithm_suite_id) + .build() + .map_err(|e| anyhow!("Failed to create item encryptor config: {}", e))?; + + // Create item encryptor client + let item_encryptor = ItemEncryptorClient::from_conf(encryptor_config) + .map_err(|e| anyhow!("Failed to create item encryptor: {}", e))?; + + info!("DB-ESDK item encryptor initialized successfully"); + Ok(item_encryptor) + } + + pub fn save_results(&self, output_path: &str) -> Result<()> { + crate::results::save_results( + &self.results, + output_path, + self.cpu_count, + self.total_memory_gb, + ) + } +} diff --git a/db-esdk-performance-testing/benchmarks/rust/src/config.rs b/db-esdk-performance-testing/benchmarks/rust/src/config.rs new file mode 100644 index 000000000..8c26821cb --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/src/config.rs @@ -0,0 +1,66 @@ +// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use std::fs; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TestConfig { + pub data_sizes: DataSizes, + pub iterations: IterationConfig, + pub concurrency_levels: Vec, + pub table_name: String, + pub quick_config: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DataSizes { + pub small: Vec, + pub medium: Vec, + pub large: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct IterationConfig { + pub warmup: usize, + pub measurement: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct QuickConfig { + pub data_sizes: QuickDataSizes, + pub iterations: QuickIterationConfig, + pub concurrency_levels: Vec, + pub test_types: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct QuickDataSizes { + pub small: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct QuickIterationConfig { + pub warmup: usize, + pub measurement: usize, +} + +pub fn load_config(config_path: &str) -> Result { + if !std::path::Path::new(config_path).exists() { + return Err(anyhow::anyhow!("Config file not found: {}", config_path)); + } + + let config_content = fs::read_to_string(config_path) + .with_context(|| format!("Failed to read config file: {}", config_path))?; + + let mut config: TestConfig = + serde_yaml::from_str(&config_content).with_context(|| "Failed to parse config file")?; + + // Set default table name if not provided + if config.table_name.is_empty() { + config.table_name = "dbesdk-performance-testing".to_string(); + } + + Ok(config) +} diff --git a/db-esdk-performance-testing/benchmarks/rust/src/main.rs b/db-esdk-performance-testing/benchmarks/rust/src/main.rs new file mode 100644 index 000000000..d04495fd0 --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/src/main.rs @@ -0,0 +1,103 @@ +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::collapsible_if)] + +mod alloc; +mod benchmark; +mod config; +mod results; +mod tests; + +use anyhow::Result; +use clap::{Arg, Command}; + +use benchmark::DbeSDKBenchmark; + +fn main() -> Result<()> { + // Set larger stack size for tokio runtime + let rt = tokio::runtime::Builder::new_multi_thread() + .worker_threads(4) + .thread_stack_size(8 * 1024 * 1024) // 8MB stack size + .enable_all() + .build()?; + + rt.block_on(async { run_benchmark().await }) +} + +async fn run_benchmark() -> Result<()> { + env_logger::Builder::from_default_env() + .filter_level(log::LevelFilter::Info) + .init(); + + let matches = Command::new("DB-ESDK Rust Benchmark") + .version("1.0") + .about("AWS Database Encryption SDK Performance Benchmark Suite - Rust Implementation") + .arg( + Arg::new("config") + .long("config") + .value_name("FILE") + .help("Path to test configuration file") + .default_value("../config/test-scenarios.yaml"), + ) + .arg( + Arg::new("output") + .long("output") + .value_name("FILE") + .help("Path to output results file") + .default_value("../results/raw-data/rust_results.json"), + ) + .arg( + Arg::new("quick") + .long("quick") + .help("Run quick test with reduced iterations") + .action(clap::ArgAction::SetTrue), + ) + .get_matches(); + + let config_path = matches.get_one::("config").unwrap(); + let output_path = matches.get_one::("output").unwrap(); + let quick = matches.get_flag("quick"); + + // Initialize benchmark + let mut bench = DbeSDKBenchmark::new(config_path).await?; + + // Adjust config for quick test + if quick { + let quick_config = bench.config.quick_config.as_ref().ok_or_else(|| { + anyhow::anyhow!("Quick mode requested but no quick_config found in config file") + })?; + + bench.config.iterations.measurement = quick_config.iterations.measurement; + bench.config.iterations.warmup = quick_config.iterations.warmup; + bench.config.data_sizes.small = 
quick_config.data_sizes.small.clone(); + bench.config.data_sizes.medium = Vec::new(); + bench.config.data_sizes.large = Vec::new(); + bench.config.concurrency_levels = quick_config.concurrency_levels.clone(); + } + + // Run benchmarks + bench.run_all_benchmarks(quick).await?; + + // Save results + bench.save_results(output_path)?; + + // Print summary + println!("\n=== DB-ESDK Rust Benchmark Summary ==="); + println!("Total tests completed: {}", bench.results.len()); + println!("Results saved to: {}", output_path); + + if !bench.results.is_empty() { + let mut max_throughput = 0.0; + for result in &bench.results { + if result.test_name == "throughput" && result.ops_per_second > max_throughput { + max_throughput = result.ops_per_second; + } + } + if max_throughput > 0.0 { + println!("Maximum throughput: {:.2} ops/sec", max_throughput); + } + } + + Ok(()) +} diff --git a/db-esdk-performance-testing/benchmarks/rust/src/results.rs b/db-esdk-performance-testing/benchmarks/rust/src/results.rs new file mode 100644 index 000000000..f0fa5bff1 --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/src/results.rs @@ -0,0 +1,148 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::alloc; +use anyhow::Result; +use chrono::Utc; +use serde::Serialize; +use std::fs::{self, File}; +use std::io::Write; +use std::path::Path; + +#[derive(Debug, Serialize, Clone)] +pub struct BenchmarkResult { + pub test_name: String, + pub language: String, + pub data_size: usize, + pub concurrency: usize, + pub encrypt_latency_ms: f64, + pub decrypt_latency_ms: f64, + pub end_to_end_latency_ms: f64, + pub ops_per_second: f64, + pub bytes_per_second: f64, + pub peak_memory_mb: f64, + pub memory_efficiency_ratio: f64, + pub p50_latency: f64, + pub p95_latency: f64, + pub p99_latency: f64, + pub timestamp: String, + pub rust_version: String, + pub cpu_count: usize, + pub total_memory_gb: f64, + pub alloc: alloc::ResourceResults, +} + +#[derive(Debug, Serialize)] +struct BenchmarkResults { + metadata: BenchmarkMetadata, + results: Vec, +} + +#[derive(Debug, Serialize)] +struct BenchmarkMetadata { + language: String, + timestamp: String, + rust_version: String, + cpu_count: usize, + total_memory_gb: f64, + total_tests: usize, +} + +// === Utility Functions === + +pub fn average(values: &[f64]) -> f64 { + if values.is_empty() { + return 0.0; + } + values.iter().sum::() / values.len() as f64 +} + +pub fn percentile(sorted_values: &[f64], p: f64) -> f64 { + if sorted_values.is_empty() { + return 0.0; + } + if p <= 0.0 { + return sorted_values[0]; + } + if p >= 100.0 { + return sorted_values[sorted_values.len() - 1]; + } + + let index = (p / 100.0) * (sorted_values.len() - 1) as f64; + let lower = index.floor() as usize; + let upper = index.ceil() as usize; + + if lower == upper { + return sorted_values[lower]; + } + + let weight = index - lower as f64; + sorted_values[lower] * (1.0 - weight) + sorted_values[upper] * weight +} + +// === Results Saving === + +pub fn save_results( + results: &[BenchmarkResult], + output_path: &str, + cpu_count: usize, + total_memory_gb: f64, +) -> Result<()> { + // Create output directory 
if it doesn't exist + if let Some(parent) = Path::new(output_path).parent() { + fs::create_dir_all(parent)?; + } + + let results_data = BenchmarkResults { + metadata: BenchmarkMetadata { + language: "rust".to_string(), + timestamp: Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(), + rust_version: std::env::var("RUSTC_VERSION").unwrap_or_else(|_| "unknown".to_string()), + cpu_count, + total_memory_gb, + total_tests: results.len(), + }, + results: results.to_vec(), + }; + + let json_data = serde_json::to_string_pretty(&results_data)?; + let mut file = File::create(output_path)?; + file.write_all(json_data.as_bytes())?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_average() { + let values = vec![1.0, 2.0, 3.0, 4.0, 5.0]; + assert_eq!(average(&values), 3.0); + + let empty: Vec = vec![]; + assert_eq!(average(&empty), 0.0); + } + + #[test] + fn test_percentile() { + let mut values = vec![1.0, 2.0, 3.0, 4.0, 5.0]; + values.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + assert_eq!(percentile(&values, 50.0), 3.0); + assert_eq!(percentile(&values, 0.0), 1.0); + assert_eq!(percentile(&values, 100.0), 5.0); + + let empty: Vec = vec![]; + assert_eq!(percentile(&empty, 50.0), 0.0); + } + + #[test] + fn test_format_data_size() { + assert_eq!(format_data_size(512), "512 B"); + assert_eq!(format_data_size(1024), "1.0 KB"); + assert_eq!(format_data_size(1024 * 1024), "1.0 MB"); + assert_eq!(format_data_size(1024 * 1024 * 1024), "1.0 GB"); + } +} diff --git a/db-esdk-performance-testing/benchmarks/rust/src/tests.rs b/db-esdk-performance-testing/benchmarks/rust/src/tests.rs new file mode 100644 index 000000000..54c449c6e --- /dev/null +++ b/db-esdk-performance-testing/benchmarks/rust/src/tests.rs @@ -0,0 +1,646 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::alloc; +use anyhow::{anyhow, Result}; +use aws_db_esdk::{ + item_encryptor::client::Client as ItemEncryptorClient, + CryptoAction, +}; +use aws_sdk_dynamodb::types::AttributeValue; +use chrono::Utc; +use futures::future::join_all; +use indicatif::{ProgressBar, ProgressStyle}; +use log::{info, warn}; +use memory_stats::memory_stats; +use rand::Rng; +use std::collections::HashMap; +use std::time::Instant; + +use crate::benchmark::{DbeSDKBenchmark, MEMORY_TEST_ITERATIONS}; +use crate::results::{BenchmarkResult, average, percentile}; + +impl DbeSDKBenchmark { + // === Helper Functions === + + async fn run_encrypt_decrypt_cycle(&self, item: &HashMap) -> Result<(f64, f64)> { + // Encrypt + let encrypt_start = Instant::now(); + let encrypted_item = self + .item_encryptor + .encrypt_item() + .plaintext_item(item.clone()) + .send() + .await? + .encrypted_item + .unwrap(); + let encrypt_duration = encrypt_start.elapsed().as_secs_f64() * 1000.0; + + // Decrypt + let decrypt_start = Instant::now(); + let _decrypted_result = self + .item_encryptor + .decrypt_item() + .encrypted_item(encrypted_item) + .send() + .await? 
+ .plaintext_item + .unwrap(); + let decrypt_duration = decrypt_start.elapsed().as_secs_f64() * 1000.0; + + Ok((encrypt_duration, decrypt_duration)) + } + + fn should_run_test_type(&self, test_type: &str, is_quick_mode: bool) -> bool { + if is_quick_mode { + if let Some(quick_config) = &self.config.quick_config { + if let Some(test_types) = &quick_config.test_types { + if !test_types.is_empty() { + return test_types.contains(&test_type.to_string()); + } + } + } + } + true + } + + // === Throughput Test Implementation === + + /// Runs throughput test measuring operations per second for DynamoDB item encryption + async fn run_throughput_test( + &mut self, + data_size: usize, + iterations: usize, + ) -> Result { + info!( + "Running throughput test - Size: {} bytes, Iterations: {}", + data_size, iterations + ); + + // Generate test data and create DynamoDB item + let test_data = generate_test_data(data_size); + let item = create_test_item_from_data("test-partition", 1, &test_data); + + // Warmup + for i in 0..self.config.iterations.warmup { + if let Err(e) = self.run_encrypt_decrypt_cycle(&item).await { + return Err(anyhow!("Warmup iteration {} failed: {}", i, e)); + } + } + + // Measurement runs + let mut encrypt_latencies = Vec::new(); + let mut decrypt_latencies = Vec::new(); + let mut end_to_end_latencies = Vec::new(); + let mut total_bytes = 0u64; + + // Progress bar + let pb = ProgressBar::new(iterations as u64); + pb.set_style( + ProgressStyle::default_bar() + .template("{msg} {percent}% |{bar:50.cyan/blue}| ({pos}/{len}) [{elapsed}<{eta}]") + .unwrap() + .progress_chars("██ "), + ); + pb.set_message("Throughput test"); + + let start_time = Instant::now(); + let alloc = alloc::ResourceTracker::new(); + for i in 0..iterations { + let iteration_start = Instant::now(); + let (encrypt_ms, decrypt_ms) = self + .run_encrypt_decrypt_cycle(&item) + .await + .map_err(|e| anyhow!("Measurement iteration {} failed: {}", i, e))?; + let iteration_duration = 
iteration_start.elapsed().as_secs_f64() * 1000.0; + + encrypt_latencies.push(encrypt_ms); + decrypt_latencies.push(decrypt_ms); + end_to_end_latencies.push(iteration_duration); + total_bytes += data_size as u64; + + pb.inc(1); + } + let total_duration = start_time.elapsed().as_secs_f64(); + pb.finish_and_clear(); + + // Calculate metrics + end_to_end_latencies.sort_by(|a, b| a.partial_cmp(b).unwrap()); + let result = BenchmarkResult { + test_name: "throughput".to_string(), + language: "rust".to_string(), + data_size, + concurrency: 1, + encrypt_latency_ms: average(&encrypt_latencies), + decrypt_latency_ms: average(&decrypt_latencies), + end_to_end_latency_ms: average(&end_to_end_latencies), + ops_per_second: iterations as f64 / total_duration, + bytes_per_second: total_bytes as f64 / total_duration, + p50_latency: percentile(&end_to_end_latencies, 50.0), + p95_latency: percentile(&end_to_end_latencies, 95.0), + p99_latency: percentile(&end_to_end_latencies, 99.0), + peak_memory_mb: 0.0, + memory_efficiency_ratio: 0.0, + timestamp: Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(), + rust_version: std::env::var("RUSTC_VERSION").unwrap_or_else(|_| "unknown".to_string()), + cpu_count: self.cpu_count, + total_memory_gb: self.total_memory_gb, + alloc: alloc.get_results(), + }; + + info!( + "Throughput test completed - Ops/sec: {:.2}, MB/sec: {:.2}", + result.ops_per_second, + result.bytes_per_second / (1024.0 * 1024.0) + ); + + Ok(result) + } + + // === Memory Test Implementation === + + /// Runs memory usage test by measuring memory delta during DynamoDB item encryption + async fn run_memory_test(&mut self, data_size: usize) -> Result { + info!( + "Running memory test - Size: {} bytes ({} iterations)", + data_size, MEMORY_TEST_ITERATIONS + ); + + // Generate test data and create DynamoDB item + let test_data = generate_test_data(data_size); + let item = create_test_item_from_data("test-partition", 1, &test_data); + + let mut peak_memory_delta_mb = 0.0; + let mut 
avg_memory_samples = Vec::new(); + let alloc = alloc::ResourceTracker::new(); + + // Run iterations with memory sampling + for i in 0..MEMORY_TEST_ITERATIONS { + // Force garbage collection and get baseline memory + std::hint::black_box(&item); // Prevent optimization + tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; + + let baseline_memory = if let Some(stats) = memory_stats() { + stats.physical_mem as f64 / (1024.0 * 1024.0) + } else { + 0.0 + }; + + let mut iteration_samples = Vec::new(); + let mut iteration_peak = baseline_memory; + + let operation_start = Instant::now(); + + // Run DynamoDB encryption operation with memory sampling + let item_clone = item.clone(); + let item_encryptor = self.item_encryptor.clone(); + + let operation_task = tokio::spawn(async move { + Self::run_encrypt_decrypt_cycle_static(&item_encryptor, &item_clone).await + }); + + // Sample memory during operation + let mut sample_count = 0; + while !operation_task.is_finished() { + if let Some(stats) = memory_stats() { + let current_physical = stats.physical_mem as f64 / (1024.0 * 1024.0); + let delta = current_physical - baseline_memory; + + // Only track positive deltas for average (memory increases) + if delta > 0.0 { + iteration_samples.push(delta); + } + + // Track peak regardless of sign + if current_physical > iteration_peak { + iteration_peak = current_physical; + } + sample_count += 1; + } + tokio::time::sleep(tokio::time::Duration::from_millis(1)).await; + } + + // Wait for operation to complete + let _ = operation_task.await.unwrap().unwrap(); + let operation_duration = operation_start.elapsed(); + + // Get final memory delta + let final_stats = memory_stats().unwrap_or(memory_stats::MemoryStats { + physical_mem: 0, + virtual_mem: 0, + }); + let final_physical = final_stats.physical_mem as f64 / (1024.0 * 1024.0); + let final_delta = final_physical - baseline_memory; + + // Calculate metrics for this iteration (memory delta from baseline) + let 
iter_peak_delta_mb = (iteration_peak - baseline_memory).max(final_delta); + + let iter_avg_delta_mb = if !iteration_samples.is_empty() { + iteration_samples.iter().sum::() / iteration_samples.len() as f64 + } else { + // If no positive deltas, use final delta if positive, otherwise 0 + final_delta.max(0.0) + }; + + // Update global maximum + if iter_peak_delta_mb > peak_memory_delta_mb { + peak_memory_delta_mb = iter_peak_delta_mb; + } + + avg_memory_samples.push(iter_avg_delta_mb); + + info!( + "=== Iteration {} === Peak Delta: {:.2} MB, Avg Delta: {:.2} MB ({:?}, {} samples)", + i + 1, + iter_peak_delta_mb, + iter_avg_delta_mb, + operation_duration, + sample_count + ); + } + + // Calculate overall averages + let overall_avg_delta_mb = if !avg_memory_samples.is_empty() { + avg_memory_samples.iter().sum::() / avg_memory_samples.len() as f64 + } else { + 0.0 + }; + + // Calculate memory efficiency + let memory_efficiency = if peak_memory_delta_mb > 0.0 { + data_size as f64 / (peak_memory_delta_mb * 1024.0 * 1024.0) + } else { + 0.0 + }; + + info!("\nMemory Summary:"); + info!( + "- Peak Memory Delta: {:.2} MB (operation overhead)", + peak_memory_delta_mb + ); + info!( + "- Average Memory Delta: {:.2} MB (operation overhead)", + overall_avg_delta_mb + ); + + let result = BenchmarkResult { + test_name: "memory".to_string(), + language: "rust".to_string(), + data_size, + concurrency: 1, + encrypt_latency_ms: 0.0, + decrypt_latency_ms: 0.0, + end_to_end_latency_ms: 0.0, + ops_per_second: 0.0, + bytes_per_second: 0.0, + peak_memory_mb: peak_memory_delta_mb, + memory_efficiency_ratio: memory_efficiency, + p50_latency: 0.0, + p95_latency: 0.0, + p99_latency: 0.0, + timestamp: Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(), + rust_version: std::env::var("RUSTC_VERSION").unwrap_or_else(|_| "unknown".to_string()), + cpu_count: self.cpu_count, + total_memory_gb: self.total_memory_gb, + alloc: alloc.get_results(), + }; + + Ok(result) + } + + // Static helper for memory test 
+ async fn run_encrypt_decrypt_cycle_static( + item_encryptor: &ItemEncryptorClient, + item: &HashMap, + ) -> Result<(f64, f64)> { + // Encrypt + let encrypt_start = Instant::now(); + let encrypted_item = item_encryptor + .encrypt_item() + .plaintext_item(item.clone()) + .send() + .await? + .encrypted_item + .unwrap(); + let encrypt_duration = encrypt_start.elapsed().as_secs_f64() * 1000.0; + + // Decrypt + let decrypt_start = Instant::now(); + let _decrypted_result = item_encryptor + .decrypt_item() + .encrypted_item(encrypted_item) + .send() + .await? + .plaintext_item + .unwrap(); + let decrypt_duration = decrypt_start.elapsed().as_secs_f64() * 1000.0; + + Ok((encrypt_duration, decrypt_duration)) + } + + // === Concurrent Test Implementation === + + /// Runs concurrent test with multiple workers to measure parallel DynamoDB encryption performance + async fn run_concurrent_test( + &mut self, + data_size: usize, + concurrency: usize, + iterations_per_worker: usize, + ) -> Result { + info!( + "Running concurrent test - Size: {} bytes, Concurrency: {}", + data_size, concurrency + ); + + // Create tasks for concurrent execution + let mut tasks = Vec::new(); + for _worker_id in 0..concurrency { + let item_encryptor = self.item_encryptor.clone(); + + let task = tokio::spawn(async move { + let mut worker_times = Vec::new(); + for j in 0..iterations_per_worker { + let iter_start = Instant::now(); + + // Generate test data and create DynamoDB item per worker + let worker_data = generate_test_data(data_size); + let worker_item = create_test_item_from_data("test-partition", j as i32, &worker_data); + + // Run encrypt-decrypt cycle + let encrypted_item = item_encryptor + .encrypt_item() + .plaintext_item(worker_item.clone()) + .send() + .await? + .encrypted_item + .unwrap(); + + // Decrypt + let _decrypted_result = item_encryptor + .decrypt_item() + .encrypted_item(encrypted_item) + .send() + .await? 
+ .plaintext_item + .unwrap(); + + worker_times.push(iter_start.elapsed().as_secs_f64() * 1000.0); + } + Ok::, anyhow::Error>(worker_times) + }); + tasks.push(task); + } + + let start_time = Instant::now(); + let alloc = alloc::ResourceTracker::new(); + let results = join_all(tasks).await; + let total_duration = start_time.elapsed().as_secs_f64(); + + // Collect all times + let mut all_times = Vec::new(); + for result in results { + let worker_times = result??; + all_times.extend(worker_times); + } + + // Calculate metrics + let total_ops = concurrency * iterations_per_worker; + let total_bytes = total_ops * data_size; + + all_times.sort_by(|a, b| a.partial_cmp(b).unwrap()); + let result = BenchmarkResult { + test_name: "concurrent".to_string(), + language: "rust".to_string(), + data_size, + concurrency, + encrypt_latency_ms: 0.0, + decrypt_latency_ms: 0.0, + end_to_end_latency_ms: average(&all_times), + ops_per_second: total_ops as f64 / total_duration, + bytes_per_second: total_bytes as f64 / total_duration, + p50_latency: percentile(&all_times, 50.0), + p95_latency: percentile(&all_times, 95.0), + p99_latency: percentile(&all_times, 99.0), + peak_memory_mb: 0.0, + memory_efficiency_ratio: 0.0, + timestamp: Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(), + rust_version: std::env::var("RUSTC_VERSION").unwrap_or_else(|_| "unknown".to_string()), + cpu_count: self.cpu_count, + total_memory_gb: self.total_memory_gb, + alloc: alloc.get_results(), + }; + + info!( + "Concurrent test completed - Ops/sec: {:.2}, Avg latency: {:.2} ms", + result.ops_per_second, result.end_to_end_latency_ms + ); + + Ok(result) + } + + // === Test Orchestration === + + async fn run_throughput_tests(&mut self, data_sizes: &[usize], iterations: usize) { + info!("Running throughput tests..."); + for &data_size in data_sizes { + match self.run_throughput_test(data_size, iterations).await { + Ok(result) => { + info!( + "Throughput test completed: {:.2} ops/sec", + result.ops_per_second + ); + 
self.results.push(result); + } + Err(e) => { + warn!("Throughput test failed: {}", e); + } + } + } + } + + async fn run_memory_tests(&mut self, data_sizes: &[usize]) { + info!("Running memory tests..."); + for &data_size in data_sizes { + match self.run_memory_test(data_size).await { + Ok(result) => { + info!( + "Memory test completed: {:.2} MB peak", + result.peak_memory_mb + ); + self.results.push(result); + } + Err(e) => { + warn!("Memory test failed: {}", e); + } + } + } + } + + async fn run_concurrency_tests(&mut self, data_sizes: &[usize], concurrency_levels: &[u32]) { + info!("Running concurrency tests..."); + for &data_size in data_sizes { + for &concurrency in concurrency_levels { + if concurrency > 1 { + // Skip single-threaded + match self.run_concurrent_test(data_size, concurrency as usize, 5).await { + Ok(result) => { + info!( + "Concurrent test completed: {:.2} ops/sec @ {} threads", + result.ops_per_second, concurrency + ); + self.results.push(result); + } + Err(e) => { + warn!("Concurrent test failed: {}", e); + } + } + } + } + } + } + + pub async fn run_all_benchmarks(&mut self, is_quick_mode: bool) -> Result<()> { + info!("Starting comprehensive DB-ESDK benchmark suite"); + + // Combine all data sizes + let mut data_sizes = Vec::new(); + data_sizes.extend(&self.config.data_sizes.small); + data_sizes.extend(&self.config.data_sizes.medium); + data_sizes.extend(&self.config.data_sizes.large); + + let concurrency_levels = self.config.concurrency_levels.clone(); + + // Run test suites + if self.should_run_test_type("throughput", is_quick_mode) { + self.run_throughput_tests(&data_sizes, self.config.iterations.measurement) + .await; + } else { + info!("Skipping throughput tests (not in test_types)"); + } + + if self.should_run_test_type("memory", is_quick_mode) { + self.run_memory_tests(&data_sizes).await; + } else { + info!("Skipping memory tests (not in test_types)"); + } + + if self.should_run_test_type("concurrency", is_quick_mode) { + 
self.run_concurrency_tests(&data_sizes, &concurrency_levels) + .await; + } else { + info!("Skipping concurrency tests (not in test_types)"); + } + info!( + "Benchmark suite completed. Total results: {}", + self.results.len() + ); + Ok(()) + } +} + +// === DynamoDB Helper Functions === + +/// Generate random test data of specified size +fn generate_test_data(size: usize) -> Vec { + let mut data = vec![0u8; size]; + rand::rng().fill(&mut data[..]); + data +} + +/// Create a test DynamoDB item with the given data +fn create_test_item_from_data( + partition_key: &str, + sort_key: i32, + data: &[u8], +) -> HashMap { + let mut item = HashMap::new(); + + item.insert( + "partition_key".to_string(), + AttributeValue::S(partition_key.to_string()), + ); + + item.insert( + "sort_key".to_string(), + AttributeValue::N(sort_key.to_string()), + ); + + item.insert( + "attribute1".to_string(), + AttributeValue::B(data.to_vec().into()), + ); + + item.insert( + "attribute2".to_string(), + AttributeValue::S("test-value".to_string()), + ); + + // Unsigned attribute (not encrypted) + item.insert( + ":attribute3".to_string(), + AttributeValue::S("unsigned-value".to_string()), + ); + + item +} + +/// Create attribute actions configuration for encryption +pub fn create_attribute_actions() -> HashMap { + let mut actions = HashMap::new(); + + actions.insert("partition_key".to_string(), CryptoAction::SignOnly); + actions.insert("sort_key".to_string(), CryptoAction::SignOnly); + actions.insert("attribute1".to_string(), CryptoAction::EncryptAndSign); + actions.insert("attribute2".to_string(), CryptoAction::SignOnly); + actions.insert(":attribute3".to_string(), CryptoAction::DoNothing); + + actions +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_create_test_item() { + let data = vec![1, 2, 3, 4, 5]; + let item = create_test_item_from_data("test-pk", 42, &data); + + assert_eq!(item.len(), 5); + assert!(item.contains_key("partition_key")); + 
assert!(item.contains_key("sort_key")); + assert!(item.contains_key("attribute1")); + assert!(item.contains_key("attribute2")); + assert!(item.contains_key(":attribute3")); + + if let Some(AttributeValue::S(pk)) = item.get("partition_key") { + assert_eq!(pk, "test-pk"); + } else { + panic!("partition_key not found or wrong type"); + } + + if let Some(AttributeValue::N(sk)) = item.get("sort_key") { + assert_eq!(sk, "42"); + } else { + panic!("sort_key not found or wrong type"); + } + + if let Some(AttributeValue::B(blob)) = item.get("attribute1") { + assert_eq!(blob.as_ref(), &data); + } else { + panic!("attribute1 not found or wrong type"); + } + } + + #[test] + fn test_create_attribute_actions() { + let actions = create_attribute_actions(); + + assert_eq!(actions.len(), 5); + assert_eq!(actions.get("partition_key"), Some(&CryptoAction::SignOnly)); + assert_eq!(actions.get("sort_key"), Some(&CryptoAction::SignOnly)); + assert_eq!(actions.get("attribute1"), Some(&CryptoAction::EncryptAndSign)); + assert_eq!(actions.get("attribute2"), Some(&CryptoAction::SignOnly)); + assert_eq!(actions.get(":attribute3"), Some(&CryptoAction::DoNothing)); + } +}