// benches/concurrent_ops.rs

use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use tokio::runtime::Runtime;

mod common;
use common::*;
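// Shared structure: every benchmark below iterates over a client count (10 or
// 50) and every storage backend, spawns that many Tokio tasks against a shared
// storage handle, and awaits them all, so one Criterion iteration measures a
// full fan-out/fan-in cycle. Results appear under IDs such as
// "concurrent_ops/writes/<backend>/clients/10".
//
// Assuming this file is registered as a bench target in Cargo.toml
// ([[bench]] name = "concurrent_ops", harness = false), run it with
// `cargo bench --bench concurrent_ops`.
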
/// Benchmark concurrent write operations
fn bench_concurrent_writes(c: &mut Criterion) {
    let mut group = c.benchmark_group("concurrent_ops/writes");

    for num_clients in [10, 50] {
        for backend_type in BackendType::all() {
            let backend = BenchmarkBackend::new(backend_type).expect("Failed to create backend");
            let storage = backend.storage.clone();

            group.bench_with_input(
                BenchmarkId::new(format!("{}/clients", backend.name()), num_clients),
                &(storage, num_clients),
                |b, (storage, num_clients)| {
                    let rt = Runtime::new().unwrap();
                    b.to_async(&rt).iter(|| {
                        let storage = storage.clone();
                        let num_clients = *num_clients;
                        async move {
                            let mut tasks = Vec::new();

                            for client_id in 0..num_clients {
                                let storage = storage.clone();
                                let task = tokio::spawn(async move {
                                    let mut generator = DataGenerator::new(42 + client_id as u64);
                                    for i in 0..100 {
                                        let key = format!("client:{}:key:{}", client_id, i);
                                        let value = generator.generate_value(100);
                                        storage.set(key, value).unwrap();
                                    }
                                });
                                tasks.push(task);
                            }

                            for task in tasks {
                                task.await.unwrap();
                            }
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark concurrent read operations
fn bench_concurrent_reads(c: &mut Criterion) {
    let mut group = c.benchmark_group("concurrent_ops/reads");

    for num_clients in [10, 50] {
        for backend_type in BackendType::all() {
            // Pre-populate with data
            let backend = setup_populated_backend(backend_type, 10_000, 100)
                .expect("Failed to setup backend");
            let storage = backend.storage.clone();

            group.bench_with_input(
                BenchmarkId::new(format!("{}/clients", backend.name()), num_clients),
                &(storage, num_clients),
                |b, (storage, num_clients)| {
                    let rt = Runtime::new().unwrap();
                    b.to_async(&rt).iter(|| {
                        let storage = storage.clone();
                        let num_clients = *num_clients;
                        async move {
                            let mut tasks = Vec::new();

                            for client_id in 0..num_clients {
                                let storage = storage.clone();
                                let task = tokio::spawn(async move {
                                    let generator = DataGenerator::new(42);
                                    for i in 0..100 {
                                        let key_id = (client_id * 100 + i) % 10_000;
                                        let key = generator.generate_key("bench:key", key_id);
                                        storage.get(&key).unwrap();
                                    }
                                });
                                tasks.push(task);
                            }

                            for task in tasks {
                                task.await.unwrap();
                            }
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark mixed concurrent workload (70% reads, 30% writes)
fn bench_concurrent_mixed(c: &mut Criterion) {
    let mut group = c.benchmark_group("concurrent_ops/mixed");

    for num_clients in [10, 50] {
        for backend_type in BackendType::all() {
            // Pre-populate with data
            let backend = setup_populated_backend(backend_type, 10_000, 100)
                .expect("Failed to setup backend");
            let storage = backend.storage.clone();

            group.bench_with_input(
                BenchmarkId::new(format!("{}/clients", backend.name()), num_clients),
                &(storage, num_clients),
                |b, (storage, num_clients)| {
                    let rt = Runtime::new().unwrap();
                    b.to_async(&rt).iter(|| {
                        let storage = storage.clone();
                        let num_clients = *num_clients;
                        async move {
                            let mut tasks = Vec::new();

                            for client_id in 0..num_clients {
                                let storage = storage.clone();
                                let task = tokio::spawn(async move {
                                    let mut generator = DataGenerator::new(42 + client_id as u64);
                                    for i in 0..100 {
                                        if i % 10 < 7 {
                                            // 70% reads
                                            let key_id = (client_id * 100 + i) % 10_000;
                                            let key = generator.generate_key("bench:key", key_id);
                                            storage.get(&key).unwrap();
                                        } else {
                                            // 30% writes
                                            let key = format!("client:{}:key:{}", client_id, i);
                                            let value = generator.generate_value(100);
                                            storage.set(key, value).unwrap();
                                        }
                                    }
                                });
                                tasks.push(task);
                            }

                            for task in tasks {
                                task.await.unwrap();
                            }
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark concurrent hash operations
fn bench_concurrent_hash_ops(c: &mut Criterion) {
    let mut group = c.benchmark_group("concurrent_ops/hash_ops");

    for num_clients in [10, 50] {
        for backend_type in BackendType::all() {
            let backend = BenchmarkBackend::new(backend_type).expect("Failed to create backend");
            let storage = backend.storage.clone();

            group.bench_with_input(
                BenchmarkId::new(format!("{}/clients", backend.name()), num_clients),
                &(storage, num_clients),
                |b, (storage, num_clients)| {
                    let rt = Runtime::new().unwrap();
                    b.to_async(&rt).iter(|| {
                        let storage = storage.clone();
                        let num_clients = *num_clients;
                        async move {
                            let mut tasks = Vec::new();

                            for client_id in 0..num_clients {
                                let storage = storage.clone();
                                let task = tokio::spawn(async move {
                                    let mut generator = DataGenerator::new(42 + client_id as u64);
                                    for i in 0..50 {
                                        let key = format!("client:{}:hash:{}", client_id, i);
                                        let field = format!("field{}", i % 10);
                                        let value = generator.generate_value(100);
                                        storage.hset(&key, vec![(field, value)]).unwrap();
                                    }
                                });
                                tasks.push(task);
                            }

                            for task in tasks {
                                task.await.unwrap();
                            }
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark concurrent list operations
fn bench_concurrent_list_ops(c: &mut Criterion) {
    let mut group = c.benchmark_group("concurrent_ops/list_ops");

    for num_clients in [10, 50] {
        for backend_type in BackendType::all() {
            let backend = BenchmarkBackend::new(backend_type).expect("Failed to create backend");
            let storage = backend.storage.clone();

            group.bench_with_input(
                BenchmarkId::new(format!("{}/clients", backend.name()), num_clients),
                &(storage, num_clients),
                |b, (storage, num_clients)| {
                    let rt = Runtime::new().unwrap();
                    b.to_async(&rt).iter(|| {
                        let storage = storage.clone();
                        let num_clients = *num_clients;
                        async move {
                            let mut tasks = Vec::new();

                            for client_id in 0..num_clients {
                                let storage = storage.clone();
                                let task = tokio::spawn(async move {
                                    let mut generator = DataGenerator::new(42 + client_id as u64);
                                    for i in 0..50 {
                                        let key = format!("client:{}:list:{}", client_id, i);
                                        let element = generator.generate_value(100);
                                        storage.rpush(&key, vec![element]).unwrap();
                                    }
                                });
                                tasks.push(task);
                            }

                            for task in tasks {
                                task.await.unwrap();
                            }
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark concurrent scan operations
fn bench_concurrent_scans(c: &mut Criterion) {
    let mut group = c.benchmark_group("concurrent_ops/scans");

    for num_clients in [10, 50] {
        for backend_type in BackendType::all() {
            // Pre-populate with data
            let backend = setup_populated_backend(backend_type, 10_000, 100)
                .expect("Failed to setup backend");
            let storage = backend.storage.clone();

            group.bench_with_input(
                BenchmarkId::new(format!("{}/clients", backend.name()), num_clients),
                &(storage, num_clients),
                |b, (storage, num_clients)| {
                    let rt = Runtime::new().unwrap();
                    b.to_async(&rt).iter(|| {
                        let storage = storage.clone();
                        let num_clients = *num_clients;
                        async move {
                            let mut tasks = Vec::new();

                            for _client_id in 0..num_clients {
                                let storage = storage.clone();
                                let task = tokio::spawn(async move {
                                    let mut cursor = 0u64;
                                    let mut total = 0;
                                    loop {
                                        let (next_cursor, items) = storage
                                            .scan(cursor, None, Some(100))
                                            .unwrap();
                                        total += items.len();
                                        if next_cursor == 0 {
                                            break;
                                        }
                                        cursor = next_cursor;
                                    }
                                    total
                                });
                                tasks.push(task);
                            }

                            for task in tasks {
                                task.await.unwrap();
                            }
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

criterion_group!(
    benches,
    bench_concurrent_writes,
    bench_concurrent_reads,
    bench_concurrent_mixed,
    bench_concurrent_hash_ops,
    bench_concurrent_list_ops,
    bench_concurrent_scans,
);

criterion_main!(benches);