// benches/bulk_ops.rs
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId, BatchSize};

mod common;
use common::*;

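// Every benchmark below compares all backends returned by `BackendType::all()`
// across a few data-set sizes. Data comes from the shared `common` helpers with
// deterministic seeds, so each backend is exercised with identical keys and values.
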
/// Benchmark bulk insert operations with varying batch sizes
fn bench_bulk_insert(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/insert");

    for size in [100, 1_000, 10_000] {
        for backend_type in BackendType::all() {
            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend_type.name()), size),
                &(backend_type, size),
                |b, &(backend_type, size)| {
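                    // `iter_batched` runs the setup closure outside the timed section,
                    // so backend construction and data generation are not measured;
                    // only the loop of `set` calls below is.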
                    b.iter_batched(
                        || {
                            let backend = BenchmarkBackend::new(backend_type).unwrap();
                            let mut generator = DataGenerator::new(42);
                            let data = generator.generate_string_pairs(size, 100);
                            (backend, data)
                        },
                        |(backend, data)| {
                            for (key, value) in data {
                                backend.storage.set(key, value).unwrap();
                            }
                        },
                        BatchSize::SmallInput
                    );
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk sequential read operations
fn bench_bulk_read_sequential(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/read_sequential");

    for size in [1_000, 10_000] {
        for backend_type in BackendType::all() {
            let backend = setup_populated_backend(backend_type, size, 100)
                .expect("Failed to setup backend");
            let generator = DataGenerator::new(42);

            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend.name()), size),
                &(backend, size),
                |b, (backend, size)| {
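                    // The backend is populated once during setup above, so each timed
                    // pass only measures reading every key back in insertion order.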
                    b.iter(|| {
                        for i in 0..*size {
                            let key = generator.generate_key("bench:key", i);
                            backend.storage.get(&key).unwrap();
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk random read operations
fn bench_bulk_read_random(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/read_random");

    for size in [1_000, 10_000] {
        for backend_type in BackendType::all() {
            let backend = setup_populated_backend(backend_type, size, 100)
                .expect("Failed to setup backend");
            let generator = DataGenerator::new(42);

            // Pre-generate random indices for fair comparison
            let indices: Vec<usize> = (0..size)
                .map(|_| rand::random::<usize>() % size)
                .collect();
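            // Indices are drawn with replacement, so a single pass may read some
            // keys more than once and skip others.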

            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend.name()), size),
                &(backend, indices),
                |b, (backend, indices)| {
                    b.iter(|| {
                        for &idx in indices {
                            let key = generator.generate_key("bench:key", idx);
                            backend.storage.get(&key).unwrap();
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk update operations
fn bench_bulk_update(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/update");

    for size in [100, 1_000, 10_000] {
        for backend_type in BackendType::all() {
            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend_type.name()), size),
                &(backend_type, size),
                |b, &(backend_type, size)| {
                    b.iter_batched(
                        || {
                            let backend = setup_populated_backend(backend_type, size, 100).unwrap();
                            let mut generator = DataGenerator::new(43); // Different seed for updates
                            let updates = generator.generate_string_pairs(size, 100);
                            (backend, updates)
                        },
                        |(backend, updates)| {
                            for (key, value) in updates {
                                backend.storage.set(key, value).unwrap();
                            }
                        },
                        BatchSize::SmallInput
                    );
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk delete operations
fn bench_bulk_delete(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/delete");

    for size in [100, 1_000, 10_000] {
        for backend_type in BackendType::all() {
            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend_type.name()), size),
                &(backend_type, size),
                |b, &(backend_type, size)| {
                    b.iter_batched(
                        || {
                            let backend = setup_populated_backend(backend_type, size, 100).unwrap();
                            let generator = DataGenerator::new(42);
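                            // Rebuild the populated key names (same prefix and seed)
                            // so every delete below removes an existing entry.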
                            let keys: Vec<String> = (0..size)
                                .map(|i| generator.generate_key("bench:key", i))
                                .collect();
                            (backend, keys)
                        },
                        |(backend, keys)| {
                            for key in keys {
                                backend.storage.del(key).unwrap();
                            }
                        },
                        BatchSize::SmallInput
                    );
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk hash insert operations
fn bench_bulk_hash_insert(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/hash_insert");

    for size in [100, 1_000] {
        for backend_type in BackendType::all() {
            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend_type.name()), size),
                &(backend_type, size),
                |b, &(backend_type, size)| {
                    b.iter_batched(
                        || {
                            let backend = BenchmarkBackend::new(backend_type).unwrap();
                            let mut generator = DataGenerator::new(42);
                            let data = generator.generate_hash_data(size, 10, 100);
                            (backend, data)
                        },
                        |(backend, data)| {
                            for (key, fields) in data {
                                backend.storage.hset(&key, fields).unwrap();
                            }
                        },
                        BatchSize::SmallInput
                    );
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk hash read operations (HGETALL)
fn bench_bulk_hash_read(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/hash_read");

    for size in [100, 1_000] {
        for backend_type in BackendType::all() {
            let backend = setup_populated_backend_hashes(backend_type, size, 10, 100)
                .expect("Failed to setup backend");
            let generator = DataGenerator::new(42);

            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend.name()), size),
                &(backend, size),
                |b, (backend, size)| {
                    b.iter(|| {
                        for i in 0..*size {
                            let key = generator.generate_key("bench:hash", i);
                            backend.storage.hgetall(&key).unwrap();
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk list insert operations
fn bench_bulk_list_insert(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/list_insert");

    for size in [100, 1_000] {
        for backend_type in BackendType::all() {
            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend_type.name()), size),
                &(backend_type, size),
                |b, &(backend_type, size)| {
                    b.iter_batched(
                        || {
                            let backend = BenchmarkBackend::new(backend_type).unwrap();
                            let mut generator = DataGenerator::new(42);
                            let data = generator.generate_list_data(size, 10, 100);
                            (backend, data)
                        },
                        |(backend, data)| {
                            for (key, elements) in data {
                                backend.storage.rpush(&key, elements).unwrap();
                            }
                        },
                        BatchSize::SmallInput
                    );
                }
            );
        }
    }

    group.finish();
}

/// Benchmark bulk list read operations (LRANGE)
fn bench_bulk_list_read(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/list_read");

    for size in [100, 1_000] {
        for backend_type in BackendType::all() {
            let backend = setup_populated_backend_lists(backend_type, size, 10, 100)
                .expect("Failed to setup backend");
            let generator = DataGenerator::new(42);

            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend.name()), size),
                &(backend, size),
                |b, (backend, size)| {
                    b.iter(|| {
                        for i in 0..*size {
                            let key = generator.generate_key("bench:list", i);
                            backend.storage.lrange(&key, 0, -1).unwrap();
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

/// Benchmark mixed workload (70% reads, 30% writes)
fn bench_mixed_workload(c: &mut Criterion) {
    let mut group = c.benchmark_group("bulk_ops/mixed_workload");

    for size in [1_000, 10_000] {
        for backend_type in BackendType::all() {
            let backend = setup_populated_backend(backend_type, size, 100)
                .expect("Failed to setup backend");
            let mut generator = DataGenerator::new(42);

            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend.name()), size),
                &(backend, size),
                |b, (backend, size)| {
                    b.iter(|| {
                        for i in 0..*size {
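                            // `i % 10 < 7` yields a deterministic 70/30 read/write
                            // split within every block of ten operations.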
                            if i % 10 < 7 {
                                // 70% reads
                                let key = generator.generate_key("bench:key", i % size);
                                backend.storage.get(&key).unwrap();
                            } else {
                                // 30% writes
                                let key = generator.generate_key("bench:key", i);
                                let value = generator.generate_value(100);
                                backend.storage.set(key, value).unwrap();
                            }
                        }
                    });
                }
            );
        }
    }

    group.finish();
}

criterion_group!(
    benches,
    bench_bulk_insert,
    bench_bulk_read_sequential,
    bench_bulk_read_random,
    bench_bulk_update,
    bench_bulk_delete,
    bench_bulk_hash_insert,
    bench_bulk_hash_read,
    bench_bulk_list_insert,
    bench_bulk_list_read,
    bench_mixed_workload,
);

criterion_main!(benches);