benchmarking
benches/memory_profile.rs | 337 lines added (new file)

@@ -0,0 +1,337 @@
// benches/memory_profile.rs
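//
// Memory-profiling benchmarks for the storage backends: each benchmark resets
// the tracking counters, performs one operation (or batch), and prints the
// live bytes, peak bytes, and allocation count observed through the tracking
// allocator below. `BenchmarkBackend`, `DataGenerator`, `BackendType`, and
// `setup_populated_backend` are provided by the shared `common` benchmark module.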
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId, BatchSize};
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

mod common;
use common::*;

// Simple memory tracking allocator
struct TrackingAllocator;

static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
static DEALLOCATED: AtomicUsize = AtomicUsize::new(0);
static PEAK: AtomicUsize = AtomicUsize::new(0);
static ALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
unsafe impl GlobalAlloc for TrackingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ret = System.alloc(layout);
        if !ret.is_null() {
            let size = layout.size();
            ALLOCATED.fetch_add(size, Ordering::SeqCst);
            ALLOC_COUNT.fetch_add(1, Ordering::SeqCst);

            // Update the peak if necessary. Use saturating_sub because the counters
            // can be reset between an allocation and its matching deallocation, so
            // DEALLOCATED may momentarily exceed ALLOCATED.
            let current = ALLOCATED.load(Ordering::SeqCst)
                .saturating_sub(DEALLOCATED.load(Ordering::SeqCst));
            let mut peak = PEAK.load(Ordering::SeqCst);
            while current > peak {
                match PEAK.compare_exchange_weak(peak, current, Ordering::SeqCst, Ordering::SeqCst) {
                    Ok(_) => break,
                    Err(x) => peak = x,
                }
            }
        }
        ret
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout);
        DEALLOCATED.fetch_add(layout.size(), Ordering::SeqCst);
    }
}

#[global_allocator]
static GLOBAL: TrackingAllocator = TrackingAllocator;
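// Note: registering TrackingAllocator as the global allocator means every heap
// allocation in the benchmark process is counted, including Criterion's own
// bookkeeping between reset_memory_tracking() and get_memory_stats(), so the
// reported figures are indicative rather than exact per-operation costs.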

/// Reset memory tracking counters
fn reset_memory_tracking() {
    ALLOCATED.store(0, Ordering::SeqCst);
    DEALLOCATED.store(0, Ordering::SeqCst);
    PEAK.store(0, Ordering::SeqCst);
    ALLOC_COUNT.store(0, Ordering::SeqCst);
}

/// Get current memory stats as (live bytes, peak bytes, allocation count)
fn get_memory_stats() -> (usize, usize, usize) {
    let allocated = ALLOCATED.load(Ordering::SeqCst);
    let deallocated = DEALLOCATED.load(Ordering::SeqCst);
    let peak = PEAK.load(Ordering::SeqCst);
    let alloc_count = ALLOC_COUNT.load(Ordering::SeqCst);

    let current = allocated.saturating_sub(deallocated);
    (current, peak, alloc_count)
}
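
// A minimal sketch (not used by the benches below) of the measurement pattern
// each benchmark follows: reset the counters, run the operation, then read the
// live/peak/allocation-count figures. `measure_alloc` is an illustrative helper,
// not part of the common module.
#[allow(dead_code)]
fn measure_alloc<T>(op: impl FnOnce() -> T) -> (T, usize, usize, usize) {
    reset_memory_tracking();
    let out = op();
    let (current, peak, allocs) = get_memory_stats();
    (out, current, peak, allocs)
}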

/// Profile memory usage for single SET operations
fn profile_memory_set(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_profile/set");

    for backend_type in BackendType::all() {
        group.bench_with_input(
            BenchmarkId::new(backend_type.name(), "100bytes"),
            &backend_type,
            |b, &backend_type| {
                b.iter_batched(
                    || {
                        reset_memory_tracking();
                        let backend = BenchmarkBackend::new(backend_type).unwrap();
                        let mut generator = DataGenerator::new(42);
                        let key = generator.generate_key("bench:key", 0);
                        let value = generator.generate_value(100);
                        (backend, key, value)
                    },
                    |(backend, key, value)| {
                        backend.storage.set(key, value).unwrap();
                        let (current, peak, allocs) = get_memory_stats();
                        println!("{}: current={}, peak={}, allocs={}",
                                 backend.name(), current, peak, allocs);
                    },
                    BatchSize::SmallInput
                );
            }
        );
    }

    group.finish();
}

/// Profile memory usage for single GET operations
fn profile_memory_get(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_profile/get");

    for backend_type in BackendType::all() {
        let backend = setup_populated_backend(backend_type, 1_000, 100)
            .expect("Failed to setup backend");
        let mut generator = DataGenerator::new(42);

        group.bench_with_input(
            BenchmarkId::new(backend.name(), "100bytes"),
            &backend,
            |b, backend| {
                b.iter_batched(
                    || {
                        reset_memory_tracking();
                        generator.generate_key("bench:key", 0)
                    },
                    |key| {
                        backend.storage.get(&key).unwrap();
                        let (current, peak, allocs) = get_memory_stats();
                        println!("{}: current={}, peak={}, allocs={}",
                                 backend.name(), current, peak, allocs);
                    },
                    BatchSize::SmallInput
                );
            }
        );
    }

    group.finish();
}

/// Profile memory usage for bulk insert operations
fn profile_memory_bulk_insert(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_profile/bulk_insert");

    for size in [100, 1_000] {
        for backend_type in BackendType::all() {
            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend_type.name()), size),
                &(backend_type, size),
                |b, &(backend_type, size)| {
                    b.iter_batched(
                        || {
                            reset_memory_tracking();
                            let backend = BenchmarkBackend::new(backend_type).unwrap();
                            let mut generator = DataGenerator::new(42);
                            let data = generator.generate_string_pairs(size, 100);
                            (backend, data)
                        },
                        |(backend, data)| {
                            for (key, value) in data {
                                backend.storage.set(key, value).unwrap();
                            }
                            let (current, peak, allocs) = get_memory_stats();
                            println!("{} (n={}): current={}, peak={}, allocs={}, bytes_per_record={}",
                                     backend.name(), size, current, peak, allocs, peak / size);
                        },
                        BatchSize::SmallInput
                    );
                }
            );
        }
    }

    group.finish();
}

/// Profile memory usage for hash operations
fn profile_memory_hash_ops(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_profile/hash_ops");

    for backend_type in BackendType::all() {
        group.bench_with_input(
            BenchmarkId::new(backend_type.name(), "hset"),
            &backend_type,
            |b, &backend_type| {
                b.iter_batched(
                    || {
                        reset_memory_tracking();
                        let backend = BenchmarkBackend::new(backend_type).unwrap();
                        let mut generator = DataGenerator::new(42);
                        let key = generator.generate_key("bench:hash", 0);
                        let fields = vec![
                            ("field1".to_string(), generator.generate_value(100)),
                            ("field2".to_string(), generator.generate_value(100)),
                            ("field3".to_string(), generator.generate_value(100)),
                        ];
                        (backend, key, fields)
                    },
                    |(backend, key, fields)| {
                        backend.storage.hset(&key, fields).unwrap();
                        let (current, peak, allocs) = get_memory_stats();
                        println!("{}: current={}, peak={}, allocs={}",
                                 backend.name(), current, peak, allocs);
                    },
                    BatchSize::SmallInput
                );
            }
        );
    }

    group.finish();
}

/// Profile memory usage for list operations
fn profile_memory_list_ops(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_profile/list_ops");

    for backend_type in BackendType::all() {
        group.bench_with_input(
            BenchmarkId::new(backend_type.name(), "rpush"),
            &backend_type,
            |b, &backend_type| {
                b.iter_batched(
                    || {
                        reset_memory_tracking();
                        let backend = BenchmarkBackend::new(backend_type).unwrap();
                        let mut generator = DataGenerator::new(42);
                        let key = generator.generate_key("bench:list", 0);
                        let elements = vec![
                            generator.generate_value(100),
                            generator.generate_value(100),
                            generator.generate_value(100),
                        ];
                        (backend, key, elements)
                    },
                    |(backend, key, elements)| {
                        backend.storage.rpush(&key, elements).unwrap();
                        let (current, peak, allocs) = get_memory_stats();
                        println!("{}: current={}, peak={}, allocs={}",
                                 backend.name(), current, peak, allocs);
                    },
                    BatchSize::SmallInput
                );
            }
        );
    }

    group.finish();
}

/// Profile memory usage for scan operations
fn profile_memory_scan(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_profile/scan");

    for size in [1_000, 10_000] {
        for backend_type in BackendType::all() {
            let backend = setup_populated_backend(backend_type, size, 100)
                .expect("Failed to setup backend");

            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend.name()), size),
                &backend,
                |b, backend| {
                    b.iter(|| {
                        reset_memory_tracking();
                        let mut cursor = 0u64;
                        let mut total = 0;
                        loop {
                            let (next_cursor, items) = backend.storage
                                .scan(cursor, None, Some(100))
                                .unwrap();
                            total += items.len();
                            if next_cursor == 0 {
                                break;
                            }
                            cursor = next_cursor;
                        }
                        let (current, peak, allocs) = get_memory_stats();
                        println!("{} (n={}): scanned={}, current={}, peak={}, allocs={}",
                                 backend.name(), size, total, current, peak, allocs);
                        total
                    });
                }
            );
        }
    }

    group.finish();
}

/// Profile memory efficiency (bytes per record stored)
fn profile_memory_efficiency(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_profile/efficiency");

    for size in [1_000, 10_000] {
        for backend_type in BackendType::all() {
            group.bench_with_input(
                BenchmarkId::new(format!("{}/size", backend_type.name()), size),
                &(backend_type, size),
                |b, &(backend_type, size)| {
                    b.iter_batched(
                        || {
                            reset_memory_tracking();
                            let backend = BenchmarkBackend::new(backend_type).unwrap();
                            let mut generator = DataGenerator::new(42);
                            let data = generator.generate_string_pairs(size, 100);
                            (backend, data)
                        },
                        |(backend, data)| {
                            let data_size: usize = data.iter()
                                .map(|(k, v)| k.len() + v.len())
                                .sum();

                            for (key, value) in data {
                                backend.storage.set(key, value).unwrap();
                            }

                            // Current live bytes are not reported here; only peak matters
                            // for the overhead calculation.
                            let (_current, peak, allocs) = get_memory_stats();
                            let overhead_pct = ((peak as f64 - data_size as f64) / data_size as f64) * 100.0;

                            println!("{} (n={}): data_size={}, peak={}, overhead={:.1}%, bytes_per_record={}, allocs={}",
                                     backend.name(), size, data_size, peak, overhead_pct,
                                     peak / size, allocs);
                        },
                        BatchSize::SmallInput
                    );
                }
            );
        }
    }

    group.finish();
}

criterion_group!(
    benches,
    profile_memory_set,
    profile_memory_get,
    profile_memory_bulk_insert,
    profile_memory_hash_ops,
    profile_memory_list_ops,
    profile_memory_scan,
    profile_memory_efficiency,
);

criterion_main!(benches);
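
// Running these benches: `cargo bench --bench memory_profile`, which assumes the
// usual Criterion setup (a `[[bench]]` entry named "memory_profile" with
// `harness = false` in Cargo.toml); that configuration is not shown here.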