@@ -1,5 +1,5 @@
 use core::str;
-use std::path::PathBuf;
+use std::collections::HashMap;
 use std::sync::Arc;
 use tokio::io::AsyncReadExt;
 use tokio::io::AsyncWriteExt;
@@ -12,34 +12,56 @@ use crate::storage::Storage;
 
 #[derive(Clone)]
 pub struct Server {
-    pub storages: Vec<Arc<Storage>>,
+    pub db_cache: std::sync::Arc<std::sync::RwLock<HashMap<u64, Arc<Storage>>>>,
     pub option: options::DBOption,
     pub client_name: Option<String>,
-    pub selected_db: usize, // per-connection
+    pub selected_db: u64, // per-connection; changed from usize to u64
 }
 
 impl Server {
     pub async fn new(option: options::DBOption) -> Self {
-        // Eagerly create N db files: <dir>/<index>.db
-        let mut storages = Vec::with_capacity(option.databases as usize);
-        for i in 0..option.databases {
-            let db_file_path = PathBuf::from(option.dir.clone()).join(format!("{}.db", i));
-            println!("will open db file path (db {}): {}", i, db_file_path.display());
-            let storage = Storage::new(db_file_path).expect("Failed to initialize storage");
-            storages.push(Arc::new(storage));
-        }
-
         Server {
-            storages,
+            db_cache: Arc::new(std::sync::RwLock::new(HashMap::new())),
             option,
             client_name: None,
             selected_db: 0,
         }
     }
 
-    #[inline]
-    pub fn current_storage(&self) -> &Storage {
-        self.storages[self.selected_db].as_ref()
+    pub fn current_storage(&self) -> Result<Arc<Storage>, DBError> {
+        let mut cache = self.db_cache.write().unwrap();
+
+        if let Some(storage) = cache.get(&self.selected_db) {
+            return Ok(storage.clone());
+        }
+
+        // Check the database limit, if one is configured
+        if let Some(max_db) = self.option.max_databases {
+            if self.selected_db >= max_db {
+                return Err(DBError(format!("DB index {} is out of range (max: {})", self.selected_db, max_db - 1)));
+            }
+        }
+
+        // Not cached yet: lazily create/open the database file <dir>/<index>.db
+        let db_file_path = std::path::PathBuf::from(self.option.dir.clone())
+            .join(format!("{}.db", self.selected_db));
+
+        println!("Creating new db file: {}", db_file_path.display());
+
+        let storage = Arc::new(Storage::new(
+            db_file_path,
+            self.should_encrypt_db(self.selected_db),
+            self.option.encryption_key.as_deref(),
+        )?);
+
+        cache.insert(self.selected_db, storage.clone());
+        Ok(storage)
     }
 
+    fn should_encrypt_db(&self, db_index: u64) -> bool {
+        // Placeholder policy for deciding which databases are encrypted:
+        // even-numbered databases, and only when an encryption key is provided.
+        self.option.encryption_key.is_some() && db_index % 2 == 0
+    }
+
     pub async fn handle(
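The lazy current_storage above leans on definitions that live elsewhere in the crate and are not part of this diff: the options::DBOption fields it reads (dir, databases, max_databases, encryption_key) and the DBError type it returns. A minimal sketch of the shapes this hunk appears to assume; the field and error types are inferred from usage here and may not match the real options module:

```rust
// Sketch only: inferred from how this diff uses these types, not copied from
// the repository's actual options / error definitions.
pub struct DBOption {
    pub dir: String,                    // directory holding the <index>.db files
                                        // (a PathBuf would also satisfy PathBuf::from(dir.clone()))
    pub databases: u64,                 // DB count used by the old eager creation loop
    pub max_databases: Option<u64>,     // optional hard cap on the selectable DB index
    pub encryption_key: Option<String>, // consulted by should_encrypt_db / Storage::new
}

// Matches the DBError(format!(...)) construction above: a tuple struct
// wrapping a message string.
pub struct DBError(pub String);
```

Note that the db_cache write guard is held across Storage::new, so the first access to a given index opens the file while other connections resolving their database wait; that serializes creation and prevents two connections from racing to create the same <index>.db.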
@@ -104,6 +126,5 @@ impl Server {
-                }
             }
         }
         Ok(())
     }
 }
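Because current_storage now returns Result<Arc<Storage>, DBError> instead of a borrowed &Storage, call sites inside handle (for example a SELECT-style command) have to take the Arc and propagate the error to the client. A small illustrative caller, not part of this commit; the select_db helper is a made-up name:

```rust
// Illustrative only: how a per-connection handler might switch databases once
// this change lands. `select_db` is a hypothetical helper, not repository code.
fn select_db(server: &mut Server, index: u64) -> Result<(), DBError> {
    server.selected_db = index;               // per-connection selection state
    let _storage = server.current_storage()?; // lazily opens and caches <dir>/<index>.db
    Ok(())
}
```

An out-of-range index (when max_databases is set) now surfaces as a DBError rather than panicking on a Vec index, which is the main behavioural difference from the old storages[self.selected_db] lookup.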