3 Commits

Author SHA1 Message Date
ee163bb6bf ... 2025-08-16 15:16:15 +02:00
84611dd245 ... 2025-08-16 15:10:55 +02:00
200d0c928d ... 2025-08-16 14:22:56 +02:00
52 changed files with 1057 additions and 3420 deletions

73
Cargo.lock generated
View File

@@ -221,12 +221,6 @@ dependencies = [
"generic-array",
]
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.10.1"
@@ -661,28 +655,33 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "herocrypto"
version = "0.1.0"
dependencies = [
"libcrypto",
"libcryptoa",
"redis",
"thiserror",
]
[[package]]
name = "herodb"
version = "0.0.1"
version = "0.1.0"
dependencies = [
"age",
"anyhow",
"base64 0.22.1",
"bincode",
"byteorder",
"bytes",
"chacha20poly1305",
"clap",
"ed25519-dalek",
"futures",
"libcryptoa",
"libdbstorage",
"log",
"rand",
"redb",
"redis",
"secrecy",
"serde",
"serde_json",
"sha2",
"thiserror",
"tokio",
]
@@ -949,6 +948,41 @@ version = "0.2.175"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
[[package]]
name = "libcrypto"
version = "0.1.0"
dependencies = [
"chacha20poly1305",
"rand",
"sha2",
"thiserror",
]
[[package]]
name = "libcryptoa"
version = "0.1.0"
dependencies = [
"age",
"base64 0.22.1",
"ed25519-dalek",
"rand",
"secrecy",
"thiserror",
]
[[package]]
name = "libdbstorage"
version = "0.1.0"
dependencies = [
"bincode",
"libcrypto",
"redb",
"serde",
"serde_json",
"thiserror",
"tokio",
]
[[package]]
name = "litemap"
version = "0.8.0"
@@ -1530,6 +1564,15 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
name = "supervisor"
version = "0.1.0"
[[package]]
name = "supervisorrpc"
version = "0.1.0"
dependencies = [
"herocrypto",
"redis",
"tokio",
]
[[package]]
name = "syn"
version = "1.0.109"

View File

@@ -1,12 +1,38 @@
[workspace]
members = [
"herodb",
"supervisor",
]
resolver = "2"
members = [
"crates/herodb",
"crates/libdbstorage",
"crates/libcrypto",
"crates/libcryptoa",
"crates/herocrypto",
"crates/supervisor",
"crates/supervisorrpc",
]
# You can define shared profiles for all workspace members here
[profile.release]
lto = true
codegen-units = 1
strip = true
[workspace.dependencies]
# Common
anyhow = "1.0"
tokio = { version = "1", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "1.0"
log = "0.4"
bytes = "1.3"
# Crypto - Asymmetric
age = "0.10"
secrecy = "0.8"
ed25519-dalek = "2"
base64 = "0.22"
# Crypto - Symmetric & Utilities
chacha20poly1305 = "0.10"
rand = "0.8"
sha2 = "0.10"
# Database
redb = "2.1"
# CLI
clap = { version = "4.5", features = ["derive"] }

View File

@@ -0,0 +1,10 @@
[package]
name = "herocrypto"
version = "0.1.0"
edition = "2021"
[dependencies]
redis = { version = "0.24", features = ["tokio-comp"] }
thiserror = { workspace = true }
libcrypto = { path = "../libcrypto" }
libcryptoa = { path = "../libcryptoa" }

View File

@@ -0,0 +1,45 @@
// In crates/herocrypto/src/lib.rs
use redis::{Commands, RedisResult};
use thiserror::Error;
#[derive(Error, Debug)]
pub enum Error {
#[error("Redis connection error: {0}")]
Redis(#[from] redis::RedisError),
#[error("Asymmetric crypto error: {0}")]
Asymmetric(#[from] libcryptoa::AsymmetricCryptoError),
#[error("Key not found in database: {0}")]
KeyNotFound(String),
#[error("Command failed on server: {0}")]
CommandError(String),
}
pub struct HeroCrypto {
// e.g., using a connection manager from redis-rs
client: redis::Client,
}
impl HeroCrypto {
pub fn new(redis_url: &str) -> Result<Self, Error> {
Ok(Self { client: redis::Client::open(redis_url)? })
}
// --- High-level functions to be implemented ---
/// Generates a new keypair and stores it in HeroDB under the given name.
pub async fn generate_keypair(&self, name: &str) -> Result<(), Error> {
let mut con = self.client.get_async_connection().await?;
let (_pub, _priv): (String, String) = redis::cmd("AGE")
.arg("KEYGEN")
.arg(name)
.query_async(&mut con)
.await?;
Ok(())
}
/// Encrypts a message using a key stored in HeroDB.
pub async fn encrypt_by_name(&self, key_name: &str, plaintext: &str) -> Result<String, Error> {
// Implementation will call 'AGE ENCRYPTNAME ...'
unimplemented!()
}
}
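As an illustration only (not part of this commit), `encrypt_by_name` could be completed inside `impl HeroCrypto` along these lines, assuming the server accepts `AGE ENCRYPTNAME <key_name> <plaintext>` (as hinted by the comment above) and replies with the base64 ciphertext as a bulk string:

```rust
    /// Sketch only: encrypt a message with a key stored in HeroDB under `key_name`.
    /// Assumes the server exposes `AGE ENCRYPTNAME <key_name> <plaintext>` and
    /// returns the base64-encoded ciphertext as a bulk string.
    pub async fn encrypt_by_name(&self, key_name: &str, plaintext: &str) -> Result<String, Error> {
        let mut con = self.client.get_async_connection().await?;
        let ciphertext_b64: String = redis::cmd("AGE")
            .arg("ENCRYPTNAME")
            .arg(key_name)
            .arg(plaintext)
            .query_async(&mut con)
            .await?;
        Ok(ciphertext_b64)
    }
```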

31
crates/herodb/Cargo.toml Normal file
View File

@@ -0,0 +1,31 @@
[package]
name = "herodb"
version = "0.1.0"
edition = "2021"
authors = ["Pin Fang <fpfangpin@hotmail.com>"]
[[bin]]
name = "herodb"
path = "src/main.rs"
[dependencies]
# Workspace dependencies
anyhow = { workspace = true }
tokio = { workspace = true }
serde = { workspace = true }
log = { workspace = true }
clap = { workspace = true }
bytes = { workspace = true }
base64 = { workspace = true }
age = { workspace = true }
secrecy = { workspace = true }
ed25519-dalek = { workspace = true }
rand = { workspace = true }
# Local Crate Dependencies
libdbstorage = { path = "../libdbstorage" }
# We will create these libraries in the next steps
libcryptoa = { path = "../libcryptoa" }
[dev-dependencies]
redis = { version = "0.24", features = ["aio", "tokio-comp"] }

267
crates/herodb/README.md Normal file
View File

@@ -0,0 +1,267 @@
# Build Your Own Redis in Rust
This project builds a toy Redis server clone capable of parsing the Redis protocol, handling basic Redis commands, parsing and initializing state from an RDB file, and supporting leader-follower replication, Redis streams (queues), and batched commands in transactions.
You can find all the source code and commit history in [my github repo](https://github.com/fangpin/redis-rs).
## Main features
+ Parse Redis protocol
+ Handle basic Redis commands
+ Parse and initialize Redis from RDB file
+ Leader-follower Replication
## Prerequisites
Install `redis-cli` first (a Redis client implementation, used here for testing):
```sh
cargo install mini-redis
```
Learn about:
- [Redis protocol](https://redis.io/docs/latest/develop/reference/protocol-spec)
- [RDB file format](https://rdb.fnordig.de/file_format.html)
- [Redis replication](https://redis.io/docs/management/replication/)
## Start the Redis-rs server
```sh
# start as master
cargo run -- --dir /some/db/path --dbfilename dump.rdb
# start as slave
cargo run -- --dir /some/db/path --dbfilename dump.rdb --port 6380 --replicaof "localhost 6379"
```
## Supported Commands
```sh
# basic commands
redis-cli PING
redis-cli ECHO hey
redis-cli SET foo bar
redis-cli SET foo bar px/ex 100
redis-cli GET foo
redis-cli SET foo 2
redis-cli INCR foo
redis-cli INCR missing_key
redis-cli TYPE some_key
redis-cli KEYS "*"
# leader-follower replication related commands
redis-cli CONFIG GET dbfilename
redis-cli INFO replication
# streams related commands
redis-cli XADD stream_key 1526919030474-0 temperature 36 humidity 95
redis-cli XADD stream_key 1526919030474-* temperature 37 humidity 94
redis-cli XADD stream_key "*" foo bar
## read stream
redis-cli XRANGE stream_key 0-2 0-3
## query with + -
redis-cli XRANGE some_key - 1526985054079
## query single stream using xread
redis-cli XREAD streams some_key 1526985054069-0
## query multiple stream using xread
redis-cli XREAD streams stream_key other_stream_key 0-0 0-1
## blocking reads without timeout
redis-cli XREAD block 0 streams some_key 1526985054069-0
# transactions related commands
## start a transaction and exec all queued commands in a transaction
redis-cli
> MULTI
> set foo 1
> incr foo
> exec
## start a transaction and queued commands and cancel transaction then
redis-cli
> MULTI
> set foo 1
> incr foo
> discard
```
## RDB Persistence
Get Redis-rs server config
```sh
redis-cli CONFIG GET dbfilename
```
### RDB file format overview
Here are the different sections of the [RDB file](https://rdb.fnordig.de/file_format.html), in order:
+ Header section
+ Metadata section
+ Database section
+ End of file section
#### Header section
Starts with a magic string plus version number:
```sh
52 45 44 49 53 30 30 31 31 // Magic string + version number (ASCII): "REDIS0011".
```
#### Metadata section
contains zero or more "metadata subsections", which each specify a single metadata attribute
e.g.
```sh
FA // Indicates the start of a metadata subsection.
09 72 65 64 69 73 2D 76 65 72 // The name of the metadata attribute (string encoded): "redis-ver".
06 36 2E 30 2E 31 36 // The value of the metadata attribute (string encoded): "6.0.16".
```
#### Database section
contains zero or more "database subsections," which each describe a single database.
e.g.
```sh
FE // Indicates the start of a database subsection.
00 /* The index of the database (size encoded). Here, the index is 0. */
FB // Indicates that hash table size information follows.
03 /* The size of the hash table that stores the keys and values (size encoded). Here, the total key-value hash table size is 3. */
02 /* The size of the hash table that stores the expires of the keys (size encoded). Here, the number of keys with an expiry is 2. */
```
```sh
00 /* The 1-byte flag that specifies the values type and encoding. Here, the flag is 0, which means "string." */
06 66 6F 6F 62 61 72 // The name of the key (string encoded). Here, it's "foobar".
06 62 61 7A 71 75 78 // The value (string encoded). Here, it's "bazqux".
```
```sh
FC /* Indicates that this key ("foo") has an expire, and that the expire timestamp is expressed in milliseconds. */
15 72 E7 07 8F 01 00 00 /* The expire timestamp, expressed in Unix time, stored as an 8-byte unsigned long, in little-endian (read right-to-left). Here, the expire timestamp is 1713824559637. */
00 // Value type is string.
03 66 6F 6F // Key name is "foo".
03 62 61 72 // Value is "bar".
```
```sh
FD /* Indicates that this key ("baz") has an expire, and that the expire timestamp is expressed in seconds. */
52 ED 2A 66 /* The expire timestamp, expressed in Unix time, stored as an 4-byte unsigned integer, in little-endian (read right-to-left). Here, the expire timestamp is 1714089298. */
00 // Value type is string.
03 62 61 7A // Key name is "baz".
03 71 75 78 // Value is "qux".
```
In summary (a parsing sketch follows the list below):
- Optional expire information (one of the following):
- Timestamp in seconds:
- FD
- Expire timestamp in seconds (4-byte unsigned integer)
- Timestamp in milliseconds:
- FC
- Expire timestamp in milliseconds (8-byte unsigned long)
- Value type (1-byte flag)
- Key (string encoded)
- Value (encoding depends on value type)
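As a rough sketch (not part of this project's code), parsing the optional expire prefix described in the list above could look like this:

```rust
// Sketch: decode the optional expire prefix of an RDB key-value entry.
// 0xFD = expire timestamp in seconds (4-byte little-endian),
// 0xFC = expire timestamp in milliseconds (8-byte little-endian).
fn read_expire(buf: &[u8]) -> (Option<u64>, &[u8]) {
    match buf.first() {
        Some(0xFD) => {
            let secs = u32::from_le_bytes(buf[1..5].try_into().unwrap());
            (Some(secs as u64 * 1000), &buf[5..]) // normalized to milliseconds
        }
        Some(0xFC) => {
            let ms = u64::from_le_bytes(buf[1..9].try_into().unwrap());
            (Some(ms), &buf[9..])
        }
        // No expire prefix; the next byte is the value-type flag.
        _ => (None, buf),
    }
}
```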
#### End of file section
```sh
FF /* Indicates that the file is ending, and that the checksum follows. */
89 3b b7 4e f8 0f 77 19 // An 8-byte CRC64 checksum of the entire file.
```
#### Size encoding
```sh
/* If the first two bits are 0b00:
The size is the remaining 6 bits of the byte.
In this example, the size is 10: */
0A
00001010
/* If the first two bits are 0b01:
The size is the next 14 bits
(remaining 6 bits in the first byte, combined with the next byte),
in big-endian (read left-to-right).
In this example, the size is 700: */
42 BC
01000010 10111100
/* If the first two bits are 0b10:
Ignore the remaining 6 bits of the first byte.
The size is the next 4 bytes, in big-endian (read left-to-right).
In this example, the size is 17000: */
80 00 00 42 68
10000000 00000000 00000000 01000010 01101000
/* If the first two bits are 0b11:
The remaining 6 bits specify a type of string encoding.
See string encoding section. */
```
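A minimal decoding sketch for these size prefixes (illustrative only, not the project's parser):

```rust
// Sketch: decode an RDB size prefix from the top two bits of the first byte.
enum RdbSize {
    Len(u64),           // a plain length
    StringEncoding(u8), // 0b11 prefix: the low 6 bits select a string encoding
}

fn read_size(buf: &[u8]) -> (RdbSize, &[u8]) {
    let b0 = buf[0];
    match b0 >> 6 {
        0b00 => (RdbSize::Len((b0 & 0x3F) as u64), &buf[1..]),
        0b01 => {
            // 14-bit length: low 6 bits of the first byte plus the next byte, big-endian.
            let len = (((b0 & 0x3F) as u64) << 8) | buf[1] as u64;
            (RdbSize::Len(len), &buf[2..])
        }
        0b10 => {
            // 32-bit length stored in the next 4 bytes, big-endian.
            let len = u32::from_be_bytes(buf[1..5].try_into().unwrap()) as u64;
            (RdbSize::Len(len), &buf[5..])
        }
        _ => (RdbSize::StringEncoding(b0 & 0x3F), &buf[1..]),
    }
}
```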
#### String encoding
+ The size of the string (size encoded).
+ The string.
```sh
/* The 0x0D size specifies that the string is 13 characters long. The remaining characters spell out "Hello, World!". */
0D 48 65 6C 6C 6F 2C 20 57 6F 72 6C 64 21
```
For sizes that begin with 0b11, the remaining 6 bits indicate a type of string format:
```sh
/* The 0xC0 size indicates the string is an 8-bit integer. In this example, the string is "123". */
C0 7B
/* The 0xC1 size indicates the string is a 16-bit integer. The remaining bytes are in little-endian (read right-to-left). In this example, the string is "12345". */
C1 39 30
/* The 0xC2 size indicates the string is a 32-bit integer. The remaining bytes are in little-endian (read right-to-left), In this example, the string is "1234567". */
C2 87 D6 12 00
/* The 0xC3 size indicates that the string is compressed with the LZF algorithm. You will not encounter LZF-compressed strings in this challenge. */
C3 ...
```
## Replication
This server implements Redis [leader-follower replication](https://redis.io/docs/management/replication/).
Run multiple Redis servers with one acting as the "master" and the others as "replicas"; changes made to the master are automatically replicated to the replicas.
### Send Handshake (follower -> master)
1. When the follower starts, it sends a PING command to the master as a RESP array.
2. The follower then sends two REPLCONF (replication config) commands to the master to communicate its port and sync capability: *REPLCONF listening-port <PORT>* and *REPLCONF capa psync2*. psync2 is the sync protocol supported in this project.
3. The follower sends the *PSYNC* command to the master with a replication ID and offset to start the replication process.
### Receive Handshake (master -> follower)
1. Respond with a PONG message to the follower.
2. Respond with an OK message to the follower for each of the two REPLCONF commands.
3. Respond with *+FULLRESYNC <REPL_ID> 0\r\n*, carrying the replication ID and offset (an example exchange is shown below).
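For example, the resulting exchange looks roughly like this (framing abbreviated; the actual traffic is RESP-encoded):

```sh
# follower -> master              master -> follower
PING                              # +PONG
REPLCONF listening-port 6380      # +OK
REPLCONF capa psync2              # +OK
PSYNC ? -1                        # +FULLRESYNC <REPL_ID> 0
                                  # ...followed by the RDB file contents
```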
### RDB file transfer
When the follower starts, it sends a *PSYNC ? -1* command to tell the master that it doesn't have any data yet and needs a full resync.
The master then sends a *FULLRESYNC* response to the follower as an acknowledgement.
Finally, the master sends an RDB file representing its current state to the follower. The follower loads the received RDB file into memory, replacing its current state.
### Receive write commands (master -> follower)
The master then forwards subsequent write commands to the follower along with offset information.
These are sent over the same TCP connection used for the handshake and RDB file transfer.
Since all commands are encoded as RESP arrays, just like normal client commands, the follower can reuse the same logic to handle the replicated commands from the master. The only difference is that the commands come from the master and no response is sent back.
## Streams
A stream is identified by a key, and it contains multiple entries.
Each entry consists of one or more key-value pairs, and is assigned a unique ID.
[More about redis streams](https://redis.io/docs/latest/develop/data-types/streams/)
[Radix tree](https://en.wikipedia.org/wiki/Radix_tree)
A stream looks like a list of entries, each holding key-value pairs:
```sh
entries:
- id: 1526985054069-0 # (ID of the first entry)
temperature: 36 # (A key value pair in the first entry)
humidity: 95 # (Another key value pair in the first entry)
- id: 1526985054079-0 # (ID of the second entry)
temperature: 37 # (A key value pair in the second entry)
humidity: 94 # (Another key value pair in the second entry)
# ... (and so on)
```
Examples of Redis stream use cases include:
- Event sourcing (e.g., tracking user actions, clicks, etc.)
- Sensor monitoring (e.g., readings from devices in the field)
- Notifications (e.g., storing a record of each user's notifications in a separate stream)
## Transaction
When the *MULTI* command is called on a connection, Redis simply queues all following commands until *EXEC* or *DISCARD* is called.
*EXEC* executes all queued commands and returns an array of all execution results, while *DISCARD* simply clears the queued commands.
Transactions on different client connections are independent.
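For illustration, a transaction session follows standard Redis semantics (the exact reply formatting of this toy server may differ):

```sh
redis-cli
> MULTI
OK
> SET foo 1
QUEUED
> INCR foo
QUEUED
> EXEC
1) OK
2) (integer) 2
```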

View File

@@ -10,146 +10,164 @@
// age:signpriv:{name} -> Ed25519 signing secret key (private, used to sign)
// - Base64 wrapping for ciphertext/signature binary blobs.
use std::str::FromStr;
use secrecy::ExposeSecret;
use age::{Decryptor, Encryptor};
use age::x25519;
use ed25519_dalek::{Signature, Signer, Verifier, SigningKey, VerifyingKey};
use base64::{engine::general_purpose::STANDARD as B64, Engine as _};
use crate::protocol::Protocol;
use crate::server::Server;
use crate::error::DBError;
use libdbstorage::DBError;
use libcryptoa::AsymmetricCryptoError;
// ---------- Internal helpers ----------
// ---------- Storage helpers ----------
#[derive(Debug)]
pub enum AgeWireError {
ParseKey,
Crypto(String),
Utf8,
SignatureLen,
NotFound(&'static str), // which kind of key was missing
Storage(String),
fn sget(server: &Server, key: &str) -> Result<Option<String>, DBError> {
let st = server.current_storage()?;
st.get(key)
}
fn sset(server: &Server, key: &str, val: &str) -> Result<(), DBError> {
let st = server.current_storage()?;
st.set(key.to_string(), val.to_string())
}
impl AgeWireError {
fn to_protocol(self) -> Protocol {
match self {
AgeWireError::ParseKey => Protocol::err("ERR age: invalid key"),
AgeWireError::Crypto(e) => Protocol::err(&format!("ERR age: {e}")),
AgeWireError::Utf8 => Protocol::err("ERR age: invalid UTF-8 plaintext"),
AgeWireError::SignatureLen => Protocol::err("ERR age: bad signature length"),
AgeWireError::NotFound(w) => Protocol::err(&format!("ERR age: missing {w}")),
AgeWireError::Storage(e) => Protocol::err(&format!("ERR storage: {e}")),
}
fn enc_pub_key_key(name: &str) -> String { format!("age:key:{name}") }
fn enc_priv_key_key(name: &str) -> String { format!("age:privkey:{name}") }
fn sign_pub_key_key(name: &str) -> String { format!("age:signpub:{name}") }
fn sign_priv_key_key(name: &str) -> String { format!("age:signpriv:{name}") }
// ---------- Command handlers (RESP Protocol) ----------
// Basic (stateless) ones kept for completeness
pub async fn cmd_age_genenc() -> Protocol {
let (recip, ident) = libcryptoa::gen_enc_keypair();
Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)])
}
pub async fn cmd_age_gensign() -> Protocol {
let (verify, secret) = libcryptoa::gen_sign_keypair();
Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)])
}
pub async fn cmd_age_encrypt(recipient: &str, message: &str) -> Protocol {
match libcryptoa::encrypt_b64(recipient, message) {
Ok(b64) => Protocol::BulkString(b64),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
}
fn parse_recipient(s: &str) -> Result<x25519::Recipient, AgeWireError> {
x25519::Recipient::from_str(s).map_err(|_| AgeWireError::ParseKey)
}
fn parse_identity(s: &str) -> Result<x25519::Identity, AgeWireError> {
x25519::Identity::from_str(s).map_err(|_| AgeWireError::ParseKey)
}
fn parse_ed25519_signing_key(s: &str) -> Result<SigningKey, AgeWireError> {
// Parse base64-encoded signing key
let bytes = B64.decode(s).map_err(|_| AgeWireError::ParseKey)?;
if bytes.len() != 32 {
return Err(AgeWireError::ParseKey);
pub async fn cmd_age_decrypt(identity: &str, ct_b64: &str) -> Protocol {
match libcryptoa::decrypt_b64(identity, ct_b64) {
Ok(pt) => Protocol::BulkString(pt),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AgeWireError::ParseKey)?;
Ok(SigningKey::from_bytes(&key_bytes))
}
fn parse_ed25519_verifying_key(s: &str) -> Result<VerifyingKey, AgeWireError> {
// Parse base64-encoded verifying key
let bytes = B64.decode(s).map_err(|_| AgeWireError::ParseKey)?;
if bytes.len() != 32 {
return Err(AgeWireError::ParseKey);
pub async fn cmd_age_sign(secret: &str, message: &str) -> Protocol {
match libcryptoa::sign_b64(secret, message) {
Ok(b64sig) => Protocol::BulkString(b64sig),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AgeWireError::ParseKey)?;
VerifyingKey::from_bytes(&key_bytes).map_err(|_| AgeWireError::ParseKey)
}
// ---------- Stateless crypto helpers (string in/out) ----------
pub fn gen_enc_keypair() -> (String, String) {
let id = x25519::Identity::generate();
let pk = id.to_public();
(pk.to_string(), id.to_string().expose_secret().to_string()) // (recipient, identity)
}
pub fn gen_sign_keypair() -> (String, String) {
use rand::RngCore;
use rand::rngs::OsRng;
// Generate random 32 bytes for the signing key
let mut secret_bytes = [0u8; 32];
OsRng.fill_bytes(&mut secret_bytes);
let signing_key = SigningKey::from_bytes(&secret_bytes);
let verifying_key = signing_key.verifying_key();
// Encode as base64 for storage
let signing_key_b64 = B64.encode(signing_key.to_bytes());
let verifying_key_b64 = B64.encode(verifying_key.to_bytes());
(verifying_key_b64, signing_key_b64) // (verify_pub, signing_secret)
}
/// Encrypt `msg` for `recipient_str` (X25519). Returns base64(ciphertext).
pub fn encrypt_b64(recipient_str: &str, msg: &str) -> Result<String, AgeWireError> {
let recipient = parse_recipient(recipient_str)?;
let enc = Encryptor::with_recipients(vec![Box::new(recipient)])
.expect("failed to create encryptor"); // Handle Option<Encryptor>
let mut out = Vec::new();
{
use std::io::Write;
let mut w = enc.wrap_output(&mut out).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
w.write_all(msg.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
w.finish().map_err(|e| AgeWireError::Crypto(e.to_string()))?;
pub async fn cmd_age_verify(verify_pub: &str, message: &str, sig_b64: &str) -> Protocol {
match libcryptoa::verify_b64(verify_pub, message, sig_b64) {
Ok(true) => Protocol::SimpleString("1".to_string()),
Ok(false) => Protocol::SimpleString("0".to_string()),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
Ok(B64.encode(out))
}
/// Decrypt base64(ciphertext) with `identity_str`. Returns plaintext String.
pub fn decrypt_b64(identity_str: &str, ct_b64: &str) -> Result<String, AgeWireError> {
let id = parse_identity(identity_str)?;
let ct = B64.decode(ct_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
let dec = Decryptor::new(&ct[..]).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
// The decrypt method returns a Result<StreamReader, DecryptError>
let mut r = match dec {
Decryptor::Recipients(d) => d.decrypt(std::iter::once(&id as &dyn age::Identity))
.map_err(|e| AgeWireError::Crypto(e.to_string()))?,
Decryptor::Passphrase(_) => return Err(AgeWireError::Crypto("Expected recipients, got passphrase".to_string())),
// ---------- NEW: Persistent, named-key commands ----------
pub async fn cmd_age_keygen(server: &Server, name: &str) -> Protocol {
let (recip, ident) = libcryptoa::gen_enc_keypair();
if let Err(e) = sset(server, &enc_pub_key_key(name), &recip) { return Protocol::err(&e.0); }
if let Err(e) = sset(server, &enc_priv_key_key(name), &ident) { return Protocol::err(&e.0); }
Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)])
}
pub async fn cmd_age_signkeygen(server: &Server, name: &str) -> Protocol {
let (verify, secret) = libcryptoa::gen_sign_keypair();
if let Err(e) = sset(server, &sign_pub_key_key(name), &verify) { return Protocol::err(&e.0); }
if let Err(e) = sset(server, &sign_priv_key_key(name), &secret) { return Protocol::err(&e.0); }
Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)])
}
pub async fn cmd_age_encrypt_name(server: &Server, name: &str, message: &str) -> Protocol {
let recip = match sget(server, &enc_pub_key_key(name)) {
Ok(Some(v)) => v,
Ok(None) => return Protocol::err(&format!("ERR age: missing recipient (age:key:{name})")),
Err(e) => return Protocol::err(&e.0),
};
let mut pt = Vec::new();
use std::io::Read;
r.read_to_end(&mut pt).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
String::from_utf8(pt).map_err(|_| AgeWireError::Utf8)
}
/// Sign bytes of `msg` (detached). Returns base64(signature bytes, 64 bytes).
pub fn sign_b64(signing_secret_str: &str, msg: &str) -> Result<String, AgeWireError> {
let signing_key = parse_ed25519_signing_key(signing_secret_str)?;
let sig = signing_key.sign(msg.as_bytes());
Ok(B64.encode(sig.to_bytes()))
}
/// Verify detached signature (base64) for `msg` with pubkey.
pub fn verify_b64(verify_pub_str: &str, msg: &str, sig_b64: &str) -> Result<bool, AgeWireError> {
let verifying_key = parse_ed25519_verifying_key(verify_pub_str)?;
let sig_bytes = B64.decode(sig_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
if sig_bytes.len() != 64 {
return Err(AgeWireError::SignatureLen);
match libcryptoa::encrypt_b64(&recip, message) {
Ok(ct) => Protocol::BulkString(ct),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
let sig = Signature::from_bytes(sig_bytes[..].try_into().unwrap());
Ok(verifying_key.verify(msg.as_bytes(), &sig).is_ok())
}
pub async fn cmd_age_decrypt_name(server: &Server, name: &str, ct_b64: &str) -> Protocol {
let ident = match sget(server, &enc_priv_key_key(name)) {
Ok(Some(v)) => v,
Ok(None) => return Protocol::err(&format!("ERR age: missing identity (age:privkey:{name})")),
Err(e) => return Protocol::err(&e.0),
};
match libcryptoa::decrypt_b64(&ident, ct_b64) {
Ok(pt) => Protocol::BulkString(pt),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
}
pub async fn cmd_age_sign_name(server: &Server, name: &str, message: &str) -> Protocol {
let sec = match sget(server, &sign_priv_key_key(name)) {
Ok(Some(v)) => v,
Ok(None) => return Protocol::err(&format!("ERR age: missing signing secret (age:signpriv:{name})")),
Err(e) => return Protocol::err(&e.0),
};
match libcryptoa::sign_b64(&sec, message) {
Ok(sig) => Protocol::BulkString(sig),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
}
pub async fn cmd_age_verify_name(server: &Server, name: &str, message: &str, sig_b64: &str) -> Protocol {
let pubk = match sget(server, &sign_pub_key_key(name)) {
Ok(Some(v)) => v,
Ok(None) => return Protocol::err(&format!("ERR age: missing verify pubkey (age:signpub:{name})")),
Err(e) => return Protocol::err(&e.0),
};
match libcryptoa::verify_b64(&pubk, message, sig_b64) {
Ok(true) => Protocol::SimpleString("1".to_string()),
Ok(false) => Protocol::SimpleString("0".to_string()),
Err(e) => Protocol::err(&format!("ERR age: {e}")),
}
}
pub async fn cmd_age_list(server: &Server) -> Protocol {
// Returns 4 arrays: ["encpub", <names...>], ["encpriv", ...], ["signpub", ...], ["signpriv", ...]
let st = match server.current_storage() { Ok(s) => s, Err(e) => return Protocol::err(&e.0) };
let pull = |pat: &str, prefix: &str| -> Result<Vec<String>, DBError> {
let keys = st.keys(pat)?;
let mut names: Vec<String> = keys.into_iter()
.filter_map(|k| k.strip_prefix(prefix).map(|x| x.to_string()))
.collect();
names.sort();
Ok(names)
};
let encpub = match pull("age:key:*", "age:key:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
let encpriv = match pull("age:privkey:*", "age:privkey:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
let signpub = match pull("age:signpub:*", "age:signpub:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
let signpriv= match pull("age:signpriv:*", "age:signpriv:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
let to_arr = |label: &str, v: Vec<String>| {
let mut out = vec![Protocol::BulkString(label.to_string())];
out.push(Protocol::Array(v.into_iter().map(Protocol::BulkString).collect()));
Protocol::Array(out)
};
Protocol::Array(vec![
to_arr("encpub", encpub),
to_arr("encpriv", encpriv),
to_arr("signpub", signpub),
to_arr("signpriv", signpriv),
])
}
// ---------- Storage helpers ----------
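Based on the handlers above, the named-key commands could be exercised roughly as follows. `AGE KEYGEN` appears verbatim in the herocrypto sketch earlier in this diff; the other wire command names are assumptions inferred from the handler names:

```sh
# generate and store a named encryption keypair (recipient + identity)
redis-cli AGE KEYGEN alice
# encrypt/decrypt with the stored keys (command names assumed)
redis-cli AGE ENCRYPTNAME alice "hello"
redis-cli AGE DECRYPTNAME alice <base64-ciphertext>
# signing keypair, then sign/verify by name (command names assumed)
redis-cli AGE SIGNKEYGEN alice
redis-cli AGE SIGNNAME alice "hello"
redis-cli AGE VERIFYNAME alice "hello" <base64-signature>
# list stored key names
redis-cli AGE LIST
```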

View File

@@ -1,7 +1,8 @@
use crate::{error::DBError, protocol::Protocol, server::Server};
use crate::protocol::Protocol;
use crate::server::Server;
use libdbstorage::DBError;
use libcryptoa;
use serde::Serialize;
use tokio::time::{timeout, Duration};
use futures::future::select_all;
#[derive(Debug, Clone)]
pub enum Cmd {
@@ -12,12 +13,7 @@ pub enum Cmd {
Set(String, String),
SetPx(String, String, u128),
SetEx(String, String, u128),
// Advanced SET with options: (key, value, ex_ms, nx, xx, get)
SetOpts(String, String, Option<u128>, bool, bool, bool),
MGet(Vec<String>),
MSet(Vec<(String, String)>),
Keys,
DbSize,
ConfigGet(String),
Info(Option<String>),
Del(String),
@@ -37,31 +33,19 @@ pub enum Cmd {
HLen(String),
HMGet(String, Vec<String>),
HSetNx(String, String, String),
HIncrBy(String, String, i64),
HIncrByFloat(String, String, f64),
HScan(String, u64, Option<String>, Option<u64>), // key, cursor, pattern, count
Scan(u64, Option<String>, Option<u64>), // cursor, pattern, count
Ttl(String),
Expire(String, i64),
PExpire(String, i64),
ExpireAt(String, i64),
PExpireAt(String, i64),
Persist(String),
Exists(String),
ExistsMulti(Vec<String>),
DelMulti(Vec<String>),
Quit,
Client(Vec<String>),
ClientSetName(String),
ClientGetName,
Command(Vec<String>),
// List commands
LPush(String, Vec<String>),
RPush(String, Vec<String>),
LPop(String, Option<u64>),
RPop(String, Option<u64>),
BLPop(Vec<String>, f64),
BRPop(Vec<String>, f64),
LLen(String),
LRem(String, i64, String),
LTrim(String, i64, i64),
@@ -109,51 +93,14 @@ impl Cmd {
"ping" => Cmd::Ping,
"get" => Cmd::Get(cmd[1].clone()),
"set" => {
if cmd.len() < 3 {
return Err(DBError("wrong number of arguments for SET".to_string()));
}
let key = cmd[1].clone();
let val = cmd[2].clone();
// Parse optional flags: EX sec | PX ms | NX | XX | GET
let mut ex_ms: Option<u128> = None;
let mut nx = false;
let mut xx = false;
let mut getflag = false;
let mut i = 3;
while i < cmd.len() {
match cmd[i].to_lowercase().as_str() {
"ex" => {
if i + 1 >= cmd.len() {
return Err(DBError("ERR syntax error".to_string()));
}
let secs: u128 = cmd[i + 1].parse().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
ex_ms = Some(secs * 1000);
i += 2;
}
"px" => {
if i + 1 >= cmd.len() {
return Err(DBError("ERR syntax error".to_string()));
}
let ms: u128 = cmd[i + 1].parse().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
ex_ms = Some(ms);
i += 2;
}
"nx" => { nx = true; i += 1; }
"xx" => { xx = true; i += 1; }
"get" => { getflag = true; i += 1; }
_ => {
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
}
}
}
// If no options, keep legacy behavior
if ex_ms.is_none() && !nx && !xx && !getflag {
Cmd::Set(key, val)
if cmd.len() == 5 && cmd[3].to_lowercase() == "px" {
Cmd::SetPx(cmd[1].clone(), cmd[2].clone(), cmd[4].parse().unwrap())
} else if cmd.len() == 5 && cmd[3].to_lowercase() == "ex" {
Cmd::SetEx(cmd[1].clone(), cmd[2].clone(), cmd[4].parse().unwrap())
} else if cmd.len() == 3 {
Cmd::Set(cmd[1].clone(), cmd[2].clone())
} else {
Cmd::SetOpts(key, val, ex_ms, nx, xx, getflag)
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
}
}
"setex" => {
@@ -162,24 +109,6 @@ impl Cmd {
}
Cmd::SetEx(cmd[1].clone(), cmd[3].clone(), cmd[2].parse().unwrap())
}
"mget" => {
if cmd.len() < 2 {
return Err(DBError("wrong number of arguments for MGET command".to_string()));
}
Cmd::MGet(cmd[1..].to_vec())
}
"mset" => {
if cmd.len() < 3 || ((cmd.len() - 1) % 2 != 0) {
return Err(DBError("wrong number of arguments for MSET command".to_string()));
}
let mut pairs = Vec::new();
let mut i = 1;
while i + 1 < cmd.len() {
pairs.push((cmd[i].clone(), cmd[i + 1].clone()));
i += 2;
}
Cmd::MSet(pairs)
}
"config" => {
if cmd.len() != 3 || cmd[1].to_lowercase() != "get" {
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
@@ -194,12 +123,6 @@ impl Cmd {
Cmd::Keys
}
}
"dbsize" => {
if cmd.len() != 1 {
return Err(DBError(format!("wrong number of arguments for DBSIZE command")));
}
Cmd::DbSize
}
"info" => {
let section = if cmd.len() == 2 {
Some(cmd[1].clone())
@@ -209,14 +132,10 @@ impl Cmd {
Cmd::Info(section)
}
"del" => {
if cmd.len() < 2 {
return Err(DBError(format!("wrong number of arguments for DEL command")));
}
if cmd.len() == 2 {
Cmd::Del(cmd[1].clone())
} else {
Cmd::DelMulti(cmd[1..].to_vec())
if cmd.len() != 2 {
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
}
Cmd::Del(cmd[1].clone())
}
"type" => {
if cmd.len() != 2 {
@@ -310,20 +229,6 @@ impl Cmd {
}
Cmd::HSetNx(cmd[1].clone(), cmd[2].clone(), cmd[3].clone())
}
"hincrby" => {
if cmd.len() != 4 {
return Err(DBError(format!("wrong number of arguments for HINCRBY command")));
}
let delta = cmd[3].parse::<i64>().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
Cmd::HIncrBy(cmd[1].clone(), cmd[2].clone(), delta)
}
"hincrbyfloat" => {
if cmd.len() != 4 {
return Err(DBError(format!("wrong number of arguments for HINCRBYFLOAT command")));
}
let delta = cmd[3].parse::<f64>().map_err(|_| DBError("ERR value is not a valid float".to_string()))?;
Cmd::HIncrByFloat(cmd[1].clone(), cmd[2].clone(), delta)
}
"hscan" => {
if cmd.len() < 3 {
return Err(DBError(format!("wrong number of arguments for HSCAN command")));
@@ -405,49 +310,11 @@ impl Cmd {
}
Cmd::Ttl(cmd[1].clone())
}
"expire" => {
if cmd.len() != 3 {
return Err(DBError("wrong number of arguments for EXPIRE command".to_string()));
}
let secs = cmd[2].parse::<i64>().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
Cmd::Expire(cmd[1].clone(), secs)
}
"pexpire" => {
if cmd.len() != 3 {
return Err(DBError("wrong number of arguments for PEXPIRE command".to_string()));
}
let ms = cmd[2].parse::<i64>().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
Cmd::PExpire(cmd[1].clone(), ms)
}
"expireat" => {
if cmd.len() != 3 {
return Err(DBError("wrong number of arguments for EXPIREAT command".to_string()));
}
let ts = cmd[2].parse::<i64>().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
Cmd::ExpireAt(cmd[1].clone(), ts)
}
"pexpireat" => {
if cmd.len() != 3 {
return Err(DBError("wrong number of arguments for PEXPIREAT command".to_string()));
}
let ts_ms = cmd[2].parse::<i64>().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
Cmd::PExpireAt(cmd[1].clone(), ts_ms)
}
"persist" => {
if cmd.len() != 2 {
return Err(DBError("wrong number of arguments for PERSIST command".to_string()));
}
Cmd::Persist(cmd[1].clone())
}
"exists" => {
if cmd.len() < 2 {
if cmd.len() != 2 {
return Err(DBError(format!("wrong number of arguments for EXISTS command")));
}
if cmd.len() == 2 {
Cmd::Exists(cmd[1].clone())
} else {
Cmd::ExistsMulti(cmd[1..].to_vec())
}
Cmd::Exists(cmd[1].clone())
}
"quit" => {
if cmd.len() != 1 {
@@ -478,10 +345,6 @@ impl Cmd {
Cmd::Client(vec![])
}
}
"command" => {
let args = if cmd.len() > 1 { cmd[1..].to_vec() } else { vec![] };
Cmd::Command(args)
}
"lpush" => {
if cmd.len() < 3 {
return Err(DBError(format!("wrong number of arguments for LPUSH command")));
@@ -516,28 +379,6 @@ impl Cmd {
};
Cmd::RPop(cmd[1].clone(), count)
}
"blpop" => {
if cmd.len() < 3 {
return Err(DBError(format!("wrong number of arguments for BLPOP command")));
}
// keys are all but the last argument
let keys = cmd[1..cmd.len()-1].to_vec();
let timeout_f = cmd[cmd.len()-1]
.parse::<f64>()
.map_err(|_| DBError("ERR timeout is not a number".to_string()))?;
Cmd::BLPop(keys, timeout_f)
}
"brpop" => {
if cmd.len() < 3 {
return Err(DBError(format!("wrong number of arguments for BRPOP command")));
}
// keys are all but the last argument
let keys = cmd[1..cmd.len()-1].to_vec();
let timeout_f = cmd[cmd.len()-1]
.parse::<f64>()
.map_err(|_| DBError("ERR timeout is not a number".to_string()))?;
Cmd::BRPop(keys, timeout_f)
}
"llen" => {
if cmd.len() != 2 {
return Err(DBError(format!("wrong number of arguments for LLEN command")));
@@ -650,14 +491,9 @@ impl Cmd {
Cmd::Set(k, v) => set_cmd(server, &k, &v).await,
Cmd::SetPx(k, v, x) => set_px_cmd(server, &k, &v, &x).await,
Cmd::SetEx(k, v, x) => set_ex_cmd(server, &k, &v, &x).await,
Cmd::SetOpts(k, v, ex_ms, nx, xx, getflag) => set_with_opts_cmd(server, &k, &v, ex_ms, nx, xx, getflag).await,
Cmd::MGet(keys) => mget_cmd(server, &keys).await,
Cmd::MSet(pairs) => mset_cmd(server, &pairs).await,
Cmd::Del(k) => del_cmd(server, &k).await,
Cmd::DelMulti(keys) => del_multi_cmd(server, &keys).await,
Cmd::ConfigGet(name) => config_get_cmd(&name, server),
Cmd::Keys => keys_cmd(server).await,
Cmd::DbSize => dbsize_cmd(server).await,
Cmd::Info(section) => info_cmd(server, &section).await,
Cmd::Type(k) => type_cmd(server, &k).await,
Cmd::Incr(key) => incr_cmd(server, &key).await,
@@ -685,30 +521,19 @@ impl Cmd {
Cmd::HLen(key) => hlen_cmd(server, &key).await,
Cmd::HMGet(key, fields) => hmget_cmd(server, &key, &fields).await,
Cmd::HSetNx(key, field, value) => hsetnx_cmd(server, &key, &field, &value).await,
Cmd::HIncrBy(key, field, delta) => hincrby_cmd(server, &key, &field, delta).await,
Cmd::HIncrByFloat(key, field, delta) => hincrbyfloat_cmd(server, &key, &field, delta).await,
Cmd::HScan(key, cursor, pattern, count) => hscan_cmd(server, &key, &cursor, pattern.as_deref(), &count).await,
Cmd::Scan(cursor, pattern, count) => scan_cmd(server, &cursor, pattern.as_deref(), &count).await,
Cmd::Ttl(key) => ttl_cmd(server, &key).await,
Cmd::Expire(key, secs) => expire_cmd(server, &key, secs).await,
Cmd::PExpire(key, ms) => pexpire_cmd(server, &key, ms).await,
Cmd::ExpireAt(key, ts_secs) => expireat_cmd(server, &key, ts_secs).await,
Cmd::PExpireAt(key, ts_ms) => pexpireat_cmd(server, &key, ts_ms).await,
Cmd::Persist(key) => persist_cmd(server, &key).await,
Cmd::Exists(key) => exists_cmd(server, &key).await,
Cmd::ExistsMulti(keys) => exists_multi_cmd(server, &keys).await,
Cmd::Quit => Ok(Protocol::SimpleString("OK".to_string())),
Cmd::Client(_) => Ok(Protocol::SimpleString("OK".to_string())),
Cmd::ClientSetName(name) => client_setname_cmd(server, &name).await,
Cmd::ClientGetName => client_getname_cmd(server).await,
Cmd::Command(args) => command_cmd(&args),
// List commands
Cmd::LPush(key, elements) => lpush_cmd(server, &key, &elements).await,
Cmd::RPush(key, elements) => rpush_cmd(server, &key, &elements).await,
Cmd::LPop(key, count) => lpop_cmd(server, &key, &count).await,
Cmd::RPop(key, count) => rpop_cmd(server, &key, &count).await,
Cmd::BLPop(keys, timeout) => blpop_cmd(server, &keys, timeout).await,
Cmd::BRPop(keys, timeout) => brpop_cmd(server, &keys, timeout).await,
Cmd::LLen(key) => llen_cmd(server, &key).await,
Cmd::LRem(key, count, element) => lrem_cmd(server, &key, count, &element).await,
Cmd::LTrim(key, start, stop) => ltrim_cmd(server, &key, start, stop).await,
@@ -716,12 +541,12 @@ impl Cmd {
Cmd::LRange(key, start, stop) => lrange_cmd(server, &key, start, stop).await,
Cmd::FlushDb => flushdb_cmd(server).await,
// AGE (rage): stateless
Cmd::AgeGenEnc => Ok(crate::age::cmd_age_genenc().await),
Cmd::AgeGenSign => Ok(crate::age::cmd_age_gensign().await),
Cmd::AgeEncrypt(recipient, message) => Ok(crate::age::cmd_age_encrypt(&recipient, &message).await),
Cmd::AgeDecrypt(identity, ct_b64) => Ok(crate::age::cmd_age_decrypt(&identity, &ct_b64).await),
Cmd::AgeSign(secret, message) => Ok(crate::age::cmd_age_sign(&secret, &message).await),
Cmd::AgeVerify(vpub, msg, sig_b64) => Ok(crate::age::cmd_age_verify(&vpub, &msg, &sig_b64).await),
Cmd::AgeGenEnc => Ok(libcryptoa::gen_enc_keypair().await),
Cmd::AgeGenSign => Ok(libcryptoa::gen_sign_keypair().await),
Cmd::AgeEncrypt(recipient, message) => Ok(libcryptoa::encrypt_b64(&recipient, &message).await),
Cmd::AgeDecrypt(identity, ct_b64) => Ok(libcryptoa::decrypt_b64(&identity, &ct_b64).await),
Cmd::AgeSign(secret, message) => Ok(libcryptoa::sign_b64(&secret, &message).await),
Cmd::AgeVerify(vpub, msg, sig_b64) => Ok(libcryptoa::verify_b64(&vpub, &msg, &sig_b64).await),
// AGE (rage): persistent named keys
Cmd::AgeKeygen(name) => Ok(crate::age::cmd_age_keygen(server, &name).await),
@@ -839,188 +664,16 @@ async fn rpop_cmd(server: &Server, key: &str, count: &Option<u64>) -> Result<Pro
}
}
// BLPOP implementation
async fn blpop_cmd(server: &Server, keys: &[String], timeout_secs: f64) -> Result<Protocol, DBError> {
// Immediate, non-blocking attempt in key order
for k in keys {
let elems = server.current_storage()?.lpop(k, 1)?;
if !elems.is_empty() {
return Ok(Protocol::Array(vec![
Protocol::BulkString(k.clone()),
Protocol::BulkString(elems[0].clone()),
]));
}
}
// If timeout is zero, return immediately with Null
if timeout_secs <= 0.0 {
return Ok(Protocol::Null);
}
// Register waiters for each key
let db_index = server.selected_db;
let mut ids: Vec<u64> = Vec::with_capacity(keys.len());
let mut names: Vec<String> = Vec::with_capacity(keys.len());
let mut rxs: Vec<tokio::sync::oneshot::Receiver<(String, String)>> = Vec::with_capacity(keys.len());
for k in keys {
let (id, rx) = server.register_waiter(db_index, k, crate::server::PopSide::Left).await;
ids.push(id);
names.push(k.clone());
rxs.push(rx);
}
// Wait for the first delivery or timeout
let wait_fut = async move {
let mut futures_vec = rxs;
loop {
if futures_vec.is_empty() {
return None;
}
let (res, idx, remaining) = select_all(futures_vec).await;
match res {
Ok((k, elem)) => {
return Some((k, elem, idx, remaining));
}
Err(_canceled) => {
// That waiter was canceled; continue with the rest
futures_vec = remaining;
continue;
}
}
}
};
match timeout(Duration::from_secs_f64(timeout_secs), wait_fut).await {
Ok(Some((k, elem, idx, _remaining))) => {
// Unregister other waiters
for (i, key_name) in names.iter().enumerate() {
if i != idx {
server.unregister_waiter(db_index, key_name, ids[i]).await;
}
}
Ok(Protocol::Array(vec![
Protocol::BulkString(k),
Protocol::BulkString(elem),
]))
}
Ok(None) => {
// No futures left; unregister all waiters
for (i, key_name) in names.iter().enumerate() {
server.unregister_waiter(db_index, key_name, ids[i]).await;
}
Ok(Protocol::Null)
}
Err(_elapsed) => {
// Timeout: unregister all waiters
for (i, key_name) in names.iter().enumerate() {
server.unregister_waiter(db_index, key_name, ids[i]).await;
}
Ok(Protocol::Null)
}
}
}
// BRPOP implementation (mirror of BLPOP, popping from the right)
async fn brpop_cmd(server: &Server, keys: &[String], timeout_secs: f64) -> Result<Protocol, DBError> {
// Immediate, non-blocking attempt in key order using RPOP
for k in keys {
let elems = server.current_storage()?.rpop(k, 1)?;
if !elems.is_empty() {
return Ok(Protocol::Array(vec![
Protocol::BulkString(k.clone()),
Protocol::BulkString(elems[0].clone()),
]));
}
}
// If timeout is zero, return immediately with Null
if timeout_secs <= 0.0 {
return Ok(Protocol::Null);
}
// Register waiters for each key (Right side)
let db_index = server.selected_db;
let mut ids: Vec<u64> = Vec::with_capacity(keys.len());
let mut names: Vec<String> = Vec::with_capacity(keys.len());
let mut rxs: Vec<tokio::sync::oneshot::Receiver<(String, String)>> = Vec::with_capacity(keys.len());
for k in keys {
let (id, rx) = server.register_waiter(db_index, k, crate::server::PopSide::Right).await;
ids.push(id);
names.push(k.clone());
rxs.push(rx);
}
// Wait for the first delivery or timeout
let wait_fut = async move {
let mut futures_vec = rxs;
loop {
if futures_vec.is_empty() {
return None;
}
let (res, idx, remaining) = select_all(futures_vec).await;
match res {
Ok((k, elem)) => {
return Some((k, elem, idx, remaining));
}
Err(_canceled) => {
// That waiter was canceled; continue with the rest
futures_vec = remaining;
continue;
}
}
}
};
match timeout(Duration::from_secs_f64(timeout_secs), wait_fut).await {
Ok(Some((k, elem, idx, _remaining))) => {
// Unregister other waiters
for (i, key_name) in names.iter().enumerate() {
if i != idx {
server.unregister_waiter(db_index, key_name, ids[i]).await;
}
}
Ok(Protocol::Array(vec![
Protocol::BulkString(k),
Protocol::BulkString(elem),
]))
}
Ok(None) => {
// No futures left; unregister all waiters
for (i, key_name) in names.iter().enumerate() {
server.unregister_waiter(db_index, key_name, ids[i]).await;
}
Ok(Protocol::Null)
}
Err(_elapsed) => {
// Timeout: unregister all waiters
for (i, key_name) in names.iter().enumerate() {
server.unregister_waiter(db_index, key_name, ids[i]).await;
}
Ok(Protocol::Null)
}
}
}
async fn lpush_cmd(server: &Server, key: &str, elements: &[String]) -> Result<Protocol, DBError> {
match server.current_storage()?.lpush(key, elements.to_vec()) {
Ok(len) => {
// Attempt to deliver to any blocked BLPOP waiters
let _ = server.drain_waiters_after_push(key).await;
Ok(Protocol::SimpleString(len.to_string()))
}
Ok(len) => Ok(Protocol::SimpleString(len.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
async fn rpush_cmd(server: &Server, key: &str, elements: &[String]) -> Result<Protocol, DBError> {
match server.current_storage()?.rpush(key, elements.to_vec()) {
Ok(len) => {
// Attempt to deliver to any blocked BLPOP waiters
let _ = server.drain_waiters_after_push(key).await;
Ok(Protocol::SimpleString(len.to_string()))
}
Ok(len) => Ok(Protocol::SimpleString(len.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
@@ -1086,13 +739,6 @@ async fn keys_cmd(server: &Server) -> Result<Protocol, DBError> {
))
}
async fn dbsize_cmd(server: &Server) -> Result<Protocol, DBError> {
match server.current_storage()?.dbsize() {
Ok(n) => Ok(Protocol::SimpleString(n.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
#[derive(Serialize)]
struct ServerInfo {
redis_version: String,
@@ -1114,19 +760,17 @@ async fn info_cmd(server: &Server, section: &Option<String>) -> Result<Protocol,
info_string.push_str(&format!("# Keyspace\n"));
info_string.push_str(&format!("db{}:keys=0,expires=0,avg_ttl=0\n", info.selected_db));
match section {
Some(s) => {
let sl = s.to_lowercase();
if sl == "replication" {
Ok(Protocol::BulkString(
"role:master\nmaster_replid:8371b4fb1155b71f4a04d3e1bc3e18c4a990aeea\nmaster_repl_offset:0\n".to_string()
))
} else {
// Return general info for unknown sections (e.g., SERVER)
Ok(Protocol::BulkString(info_string))
}
Some(s) => match s.as_str() {
"replication" => Ok(Protocol::BulkString(
"role:master\nmaster_replid:8371b4fb1155b71f4a04d3e1bc3e18c4a990aeea\nmaster_repl_offset:0\n".to_string()
)),
_ => Err(DBError(format!("unsupported section {:?}", s))),
},
None => {
Ok(Protocol::BulkString(info_string))
}
None => Ok(Protocol::BulkString(info_string)),
}
}
@@ -1167,109 +811,6 @@ async fn set_cmd(server: &Server, k: &str, v: &str) -> Result<Protocol, DBError>
Ok(Protocol::SimpleString("OK".to_string()))
}
// Advanced SET with options: EX/PX/NX/XX/GET
async fn set_with_opts_cmd(
server: &Server,
key: &str,
value: &str,
ex_ms: Option<u128>,
nx: bool,
xx: bool,
get_old: bool,
) -> Result<Protocol, DBError> {
let storage = server.current_storage()?;
// Determine existence (for NX/XX)
let exists = storage.exists(key)?;
// If both NX and XX, condition can never be satisfied -> no-op
let mut should_set = true;
if nx && exists {
should_set = false;
}
if xx && !exists {
should_set = false;
}
// Fetch old value if needed for GET
let old_val = if get_old {
storage.get(key)?
} else {
None
};
if should_set {
if let Some(ms) = ex_ms {
storage.setx(key.to_string(), value.to_string(), ms)?;
} else {
storage.set(key.to_string(), value.to_string())?;
}
}
if get_old {
// Return previous value (or Null), regardless of NX/XX outcome only if set executed?
// We follow Redis semantics: return old value if set executed, else Null
if should_set {
Ok(old_val.map_or(Protocol::Null, Protocol::BulkString))
} else {
Ok(Protocol::Null)
}
} else {
if should_set {
Ok(Protocol::SimpleString("OK".to_string()))
} else {
Ok(Protocol::Null)
}
}
}
// MGET: return array of bulk strings or Null for missing
async fn mget_cmd(server: &Server, keys: &[String]) -> Result<Protocol, DBError> {
let mut out: Vec<Protocol> = Vec::with_capacity(keys.len());
let storage = server.current_storage()?;
for k in keys {
match storage.get(k)? {
Some(v) => out.push(Protocol::BulkString(v)),
None => out.push(Protocol::Null),
}
}
Ok(Protocol::Array(out))
}
// MSET: set multiple key/value pairs, return OK
async fn mset_cmd(server: &Server, pairs: &[(String, String)]) -> Result<Protocol, DBError> {
let storage = server.current_storage()?;
for (k, v) in pairs {
storage.set(k.clone(), v.clone())?;
}
Ok(Protocol::SimpleString("OK".to_string()))
}
// DEL with multiple keys: return count of keys actually deleted
async fn del_multi_cmd(server: &Server, keys: &[String]) -> Result<Protocol, DBError> {
let storage = server.current_storage()?;
let mut deleted = 0i64;
for k in keys {
if storage.exists(k)? {
storage.del(k.clone())?;
deleted += 1;
}
}
Ok(Protocol::SimpleString(deleted.to_string()))
}
// EXISTS with multiple keys: return count existing
async fn exists_multi_cmd(server: &Server, keys: &[String]) -> Result<Protocol, DBError> {
let storage = server.current_storage()?;
let mut count = 0i64;
for k in keys {
if storage.exists(k)? {
count += 1;
}
}
Ok(Protocol::SimpleString(count.to_string()))
}
async fn get_cmd(server: &Server, k: &str) -> Result<Protocol, DBError> {
let v = server.current_storage()?.get(k)?;
Ok(v.map_or(Protocol::Null, Protocol::BulkString))
@@ -1362,32 +903,6 @@ async fn hsetnx_cmd(server: &Server, key: &str, field: &str, value: &str) -> Res
}
}
async fn hincrby_cmd(server: &Server, key: &str, field: &str, delta: i64) -> Result<Protocol, DBError> {
let storage = server.current_storage()?;
let current = storage.hget(key, field)?;
let base: i64 = match current {
Some(v) => v.parse::<i64>().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?,
None => 0,
};
let new_val = base.checked_add(delta).ok_or_else(|| DBError("ERR increment or decrement would overflow".to_string()))?;
// Update the field
storage.hset(key, vec![(field.to_string(), new_val.to_string())])?;
Ok(Protocol::SimpleString(new_val.to_string()))
}
async fn hincrbyfloat_cmd(server: &Server, key: &str, field: &str, delta: f64) -> Result<Protocol, DBError> {
let storage = server.current_storage()?;
let current = storage.hget(key, field)?;
let base: f64 = match current {
Some(v) => v.parse::<f64>().map_err(|_| DBError("ERR value is not a valid float".to_string()))?,
None => 0.0,
};
let new_val = base + delta;
// Update the field
storage.hset(key, vec![(field.to_string(), new_val.to_string())])?;
Ok(Protocol::SimpleString(new_val.to_string()))
}
async fn scan_cmd(
server: &Server,
cursor: &u64,
@@ -1445,51 +960,6 @@ async fn exists_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
}
}
// EXPIRE key seconds -> 1 if timeout set, 0 otherwise
async fn expire_cmd(server: &Server, key: &str, secs: i64) -> Result<Protocol, DBError> {
if secs < 0 {
return Ok(Protocol::SimpleString("0".to_string()));
}
match server.current_storage()?.expire_seconds(key, secs as u64) {
Ok(applied) => Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
// PEXPIRE key milliseconds -> 1 if timeout set, 0 otherwise
async fn pexpire_cmd(server: &Server, key: &str, ms: i64) -> Result<Protocol, DBError> {
if ms < 0 {
return Ok(Protocol::SimpleString("0".to_string()));
}
match server.current_storage()?.pexpire_millis(key, ms as u128) {
Ok(applied) => Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
// PERSIST key -> 1 if timeout removed, 0 otherwise
async fn persist_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
match server.current_storage()?.persist(key) {
Ok(removed) => Ok(Protocol::SimpleString(if removed { "1" } else { "0" }.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
// EXPIREAT key timestamp-seconds -> 1 if timeout set, 0 otherwise
async fn expireat_cmd(server: &Server, key: &str, ts_secs: i64) -> Result<Protocol, DBError> {
match server.current_storage()?.expire_at_seconds(key, ts_secs) {
Ok(applied) => Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
// PEXPIREAT key timestamp-milliseconds -> 1 if timeout set, 0 otherwise
async fn pexpireat_cmd(server: &Server, key: &str, ts_ms: i64) -> Result<Protocol, DBError> {
match server.current_storage()?.pexpire_at_millis(key, ts_ms) {
Ok(applied) => Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())),
Err(e) => Ok(Protocol::err(&e.0)),
}
}
async fn client_setname_cmd(server: &mut Server, name: &str) -> Result<Protocol, DBError> {
server.client_name = Some(name.to_string());
Ok(Protocol::SimpleString("OK".to_string()))
@@ -1501,19 +971,3 @@ async fn client_getname_cmd(server: &Server) -> Result<Protocol, DBError> {
None => Ok(Protocol::Null),
}
}
// Minimal COMMAND subcommands stub to satisfy redis-cli probes.
// - COMMAND DOCS ... => return empty array
// - COMMAND INFO ... => return empty array
// - Any other => empty array
fn command_cmd(args: &[String]) -> Result<Protocol, DBError> {
if args.is_empty() {
return Ok(Protocol::Array(vec![]));
}
let sub = args[0].to_lowercase();
match sub.as_str() {
"docs" => Ok(Protocol::Array(vec![])),
"info" => Ok(Protocol::Array(vec![])),
_ => Ok(Protocol::Array(vec![])),
}
}

View File

@@ -1,8 +1,4 @@
pub mod age; // NEW
pub mod cmd;
pub mod crypto;
pub mod error;
pub mod options;
pub mod protocol;
pub mod server;
pub mod storage;

View File

@@ -19,10 +19,6 @@ impl fmt::Display for Protocol {
impl Protocol {
pub fn from(protocol: &str) -> Result<(Self, &str), DBError> {
if protocol.is_empty() {
// Incomplete frame; caller should read more bytes
return Err(DBError("[incomplete] empty".to_string()));
}
let ret = match protocol.chars().nth(0) {
Some('+') => Self::parse_simple_string_sfx(&protocol[1..]),
Some('$') => Self::parse_bulk_string_sfx(&protocol[1..]),
@@ -105,20 +101,21 @@ impl Protocol {
let size = Self::parse_usize(&protocol[..len_end])?;
let data_start = len_end + 2;
let data_end = data_start + size;
// If we don't yet have the full bulk payload + trailing CRLF, signal INCOMPLETE
if protocol.len() < data_end + 2 {
return Err(DBError("[incomplete] bulk body".to_string()));
}
if &protocol[data_end..data_end + 2] != "\r\n" {
return Err(DBError("[incomplete] bulk terminator".to_string()));
}
let s = Self::parse_string(&protocol[data_start..data_end])?;
Ok((Protocol::BulkString(s), &protocol[data_end + 2..]))
if protocol.len() < data_end + 2 || &protocol[data_end..data_end+2] != "\r\n" {
Err(DBError(format!(
"[new bulk string] unmatched string length in prototocl {:?}",
protocol,
)))
} else {
Ok((Protocol::BulkString(s), &protocol[data_end + 2..]))
}
} else {
// No CRLF after bulk length header yet
Err(DBError("[incomplete] bulk header".to_string()))
Err(DBError(format!(
"[new bulk string] unsupported protocol: {:?}",
protocol
)))
}
}
@@ -128,25 +125,16 @@ impl Protocol {
let mut remaining = &s[len_end + 2..];
let mut vec = vec![];
for _ in 0..array_len {
match Protocol::from(remaining) {
Ok((p, rem)) => {
vec.push(p);
remaining = rem;
}
Err(e) => {
// Propagate incomplete so caller can read more bytes
if e.0.starts_with("[incomplete]") {
return Err(e);
} else {
return Err(e);
}
}
}
let (p, rem) = Protocol::from(remaining)?;
vec.push(p);
remaining = rem;
}
Ok((Protocol::Array(vec), remaining))
} else {
// No CRLF after array header yet
Err(DBError("[incomplete] array header".to_string()))
Err(DBError(format!(
"[new array] unsupported protocol: {:?}",
s
)))
}
}

136
crates/herodb/server.rs Normal file
View File

@@ -0,0 +1,136 @@
use core::str;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use crate::cmd::Cmd;
use crate::error::DBError;
use crate::options;
use crate::protocol::Protocol;
use crate::storage::Storage;
#[derive(Clone)]
pub struct Server {
pub db_cache: std::sync::Arc<std::sync::RwLock<HashMap<u64, Arc<Storage>>>>,
pub option: options::DBOption,
pub client_name: Option<String>,
pub selected_db: u64, // Changed from usize to u64
pub queued_cmd: Option<Vec<(Cmd, Protocol)>>,
}
impl Server {
pub async fn new(option: options::DBOption) -> Self {
Server {
db_cache: Arc::new(std::sync::RwLock::new(HashMap::new())),
option,
client_name: None,
selected_db: 0,
queued_cmd: None,
}
}
pub fn current_storage(&self) -> Result<Arc<libdbstorage::Storage>, libdbstorage::DBError> {
let mut cache = self.db_cache.write().unwrap();
if let Some(storage) = cache.get(&self.selected_db) {
return Ok(storage.clone());
}
// Create new database file
let db_file_path = std::path::PathBuf::from(self.option.dir.clone())
.join(format!("{}.db", self.selected_db));
// Ensure the directory exists before creating the database file
if let Some(parent_dir) = db_file_path.parent() {
std::fs::create_dir_all(parent_dir).map_err(|e| {
DBError(format!("Failed to create directory {}: {}", parent_dir.display(), e))
})?;
}
println!("Creating new db file: {}", db_file_path.display());
let storage = Arc::new(Storage::new(
db_file_path,
self.should_encrypt_db(self.selected_db),
self.option.encryption_key.as_deref()
)?);
cache.insert(self.selected_db, storage.clone());
Ok(storage)
}
fn should_encrypt_db(&self, db_index: u64) -> bool {
// DB 0-9 are non-encrypted, DB 10+ are encrypted
self.option.encrypt && db_index >= 10
}
pub async fn handle(
&mut self,
mut stream: tokio::net::TcpStream,
) -> Result<(), DBError> {
let mut buf = [0; 512];
loop {
let len = match stream.read(&mut buf).await {
Ok(0) => {
println!("[handle] connection closed");
return Ok(());
}
Ok(len) => len,
Err(e) => {
println!("[handle] read error: {:?}", e);
return Err(e.into());
}
};
let mut s = str::from_utf8(&buf[..len])?;
while !s.is_empty() {
let (cmd, protocol, remaining) = match Cmd::from(s) {
Ok((cmd, protocol, remaining)) => (cmd, protocol, remaining),
Err(e) => {
println!("\x1b[31;1mprotocol error: {:?}\x1b[0m", e);
(Cmd::Unknow("protocol_error".to_string()), Protocol::err(&format!("protocol error: {}", e.0)), "")
}
};
s = remaining;
if self.option.debug {
println!("\x1b[34;1mgot command: {:?}, protocol: {:?}\x1b[0m", cmd, protocol);
} else {
println!("got command: {:?}, protocol: {:?}", cmd, protocol);
}
// Check if this is a QUIT command before processing
let is_quit = matches!(cmd, Cmd::Quit);
let res = match cmd.run(self).await {
Ok(p) => p,
Err(e) => {
if self.option.debug {
eprintln!("[run error] {:?}", e);
}
Protocol::err(&format!("ERR {}", e.0))
}
};
if self.option.debug {
println!("\x1b[34;1mqueued cmd {:?}\x1b[0m", self.queued_cmd);
println!("\x1b[32;1mgoing to send response {}\x1b[0m", res.encode());
} else {
print!("queued cmd {:?}", self.queued_cmd);
println!("going to send response {}", res.encode());
}
_ = stream.write(res.encode().as_bytes()).await?;
// If this was a QUIT command, close the connection
if is_quit {
println!("[handle] QUIT command received, closing connection");
return Ok(());
}
}
}
}
}

View File

@@ -0,0 +1 @@
fn main() {}

View File

@@ -16,9 +16,9 @@ fn get_redis_connection(port: u16) -> Connection {
}
}
Err(e) => {
if attempts >= 120 {
if attempts >= 20 {
panic!(
"Failed to connect to Redis server after 120 attempts: {}",
"Failed to connect to Redis server after 20 attempts: {}",
e
);
}
@@ -88,8 +88,8 @@ fn setup_server() -> (ServerProcessGuard, u16) {
test_dir,
};
// Give the server time to build and start (cargo run may compile first)
std::thread::sleep(Duration::from_millis(2500));
// Give the server a moment to start
std::thread::sleep(Duration::from_millis(500));
(guard, port)
}

View File

@@ -93,16 +93,9 @@ async fn test_basic_redis_functionality() {
assert!(response.contains("string"));
// Test QUIT to close connection gracefully
let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap();
stream.write_all("*1\r\n$4\r\nQUIT\r\n".as_bytes()).await.unwrap();
let mut buffer = [0; 1024];
let n = stream.read(&mut buffer).await.unwrap();
let response = String::from_utf8_lossy(&buffer[..n]);
let response = send_redis_command(port, "*1\r\n$4\r\nQUIT\r\n").await;
assert!(response.contains("OK"));
// Ensure the stream is closed
stream.shutdown().await.unwrap();
// Stop the server
server_handle.abort();
@@ -156,8 +149,6 @@ async fn test_hash_operations() {
assert!(response.contains("value2"));
// Stop the server
// For hash operations, we don't have a persistent stream, so we'll just abort the server.
// The server should handle closing its connections.
server_handle.abort();
println!("✅ All hash operations tests passed!");
@@ -211,16 +202,9 @@ async fn test_transaction_operations() {
assert!(response.contains("OK")); // Should contain array of OK responses
// Verify commands were executed
stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n".as_bytes()).await.unwrap();
let n = stream.read(&mut buffer).await.unwrap();
let response = String::from_utf8_lossy(&buffer[..n]);
let response = send_redis_command(port, "*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n").await;
assert!(response.contains("value1"));
stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey2\r\n".as_bytes()).await.unwrap();
let n = stream.read(&mut buffer).await.unwrap();
let response = String::from_utf8_lossy(&buffer[..n]);
assert!(response.contains("value2"));
// Stop the server
server_handle.abort();

View File

@@ -0,0 +1,10 @@
[package]
name = "libcrypto"
version = "0.1.0"
edition = "2021"
[dependencies]
chacha20poly1305 = { workspace = true }
rand = { workspace = true }
sha2 = { workspace = true }
thiserror = { workspace = true }

View File

@@ -1,25 +1,24 @@
// In crates/libcrypto/src/lib.rs
use chacha20poly1305::{
aead::{Aead, KeyInit, OsRng},
XChaCha20Poly1305, XNonce,
};
use rand::RngCore;
use sha2::{Digest, Sha256};
use thiserror::Error;
const VERSION: u8 = 1;
const NONCE_LEN: usize = 24;
const TAG_LEN: usize = 16;
#[derive(Debug)]
#[derive(Error, Debug)]
pub enum CryptoError {
Format, // wrong length / header
Version(u8), // unknown version
Decrypt, // wrong key or corrupted data
}
impl From<CryptoError> for crate::error::DBError {
fn from(e: CryptoError) -> Self {
crate::error::DBError(format!("Crypto error: {:?}", e))
}
#[error("invalid format: data too short")]
Format,
#[error("unknown version: {0}")]
Version(u8),
#[error("decryption failed: wrong key or corrupted data")]
Decrypt,
}
/// Super-simple factory: new(secret) + encrypt(bytes) + decrypt(bytes)

View File

@@ -0,0 +1,12 @@
[package]
name = "libcryptoa"
version = "0.1.0"
edition = "2021"
[dependencies]
age = { workspace = true }
secrecy = { workspace = true }
ed25519-dalek = { workspace = true }
base64 = { workspace = true }
rand = { workspace = true }
thiserror = { workspace = true }

View File

@@ -0,0 +1,100 @@
// In crates/libcryptoa/src/lib.rs
use std::str::FromStr;
use age::{Decryptor, Encryptor, x25519};
use base64::{engine::general_purpose::STANDARD as B64, Engine as _};
use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey};
use secrecy::ExposeSecret;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum AsymmetricCryptoError {
#[error("key parsing failed")]
ParseKey,
#[error("age crypto error: {0}")]
Age(String),
#[error("invalid utf-8 in plaintext")]
Utf8,
#[error("invalid signature length")]
SignatureLen,
#[error("signature verification failed")]
Verify,
#[error("base64 decoding failed: {0}")]
Base64(#[from] base64::DecodeError),
#[error("io error: {0}")]
Io(#[from] std::io::Error),
}
fn parse_recipient(s: &str) -> Result<x25519::Recipient, AsymmetricCryptoError> {
x25519::Recipient::from_str(s).map_err(|_| AsymmetricCryptoError::ParseKey)
}
fn parse_identity(s: &str) -> Result<x25519::Identity, AsymmetricCryptoError> {
x25519::Identity::from_str(s).map_err(|_| AsymmetricCryptoError::ParseKey)
}
fn parse_ed25519_signing_key(s: &str) -> Result<SigningKey, AsymmetricCryptoError> {
let bytes = B64.decode(s)?;
let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AsymmetricCryptoError::ParseKey)?;
Ok(SigningKey::from_bytes(&key_bytes))
}
fn parse_ed25519_verifying_key(s: &str) -> Result<VerifyingKey, AsymmetricCryptoError> {
let bytes = B64.decode(s)?;
let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AsymmetricCryptoError::ParseKey)?;
VerifyingKey::from_bytes(&key_bytes).map_err(|_| AsymmetricCryptoError::ParseKey)
}
pub fn gen_enc_keypair() -> (String, String) {
let id = x25519::Identity::generate();
let pk = id.to_public();
(pk.to_string(), id.to_string().expose_secret().to_string())
}
pub fn gen_sign_keypair() -> (String, String) {
let signing_key = SigningKey::generate(&mut rand::rngs::OsRng);
let verifying_key = signing_key.verifying_key();
(B64.encode(verifying_key.to_bytes()), B64.encode(signing_key.to_bytes()))
}
pub fn encrypt_b64(recipient_str: &str, msg: &str) -> Result<String, AsymmetricCryptoError> {
let recipient = parse_recipient(recipient_str)?;
let encryptor = Encryptor::with_recipients(vec![Box::new(recipient)])
.ok_or_else(|| AsymmetricCryptoError::Age("Failed to create encryptor".into()))?;
let mut encrypted = vec![];
let mut writer = encryptor.wrap_output(&mut encrypted)?;
std::io::Write::write_all(&mut writer, msg.as_bytes())?;
writer.finish()?;
Ok(B64.encode(encrypted))
}
pub fn decrypt_b64(identity_str: &str, ct_b64: &str) -> Result<String, AsymmetricCryptoError> {
let identity = parse_identity(identity_str)?;
let ct = B64.decode(ct_b64)?;
let decryptor = Decryptor::new(&ct[..]).map_err(|e| AsymmetricCryptoError::Age(e.to_string()))?;
let mut decrypted = vec![];
if let Decryptor::Recipients(d) = decryptor {
let mut reader = d.decrypt(std::iter::once(&identity as &dyn age::Identity))
.map_err(|e| AsymmetricCryptoError::Age(e.to_string()))?;
std::io::Read::read_to_end(&mut reader, &mut decrypted)?;
String::from_utf8(decrypted).map_err(|_| AsymmetricCryptoError::Utf8)
} else {
Err(AsymmetricCryptoError::Age("Passphrase decryption not supported".into()))
}
}
pub fn sign_b64(signing_secret_str: &str, msg: &str) -> Result<String, AsymmetricCryptoError> {
let signing_key = parse_ed25519_signing_key(signing_secret_str)?;
let signature = signing_key.sign(msg.as_bytes());
Ok(B64.encode(signature.to_bytes()))
}
pub fn verify_b64(verify_pub_str: &str, msg: &str, sig_b64: &str) -> Result<bool, AsymmetricCryptoError> {
let verifying_key = parse_ed25519_verifying_key(verify_pub_str)?;
let sig_bytes = B64.decode(sig_b64)?;
let signature = Signature::from_slice(&sig_bytes).map_err(|_| AsymmetricCryptoError::SignatureLen)?;
Ok(verifying_key.verify(msg.as_bytes(), &signature).is_ok())
}

View File

@@ -0,0 +1,15 @@
[package]
name = "libdbstorage"
version = "0.1.0"
edition = "2021"
[dependencies]
redb = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
thiserror = { workspace = true }
# Local Crate Dependencies
libcrypto = { path = "../libcrypto" }
tokio = { version = "1", features = ["full"] }
bincode = "1.3.3"

View File

@@ -87,8 +87,3 @@ impl From<serde_json::Error> for DBError {
}
}
impl From<chacha20poly1305::Error> for DBError {
fn from(item: chacha20poly1305::Error) -> Self {
DBError(item.to_string())
}
}

View File

@@ -1,25 +1,18 @@
// In crates/libdbstorage/src/lib.rs
use std::{
path::Path,
time::{SystemTime, UNIX_EPOCH},
};
use libcrypto::CryptoFactory; // Correct import
use redb::{Database, TableDefinition};
use serde::{Deserialize, Serialize};
use crate::crypto::CryptoFactory;
use crate::error::DBError;
pub mod error; // Declare the error module
pub use error::DBError; // Re-export for users of this crate
// Re-export modules
mod storage_basic;
mod storage_hset;
mod storage_lists;
mod storage_extra;
// Re-export implementations
// Note: These imports are used by the impl blocks in the submodules
// The compiler shows them as unused because they're not directly used in this file
// but they're needed for the Storage struct methods to be available
pub use storage_extra::*;
// Declare storage module
pub mod storage;
// Table definitions for different Redis data types
const TYPES_TABLE: TableDefinition<&str, &str> = TableDefinition::new("types");
@@ -118,7 +111,7 @@ impl Storage {
fn decrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
if let Some(crypto) = &self.crypto {
Ok(crypto.decrypt(data)?)
Ok(crypto.decrypt(data).map_err(|e| DBError(e.to_string()))?)
} else {
Ok(data.to_vec())
}

View File

@@ -0,0 +1,4 @@
pub mod storage_basic;
pub mod storage_hset;
pub mod storage_lists;
pub mod storage_extra;

View File

@@ -1,6 +1,6 @@
use redb::{ReadableTable};
use crate::error::DBError;
use super::*;
use crate::{Storage, TYPES_TABLE, STRINGS_TABLE, HASHES_TABLE, LISTS_TABLE, STREAMS_META_TABLE, STREAMS_DATA_TABLE, EXPIRATION_TABLE, now_in_millis};
impl Storage {
pub fn flushdb(&self) -> Result<(), DBError> {
@@ -215,31 +215,4 @@ impl Storage {
Ok(keys)
}
}
impl Storage {
pub fn dbsize(&self) -> Result<i64, DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
let mut count: i64 = 0;
let mut iter = types_table.iter()?;
while let Some(entry) = iter.next() {
let entry = entry?;
let key = entry.0.value();
let ty = entry.1.value();
if ty == "string" {
if let Some(expires_at) = expiration_table.get(key)? {
if now_in_millis() > expires_at.value() as u128 {
// Skip logically expired string keys
continue;
}
}
}
count += 1;
}
Ok(count)
}
}

View File

@@ -1,6 +1,6 @@
use redb::{ReadableTable};
use crate::error::DBError;
use super::*;
use crate::{Storage, TYPES_TABLE, STRINGS_TABLE, EXPIRATION_TABLE, now_in_millis};
impl Storage {
// ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval
@@ -98,116 +98,6 @@ impl Storage {
None => Ok(false), // Key does not exist
}
}
// -------- Expiration helpers (string keys only, consistent with TTL/EXISTS) --------
// Set expiry in seconds; returns true if applied (key exists and is string), false otherwise
pub fn expire_seconds(&self, key: &str, secs: u64) -> Result<bool, DBError> {
// Determine eligibility first to avoid holding borrows across commit
let mut applied = false;
let write_txn = self.db.begin_write()?;
{
let types_table = write_txn.open_table(TYPES_TABLE)?;
let is_string = types_table
.get(key)?
.map(|v| v.value() == "string")
.unwrap_or(false);
if is_string {
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
let expires_at = now_in_millis() + (secs as u128) * 1000;
expiration_table.insert(key, &(expires_at as u64))?;
applied = true;
}
}
write_txn.commit()?;
Ok(applied)
}
// Set expiry in milliseconds; returns true if applied (key exists and is string), false otherwise
pub fn pexpire_millis(&self, key: &str, ms: u128) -> Result<bool, DBError> {
let mut applied = false;
let write_txn = self.db.begin_write()?;
{
let types_table = write_txn.open_table(TYPES_TABLE)?;
let is_string = types_table
.get(key)?
.map(|v| v.value() == "string")
.unwrap_or(false);
if is_string {
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
let expires_at = now_in_millis() + ms;
expiration_table.insert(key, &(expires_at as u64))?;
applied = true;
}
}
write_txn.commit()?;
Ok(applied)
}
// Remove expiry if present; returns true if removed, false otherwise
pub fn persist(&self, key: &str) -> Result<bool, DBError> {
let mut removed = false;
let write_txn = self.db.begin_write()?;
{
let types_table = write_txn.open_table(TYPES_TABLE)?;
let is_string = types_table
.get(key)?
.map(|v| v.value() == "string")
.unwrap_or(false);
if is_string {
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
if expiration_table.remove(key)?.is_some() {
removed = true;
}
}
}
write_txn.commit()?;
Ok(removed)
}
// Absolute EXPIREAT in seconds since epoch
// Returns true if applied (key exists and is string), false otherwise
pub fn expire_at_seconds(&self, key: &str, ts_secs: i64) -> Result<bool, DBError> {
let mut applied = false;
let write_txn = self.db.begin_write()?;
{
let types_table = write_txn.open_table(TYPES_TABLE)?;
let is_string = types_table
.get(key)?
.map(|v| v.value() == "string")
.unwrap_or(false);
if is_string {
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
let expires_at_ms: u128 = if ts_secs <= 0 { 0 } else { (ts_secs as u128) * 1000 };
expiration_table.insert(key, &((expires_at_ms as u64)))?;
applied = true;
}
}
write_txn.commit()?;
Ok(applied)
}
// Absolute PEXPIREAT in milliseconds since epoch
// Returns true if applied (key exists and is string), false otherwise
pub fn pexpire_at_millis(&self, key: &str, ts_ms: i64) -> Result<bool, DBError> {
let mut applied = false;
let write_txn = self.db.begin_write()?;
{
let types_table = write_txn.open_table(TYPES_TABLE)?;
let is_string = types_table
.get(key)?
.map(|v| v.value() == "string")
.unwrap_or(false);
if is_string {
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
let expires_at_ms: u128 = if ts_ms <= 0 { 0 } else { ts_ms as u128 };
expiration_table.insert(key, &((expires_at_ms as u64)))?;
applied = true;
}
}
write_txn.commit()?;
Ok(applied)
}
}
// Utility function for glob pattern matching

View File

@@ -1,6 +1,6 @@
use redb::{ReadableTable};
use crate::error::DBError;
use super::*;
use crate::{Storage, TYPES_TABLE, HASHES_TABLE};
impl Storage {
// ✅ ENCRYPTION APPLIED: Values are encrypted before storage
@@ -12,30 +12,20 @@ impl Storage {
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") | None => { // Proceed if hash or new key
// Set the type to hash (only if new key or existing hash)
types_table.insert(key, "hash")?;
for (field, value) in pairs {
// Check if field already exists
let exists = hashes_table.get((key, field.as_str()))?.is_some();
// Encrypt the value before storing
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
hashes_table.insert((key, field.as_str()), encrypted.as_slice())?;
if !exists {
new_fields += 1;
}
}
// Set the type to hash
types_table.insert(key, "hash")?;
for (field, value) in pairs {
// Check if field already exists
let exists = hashes_table.get((key, field.as_str()))?.is_some();
// Encrypt the value before storing
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
hashes_table.insert((key, field.as_str()), encrypted.as_slice())?;
if !exists {
new_fields += 1;
}
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
}
}
@@ -48,10 +38,8 @@ impl Storage {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = types_table.get(key)?.map(|v| v.value().to_string());
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
match hashes_table.get((key, field))? {
Some(data) => {
@@ -62,8 +50,7 @@ impl Storage {
None => Ok(None),
}
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok(None),
_ => Ok(None),
}
}
@@ -71,13 +58,9 @@ impl Storage {
pub fn hgetall(&self, key: &str) -> Result<Vec<(String, String)>, DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
let mut result = Vec::new();
@@ -94,8 +77,7 @@ impl Storage {
Ok(result)
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok(Vec::new()),
_ => Ok(Vec::new()),
}
}
@@ -104,42 +86,41 @@ impl Storage {
let mut deleted = 0i64;
// First check if key exists and is a hash
let key_type = {
let is_hash = {
let types_table = write_txn.open_table(TYPES_TABLE)?;
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
let result = match types_table.get(key)? {
Some(type_val) => type_val.value() == "hash",
None => false,
};
result
};
match key_type.as_deref() {
Some("hash") => {
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
for field in fields {
if hashes_table.remove((key, field.as_str()))?.is_some() {
deleted += 1;
}
}
// Check if hash is now empty and remove type if so
let mut has_fields = false;
let mut iter = hashes_table.iter()?;
while let Some(entry) = iter.next() {
let entry = entry?;
let (hash_key, _) = entry.0.value();
if hash_key == key {
has_fields = true;
break;
}
}
drop(iter);
if !has_fields {
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
types_table.remove(key)?;
if is_hash {
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
for field in fields {
if hashes_table.remove((key, field.as_str()))?.is_some() {
deleted += 1;
}
}
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => {} // Key does not exist, nothing to delete, return 0 deleted
// Check if hash is now empty and remove type if so
let mut has_fields = false;
let mut iter = hashes_table.iter()?;
while let Some(entry) = iter.next() {
let entry = entry?;
let (hash_key, _) = entry.0.value();
if hash_key == key {
has_fields = true;
break;
}
}
drop(iter);
if !has_fields {
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
types_table.remove(key)?;
}
}
write_txn.commit()?;
@@ -149,31 +130,22 @@ impl Storage {
pub fn hexists(&self, key: &str, field: &str) -> Result<bool, DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
Ok(hashes_table.get((key, field))?.is_some())
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok(false),
_ => Ok(false),
}
}
pub fn hkeys(&self, key: &str) -> Result<Vec<String>, DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
let mut result = Vec::new();
@@ -188,8 +160,7 @@ impl Storage {
Ok(result)
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok(Vec::new()),
_ => Ok(Vec::new()),
}
}
@@ -197,13 +168,9 @@ impl Storage {
pub fn hvals(&self, key: &str) -> Result<Vec<String>, DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
let mut result = Vec::new();
@@ -220,21 +187,16 @@ impl Storage {
Ok(result)
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok(Vec::new()),
_ => Ok(Vec::new()),
}
}
pub fn hlen(&self, key: &str) -> Result<i64, DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
let mut count = 0i64;
@@ -249,8 +211,7 @@ impl Storage {
Ok(count)
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok(0),
_ => Ok(0),
}
}
@@ -258,13 +219,9 @@ impl Storage {
pub fn hmget(&self, key: &str, fields: Vec<String>) -> Result<Vec<Option<String>>, DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
let mut result = Vec::new();
@@ -281,8 +238,7 @@ impl Storage {
Ok(result)
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok(fields.into_iter().map(|_| None).collect()),
_ => Ok(fields.into_iter().map(|_| None).collect()),
}
}
@@ -295,25 +251,15 @@ impl Storage {
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") | None => { // Proceed if hash or new key
// Check if field already exists
if hashes_table.get((key, field))?.is_none() {
// Set the type to hash (only if new key or existing hash)
types_table.insert(key, "hash")?;
// Encrypt the value before storing
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
hashes_table.insert((key, field), encrypted.as_slice())?;
result = true;
}
}
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
// Check if field already exists
if hashes_table.get((key, field))?.is_none() {
// Set the type to hash
types_table.insert(key, "hash")?;
// Encrypt the value before storing
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
hashes_table.insert((key, field), encrypted.as_slice())?;
result = true;
}
}
@@ -325,13 +271,9 @@ impl Storage {
pub fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
let read_txn = self.db.begin_read()?;
let types_table = read_txn.open_table(TYPES_TABLE)?;
let key_type = {
let access_guard = types_table.get(key)?;
access_guard.map(|v| v.value().to_string())
};
match key_type.as_deref() {
Some("hash") => {
match types_table.get(key)? {
Some(type_val) if type_val.value() == "hash" => {
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
let mut result = Vec::new();
let mut current_cursor = 0u64;
@@ -370,8 +312,7 @@ impl Storage {
let next_cursor = if result.len() < limit { 0 } else { current_cursor };
Ok((next_cursor, result))
}
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
None => Ok((0, Vec::new())),
_ => Ok((0, Vec::new())),
}
}
}

View File

@@ -1,6 +1,6 @@
use redb::{ReadableTable};
use crate::error::DBError;
use super::*;
use crate::{Storage, TYPES_TABLE, LISTS_TABLE};
impl Storage {
// ✅ ENCRYPTION APPLIED: Elements are encrypted before storage
@@ -25,7 +25,7 @@ impl Storage {
};
// Add elements to the front (left)
for element in elements.into_iter() {
for element in elements.into_iter().rev() {
list.insert(0, element);
}

View File

@@ -0,0 +1,18 @@
[package]
name = "supervisorrpc"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "supervisorrpc"
path = "src/main.rs"
[dependencies]
# Example dependencies for an RPC server
# axum = "0.7"
# jsonrpsee = { version = "0.22", features = ["server"] }
# openrpc-types = "0.7"
tokio = { workspace = true }
redis = { version = "0.24", features = ["tokio-comp"] }
herocrypto = { path = "../herocrypto" }

View File

@@ -0,0 +1,12 @@
// To be implemented:
// 1. Define an OpenRPC schema for supervisor functions (e.g., server status, key rotation).
// 2. Implement an HTTP/TCP server (e.g., using Axum or jsonrpsee) that serves the schema
// and handles RPC calls.
// 3. Implement support for Unix domain sockets in addition to TCP.
// 4. Use the `herocrypto` or `redis-rs` crate to interact with the main `herodb` instance.
#[tokio::main]
async fn main() {
println!("Supervisor RPC server starting... (not implemented)");
// Server setup code will go here.
}

View File

@@ -1,28 +0,0 @@
[package]
name = "herodb"
version = "0.0.1"
authors = ["Pin Fang <fpfangpin@hotmail.com>"]
edition = "2021"
[dependencies]
anyhow = "1.0.59"
bytes = "1.3.0"
thiserror = "1.0.32"
tokio = { version = "1.23.0", features = ["full"] }
clap = { version = "4.5.20", features = ["derive"] }
byteorder = "1.4.3"
futures = "0.3"
redb = "2.1.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bincode = "1.3.3"
chacha20poly1305 = "0.10.1"
rand = "0.8"
sha2 = "0.10"
age = "0.10"
secrecy = "0.8"
ed25519-dalek = "2"
base64 = "0.22"
[dev-dependencies]
redis = { version = "0.24", features = ["aio", "tokio-comp"] }

View File

View File

@@ -1,9 +0,0 @@
#!/bin/bash
set -euo pipefail
export SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "I am in $SCRIPT_DIR"
cd "$SCRIPT_DIR"
cargo build

View File

@@ -1,99 +0,0 @@
### Cargo.toml
```toml
[dependencies]
chacha20poly1305 = { version = "0.10", features = ["xchacha20"] }
rand = "0.8"
sha2 = "0.10"
```
### `crypto_factory.rs`
```rust
use chacha20poly1305::{
aead::{Aead, KeyInit, OsRng},
XChaCha20Poly1305, Key, XNonce,
};
use rand::RngCore;
use sha2::{Digest, Sha256};
const VERSION: u8 = 1;
const NONCE_LEN: usize = 24;
const TAG_LEN: usize = 16;
#[derive(Debug)]
pub enum CryptoError {
Format, // wrong length / header
Version(u8), // unknown version
Decrypt, // wrong key or corrupted data
}
/// Super-simple factory: new(secret) + encrypt(bytes) + decrypt(bytes)
pub struct CryptoFactory {
key: Key<XChaCha20Poly1305>,
}
impl CryptoFactory {
/// Accepts any secret bytes; turns them into a 32-byte key (SHA-256).
/// (If your secret is already 32 bytes, this is still fine.)
pub fn new<S: AsRef<[u8]>>(secret: S) -> Self {
let mut h = Sha256::new();
h.update(b"xchacha20poly1305-factory:v1"); // domain separation
h.update(secret.as_ref());
let digest = h.finalize(); // 32 bytes
let key = Key::<XChaCha20Poly1305>::from_slice(&digest).to_owned();
Self { key }
}
/// Output layout: [version:1][nonce:24][ciphertext||tag]
pub fn encrypt(&self, plaintext: &[u8]) -> Vec<u8> {
let cipher = XChaCha20Poly1305::new(&self.key);
let mut nonce_bytes = [0u8; NONCE_LEN];
OsRng.fill_bytes(&mut nonce_bytes);
let nonce = XNonce::from_slice(&nonce_bytes);
let mut out = Vec::with_capacity(1 + NONCE_LEN + plaintext.len() + TAG_LEN);
out.push(VERSION);
out.extend_from_slice(&nonce_bytes);
let ct = cipher.encrypt(nonce, plaintext).expect("encrypt");
out.extend_from_slice(&ct);
out
}
pub fn decrypt(&self, blob: &[u8]) -> Result<Vec<u8>, CryptoError> {
if blob.len() < 1 + NONCE_LEN + TAG_LEN {
return Err(CryptoError::Format);
}
let ver = blob[0];
if ver != VERSION {
return Err(CryptoError::Version(ver));
}
let nonce = XNonce::from_slice(&blob[1..1 + NONCE_LEN]);
let ct = &blob[1 + NONCE_LEN..];
let cipher = XChaCha20Poly1305::new(&self.key);
cipher.decrypt(nonce, ct).map_err(|_| CryptoError::Decrypt)
}
}
```
### Tiny usage example
```rust
fn main() {
let f = CryptoFactory::new(b"super-secret-key-material");
let val = b"\x00\xFFbinary\x01\x02\x03";
let blob = f.encrypt(val);
let roundtrip = f.decrypt(&blob).unwrap();
assert_eq!(roundtrip, val);
}
```
That's it: `new(secret)`, `encrypt(bytes)`, `decrypt(bytes)`.
You can stash the returned `blob` directly in your storage layer behind Redis.

View File

@@ -1,80 +0,0 @@
========================
CODE SNIPPETS
========================
TITLE: 1PC+C Commit Strategy Vulnerability Example
DESCRIPTION: Illustrates a scenario where a partially committed transaction might appear complete due to the non-cryptographic checksum (XXH3) used in the 1PC+C commit strategy. This requires controlling page flush order, introducing a crash during fsync, and ensuring valid checksums for partially written data.
SOURCE: https://github.com/cberner/redb/blob/master/docs/design.md#_snippet_9
LANGUAGE: rust
CODE:
```
table.insert(malicious_key, malicious_value);
table.insert(good_key, good_value);
txn.commit();
```
LANGUAGE: rust
CODE:
```
table.insert(malicious_key, malicious_value);
txn.commit();
```
----------------------------------------
TITLE: Basic Key-Value Operations in redb
DESCRIPTION: Demonstrates the fundamental usage of redb for creating a database, opening a table, inserting a key-value pair, and retrieving the value within separate read and write transactions.
SOURCE: https://github.com/cberner/redb/blob/master/README.md#_snippet_0
LANGUAGE: rust
CODE:
```
use redb::{Database, Error, ReadableTable, TableDefinition};
const TABLE: TableDefinition<&str, u64> = TableDefinition::new("my_data");
fn main() -> Result<(), Error> {
let db = Database::create("my_db.redb")?;
let write_txn = db.begin_write()?;
{
let mut table = write_txn.open_table(TABLE)?;
table.insert("my_key", &123)?;
}
write_txn.commit()?;
let read_txn = db.begin_read()?;
let table = read_txn.open_table(TABLE)?;
assert_eq!(table.get("my_key")?.unwrap().value(), 123);
Ok(())
}
```
## What *redb* currently supports:
* Simple operations like creating databases, inserting key-value pairs, opening and reading tables ([GitHub][1]).
* No mention of operations such as:
* Iterating over keys with a given prefix.
* Range queries based on string prefixes.
* Specialized prefix-filtered lookups.
## implement range scans as follows
You can implement prefix-like functionality using **range scans** combined with manual checks, similar to using a `BTreeSet` in Rust:
```rust
for key in table.range(prefix..).keys() {
if !key.starts_with(prefix) {
break;
}
// process key
}
```
This pattern iterates keys starting at the prefix, and stops once a key no longer matches the prefix—this works because the keys are sorted ([GitHub][1]).
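A slightly fuller sketch of the same pattern, assuming redb 2.x with a `&str`-keyed table (the table name, key type, and error handling here are illustrative, not prescribed by redb):
```rust
use redb::{Database, ReadableTable, TableDefinition};

const TABLE: TableDefinition<&str, u64> = TableDefinition::new("my_data");

/// Collect all keys that start with `prefix` using a range scan.
fn keys_with_prefix(db: &Database, prefix: &str) -> Result<Vec<String>, redb::Error> {
    let read_txn = db.begin_read()?;
    let table = read_txn.open_table(TABLE)?;
    let mut out = Vec::new();
    // Start the range at the prefix; keys are sorted, so stop at the first non-match.
    for entry in table.range(prefix..)? {
        let (key, _value) = entry?;
        if !key.value().starts_with(prefix) {
            break;
        }
        out.push(key.value().to_string());
    }
    Ok(out)
}
```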

View File

@@ -1,150 +0,0 @@
]
# INFO
**What it does**
Returns server stats in a human-readable text block, optionally filtered by sections. Typical sections: `server`, `clients`, `memory`, `persistence`, `stats`, `replication`, `cpu`, `commandstats`, `latencystats`, `cluster`, `modules`, `keyspace`, `errorstats`. Special args: `all`, `default`, `everything`. The reply is a **Bulk String** with `# <Section>` headers and `key:value` lines. ([Redis][1])
**Syntax**
```
INFO [section [section ...]]
```
**Return (RESP2/RESP3)**: Bulk String. ([Redis][1])
**RESP request/response**
```
# Request: whole default set
*1\r\n$4\r\nINFO\r\n
# Request: a specific section, e.g., clients
*2\r\n$4\r\nINFO\r\n$7\r\nclients\r\n
# Response (prefix shown; body is long)
$1234\r\n# Server\r\nredis_version:7.4.0\r\n...\r\n# Clients\r\nconnected_clients:3\r\n...\r\n
```
(Reply type/format per RESP spec and the INFO page.) ([Redis][2])
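Because the body is just `key:value` lines under `# Section` headers, a client-side (or test-side) parser can stay tiny; a minimal sketch, with an illustrative function name:
```rust
/// Split an INFO bulk-string body into (key, value) pairs, skipping section headers.
fn parse_info(body: &str) -> Vec<(String, String)> {
    body.lines()
        .map(str::trim)
        .filter(|line| !line.is_empty() && !line.starts_with('#'))
        .filter_map(|line| line.split_once(':'))
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect()
}
```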
---
# Connection “name” (there is **no** top-level `NAME` command)
Redis doesn't have a standalone `NAME` command. Connection names are handled via `CLIENT SETNAME` and retrieved via `CLIENT GETNAME`. ([Redis][3])
## CLIENT SETNAME
Assigns a human label to the current connection (shown in `CLIENT LIST`, logs, etc.). No spaces allowed in the name; empty string clears it. Length is limited by Redis string limits (practically huge). **Reply**: Simple String `OK`. ([Redis][4])
**Syntax**
```
CLIENT SETNAME connection-name
```
**RESP**
```
# Set the name "myapp"
*3\r\n$6\r\nCLIENT\r\n$7\r\nSETNAME\r\n$5\r\nmyapp\r\n
# Reply
+OK\r\n
```
## CLIENT GETNAME
Returns the current connections name or **Null Bulk String** if unset. ([Redis][5])
**Syntax**
```
CLIENT GETNAME
```
**RESP**
```
# Before SETNAME:
*2\r\n$6\r\nCLIENT\r\n$7\r\nGETNAME\r\n
$-1\r\n # nil (no name)
# After SETNAME myapp:
*2\r\n$6\r\nCLIENT\r\n$7\r\nGETNAME\r\n
$5\r\nmyapp\r\n
```
(Null/Bulk String encoding per RESP spec.) ([Redis][2])
---
# CLIENT (container command + key subcommands)
`CLIENT` is a **container**; use subcommands like `CLIENT LIST`, `CLIENT INFO`, `CLIENT ID`, `CLIENT KILL`, `CLIENT TRACKING`, etc. Call `CLIENT HELP` to enumerate them. ([Redis][3])
## CLIENT LIST
Shows all connections as a single **Bulk String**: one line per client with `field=value` pairs (includes `id`, `addr`, `name`, `db`, `user`, `resp`, and more). Filters: `TYPE` and `ID`. **Return**: Bulk String (RESP2/RESP3). ([Redis][6])
**Syntax**
```
CLIENT LIST [TYPE <NORMAL|MASTER|REPLICA|PUBSUB>] [ID client-id ...]
```
**RESP**
```
*2\r\n$6\r\nCLIENT\r\n$4\r\nLIST\r\n
# Reply (single Bulk String; example with one line shown)
$188\r\nid=7 addr=127.0.0.1:60840 laddr=127.0.0.1:6379 fd=8 name=myapp age=12 idle=3 flags=N db=0 ...\r\n
```
## CLIENT INFO
Returns info for **this** connection only (same format/fields as a single line of `CLIENT LIST`). **Return**: Bulk String. Available since 6.2.0. ([Redis][7])
**Syntax**
```
CLIENT INFO
```
**RESP**
```
*2\r\n$6\r\nCLIENT\r\n$4\r\nINFO\r\n
$160\r\nid=7 addr=127.0.0.1:60840 laddr=127.0.0.1:6379 fd=8 name=myapp db=0 user=default resp=2 ...\r\n
```
---
# RESP notes you'll need for your parser
* **Requests** are Arrays: `*N\r\n` followed by `N` Bulk Strings for verb/args.
* **Common replies here**: Simple String (`+OK\r\n`), Bulk String (`$<len>\r\n...\r\n`), and **Null Bulk String** (`$-1\r\n`). (These cover `INFO`, `CLIENT LIST/INFO`, `CLIENT GETNAME`, `CLIENT SETNAME`.) ([Redis][2])
---
## Sources (checked)
* INFO command (syntax, sections, behavior). ([Redis][1])
* RESP spec (request/response framing, Bulk/Null Bulk Strings). ([Redis][2])
* CLIENT container + subcommands index. ([Redis][3])
* CLIENT LIST (fields, bulk-string return, filters). ([Redis][6])
* CLIENT INFO (exists since 6.2, reply format). ([Redis][7])
* CLIENT SETNAME (no spaces; clears with empty string; huge length OK). ([Redis][4])
* CLIENT GETNAME (nil if unset). ([Redis][5])
If you want, I can fold this into a tiny Rust “command + RESP” test harness that exercises `INFO`, `CLIENT SETNAME/GETNAME`, `CLIENT LIST`, and `CLIENT INFO` against your in-mem RESP parser.
[1]: https://redis.io/docs/latest/commands/info/ "INFO | Docs"
[2]: https://redis.io/docs/latest/develop/reference/protocol-spec/?utm_source=chatgpt.com "Redis serialization protocol specification | Docs"
[3]: https://redis.io/docs/latest/commands/client/ "CLIENT | Docs"
[4]: https://redis.io/docs/latest/commands/client-setname/?utm_source=chatgpt.com "CLIENT SETNAME | Docs"
[5]: https://redis.io/docs/latest/commands/client-getname/?utm_source=chatgpt.com "CLIENT GETNAME | Docs"
[6]: https://redis.io/docs/latest/commands/client-list/ "CLIENT LIST | Docs"
[7]: https://redis.io/docs/latest/commands/client-info/?utm_source=chatgpt.com "CLIENT INFO | Docs"

View File

@@ -1,251 +0,0 @@
Got it 👍 — let's break this down properly.
Redis has two broad classes you're asking about:
1. **Basic key-space functions** (SET, GET, DEL, EXISTS, etc.)
2. **Iteration commands** (`SCAN`, `SSCAN`, `HSCAN`, `ZSCAN`)
And for each Ill show:
* What it does
* How it works at a high level
* Its **RESP protocol implementation** (the actual wire format).
---
# 1. Basic Key-Space Commands
### `SET key value`
* Stores a string value at a key.
* Overwrites if the key already exists.
**Protocol (RESP2):**
```
*3
$3
SET
$3
foo
$3
bar
```
(client sends: array of 3 bulk strings: `["SET", "foo", "bar"]`)
**Reply:**
```
+OK
```
---
### `GET key`
* Retrieves the string value stored at the key.
* Returns `nil` if key doesn't exist.
**Protocol:**
```
*2
$3
GET
$3
foo
```
**Reply:**
```
$3
bar
```
(or `$-1` for nil)
---
### `DEL key [key ...]`
* Removes one or more keys.
* Returns number of keys actually removed.
**Protocol:**
```
*2
$3
DEL
$3
foo
```
**Reply:**
```
:1
```
(integer reply = number of deleted keys)
---
### `EXISTS key [key ...]`
* Checks if one or more keys exist.
* Returns count of existing keys.
**Protocol:**
```
*2
$6
EXISTS
$3
foo
```
**Reply:**
```
:1
```
---
### `KEYS pattern`
* Returns all keys matching a glob-style pattern.
⚠️ Not efficient in production (O(N)), better to use `SCAN`.
**Protocol:**
```
*2
$4
KEYS
$1
*
```
**Reply:**
```
*2
$3
foo
$3
bar
```
(array of bulk strings with key names)
---
# 2. Iteration Commands (`SCAN` family)
### `SCAN cursor [MATCH pattern] [COUNT n]`
* Iterates the keyspace incrementally.
* Client keeps sending back the cursor from previous call until it returns `0`.
**Protocol example:**
```
*2
$4
SCAN
$1
0
```
**Reply:**
```
*2
$1
0
*2
$3
foo
$3
bar
```
Explanation:
* First element = new cursor (`"0"` means iteration finished).
* Second element = array of keys returned in this batch.
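As a sketch of the client-side loop, using the `redis` crate (the same crate the test suite uses); the connection setup and the decoding into `(u64, Vec<String>)` are assumptions for illustration, not something SCAN mandates:
```rust
/// Drain the keyspace with repeated SCAN calls until the cursor returns to 0.
fn scan_all_keys(con: &mut redis::Connection) -> redis::RedisResult<Vec<String>> {
    let mut cursor: u64 = 0;
    let mut keys = Vec::new();
    loop {
        // Reply shape: [new-cursor, [key, key, ...]]
        let (next, batch): (u64, Vec<String>) = redis::cmd("SCAN").arg(cursor).query(con)?;
        keys.extend(batch);
        if next == 0 {
            break; // cursor 0 means the iteration is finished
        }
        cursor = next;
    }
    Ok(keys)
}
```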
---
### `HSCAN key cursor [MATCH pattern] [COUNT n]`
* Like `SCAN`, but iterates fields of a hash.
**Protocol:**
```
*3
$5
HSCAN
$3
myh
$1
0
```
**Reply:**
```
*2
$1
0
*4
$5
field
$5
value
$3
age
$2
42
```
(Array of alternating field/value pairs)
---
### `SSCAN key cursor [MATCH pattern] [COUNT n]`
* Iterates members of a set.
Protocol and reply structure same as SCAN.
---
### `ZSCAN key cursor [MATCH pattern] [COUNT n]`
* Iterates members of a sorted set with scores.
* Returns alternating `member`, `score`.
---
# Quick Comparison
| Command | Purpose | Return Type |
| -------- | ----------------------------- | --------------------- |
| `SET` | Store a string value | Simple string `+OK` |
| `GET` | Retrieve a string value | Bulk string / nil |
| `DEL` | Delete keys | Integer (count) |
| `EXISTS` | Check existence | Integer (count) |
| `KEYS` | List all matching keys (slow) | Array of bulk strings |
| `SCAN` | Iterate over keys (safe) | `[cursor, array]` |
| `HSCAN` | Iterate over hash fields | `[cursor, array]` |
| `SSCAN` | Iterate over set members | `[cursor, array]` |
| `ZSCAN` | Iterate over sorted set | `[cursor, array]` |

View File

@@ -1,307 +0,0 @@
# 🔑 Redis `HSET` and Related Hash Commands
## 1. `HSET`
* **Purpose**: Set the value of one or more fields in a hash.
* **Syntax**:
```bash
HSET key field value [field value ...]
```
* **Return**:
* Integer: number of fields that were newly added.
* **RESP Protocol**:
```
*4
$4
HSET
$3
key
$5
field
$5
value
```
(If multiple field-value pairs: `*6`, `*8`, etc.)
---
## 2. `HSETNX`
* **Purpose**: Set the value of a hash field only if it does **not** exist.
* **Syntax**:
```bash
HSETNX key field value
```
* **Return**:
* `1` if field was set.
* `0` if field already exists.
* **RESP Protocol**:
```
*4
$6
HSETNX
$3
key
$5
field
$5
value
```
---
## 3. `HGET`
* **Purpose**: Get the value of a hash field.
* **Syntax**:
```bash
HGET key field
```
* **Return**:
* Bulk string (value) or `nil` if field does not exist.
* **RESP Protocol**:
```
*3
$4
HGET
$3
key
$5
field
```
---
## 4. `HGETALL`
* **Purpose**: Get all fields and values in a hash.
* **Syntax**:
```bash
HGETALL key
```
* **Return**:
* Array of `[field1, value1, field2, value2, ...]`.
* **RESP Protocol**:
```
*2
$7
HGETALL
$3
key
```
---
## 5. `HMSET` (⚠️ Deprecated, use `HSET`)
* **Purpose**: Set multiple field-value pairs.
* **Syntax**:
```bash
HMSET key field value [field value ...]
```
* **Return**:
* Always `OK`.
* **RESP Protocol**:
```
*6
$5
HMSET
$3
key
$5
field
$5
value
$6
field2
$6
value2
```
---
## 6. `HMGET`
* **Purpose**: Get values of multiple fields.
* **Syntax**:
```bash
HMGET key field [field ...]
```
* **Return**:
* Array of values (bulk strings or nils).
* **RESP Protocol**:
```
*4
$5
HMGET
$3
key
$6
field1
$6
field2
```
---
## 7. `HDEL`
* **Purpose**: Delete one or more fields from a hash.
* **Syntax**:
```bash
HDEL key field [field ...]
```
* **Return**:
* Integer: number of fields removed.
* **RESP Protocol**:
```
*3
$4
HDEL
$3
key
$5
field
```
---
## 8. `HEXISTS`
* **Purpose**: Check if a field exists.
* **Syntax**:
```bash
HEXISTS key field
```
* **Return**:
* `1` if exists, `0` if not.
* **RESP Protocol**:
```
*3
$7
HEXISTS
$3
key
$5
field
```
---
## 9. `HKEYS`
* **Purpose**: Get all field names in a hash.
* **Syntax**:
```bash
HKEYS key
```
* **Return**:
* Array of field names.
* **RESP Protocol**:
```
*2
$5
HKEYS
$3
key
```
---
## 10. `HVALS`
* **Purpose**: Get all values in a hash.
* **Syntax**:
```bash
HVALS key
```
* **Return**:
* Array of values.
* **RESP Protocol**:
```
*2
$5
HVALS
$3
key
```
---
## 11. `HLEN`
* **Purpose**: Get number of fields in a hash.
* **Syntax**:
```bash
HLEN key
```
* **Return**:
* Integer: number of fields.
* **RESP Protocol**:
```
*2
$4
HLEN
$3
key
```
---
## 12. `HSCAN`
* **Purpose**: Iterate fields/values of a hash (cursor-based scan).
* **Syntax**:
```bash
HSCAN key cursor [MATCH pattern] [COUNT count]
```
* **Return**:
* Array: `[new-cursor, [field1, value1, ...]]`
* **RESP Protocol**:
```
*3
$5
HSCAN
$3
key
$1
0
```
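Tying a few of these together, a hedged round-trip sketch with the `redis` crate (the key and field names are illustrative):
```rust
use std::collections::HashMap;

fn hash_roundtrip(con: &mut redis::Connection) -> redis::RedisResult<()> {
    // HSET user:1 name alice age 42 -> number of newly added fields
    let added: i64 = redis::cmd("HSET")
        .arg("user:1").arg("name").arg("alice").arg("age").arg("42")
        .query(con)?;
    assert_eq!(added, 2);

    // HGET user:1 name -> Some("alice"); a missing field would come back as None
    let name: Option<String> = redis::cmd("HGET").arg("user:1").arg("name").query(con)?;
    assert_eq!(name.as_deref(), Some("alice"));

    // HGETALL user:1 -> alternating field/value pairs, decoded into a map
    let all: HashMap<String, String> = redis::cmd("HGETALL").arg("user:1").query(con)?;
    assert_eq!(all.len(), 2);
    Ok(())
}
```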

View File

@@ -1,259 +0,0 @@
# 1) Data model & basics
* A **queue** is a List at key `queue:<name>`.
* Common patterns:
* **Producer**: `LPUSH queue item` (or `RPUSH`)
* **Consumer (non-blocking)**: `RPOP queue` (or `LPOP`)
* **Consumer (blocking)**: `BRPOP queue timeout` (or `BLPOP`)
* If a key doesn't exist, it's treated as an **empty list**; push **creates** the list; when the **last element is popped, the key is deleted**. ([Redis][1])
---
# 2) Commands to implement (queues via Lists)
## LPUSH / RPUSH
Prepend/append one or more elements. Create the list if it doesn't exist.
**Return**: Integer = new length of the list.
**Syntax**
```
LPUSH key element [element ...]
RPUSH key element [element ...]
```
**RESP (example)**
```
*3\r\n$5\r\nLPUSH\r\n$5\r\nqueue\r\n$5\r\njob-1\r\n
:1\r\n
```
Refs: semantics & multi-arg ordering. ([Redis][1])
### LPUSHX / RPUSHX (optional but useful)
Like LPUSH/RPUSH, **but only if the list exists**.
**Return**: Integer = new length (0 if key didn't exist).
```
LPUSHX key element [element ...]
RPUSHX key element [element ...]
```
Refs: command index. ([Redis][2])
---
## LPOP / RPOP
Remove & return one (default) or **up to COUNT** elements since Redis 6.2.
If the list is empty or missing, **Null** is returned (Null Bulk or Null Array if COUNT>1).
**Return**:
* No COUNT: Bulk String or Null Bulk.
* With COUNT: Array of Bulk Strings (possibly empty) or Null Array if key missing.
**Syntax**
```
LPOP key [count]
RPOP key [count]
```
**RESP (no COUNT)**
```
*2\r\n$4\r\nRPOP\r\n$5\r\nqueue\r\n
$5\r\njob-1\r\n # or $-1\r\n if empty
```
**RESP (COUNT=2)**
```
*3\r\n$4\r\nLPOP\r\n$5\r\nqueue\r\n$1\r\n2\r\n
*2\r\n$5\r\njob-2\r\n$5\r\njob-3\r\n # or *-1\r\n if key missing
```
Refs: LPOP w/ COUNT; general pop semantics. ([Redis][3])
---
## BLPOP / BRPOP (blocking consumers)
Block until an element is available in any of the given lists or until `timeout` (seconds, **double**, `0` = forever).
**Return** on success: **Array \[key, element]**.
**Return** on timeout: **Null Array**.
**Syntax**
```
BLPOP key [key ...] timeout
BRPOP key [key ...] timeout
```
**RESP**
```
*3\r\n$5\r\nBRPOP\r\n$5\r\nqueue\r\n$1\r\n0\r\n # block forever
# Success reply
*2\r\n$5\r\nqueue\r\n$5\r\njob-4\r\n
# Timeout reply
*-1\r\n
```
**Implementation notes**
* If any listed key is non-empty at call time, reply **immediately** from the first non-empty key **by the command's key order**.
* Otherwise, put the client into a **blocked state** (register per-key waiters). On any `LPUSH/RPUSH` to those keys, **wake the earliest waiter** and serve it atomically.
* If timeout expires, return **Null Array** and clear the blocked state.
Refs: timeout semantics and return shape. ([Redis][4])
---
## LMOVE / BLMOVE (atomic move; replaces RPOPLPUSH/BRPOPLPUSH)
Atomically **pop from one side** of `source` and **push to one side** of `destination`.
* Use for **reliable queues** (move to a *processing* list).
* `BLMOVE` blocks like `BLPOP` when `source` is empty.
**Syntax**
```
LMOVE source destination LEFT|RIGHT LEFT|RIGHT
BLMOVE source destination LEFT|RIGHT LEFT|RIGHT timeout
```
**Return**: Bulk String element moved, or Null if `source` empty (LMOVE); `BLMOVE` blocks/Null on timeout.
**RESP (LMOVE RIGHT->LEFT)**
```
*5\r\n$5\r\nLMOVE\r\n$6\r\nsource\r\n$3\r\ndst\r\n$5\r\nRIGHT\r\n$4\r\nLEFT\r\n
$5\r\njob-5\r\n
```
**Notes**
* Prefer `LMOVE/BLMOVE` over deprecated `RPOPLPUSH/BRPOPLPUSH`.
* Pattern: consumer `LMOVE queue processing RIGHT LEFT` → work → `LREM processing 1 <elem>` to ACK; a reaper can requeue stale items.
Refs: LMOVE/BLMOVE behavior and reliable-queue pattern; deprecation of RPOPLPUSH. ([Redis][5])
*(Compat: you can still implement `RPOPLPUSH source dest` and `BRPOPLPUSH source dest timeout`, but mark them deprecated and map to LMOVE/BLMOVE.)* ([Redis][6])
---
## LLEN (length)
Useful for metrics/backpressure.
```
LLEN key
```
**RESP**
```
*2\r\n$4\r\nLLEN\r\n$5\r\nqueue\r\n
:3\r\n
```
Refs: list overview mentioning LLEN. ([Redis][7])
---
## LREM (ack for “reliable” processing)
Remove occurrences of `element` from the list (head→tail scan).
Use `count=1` to ACK a single processed item from `processing`.
```
LREM key count element
```
**RESP**
```
*4\r\n$4\r\nLREM\r\n$10\r\nprocessing\r\n$1\r\n1\r\n$5\r\njob-5\r\n
:1\r\n
```
Refs: reliable pattern mentions LREM to ACK. ([Redis][5])
---
## LTRIM (bounded queues / retention)
Keep only `[start, stop]` range; everything else is dropped.
Use to cap queue length after pushes.
```
LTRIM key start stop
```
**RESP**
```
*4\r\n$5\r\nLTRIM\r\n$5\r\nqueue\r\n$1\r\n0\r\n$3\r\n999\r\n
+OK\r\n
```
Refs: list overview includes LTRIM for retention. ([Redis][7])
---
## LRANGE / LINDEX (debugging / peeking)
* `LRANGE key start stop` → Array of elements (non-destructive).
* `LINDEX key index` → one element or Null.
These aren't required for queue semantics, but handy. ([Redis][7])
---
# 3) Errors & types
* Wrong type: `-WRONGTYPE Operation against a key holding the wrong kind of value\r\n`
* Non-existing key:
* Push: creates the list (returns new length).
* Pop (non-blocking): returns **Null**.
* Blocking pop: **Null Array** on timeout. ([Redis][1])
---
# 4) Blocking engine (implementation sketch)
1. **Call time**: scan keys in user order. If a non-empty list is found, pop & reply immediately.
2. **Otherwise**: register the client as **blocked** on those keys with `deadline = now + timeout` (or infinite).
3. **On push to any key**: if waiters exist, **wake one** (FIFO) and serve its pop **atomically** with the push result.
4. **On timer**: for each blocked client whose deadline passed, reply `Null Array` and clear state.
5. **Connection close**: remove from any wait queues.
Refs for timeout/block semantics. ([Redis][4])
---
# 5) Reliable queue pattern (recommended)
* **Consume**: `LMOVE queue processing RIGHT LEFT` (or `BLMOVE ... 0`).
* **Process** the job.
* **ACK**: `LREM processing 1 <job>` when done.
* **Reaper**: auxiliary task that detects stale jobs (e.g., track job IDs + timestamps in a ZSET) and requeues them. (Lists don't include timestamps; pairing with a ZSET is standard practice.)
Refs: LMOVE docs pattern. ([Redis][5])
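A hedged consumer-side sketch of this pattern with the `redis` crate (the list names and minimal error handling are illustrative):
```rust
/// Move one job into `processing`, handle it, then ACK it with LREM.
fn consume_one(con: &mut redis::Connection) -> redis::RedisResult<Option<String>> {
    // Atomically pop from the tail of `queue` and push to the head of `processing`.
    let job: Option<String> = redis::cmd("LMOVE")
        .arg("queue").arg("processing").arg("RIGHT").arg("LEFT")
        .query(con)?;
    if let Some(job) = &job {
        // ... process the job here ...
        // ACK: remove exactly one matching entry from the processing list.
        let _removed: i64 = redis::cmd("LREM")
            .arg("processing").arg(1).arg(job)
            .query(con)?;
    }
    Ok(job)
}
```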
---
# 6) Minimal test matrix
* Push/pop happy path (both ends), with/without COUNT.
* Blocking pop: immediate availability, block + timeout, wake on push, multiple keys order, FIFO across multiple waiters.
* LMOVE/BLMOVE: RIGHT→LEFT pipeline, block + wake, cross-list atomicity, ACK via LREM.
* Type errors and key deletion on last pop.
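One concrete instance of this matrix, sketched in the style of the usage-suite helpers that appear later in this changeset (`start_test_server`, `spawn_listener`, `connect`, `send_cmd`; their exact signatures are assumed from that file):
```rust
#[tokio::test]
async fn brpop_wakes_on_push() {
    let (server, port) = start_test_server("brpop_wake").await;
    spawn_listener(server, port).await;

    let mut consumer = connect(port).await;
    let mut producer = connect(port).await;

    // Start a blocking BRPOP with no timeout, then push from a second connection.
    let waiter = tokio::spawn(async move {
        send_cmd(&mut consumer, &["BRPOP", "q", "0"]).await
    });
    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    send_cmd(&mut producer, &["LPUSH", "q", "job-1"]).await;

    // Success reply is an array [key, element].
    let reply = waiter.await.unwrap();
    assert!(reply.contains("job-1"));
}
```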

View File

@@ -1,250 +0,0 @@
use core::str;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::sync::{Mutex, oneshot};
use std::sync::atomic::{AtomicU64, Ordering};
use crate::cmd::Cmd;
use crate::error::DBError;
use crate::options;
use crate::protocol::Protocol;
use crate::storage::Storage;
#[derive(Clone)]
pub struct Server {
pub db_cache: std::sync::Arc<std::sync::RwLock<HashMap<u64, Arc<Storage>>>>,
pub option: options::DBOption,
pub client_name: Option<String>,
pub selected_db: u64, // Changed from usize to u64
pub queued_cmd: Option<Vec<(Cmd, Protocol)>>,
// BLPOP waiter registry: per (db_index, key) FIFO of waiters
pub list_waiters: Arc<Mutex<HashMap<u64, HashMap<String, Vec<Waiter>>>>>,
pub waiter_seq: Arc<AtomicU64>,
}
pub struct Waiter {
pub id: u64,
pub side: PopSide,
pub tx: oneshot::Sender<(String, String)>, // (key, element)
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PopSide {
Left,
Right,
}
impl Server {
pub async fn new(option: options::DBOption) -> Self {
Server {
db_cache: Arc::new(std::sync::RwLock::new(HashMap::new())),
option,
client_name: None,
selected_db: 0,
queued_cmd: None,
list_waiters: Arc::new(Mutex::new(HashMap::new())),
waiter_seq: Arc::new(AtomicU64::new(1)),
}
}
pub fn current_storage(&self) -> Result<Arc<Storage>, DBError> {
let mut cache = self.db_cache.write().unwrap();
if let Some(storage) = cache.get(&self.selected_db) {
return Ok(storage.clone());
}
// Create new database file
let db_file_path = std::path::PathBuf::from(self.option.dir.clone())
.join(format!("{}.db", self.selected_db));
// Ensure the directory exists before creating the database file
if let Some(parent_dir) = db_file_path.parent() {
std::fs::create_dir_all(parent_dir).map_err(|e| {
DBError(format!("Failed to create directory {}: {}", parent_dir.display(), e))
})?;
}
println!("Creating new db file: {}", db_file_path.display());
let storage = Arc::new(Storage::new(
db_file_path,
self.should_encrypt_db(self.selected_db),
self.option.encryption_key.as_deref()
)?);
cache.insert(self.selected_db, storage.clone());
Ok(storage)
}
fn should_encrypt_db(&self, db_index: u64) -> bool {
// DB 0-9 are non-encrypted, DB 10+ are encrypted
self.option.encrypt && db_index >= 10
}
// ----- BLPOP waiter helpers -----
pub async fn register_waiter(&self, db_index: u64, key: &str, side: PopSide) -> (u64, oneshot::Receiver<(String, String)>) {
let id = self.waiter_seq.fetch_add(1, Ordering::Relaxed);
let (tx, rx) = oneshot::channel::<(String, String)>();
let mut guard = self.list_waiters.lock().await;
let per_db = guard.entry(db_index).or_insert_with(HashMap::new);
let q = per_db.entry(key.to_string()).or_insert_with(Vec::new);
q.push(Waiter { id, side, tx });
(id, rx)
}
pub async fn unregister_waiter(&self, db_index: u64, key: &str, id: u64) {
let mut guard = self.list_waiters.lock().await;
if let Some(per_db) = guard.get_mut(&db_index) {
if let Some(q) = per_db.get_mut(key) {
q.retain(|w| w.id != id);
if q.is_empty() {
per_db.remove(key);
}
}
if per_db.is_empty() {
guard.remove(&db_index);
}
}
}
// Called after LPUSH/RPUSH to deliver to blocked BLPOP waiters.
pub async fn drain_waiters_after_push(&self, key: &str) -> Result<(), DBError> {
let db_index = self.selected_db;
loop {
// Check if any waiter exists
let maybe_waiter = {
let mut guard = self.list_waiters.lock().await;
if let Some(per_db) = guard.get_mut(&db_index) {
if let Some(q) = per_db.get_mut(key) {
if !q.is_empty() {
// Pop FIFO
Some(q.remove(0))
} else {
None
}
} else {
None
}
} else {
None
}
};
let waiter = if let Some(w) = maybe_waiter { w } else { break };
// Pop one element depending on waiter side
let elems = match waiter.side {
PopSide::Left => self.current_storage()?.lpop(key, 1)?,
PopSide::Right => self.current_storage()?.rpop(key, 1)?,
};
if elems.is_empty() {
// Nothing to deliver; re-register waiter at the front to preserve order
let mut guard = self.list_waiters.lock().await;
let per_db = guard.entry(db_index).or_insert_with(HashMap::new);
let q = per_db.entry(key.to_string()).or_insert_with(Vec::new);
q.insert(0, waiter);
break;
} else {
let elem = elems[0].clone();
// Send to waiter; if receiver dropped, just continue
let _ = waiter.tx.send((key.to_string(), elem));
// Loop to try to satisfy more waiters if more elements remain
continue;
}
}
Ok(())
}
pub async fn handle(
&mut self,
mut stream: tokio::net::TcpStream,
) -> Result<(), DBError> {
// Accumulate incoming bytes to handle partial RESP frames
let mut acc = String::new();
let mut buf = vec![0u8; 8192];
loop {
let n = match stream.read(&mut buf).await {
Ok(0) => {
println!("[handle] connection closed");
return Ok(());
}
Ok(n) => n,
Err(e) => {
println!("[handle] read error: {:?}", e);
return Err(e.into());
}
};
// Append to accumulator. RESP for our usage is ASCII-safe.
acc.push_str(str::from_utf8(&buf[..n])?);
// Try to parse as many complete commands as are available in 'acc'.
loop {
let parsed = Cmd::from(&acc);
let (cmd, protocol, remaining) = match parsed {
Ok((cmd, protocol, remaining)) => (cmd, protocol, remaining),
Err(_e) => {
// Incomplete or invalid frame; assume incomplete and wait for more data.
// This avoids emitting spurious protocol_error for split frames.
break;
}
};
// Advance the accumulator to the unparsed remainder
acc = remaining.to_string();
if self.option.debug {
println!("\x1b[34;1mgot command: {:?}, protocol: {:?}\x1b[0m", cmd, protocol);
} else {
println!("got command: {:?}, protocol: {:?}", cmd, protocol);
}
// Check if this is a QUIT command before processing
let is_quit = matches!(cmd, Cmd::Quit);
let res = match cmd.run(self).await {
Ok(p) => p,
Err(e) => {
if self.option.debug {
eprintln!("[run error] {:?}", e);
}
Protocol::err(&format!("ERR {}", e.0))
}
};
if self.option.debug {
println!("\x1b[34;1mqueued cmd {:?}\x1b[0m", self.queued_cmd);
println!("\x1b[32;1mgoing to send response {}\x1b[0m", res.encode());
} else {
print!("queued cmd {:?}", self.queued_cmd);
println!("going to send response {}", res.encode());
}
_ = stream.write(res.encode().as_bytes()).await?;
// If this was a QUIT command, close the connection
if is_quit {
println!("[handle] QUIT command received, closing connection");
return Ok(());
}
// Continue parsing any further complete commands already in 'acc'
if acc.is_empty() {
break;
}
}
}
}
}

View File

@@ -1,892 +0,0 @@
use herodb::{options::DBOption, server::Server};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::{sleep, Duration};
// =========================
// Helpers
// =========================
async fn start_test_server(test_name: &str) -> (Server, u16) {
use std::sync::atomic::{AtomicU16, Ordering};
static PORT_COUNTER: AtomicU16 = AtomicU16::new(17100);
let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
let test_dir = format!("/tmp/herodb_usage_suite_{}", test_name);
let _ = std::fs::remove_dir_all(&test_dir);
std::fs::create_dir_all(&test_dir).unwrap();
let option = DBOption {
dir: test_dir,
port,
debug: false,
encrypt: false,
encryption_key: None,
};
let server = Server::new(option).await;
(server, port)
}
async fn spawn_listener(server: Server, port: u16) {
tokio::spawn(async move {
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
.await
.expect("bind listener");
loop {
match listener.accept().await {
Ok((stream, _)) => {
let mut s_clone = server.clone();
tokio::spawn(async move {
let _ = s_clone.handle(stream).await;
});
}
Err(_e) => break,
}
}
});
}
/// Build RESP array for args ["PING"] -> "*1\r\n$4\r\nPING\r\n"
fn build_resp(args: &[&str]) -> String {
let mut s = format!("*{}\r\n", args.len());
for a in args {
s.push_str(&format!("${}\r\n{}\r\n", a.len(), a));
}
s
}
async fn connect(port: u16) -> TcpStream {
let mut attempts = 0;
loop {
match TcpStream::connect(format!("127.0.0.1:{}", port)).await {
Ok(s) => return s,
Err(_) if attempts < 30 => {
attempts += 1;
sleep(Duration::from_millis(100)).await;
}
Err(e) => panic!("Failed to connect: {}", e),
}
}
}
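/// Return the index of the next CRLF ("\r\n") at or after `start`, if any.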
fn find_crlf(buf: &[u8], start: usize) -> Option<usize> {
let mut i = start;
while i + 1 < buf.len() {
if buf[i] == b'\r' && buf[i + 1] == b'\n' {
return Some(i);
}
i += 1;
}
None
}
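/// Parse the ASCII decimal integer stored in `buf[start..end]`.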
fn parse_number_i64(buf: &[u8], start: usize, end: usize) -> Option<i64> {
let s = std::str::from_utf8(&buf[start..end]).ok()?;
s.parse::<i64>().ok()
}
// Return number of bytes that make up a complete RESP element starting at 'i', or None if incomplete.
fn parse_elem(buf: &[u8], i: usize) -> Option<usize> {
if i >= buf.len() {
return None;
}
match buf[i] {
b'+' | b'-' | b':' => {
let end = find_crlf(buf, i + 1)?;
Some(end + 2 - i)
}
b'$' => {
let hdr_end = find_crlf(buf, i + 1)?;
let n = parse_number_i64(buf, i + 1, hdr_end)?;
if n < 0 {
// Null bulk string: only header
Some(hdr_end + 2 - i)
} else {
let need = hdr_end + 2 + (n as usize) + 2;
if need <= buf.len() {
Some(need - i)
} else {
None
}
}
}
b'*' => {
let hdr_end = find_crlf(buf, i + 1)?;
let n = parse_number_i64(buf, i + 1, hdr_end)?;
if n < 0 {
// Null array: only header
Some(hdr_end + 2 - i)
} else {
let mut j = hdr_end + 2;
for _ in 0..(n as usize) {
let consumed = parse_elem(buf, j)?;
j += consumed;
}
Some(j - i)
}
}
_ => None,
}
}
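/// Byte length of the first complete RESP frame in `buf`, or None if more data is needed.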
fn resp_frame_len(buf: &[u8]) -> Option<usize> {
parse_elem(buf, 0)
}
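/// Read from the stream until one complete RESP reply is buffered, then return it as a String.
/// Panics on read errors or if no reply arrives within the timeout.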
async fn read_full_resp(stream: &mut TcpStream) -> String {
let mut buf: Vec<u8> = Vec::with_capacity(8192);
let mut tmp = vec![0u8; 4096];
loop {
if let Some(total) = resp_frame_len(&buf) {
if buf.len() >= total {
return String::from_utf8_lossy(&buf[..total]).to_string();
}
}
match tokio::time::timeout(Duration::from_secs(2), stream.read(&mut tmp)).await {
Ok(Ok(n)) => {
if n == 0 {
if let Some(total) = resp_frame_len(&buf) {
if buf.len() >= total {
return String::from_utf8_lossy(&buf[..total]).to_string();
}
}
return String::from_utf8_lossy(&buf).to_string();
}
buf.extend_from_slice(&tmp[..n]);
}
Ok(Err(e)) => panic!("read error: {}", e),
Err(_) => panic!("timeout waiting for reply"),
}
if buf.len() > 8 * 1024 * 1024 {
panic!("reply too large");
}
}
}
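/// Encode `args` as a RESP array, send it, and return the server's full reply.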
async fn send_cmd(stream: &mut TcpStream, args: &[&str]) -> String {
let req = build_resp(args);
stream.write_all(req.as_bytes()).await.unwrap();
read_full_resp(stream).await
}
// Assert helpers with clearer output
fn assert_contains(haystack: &str, needle: &str, ctx: &str) {
assert!(
haystack.contains(needle),
"ASSERT CONTAINS failed: '{}' not found in response.\nContext: {}\nResponse:\n{}",
needle,
ctx,
haystack
);
}
fn assert_eq_resp(actual: &str, expected: &str, ctx: &str) {
assert!(
actual == expected,
"ASSERT EQUAL failed.\nContext: {}\nExpected:\n{:?}\nActual:\n{:?}",
ctx,
expected,
actual
);
}
/// Extract the payload of a single RESP Bulk String reply.
/// Example input:
/// "$5\r\nhello\r\n" -> Some("hello".to_string())
fn extract_bulk_payload(resp: &str) -> Option<String> {
// find first CRLF after "$len"
let first = resp.find("\r\n")?;
let after = &resp[(first + 2)..];
// find next CRLF ending payload
let second = after.find("\r\n")?;
Some(after[..second].to_string())
}
// =========================
// Test suites
// =========================
#[tokio::test]
async fn test_01_connection_and_info() {
let (server, port) = start_test_server("conn_info").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// redis-cli may send COMMAND DOCS, our server replies empty array; harmless.
let pong = send_cmd(&mut s, &["PING"]).await;
assert_contains(&pong, "PONG", "PING should return PONG");
let echo = send_cmd(&mut s, &["ECHO", "hello"]).await;
assert_contains(&echo, "hello", "ECHO hello");
// INFO (general)
let info = send_cmd(&mut s, &["INFO"]).await;
assert_contains(&info, "redis_version", "INFO should include redis_version");
// INFO REPLICATION (static stub)
let repl = send_cmd(&mut s, &["INFO", "replication"]).await;
assert_contains(&repl, "role:master", "INFO replication role");
// CONFIG GET subset
let cfg = send_cmd(&mut s, &["CONFIG", "GET", "databases"]).await;
assert_contains(&cfg, "databases", "CONFIG GET databases");
assert_contains(&cfg, "16", "CONFIG GET databases value");
// CLIENT name
let setname = send_cmd(&mut s, &["CLIENT", "SETNAME", "myapp"]).await;
assert_contains(&setname, "OK", "CLIENT SETNAME");
let getname = send_cmd(&mut s, &["CLIENT", "GETNAME"]).await;
assert_contains(&getname, "myapp", "CLIENT GETNAME");
// SELECT db
let sel = send_cmd(&mut s, &["SELECT", "0"]).await;
assert_contains(&sel, "OK", "SELECT 0");
// QUIT should close connection after sending OK
let quit = send_cmd(&mut s, &["QUIT"]).await;
assert_contains(&quit, "OK", "QUIT should return OK");
}
#[tokio::test]
async fn test_02_strings_and_expiry() {
let (server, port) = start_test_server("strings").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// SET / GET
let set = send_cmd(&mut s, &["SET", "user:1", "alice"]).await;
assert_contains(&set, "OK", "SET user:1 alice");
let get = send_cmd(&mut s, &["GET", "user:1"]).await;
assert_contains(&get, "alice", "GET user:1");
// EXISTS / DEL
let ex1 = send_cmd(&mut s, &["EXISTS", "user:1"]).await;
assert_contains(&ex1, "1", "EXISTS user:1");
let del = send_cmd(&mut s, &["DEL", "user:1"]).await;
assert_contains(&del, "1", "DEL user:1");
let ex0 = send_cmd(&mut s, &["EXISTS", "user:1"]).await;
assert_contains(&ex0, "0", "EXISTS after DEL");
// INCR behavior
let i1 = send_cmd(&mut s, &["INCR", "count"]).await;
assert_contains(&i1, "1", "INCR new key -> 1");
let i2 = send_cmd(&mut s, &["INCR", "count"]).await;
assert_contains(&i2, "2", "INCR existing -> 2");
let _ = send_cmd(&mut s, &["SET", "notnum", "abc"]).await;
let ierr = send_cmd(&mut s, &["INCR", "notnum"]).await;
assert_contains(&ierr, "ERR", "INCR on non-numeric should ERR");
// Expiration via SET EX
let setex = send_cmd(&mut s, &["SET", "tmp:1", "boom", "EX", "1"]).await;
assert_contains(&setex, "OK", "SET tmp:1 EX 1");
let g_immediate = send_cmd(&mut s, &["GET", "tmp:1"]).await;
assert_contains(&g_immediate, "boom", "GET tmp:1 immediately");
let ttl = send_cmd(&mut s, &["TTL", "tmp:1"]).await;
// Implementation returns a SimpleString, accept any numeric content
assert!(
ttl.contains("1") || ttl.contains("0"),
"TTL should be 1 or 0, got: {}",
ttl
);
sleep(Duration::from_millis(1100)).await;
let g_after = send_cmd(&mut s, &["GET", "tmp:1"]).await;
assert_contains(&g_after, "$-1", "GET tmp:1 after expiry -> Null");
// TYPE
let _ = send_cmd(&mut s, &["SET", "t", "v"]).await;
let ty = send_cmd(&mut s, &["TYPE", "t"]).await;
assert_contains(&ty, "string", "TYPE string key");
let ty_none = send_cmd(&mut s, &["TYPE", "noexist"]).await;
assert_contains(&ty_none, "none", "TYPE nonexistent");
}
#[tokio::test]
async fn test_03_scan_and_keys() {
let (server, port) = start_test_server("scan").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
for i in 0..5 {
let _ = send_cmd(&mut s, &["SET", &format!("key{}", i), &format!("value{}", i)]).await;
}
let scan = send_cmd(&mut s, &["SCAN", "0", "MATCH", "key*", "COUNT", "10"]).await;
assert_contains(&scan, "key0", "SCAN should return keys with MATCH");
assert_contains(&scan, "key4", "SCAN should return last key");
let keys = send_cmd(&mut s, &["KEYS", "*"]).await;
assert_contains(&keys, "key0", "KEYS * includes key0");
assert_contains(&keys, "key4", "KEYS * includes key4");
}
#[tokio::test]
async fn test_04_hashes_suite() {
let (server, port) = start_test_server("hashes").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// HSET (single, returns number of new fields)
let h1 = send_cmd(&mut s, &["HSET", "profile:1", "name", "alice"]).await;
assert_contains(&h1, "1", "HSET new field -> 1");
// HGET
let hg = send_cmd(&mut s, &["HGET", "profile:1", "name"]).await;
assert_contains(&hg, "alice", "HGET existing field");
// HSET multiple
let h2 = send_cmd(&mut s, &["HSET", "profile:1", "age", "30", "city", "paris"]).await;
assert_contains(&h2, "2", "HSET added 2 new fields");
// HMGET
let hmg = send_cmd(&mut s, &["HMGET", "profile:1", "name", "age", "city", "nope"]).await;
assert_contains(&hmg, "alice", "HMGET name");
assert_contains(&hmg, "30", "HMGET age");
assert_contains(&hmg, "paris", "HMGET city");
assert_contains(&hmg, "$-1", "HMGET non-existent -> Null");
// HGETALL
let hga = send_cmd(&mut s, &["HGETALL", "profile:1"]).await;
assert_contains(&hga, "name", "HGETALL contains name");
assert_contains(&hga, "alice", "HGETALL contains alice");
// HLEN
let hlen = send_cmd(&mut s, &["HLEN", "profile:1"]).await;
assert_contains(&hlen, "3", "HLEN is 3");
// HEXISTS
let hex1 = send_cmd(&mut s, &["HEXISTS", "profile:1", "age"]).await;
assert_contains(&hex1, "1", "HEXISTS age true");
let hex0 = send_cmd(&mut s, &["HEXISTS", "profile:1", "nope"]).await;
assert_contains(&hex0, "0", "HEXISTS nope false");
// HKEYS / HVALS
let hkeys = send_cmd(&mut s, &["HKEYS", "profile:1"]).await;
assert_contains(&hkeys, "name", "HKEYS includes name");
let hvals = send_cmd(&mut s, &["HVALS", "profile:1"]).await;
assert_contains(&hvals, "alice", "HVALS includes alice");
// HSETNX
let hnx0 = send_cmd(&mut s, &["HSETNX", "profile:1", "name", "bob"]).await;
assert_contains(&hnx0, "0", "HSETNX existing field -> 0");
let hnx1 = send_cmd(&mut s, &["HSETNX", "profile:1", "nickname", "ali"]).await;
assert_contains(&hnx1, "1", "HSETNX new field -> 1");
// HSCAN
let hscan = send_cmd(&mut s, &["HSCAN", "profile:1", "0", "MATCH", "n*", "COUNT", "10"]).await;
assert_contains(&hscan, "name", "HSCAN matches fields starting with n");
assert_contains(&hscan, "nickname", "HSCAN nickname present");
// HDEL
let hdel = send_cmd(&mut s, &["HDEL", "profile:1", "city", "age"]).await;
assert_contains(&hdel, "2", "HDEL removed two fields");
}
#[tokio::test]
async fn test_05_lists_suite_including_blpop() {
let (server, port) = start_test_server("lists").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut a = connect(port).await;
// LPUSH / RPUSH / LLEN
let lp = send_cmd(&mut a, &["LPUSH", "q:jobs", "a", "b"]).await;
assert_contains(&lp, "2", "LPUSH added 2, length 2");
let rp = send_cmd(&mut a, &["RPUSH", "q:jobs", "c"]).await;
assert_contains(&rp, "3", "RPUSH now length 3");
let llen = send_cmd(&mut a, &["LLEN", "q:jobs"]).await;
assert_contains(&llen, "3", "LLEN 3");
// LINDEX / LRANGE
let lidx = send_cmd(&mut a, &["LINDEX", "q:jobs", "0"]).await;
assert_eq_resp(&lidx, "$1\r\nb\r\n", "LINDEX q:jobs 0 should be b");
let lr = send_cmd(&mut a, &["LRANGE", "q:jobs", "0", "-1"]).await;
assert_eq_resp(&lr, "*3\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n", "LRANGE q:jobs 0 -1 should be [b,a,c]");
// LTRIM
let ltrim = send_cmd(&mut a, &["LTRIM", "q:jobs", "0", "1"]).await;
assert_contains(&ltrim, "OK", "LTRIM OK");
let lr_post = send_cmd(&mut a, &["LRANGE", "q:jobs", "0", "-1"]).await;
assert_eq_resp(&lr_post, "*2\r\n$1\r\nb\r\n$1\r\na\r\n", "After LTRIM, list [b,a]");
// LREM remove first occurrence of b
let lrem = send_cmd(&mut a, &["LREM", "q:jobs", "1", "b"]).await;
assert_contains(&lrem, "1", "LREM removed 1");
// LPOP and RPOP
let lpop1 = send_cmd(&mut a, &["LPOP", "q:jobs"]).await;
assert_contains(&lpop1, "$1\r\na\r\n", "LPOP returns a");
let rpop_empty = send_cmd(&mut a, &["RPOP", "q:jobs"]).await; // empty now
assert_contains(&rpop_empty, "$-1", "RPOP on empty -> Null");
// LPOP with count on empty -> []
let lpop0 = send_cmd(&mut a, &["LPOP", "q:jobs", "2"]).await;
assert_eq_resp(&lpop0, "*0\r\n", "LPOP with count on empty returns empty array");
// BLPOP: block on one client, push from another
let c1 = connect(port).await;
let mut c2 = connect(port).await;
// Start BLPOP on c1
let blpop_task = tokio::spawn(async move {
let mut c1_local = c1;
send_cmd(&mut c1_local, &["BLPOP", "q:block", "5"]).await
});
// Give it time to register waiter
sleep(Duration::from_millis(150)).await;
// Push from c2 to wake BLPOP
let _ = send_cmd(&mut c2, &["LPUSH", "q:block", "x"]).await;
// Await BLPOP result
let blpop_res = blpop_task.await.expect("BLPOP task join");
assert_contains(&blpop_res, "q:block", "BLPOP returned key");
assert_contains(&blpop_res, "x", "BLPOP returned element");
}
#[tokio::test]
async fn test_06_flushdb_suite() {
let (server, port) = start_test_server("flushdb").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
let _ = send_cmd(&mut s, &["SET", "k1", "v1"]).await;
let _ = send_cmd(&mut s, &["HSET", "h1", "f", "v"]).await;
let _ = send_cmd(&mut s, &["LPUSH", "l1", "a"]).await;
let keys_before = send_cmd(&mut s, &["KEYS", "*"]).await;
assert_contains(&keys_before, "k1", "have string key before FLUSHDB");
assert_contains(&keys_before, "h1", "have hash key before FLUSHDB");
assert_contains(&keys_before, "l1", "have list key before FLUSHDB");
let fl = send_cmd(&mut s, &["FLUSHDB"]).await;
assert_contains(&fl, "OK", "FLUSHDB OK");
let keys_after = send_cmd(&mut s, &["KEYS", "*"]).await;
assert_eq_resp(&keys_after, "*0\r\n", "DB should be empty after FLUSHDB");
}
#[tokio::test]
async fn test_07_age_stateless_suite() {
let (server, port) = start_test_server("age_stateless").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// GENENC -> [recipient, identity]
let gen = send_cmd(&mut s, &["AGE", "GENENC"]).await;
assert!(
gen.starts_with("*2\r\n$"),
"AGE GENENC should return array [recipient, identity], got:\n{}",
gen
);
// Parse simple RESP array of two bulk strings to extract keys
fn parse_two_bulk_array(resp: &str) -> (String, String) {
// naive parse for tests
let mut lines = resp.lines();
let _ = lines.next(); // *2
// $len
let _ = lines.next();
let recip = lines.next().unwrap_or("").to_string();
let _ = lines.next();
let ident = lines.next().unwrap_or("").to_string();
(recip, ident)
}
let (recipient, identity) = parse_two_bulk_array(&gen);
assert!(
recipient.starts_with("age1") && identity.starts_with("AGE-SECRET-KEY-1"),
"Unexpected AGE key formats.\nrecipient: {}\nidentity: {}",
recipient,
identity
);
// ENCRYPT / DECRYPT
let ct = send_cmd(&mut s, &["AGE", "ENCRYPT", &recipient, "hello world"]).await;
let ct_b64 = extract_bulk_payload(&ct).expect("Failed to parse bulk payload from ENCRYPT");
let pt = send_cmd(&mut s, &["AGE", "DECRYPT", &identity, &ct_b64]).await;
assert_contains(&pt, "hello world", "AGE DECRYPT round-trip");
// GENSIGN -> [verify_pub_b64, sign_secret_b64]
let gensign = send_cmd(&mut s, &["AGE", "GENSIGN"]).await;
let (verify_pub, sign_secret) = parse_two_bulk_array(&gensign);
assert!(
!verify_pub.is_empty() && !sign_secret.is_empty(),
"GENSIGN returned empty keys"
);
// SIGN / VERIFY
let sig = send_cmd(&mut s, &["AGE", "SIGN", &sign_secret, "msg"]).await;
let sig_b64 = extract_bulk_payload(&sig).expect("Failed to parse bulk payload from SIGN");
let v_ok = send_cmd(&mut s, &["AGE", "VERIFY", &verify_pub, "msg", &sig_b64]).await;
assert_contains(&v_ok, "1", "VERIFY should be 1 for valid signature");
let v_bad = send_cmd(&mut s, &["AGE", "VERIFY", &verify_pub, "tampered", &sig_b64]).await;
assert_contains(&v_bad, "0", "VERIFY should be 0 for invalid message/signature");
}
#[tokio::test]
async fn test_08_age_persistent_named_suite() {
let (server, port) = start_test_server("age_persistent").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// KEYGEN + ENCRYPTNAME/DECRYPTNAME
let kg = send_cmd(&mut s, &["AGE", "KEYGEN", "app1"]).await;
assert!(
kg.starts_with("*2\r\n"),
"AGE KEYGEN should return [recipient, identity], got:\n{}",
kg
);
let ct = send_cmd(&mut s, &["AGE", "ENCRYPTNAME", "app1", "hello"]).await;
let ct_b64 = extract_bulk_payload(&ct).expect("Failed to parse bulk payload from ENCRYPTNAME");
let pt = send_cmd(&mut s, &["AGE", "DECRYPTNAME", "app1", &ct_b64]).await;
assert_contains(&pt, "hello", "DECRYPTNAME round-trip");
// SIGNKEYGEN + SIGNNAME/VERIFYNAME
let skg = send_cmd(&mut s, &["AGE", "SIGNKEYGEN", "app1"]).await;
assert!(
skg.starts_with("*2\r\n"),
"AGE SIGNKEYGEN should return [verify_pub, sign_secret], got:\n{}",
skg
);
let sig = send_cmd(&mut s, &["AGE", "SIGNNAME", "app1", "m"] ).await;
let sig_b64 = extract_bulk_payload(&sig).expect("Failed to parse bulk payload from SIGNNAME");
let v1 = send_cmd(&mut s, &["AGE", "VERIFYNAME", "app1", "m", &sig_b64]).await;
assert_contains(&v1, "1", "VERIFYNAME valid => 1");
let v0 = send_cmd(&mut s, &["AGE", "VERIFYNAME", "app1", "bad", &sig_b64]).await;
assert_contains(&v0, "0", "VERIFYNAME invalid => 0");
// AGE LIST
let lst = send_cmd(&mut s, &["AGE", "LIST"]).await;
assert_contains(&lst, "encpub", "AGE LIST label encpub");
assert_contains(&lst, "app1", "AGE LIST includes app1");
}
#[tokio::test]
async fn test_10_expire_pexpire_persist() {
let (server, port) = start_test_server("expire_suite").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// EXPIRE: seconds
let _ = send_cmd(&mut s, &["SET", "exp:s", "v"]).await;
let ex = send_cmd(&mut s, &["EXPIRE", "exp:s", "1"]).await;
assert_contains(&ex, "1", "EXPIRE exp:s 1 -> 1 (applied)");
let ttl1 = send_cmd(&mut s, &["TTL", "exp:s"]).await;
assert!(
ttl1.contains("1") || ttl1.contains("0"),
"TTL exp:s should be 1 or 0, got: {}",
ttl1
);
sleep(Duration::from_millis(1100)).await;
let get_after = send_cmd(&mut s, &["GET", "exp:s"]).await;
assert_contains(&get_after, "$-1", "GET after expiry should be Null");
let ttl_after = send_cmd(&mut s, &["TTL", "exp:s"]).await;
assert_contains(&ttl_after, "-2", "TTL after expiry -> -2");
let exists_after = send_cmd(&mut s, &["EXISTS", "exp:s"]).await;
assert_contains(&exists_after, "0", "EXISTS after expiry -> 0");
// PEXPIRE: milliseconds
let _ = send_cmd(&mut s, &["SET", "exp:ms", "v"]).await;
let pex = send_cmd(&mut s, &["PEXPIRE", "exp:ms", "1500"]).await;
assert_contains(&pex, "1", "PEXPIRE exp:ms 1500 -> 1 (applied)");
let ttl_ms1 = send_cmd(&mut s, &["TTL", "exp:ms"]).await;
assert!(
ttl_ms1.contains("1") || ttl_ms1.contains("0"),
"TTL exp:ms should be 1 or 0 soon after PEXPIRE, got: {}",
ttl_ms1
);
sleep(Duration::from_millis(1600)).await;
let exists_ms_after = send_cmd(&mut s, &["EXISTS", "exp:ms"]).await;
assert_contains(&exists_ms_after, "0", "EXISTS exp:ms after ms expiry -> 0");
// PERSIST: remove expiration
let _ = send_cmd(&mut s, &["SET", "exp:persist", "v"]).await;
let _ = send_cmd(&mut s, &["EXPIRE", "exp:persist", "5"]).await;
let ttl_pre = send_cmd(&mut s, &["TTL", "exp:persist"]).await;
assert!(
ttl_pre.contains("5") || ttl_pre.contains("4") || ttl_pre.contains("3") || ttl_pre.contains("2") || ttl_pre.contains("1") || ttl_pre.contains("0"),
"TTL exp:persist should be >=0 before persist, got: {}",
ttl_pre
);
let persist1 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await;
assert_contains(&persist1, "1", "PERSIST should remove expiration");
let ttl_post = send_cmd(&mut s, &["TTL", "exp:persist"]).await;
assert_contains(&ttl_post, "-1", "TTL after PERSIST -> -1 (no expiration)");
// Second persist should return 0 (nothing to remove)
let persist2 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await;
assert_contains(&persist2, "0", "PERSIST again -> 0 (no expiration to remove)");
}
#[tokio::test]
async fn test_11_set_with_options() {
let (server, port) = start_test_server("set_opts").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// SET with GET on non-existing key -> returns Null, sets value
let set_get1 = send_cmd(&mut s, &["SET", "s1", "v1", "GET"]).await;
assert_contains(&set_get1, "$-1", "SET s1 v1 GET returns Null when key didn't exist");
let g1 = send_cmd(&mut s, &["GET", "s1"]).await;
assert_contains(&g1, "v1", "GET s1 after first SET");
// SET with GET should return old value, then set to new
let set_get2 = send_cmd(&mut s, &["SET", "s1", "v2", "GET"]).await;
assert_contains(&set_get2, "v1", "SET s1 v2 GET returns previous value v1");
let g2 = send_cmd(&mut s, &["GET", "s1"]).await;
assert_contains(&g2, "v2", "GET s1 now v2");
// NX prevents update when key exists; with GET should return Null and not change
let set_nx = send_cmd(&mut s, &["SET", "s1", "v3", "NX", "GET"]).await;
assert_contains(&set_nx, "$-1", "SET s1 v3 NX GET returns Null when not set");
let g3 = send_cmd(&mut s, &["GET", "s1"]).await;
assert_contains(&g3, "v2", "GET s1 remains v2 after NX prevented write");
// NX allows set when key does not exist
let set_nx2 = send_cmd(&mut s, &["SET", "s2", "v10", "NX"]).await;
assert_contains(&set_nx2, "OK", "SET s2 v10 NX -> OK for new key");
let g4 = send_cmd(&mut s, &["GET", "s2"]).await;
assert_contains(&g4, "v10", "GET s2 is v10");
// XX requires existing key; with GET returns old value and sets new
let set_xx = send_cmd(&mut s, &["SET", "s2", "v11", "XX", "GET"]).await;
assert_contains(&set_xx, "v10", "SET s2 v11 XX GET returns previous v10");
let g5 = send_cmd(&mut s, &["GET", "s2"]).await;
assert_contains(&g5, "v11", "GET s2 is now v11");
// PX expiration path via SET options
let set_px = send_cmd(&mut s, &["SET", "s3", "vpx", "PX", "500"]).await;
assert_contains(&set_px, "OK", "SET s3 vpx PX 500 -> OK");
let ttl_px1 = send_cmd(&mut s, &["TTL", "s3"]).await;
assert!(
ttl_px1.contains("0") || ttl_px1.contains("1"),
"TTL s3 immediately after PX should be 1 or 0, got: {}",
ttl_px1
);
sleep(Duration::from_millis(650)).await;
let g6 = send_cmd(&mut s, &["GET", "s3"]).await;
assert_contains(&g6, "$-1", "GET s3 after PX expiry -> Null");
}
#[tokio::test]
async fn test_09_mget_mset_and_variadic_exists_del() {
let (server, port) = start_test_server("mget_mset_variadic").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// MSET multiple keys
let mset = send_cmd(&mut s, &["MSET", "k1", "v1", "k2", "v2", "k3", "v3"]).await;
assert_contains(&mset, "OK", "MSET k1 v1 k2 v2 k3 v3 -> OK");
// MGET should return values and Null for missing
let mget = send_cmd(&mut s, &["MGET", "k1", "k2", "nope", "k3"]).await;
// Expect an array with 4 entries; verify payloads
assert_contains(&mget, "v1", "MGET k1");
assert_contains(&mget, "v2", "MGET k2");
assert_contains(&mget, "v3", "MGET k3");
assert_contains(&mget, "$-1", "MGET missing returns Null");
// EXISTS variadic: count how many exist
let exists_multi = send_cmd(&mut s, &["EXISTS", "k1", "nope", "k3"]).await;
// Server returns SimpleString numeric, e.g. +2
assert_contains(&exists_multi, "2", "EXISTS k1 nope k3 -> 2");
// DEL variadic: delete multiple keys, return count deleted
let del_multi = send_cmd(&mut s, &["DEL", "k1", "k3", "nope"]).await;
assert_contains(&del_multi, "2", "DEL k1 k3 nope -> 2");
// Verify deletion
let exists_after = send_cmd(&mut s, &["EXISTS", "k1", "k3"]).await;
assert_contains(&exists_after, "0", "EXISTS k1 k3 after DEL -> 0");
// MGET after deletion should include Nulls for deleted keys
let mget_after = send_cmd(&mut s, &["MGET", "k1", "k2", "k3"]).await;
assert_contains(&mget_after, "$-1", "MGET k1 after DEL -> Null");
assert_contains(&mget_after, "v2", "MGET k2 remains");
assert_contains(&mget_after, "$-1", "MGET k3 after DEL -> Null");
}
#[tokio::test]
async fn test_12_hash_incr() {
let (server, port) = start_test_server("hash_incr").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// Integer increments
let _ = send_cmd(&mut s, &["HSET", "hinc", "a", "1"]).await;
let r1 = send_cmd(&mut s, &["HINCRBY", "hinc", "a", "2"]).await;
assert_contains(&r1, "3", "HINCRBY hinc a 2 -&gt; 3");
let r2 = send_cmd(&mut s, &["HINCRBY", "hinc", "a", "-1"]).await;
assert_contains(&r2, "2", "HINCRBY hinc a -1 -&gt; 2");
let r3 = send_cmd(&mut s, &["HINCRBY", "hinc", "b", "5"]).await;
assert_contains(&r3, "5", "HINCRBY hinc b 5 -&gt; 5");
// HINCRBY error on non-integer field
let _ = send_cmd(&mut s, &["HSET", "hinc", "s", "x"]).await;
let r_err = send_cmd(&mut s, &["HINCRBY", "hinc", "s", "1"]).await;
assert_contains(&r_err, "ERR", "HINCRBY on non-integer field should ERR");
// Float increments
let r4 = send_cmd(&mut s, &["HINCRBYFLOAT", "hinc", "f", "1.5"]).await;
assert_contains(&r4, "1.5", "HINCRBYFLOAT hinc f 1.5 -&gt; 1.5");
let r5 = send_cmd(&mut s, &["HINCRBYFLOAT", "hinc", "f", "2.5"]).await;
// Could be "4", "4.0", or "4.000000", accept "4" substring
assert_contains(&r5, "4", "HINCRBYFLOAT hinc f 2.5 -&gt; 4");
// HINCRBYFLOAT error on non-float field
let _ = send_cmd(&mut s, &["HSET", "hinc", "notf", "abc"]).await;
let r6 = send_cmd(&mut s, &["HINCRBYFLOAT", "hinc", "notf", "1"]).await;
assert_contains(&r6, "ERR", "HINCRBYFLOAT on non-float field should ERR");
}
#[tokio::test]
async fn test_05b_brpop_suite() {
let (server, port) = start_test_server("lists_brpop").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut a = connect(port).await;
// RPUSH some initial data, BRPOP should take from the right
let _ = send_cmd(&mut a, &["RPUSH", "q:rjobs", "1", "2"]).await;
let br_nonblock = send_cmd(&mut a, &["BRPOP", "q:rjobs", "0"]).await;
// Should pop the rightmost element "2"
assert_contains(&br_nonblock, "q:rjobs", "BRPOP returns key");
assert_contains(&br_nonblock, "2", "BRPOP returns rightmost element");
// Now test blocking BRPOP: start blocked client, then RPUSH from another client
let c1 = connect(port).await;
let mut c2 = connect(port).await;
// Start BRPOP on c1
let brpop_task = tokio::spawn(async move {
let mut c1_local = c1;
send_cmd(&mut c1_local, &["BRPOP", "q:blockr", "5"]).await
});
// Give it time to register waiter
sleep(Duration::from_millis(150)).await;
// Push from right to wake BRPOP
let _ = send_cmd(&mut c2, &["RPUSH", "q:blockr", "X"]).await;
// Await BRPOP result
let brpop_res = brpop_task.await.expect("BRPOP task join");
assert_contains(&brpop_res, "q:blockr", "BRPOP returned key");
assert_contains(&brpop_res, "X", "BRPOP returned element");
}
#[tokio::test]
async fn test_13_dbsize() {
let (server, port) = start_test_server("dbsize").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// Initially empty
let n0 = send_cmd(&mut s, &["DBSIZE"]).await;
assert_contains(&n0, "0", "DBSIZE initial should be 0");
// Add a string, a hash, and a list -> dbsize = 3
let _ = send_cmd(&mut s, &["SET", "s", "v"]).await;
let _ = send_cmd(&mut s, &["HSET", "h", "f", "v"]).await;
let _ = send_cmd(&mut s, &["LPUSH", "l", "a", "b"]).await;
let n3 = send_cmd(&mut s, &["DBSIZE"]).await;
assert_contains(&n3, "3", "DBSIZE after adding s,h,l should be 3");
// Expire the string and wait, dbsize should drop to 2
let _ = send_cmd(&mut s, &["PEXPIRE", "s", "400"]).await;
sleep(Duration::from_millis(500)).await;
let n2 = send_cmd(&mut s, &["DBSIZE"]).await;
assert_contains(&n2, "2", "DBSIZE after string expiry should be 2");
// Delete remaining keys and confirm 0
let _ = send_cmd(&mut s, &["DEL", "h"]).await;
let _ = send_cmd(&mut s, &["DEL", "l"]).await;
let n_final = send_cmd(&mut s, &["DBSIZE"]).await;
assert_contains(&n_final, "0", "DBSIZE after deleting all keys should be 0");
}
#[tokio::test]
async fn test_14_expireat_pexpireat() {
use std::time::{SystemTime, UNIX_EPOCH};
let (server, port) = start_test_server("expireat_suite").await;
spawn_listener(server, port).await;
sleep(Duration::from_millis(150)).await;
let mut s = connect(port).await;
// EXPIREAT: seconds since epoch
let now_secs = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as i64;
let _ = send_cmd(&mut s, &["SET", "exp:at:s", "v"]).await;
let exat = send_cmd(&mut s, &["EXPIREAT", "exp:at:s", &format!("{}", now_secs + 1)]).await;
assert_contains(&exat, "1", "EXPIREAT exp:at:s now+1s -> 1 (applied)");
let ttl1 = send_cmd(&mut s, &["TTL", "exp:at:s"]).await;
assert!(
ttl1.contains("1") || ttl1.contains("0"),
"TTL exp:at:s should be 1 or 0 shortly after EXPIREAT, got: {}",
ttl1
);
sleep(Duration::from_millis(1200)).await;
let exists_after_exat = send_cmd(&mut s, &["EXISTS", "exp:at:s"]).await;
assert_contains(&exists_after_exat, "0", "EXISTS exp:at:s after EXPIREAT expiry -> 0");
// PEXPIREAT: milliseconds since epoch
let now_ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as i64;
let _ = send_cmd(&mut s, &["SET", "exp:at:ms", "v"]).await;
let pexat = send_cmd(&mut s, &["PEXPIREAT", "exp:at:ms", &format!("{}", now_ms + 450)]).await;
assert_contains(&pexat, "1", "PEXPIREAT exp:at:ms now+450ms -> 1 (applied)");
let ttl2 = send_cmd(&mut s, &["TTL", "exp:at:ms"]).await;
assert!(
ttl2.contains("0") || ttl2.contains("1"),
"TTL exp:at:ms should be 0..1 soon after PEXPIREAT, got: {}",
ttl2
);
sleep(Duration::from_millis(600)).await;
let exists_after_pexat = send_cmd(&mut s, &["EXISTS", "exp:at:ms"]).await;
assert_contains(&exists_after_pexat, "0", "EXISTS exp:at:ms after PEXPIREAT expiry -> 0");
}

View File

@@ -12,6 +12,7 @@ echo ""
echo "2⃣ Running Comprehensive Redis Integration Tests (13 tests)..."
echo "----------------------------------------------------------------"
cargo test -p herodb --test redis_integration_tests -- --nocapture
cargo test -p herodb --test redis_basic_client -- --nocapture
cargo test -p herodb --test debug_hset -- --nocapture
cargo test -p herodb --test debug_hset_simple -- --nocapture