Compare commits: d3e28cafe4 ... main (35 commits)

Commits (SHA1):
58cb1e8d5e, d3d92819cf, 4fd48f8b0d, 4bedf71c2d, b9987a027b, 3b9756a4e1, f22a25f5a1,
892e6e2b90, b9a9f3e6d6, 463000c8f7, a92c90e9cb, 34808fc1c9, b644bf873f, a306544a34,
afa1033cd6, 9177fa4091, 51ab90c4ad, 30a09e6d53, 542996a0ff, 63ab39b4b1, ee94d731d7,
c7945624bd, f8dd304820, 5eab3b080c, 246304b9fa, 074be114c3, e51af83e45, dbd0635cd9,
0000d82799, 5502ff4bc5, 0511dddd99, bec9b20ec7, ad255a9f51, 7bcb673361, 0f6e595000
Cargo.lock (generated, 1693 lines): file diff suppressed because it is too large
Cargo.toml (25 lines)
@@ -1,18 +1,9 @@
[package]
name = "redis-rs"
version = "0.0.1"
authors = ["Pin Fang <fpfangpin@hotmail.com>"]
edition = "2021"

[dependencies]
anyhow = "1.0.59"
bytes = "1.3.0"
thiserror = "1.0.32"
tokio = { version = "1.23.0", features = ["full"] }
clap = { version = "4.5.20", features = ["derive"] }
byteorder = "1.4.3"
futures = "0.3"
redb = "2.1.3"
serde = { version = "1.0", features = ["derive"] }
bincode = "1.3.3"
[workspace]
members = ["herodb"]
resolver = "2"

# You can define shared profiles for all workspace members here
[profile.release]
lto = true
codegen-units = 1
strip = true
README.md (267 lines)
@@ -1,267 +0,0 @@
|
||||
# Build Your Own Redis in Rust
|
||||
|
||||
This project builds a toy Redis server clone that can parse the Redis protocol, handle basic Redis commands, parse and initialize its state from an RDB file,
and support leader-follower replication, Redis streams (queues), and batched commands in transactions.
|
||||
|
||||
You can find all the source code and commit history in [my github repo](https://github.com/fangpin/redis-rs).
|
||||
|
||||
## Main features
|
||||
+ Parse Redis protocol
|
||||
+ Handle basic Redis commands
|
||||
+ Parse and initialize Redis from RDB file
|
||||
+ Leader-follower Replication
|
||||
|
||||
## Prerequisites
|
||||
Install `redis-cli` first (a Redis client implementation, used here for testing):
|
||||
```sh
|
||||
cargo install mini-redis
|
||||
```
|
||||
|
||||
Learn about:
|
||||
- [Redis protocol](https://redis.io/docs/latest/develop/reference/protocol-spec) (see the framing sketch just after this list)
|
||||
- [RDB file format](https://rdb.fnordig.de/file_format.html)
|
||||
- [Redis replication](https://redis.io/docs/management/replication/)
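
To make the protocol concrete: a command such as `SET foo bar` travels over the wire as a RESP array of bulk strings. Below is a minimal, illustrative encoder for that framing (a sketch only, not this project's actual parser):

```rust
/// Encode a command as a RESP array of bulk strings.
/// Illustrative only; a real server also needs a parser for incoming frames.
fn encode_resp_array(parts: &[&str]) -> String {
    let mut out = format!("*{}\r\n", parts.len());
    for p in parts {
        out.push_str(&format!("${}\r\n{}\r\n", p.len(), p));
    }
    out
}

fn main() {
    let wire = encode_resp_array(&["SET", "foo", "bar"]);
    // prints: *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n
    println!("{}", wire.escape_debug());
}
```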
|
||||
|
||||
## Start the Redis-rs server
|
||||
```sh
|
||||
# start as master
|
||||
cargo run -- --dir /some/db/path --dbfilename dump.rdb
|
||||
# start as slave
|
||||
cargo run -- --dir /some/db/path --dbfilename dump.rdb --port 6380 --replicaof "localhost 6379"
|
||||
```
|
||||
|
||||
|
||||
## Supported Commands
|
||||
```sh
|
||||
# basic commands
|
||||
redis-cli PING
|
||||
redis-cli ECHO hey
|
||||
redis-cli SET foo bar
|
||||
redis-cli SET foo bar px/ex 100
|
||||
redis-cli GET foo
|
||||
redis-cli SET foo 2
|
||||
redis-cli INCR foo
|
||||
redis-cli INCR missing_key
|
||||
redis-cli TYPE some_key
|
||||
redis-cli KEYS "*"
|
||||
|
||||
# leader-follower replication related commands
|
||||
redis-cli CONFIG GET dbfilename
|
||||
redis-cli INFO replication
|
||||
|
||||
# streams related commands
|
||||
redis-cli XADD stream_key 1526919030474-0 temperature 36 humidity 95
|
||||
redis-cli XADD stream_key 1526919030474-* temperature 37 humidity 94
|
||||
redis-cli XADD stream_key "*" foo bar
|
||||
## read stream
|
||||
redis-cli XRANGE stream_key 0-2 0-3
|
||||
## query with + -
|
||||
redis-cli XRANGE some_key - 1526985054079
|
||||
## query single stream using xread
|
||||
redis-cli XREAD streams some_key 1526985054069-0
|
||||
## query multiple stream using xread
|
||||
redis-cli XREAD streams stream_key other_stream_key 0-0 0-1
|
||||
## blocking reads without timeout
|
||||
redis-cli XREAD block 0 streams some_key 1526985054069-0
|
||||
|
||||
|
||||
# transactions related commands
|
||||
## start a transaction and exec all queued commands in a transaction
|
||||
redis-cli
|
||||
> MULTI
|
||||
> set foo 1
|
||||
> incr foo
|
||||
> exec
|
||||
## start a transaction, queue commands, then cancel the transaction
|
||||
redis-cli
|
||||
> MULTI
|
||||
> set foo 1
|
||||
> incr foo
|
||||
> discard
|
||||
|
||||
```
|
||||
|
||||
## RDB Persistence
|
||||
Get Redis-rs server config
|
||||
```sh
|
||||
redis-cli CONFIG GET dbfilename
|
||||
```
|
||||
### RDB file format overview
|
||||
Here are the different sections of the [RDB file](https://rdb.fnordig.de/file_format.html), in order:
|
||||
+ Header section
|
||||
+ Metadata section
|
||||
+ Database section
|
||||
+ End of file section
|
||||
#### Header section
|
||||
Starts with a magic string and version number:
|
||||
```sh
|
||||
52 45 44 49 53 30 30 31 31 // Magic string + version number (ASCII): "REDIS0011".
|
||||
```
|
||||
#### Metadata section
|
||||
contains zero or more "metadata subsections", which each specify a single metadata attribute
|
||||
e.g.
|
||||
```sh
|
||||
FA // Indicates the start of a metadata subsection.
|
||||
09 72 65 64 69 73 2D 76 65 72 // The name of the metadata attribute (string encoded): "redis-ver".
|
||||
06 36 2E 30 2E 31 36 // The value of the metadata attribute (string encoded): "6.0.16".
|
||||
```
|
||||
#### Database section
|
||||
contains zero or more "database subsections," which each describe a single database.
|
||||
e.g.
|
||||
```sh
|
||||
FE // Indicates the start of a database subsection.
|
||||
00 /* The index of the database (size encoded). Here, the index is 0. */
|
||||
|
||||
FB // Indicates that hash table size information follows.
|
||||
03 /* The size of the hash table that stores the keys and values (size encoded). Here, the total key-value hash table size is 3. */
|
||||
02 /* The size of the hash table that stores the expires of the keys (size encoded). Here, the number of keys with an expiry is 2. */
|
||||
```
|
||||
|
||||
```sh
|
||||
00 /* The 1-byte flag that specifies the value’s type and encoding. Here, the flag is 0, which means "string." */
|
||||
06 66 6F 6F 62 61 72 // The name of the key (string encoded). Here, it's "foobar".
|
||||
06 62 61 7A 71 75 78 // The value (string encoded). Here, it's "bazqux".
|
||||
```
|
||||
|
||||
```sh
|
||||
FC /* Indicates that this key ("foo") has an expire, and that the expire timestamp is expressed in milliseconds. */
|
||||
15 72 E7 07 8F 01 00 00 /* The expire timestamp, expressed in Unix time, stored as an 8-byte unsigned long, in little-endian (read right-to-left). Here, the expire timestamp is 1713824559637. */
|
||||
00 // Value type is string.
|
||||
03 66 6F 6F // Key name is "foo".
|
||||
03 62 61 72 // Value is "bar".
|
||||
```
|
||||
|
||||
```sh
|
||||
FD /* Indicates that this key ("baz") has an expire, and that the expire timestamp is expressed in seconds. */
|
||||
52 ED 2A 66 /* The expire timestamp, expressed in Unix time, stored as an 4-byte unsigned integer, in little-endian (read right-to-left). Here, the expire timestamp is 1714089298. */
|
||||
00 // Value type is string.
|
||||
03 62 61 7A // Key name is "baz".
|
||||
03 71 75 78 // Value is "qux".
|
||||
```
|
||||
|
||||
In summary,
|
||||
- Optional expire information (one of the following):
|
||||
- Timestamp in seconds:
|
||||
- FD
|
||||
- Expire timestamp in seconds (4-byte unsigned integer)
|
||||
- Timestamp in milliseconds:
|
||||
- FC
|
||||
- Expire timestamp in milliseconds (8-byte unsigned long)
|
||||
- Value type (1-byte flag)
|
||||
- Key (string encoded)
|
||||
- Value (encoding depends on value type)
|
||||
|
||||
#### End of file section
|
||||
```sh
|
||||
FF /* Indicates that the file is ending, and that the checksum follows. */
|
||||
89 3b b7 4e f8 0f 77 19 // An 8-byte CRC64 checksum of the entire file.
|
||||
```
|
||||
|
||||
#### Size encoding
|
||||
```sh
|
||||
/* If the first two bits are 0b00:
|
||||
The size is the remaining 6 bits of the byte.
|
||||
In this example, the size is 10: */
|
||||
0A
|
||||
00001010
|
||||
|
||||
/* If the first two bits are 0b01:
|
||||
The size is the next 14 bits
|
||||
(remaining 6 bits in the first byte, combined with the next byte),
|
||||
in big-endian (read left-to-right).
|
||||
In this example, the size is 700: */
|
||||
42 BC
|
||||
01000010 10111100
|
||||
|
||||
/* If the first two bits are 0b10:
|
||||
Ignore the remaining 6 bits of the first byte.
|
||||
The size is the next 4 bytes, in big-endian (read left-to-right).
|
||||
In this example, the size is 17000: */
|
||||
80 00 00 42 68
|
||||
10000000 00000000 00000000 01000010 01101000
|
||||
|
||||
/* If the first two bits are 0b11:
|
||||
The remaining 6 bits specify a type of string encoding.
|
||||
See string encoding section. */
|
||||
```
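
A compact sketch of how a size-encoded value can be decoded, following the four cases above (an assumed helper, not this project's actual implementation):

```rust
/// Decode an RDB size-encoded value from `buf`, returning (size, bytes consumed).
/// Returns None for the 0b11 case, which signals a special string encoding instead.
fn read_size(buf: &[u8]) -> Option<(u64, usize)> {
    let b0 = buf[0];
    match b0 >> 6 {
        0b00 => Some(((b0 & 0x3F) as u64, 1)),
        0b01 => {
            // 14-bit size: low 6 bits of byte 0 followed by byte 1, big-endian.
            Some(((((b0 & 0x3F) as u64) << 8) | buf[1] as u64, 2))
        }
        0b10 => {
            // 32-bit size in the next 4 bytes, big-endian.
            Some((u32::from_be_bytes([buf[1], buf[2], buf[3], buf[4]]) as u64, 5))
        }
        _ => None, // 0b11: special string encoding (see the string encoding section)
    }
}

fn main() {
    assert_eq!(read_size(&[0x0A]), Some((10, 1)));
    assert_eq!(read_size(&[0x42, 0xBC]), Some((700, 2)));
    assert_eq!(read_size(&[0x80, 0x00, 0x00, 0x42, 0x68]), Some((17000, 5)));
}
```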
|
||||
|
||||
#### String encoding
|
||||
+ The size of the string (size encoded).
|
||||
+ The string.
|
||||
```sh
|
||||
/* The 0x0D size specifies that the string is 13 characters long. The remaining characters spell out "Hello, World!". */
|
||||
0D 48 65 6C 6C 6F 2C 20 57 6F 72 6C 64 21
|
||||
```
|
||||
For sizes that begin with 0b11, the remaining 6 bits indicate a type of string format:
|
||||
```sh
|
||||
/* The 0xC0 size indicates the string is an 8-bit integer. In this example, the string is "123". */
|
||||
C0 7B
|
||||
|
||||
/* The 0xC1 size indicates the string is a 16-bit integer. The remaining bytes are in little-endian (read right-to-left). In this example, the string is "12345". */
|
||||
C1 39 30
|
||||
|
||||
/* The 0xC2 size indicates the string is a 32-bit integer. The remaining bytes are in little-endian (read right-to-left), In this example, the string is "1234567". */
|
||||
C2 87 D6 12 00
|
||||
|
||||
/* The 0xC3 size indicates that the string is compressed with the LZF algorithm. You will not encounter LZF-compressed strings in this challenge. */
|
||||
C3 ...
|
||||
```
|
||||
|
||||
## Replication
|
||||
This project implements Redis [leader-follower replication](https://redis.io/docs/management/replication/).
|
||||
Run multiple Redis servers with one acting as the "master" and the others as "replicas". Changes made to the master will be automatically replicated to replicas.
|
||||
|
||||
### Send Handshake (follower -> master)
|
||||
1. When the follower starts, it will send a PING command to the master as RESP Array.
|
||||
2. Then two REPLCONF (replication config) commands are sent from the follower to the master to communicate the listening port and the sync protocol. One is *REPLCONF listening-port <PORT>* and the other is *REPLCONF capa psync2*; psync2 is the sync protocol supported in this project.
|
||||
3. The follower sends the *PSYNC* command to the master with a replication id and offset to start the replication process (see the sketch after this list).
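
A minimal sketch of the follower side of this handshake, reusing the RESP array framing shown earlier (assumed helper names; real code must also read and validate each reply, PONG, OK, OK, +FULLRESYNC, before sending the next command, and then receive the RDB payload):

```rust
use std::io::Write;
use std::net::TcpStream;

/// Encode a command as a RESP array of bulk strings.
fn resp(parts: &[&str]) -> Vec<u8> {
    let mut out = format!("*{}\r\n", parts.len()).into_bytes();
    for p in parts {
        out.extend_from_slice(format!("${}\r\n{}\r\n", p.len(), p).as_bytes());
    }
    out
}

/// Follower side of the handshake; replies are not read here for brevity.
fn send_handshake(master: &mut TcpStream, listening_port: u16) -> std::io::Result<()> {
    let port = listening_port.to_string();
    master.write_all(&resp(&["PING"]))?;
    master.write_all(&resp(&["REPLCONF", "listening-port", port.as_str()]))?;
    master.write_all(&resp(&["REPLCONF", "capa", "psync2"]))?;
    master.write_all(&resp(&["PSYNC", "?", "-1"]))?; // no data yet: request a full resync
    Ok(())
}

fn main() -> std::io::Result<()> {
    // Assumes a master on localhost:6379 and this follower listening on 6380.
    let mut master = TcpStream::connect("localhost:6379")?;
    send_handshake(&mut master, 6380)
}
```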
|
||||
|
||||
### Receive Handshake (master -> follower)
|
||||
1. Respond with a PONG message to the follower.
2. Respond with an OK message to the follower for each of the two REPLCONF commands.
3. Respond with *+FULLRESYNC <REPL_ID> 0\r\n* to the follower, carrying the replication id and offset.
|
||||
|
||||
### RDB file transfer
|
||||
When the follower starts, it sends a *PSYNC ? -1* command to tell master that it doesn't have any data yet, and needs a full resync.
|
||||
|
||||
Then the master sends a *FULLRESYNC* response to the follower as an acknowledgement.
|
||||
|
||||
Finally, the master sends an RDB file representing its current state to the follower. The follower loads the received RDB file into memory, replacing its current state.
|
||||
|
||||
### Receive write commands (master -> follower)
|
||||
The master sends subsequent write commands to the follower along with offset information.
It reuses the same TCP connection that was used for the handshake and the RDB file transfer.
Because all commands are encoded as RESP arrays, just like normal client commands, the follower can reuse the same logic to handle replicated commands from the master. The only differences are that the commands come from the master and no response needs to be sent back.
|
||||
|
||||
|
||||
## Streams
|
||||
A stream is identified by a key, and it contains multiple entries.
|
||||
Each entry consists of one or more key-value pairs, and is assigned a unique ID.
|
||||
[More about redis streams](https://redis.io/docs/latest/develop/data-types/streams/)
|
||||
[Radix tree](https://en.wikipedia.org/wiki/Radix_tree)
|
||||
|
||||
|
||||
Conceptually, a stream looks like a list of entries, each holding key-value pairs.
|
||||
```sh
|
||||
entries:
|
||||
- id: 1526985054069-0 # (ID of the first entry)
|
||||
temperature: 36 # (A key value pair in the first entry)
|
||||
humidity: 95 # (Another key value pair in the first entry)
|
||||
|
||||
- id: 1526985054079-0 # (ID of the second entry)
|
||||
temperature: 37 # (A key value pair in the second entry)
humidity: 94 # (Another key value pair in the second entry)
|
||||
|
||||
# ... (and so on)
|
||||
|
||||
```
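
A sketch of how an entry ID and an entry could be modeled; the `(milliseconds, sequence)` pair is what makes IDs totally ordered for XRANGE/XREAD queries (hypothetical types, not this project's actual ones):

```rust
/// A stream entry ID: milliseconds timestamp plus a per-millisecond sequence number.
/// Deriving Ord gives the (ms, seq) ordering used when answering XRANGE/XREAD queries.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct EntryId {
    ms: u64,
    seq: u64,
}

/// One stream entry: a unique ID plus its key-value pairs.
struct Entry {
    id: EntryId,
    fields: Vec<(String, String)>,
}

fn main() {
    let a = EntryId { ms: 1526985054069, seq: 0 };
    let b = EntryId { ms: 1526985054079, seq: 0 };
    assert!(a < b); // entries with a smaller (ms, seq) pair come first

    let _first = Entry {
        id: a,
        fields: vec![("temperature".into(), "36".into()), ("humidity".into(), "95".into())],
    };
}
```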
|
||||
|
||||
Examples of Redis stream use cases include:
|
||||
- Event sourcing (e.g., tracking user actions, clicks, etc.)
|
||||
- Sensor monitoring (e.g., readings from devices in the field)
|
||||
- Notifications (e.g., storing a record of each user's notifications in a separate stream)
|
||||
|
||||
## Transaction
|
||||
When the *MULTI* command is called on a connection, Redis simply queues all subsequent commands until *EXEC* or *DISCARD* is called.
*EXEC* executes all queued commands and returns an array of their results, whereas *DISCARD* just clears the queued commands.
Transactions on different client connections are independent of each other.
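
A minimal sketch of that per-connection queueing behaviour (simplified: `Cmd` and `execute` are stand-ins, and error handling such as rejecting a nested MULTI is omitted):

```rust
struct ConnState {
    queued: Option<Vec<Cmd>>, // None = normal mode, Some = inside MULTI
}

enum Cmd {
    Multi,
    Exec,
    Discard,
    Other(String),
}

fn handle(state: &mut ConnState, cmd: Cmd) -> String {
    match cmd {
        Cmd::Multi => {
            state.queued = Some(Vec::new());
            "+OK\r\n".into()
        }
        Cmd::Exec => match state.queued.take() {
            // Run every queued command and collect the replies
            // (a real server wraps them in a RESP array header).
            Some(queued) => queued.into_iter().map(execute).collect::<Vec<_>>().concat(),
            None => "-ERR EXEC without MULTI\r\n".into(),
        },
        Cmd::Discard => {
            state.queued = None;
            "+OK\r\n".into()
        }
        other => match state.queued.as_mut() {
            Some(queue) => {
                queue.push(other); // inside MULTI: just queue it
                "+QUEUED\r\n".into()
            }
            None => execute(other), // outside MULTI: execute immediately
        },
    }
}

// Stand-in for the real command executor.
fn execute(_cmd: Cmd) -> String {
    "+OK\r\n".into()
}

fn main() {
    let mut conn = ConnState { queued: None };
    assert_eq!(handle(&mut conn, Cmd::Multi), "+OK\r\n");
    assert_eq!(handle(&mut conn, Cmd::Other("SET foo 1".into())), "+QUEUED\r\n");
    assert_eq!(handle(&mut conn, Cmd::Exec), "+OK\r\n");
}
```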
|
_config.yml (64 lines)
@@ -1,64 +0,0 @@
|
||||
title: Building Redis in Rust from 0 to 1
description: Building Redis in Rust from 0 to 1
|
||||
theme: just-the-docs
|
||||
|
||||
url: https://fangpin.github.io/redis-rs
|
||||
|
||||
aux_links:
|
||||
GitHub: https://fangpin.github.io/redis-rs
|
||||
|
||||
# logo: "/assets/images/just-the-docs.png"
|
||||
|
||||
search_enabled: true
|
||||
search:
|
||||
# Split pages into sections that can be searched individually
|
||||
# Supports 1 - 6, default: 2
|
||||
heading_level: 2
|
||||
# Maximum amount of previews per search result
|
||||
# Default: 3
|
||||
previews: 3
|
||||
# Maximum amount of words to display before a matched word in the preview
|
||||
# Default: 5
|
||||
preview_words_before: 5
|
||||
# Maximum amount of words to display after a matched word in the preview
|
||||
# Default: 10
|
||||
preview_words_after: 10
|
||||
# Set the search token separator
|
||||
# Default: /[\s\-/]+/
|
||||
# Example: enable support for hyphenated search words
|
||||
tokenizer_separator: /[\s/]+/
|
||||
# Display the relative url in search results
|
||||
# Supports true (default) or false
|
||||
rel_url: true
|
||||
# Enable or disable the search button that appears in the bottom right corner of every page
|
||||
# Supports true or false (default)
|
||||
button: false
|
||||
|
||||
|
||||
# Heading anchor links appear on hover over h1-h6 tags in page content
|
||||
# allowing users to deep link to a particular heading on a page.
|
||||
#
|
||||
# Supports true (default) or false
|
||||
heading_anchors: true
|
||||
|
||||
|
||||
# Footer content
|
||||
# appears at the bottom of every page's main content
|
||||
# Note: The footer_content option is deprecated and will be removed in a future major release. Please use `_includes/footer_custom.html` for more robust markup / liquid-based content.
|
||||
footer_content: "Copyright © 2017-2024 Pin Fang"
|
||||
|
||||
# Footer last edited timestamp
|
||||
last_edit_timestamp: true # show or hide edit time - page must have `last_modified_date` defined in the frontmatter
|
||||
last_edit_time_format: "%b %e %Y at %I:%M %p" # uses ruby's time format: https://ruby-doc.org/stdlib-2.7.0/libdoc/time/rdoc/Time.html
|
||||
|
||||
|
||||
|
||||
# code
|
||||
compress_html:
|
||||
ignore:
|
||||
envs: all
|
||||
|
||||
kramdown:
|
||||
syntax_highlighter_opts:
|
||||
block:
|
||||
line_numbers: true
|
herodb/Cargo.toml (new file, 28 lines)
@@ -0,0 +1,28 @@
[package]
name = "herodb"
version = "0.0.1"
authors = ["Pin Fang <fpfangpin@hotmail.com>"]
edition = "2021"

[dependencies]
anyhow = "1.0.59"
bytes = "1.3.0"
thiserror = "1.0.32"
tokio = { version = "1.23.0", features = ["full"] }
clap = { version = "4.5.20", features = ["derive"] }
byteorder = "1.4.3"
futures = "0.3"
redb = "2.1.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bincode = "1.3.3"
chacha20poly1305 = "0.10.1"
rand = "0.8"
sha2 = "0.10"
age = "0.10"
secrecy = "0.8"
ed25519-dalek = "2"
base64 = "0.22"

[dev-dependencies]
redis = { version = "0.24", features = ["aio", "tokio-comp"] }
herodb/README.md (new file, empty)
herodb/build.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash

set -euo pipefail
export SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "I am in $SCRIPT_DIR"
cd "$SCRIPT_DIR"

cargo build
herodb/docs/age.md (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
# HeroDB AGE usage: Stateless vs Key‑Managed
|
||||
|
||||
This document explains how to use the AGE cryptography commands exposed by HeroDB over the Redis protocol in two modes:
|
||||
- Stateless (ephemeral keys; nothing stored on the server)
|
||||
- Key‑managed (server‑persisted, named keys)
|
||||
|
||||
If you are new to the codebase, the exact tests that exercise these behaviors are:
|
||||
- [rust.test_07_age_stateless_suite()](herodb/tests/usage_suite.rs:495)
|
||||
- [rust.test_08_age_persistent_named_suite()](herodb/tests/usage_suite.rs:555)
|
||||
|
||||
Implementation entry points:
|
||||
- [herodb/src/age.rs](herodb/src/age.rs)
|
||||
- Dispatch from [herodb/src/cmd.rs](herodb/src/cmd.rs)
|
||||
|
||||
Note: Database-at-rest encryption flags in the test harness are unrelated to AGE commands; those flags control storage-level encryption of DB files. See the harness near [rust.start_test_server()](herodb/tests/usage_suite.rs:10).
|
||||
|
||||
## Quick start
|
||||
|
||||
Assuming the server is running on localhost on some $PORT:
|
||||
```bash
|
||||
~/code/git.ourworld.tf/herocode/herodb/herodb/build.sh
|
||||
~/code/git.ourworld.tf/herocode/herodb/target/release/herodb --dir /tmp/data --debug --port 6381 --encryption-key 1234 --encrypt
|
||||
```
|
||||
|
||||
|
||||
```bash
|
||||
export PORT=6381
|
||||
# Generate an ephemeral keypair and encrypt/decrypt a message (stateless mode)
|
||||
redis-cli -p $PORT AGE GENENC
|
||||
# → returns an array: [recipient, identity]
|
||||
|
||||
redis-cli -p $PORT AGE ENCRYPT <recipient> "hello world"
|
||||
# → returns ciphertext (base64 in a bulk string)
|
||||
|
||||
redis-cli -p $PORT AGE DECRYPT <identity> <ciphertext_b64>
|
||||
# → returns "hello world"
|
||||
```
|
||||
|
||||
For key‑managed mode, generate a named key once and reference it by name afterwards:
|
||||
|
||||
```bash
|
||||
redis-cli -p $PORT AGE KEYGEN app1
|
||||
# → persists encryption keypair under name "app1"
|
||||
|
||||
redis-cli -p $PORT AGE ENCRYPTNAME app1 "hello"
|
||||
redis-cli -p $PORT AGE DECRYPTNAME app1 <ciphertext_b64>
|
||||
```
|
||||
|
||||
## Stateless AGE (ephemeral)
|
||||
|
||||
Characteristics
|
||||
|
||||
- No server‑side storage of keys.
|
||||
- You pass the actual key material with every call.
|
||||
- Not listable via AGE LIST.
|
||||
|
||||
Commands and examples
|
||||
|
||||
1) Ephemeral encryption keys
|
||||
|
||||
```bash
|
||||
# Generate an ephemeral encryption keypair
|
||||
redis-cli -p $PORT AGE GENENC
|
||||
# Example output (abridged):
|
||||
# 1) "age1qz..." # recipient (public key) = can be used by others e.g. to verify what I sign
|
||||
# 2) "AGE-SECRET-KEY-1..." # identity (secret) = is like my private, cannot lose this one
|
||||
|
||||
# Encrypt with the recipient public key
|
||||
redis-cli -p $PORT AGE ENCRYPT "age1qz..." "hello world"
|
||||
|
||||
# → returns bulk string payload: base64 ciphertext (encrypted content)
|
||||
|
||||
# Decrypt with the identity (secret), i.e. your private key
|
||||
redis-cli -p $PORT AGE DECRYPT "AGE-SECRET-KEY-1..." "<ciphertext_b64>"
|
||||
# → "hello world"
|
||||
```
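
The same stateless flow can also be driven from Rust with the `redis` crate that already appears in herodb's dev-dependencies; a sketch (the port and the message are placeholders):

```rust
fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6381/")?;
    let mut con = client.get_connection()?;

    // AGE GENENC -> [recipient, identity]
    let (recipient, identity): (String, String) =
        redis::cmd("AGE").arg("GENENC").query(&mut con)?;

    // AGE ENCRYPT <recipient> "msg" -> base64 ciphertext in a bulk string
    let ciphertext: String = redis::cmd("AGE")
        .arg("ENCRYPT")
        .arg(&recipient)
        .arg("hello world")
        .query(&mut con)?;

    // AGE DECRYPT <identity> <ciphertext_b64> -> plaintext
    let plaintext: String = redis::cmd("AGE")
        .arg("DECRYPT")
        .arg(&identity)
        .arg(&ciphertext)
        .query(&mut con)?;

    assert_eq!(plaintext, "hello world");
    Ok(())
}
```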
|
||||
|
||||
2) Ephemeral signing keys
|
||||
|
||||
> ? is this same as my private key
|
||||
|
||||
```bash
|
||||
|
||||
# Generate an ephemeral signing keypair
|
||||
redis-cli -p $PORT AGE GENSIGN
|
||||
# Example output:
|
||||
# 1) "<verify_pub_b64>"
|
||||
# 2) "<sign_secret_b64>"
|
||||
|
||||
# Sign a message with the secret
|
||||
redis-cli -p $PORT AGE SIGN "<sign_secret_b64>" "msg"
|
||||
# → returns "<signature_b64>"
|
||||
|
||||
# Verify with the public key
|
||||
redis-cli -p $PORT AGE VERIFY "<verify_pub_b64>" "msg" "<signature_b64>"
|
||||
# → 1 (valid) or 0 (invalid)
|
||||
```
|
||||
|
||||
When to use
|
||||
- You do not want the server to store private keys.
|
||||
- You already manage key material on the client side.
|
||||
- You need ad‑hoc operations without persistence.
|
||||
|
||||
Reference test: [rust.test_07_age_stateless_suite()](herodb/tests/usage_suite.rs:495)
|
||||
|
||||
## Key‑managed AGE (persistent, named)
|
||||
|
||||
Characteristics
|
||||
- Server generates and persists keypairs under a chosen name.
|
||||
- Clients refer to keys by name; raw secrets are not supplied on each call.
|
||||
- Keys are discoverable via AGE LIST.
|
||||
|
||||
Commands and examples
|
||||
|
||||
1) Named encryption keys
|
||||
|
||||
```bash
|
||||
# Create/persist a named encryption keypair
|
||||
redis-cli -p $PORT AGE KEYGEN app1
|
||||
# → returns [recipient, identity] but also stores them under name "app1"
|
||||
|
||||
> TODO: should not return identity (security, but there can be separate function to export it e.g. AGE EXPORTKEY app1)
|
||||
|
||||
# Encrypt using the stored public key
|
||||
redis-cli -p $PORT AGE ENCRYPTNAME app1 "hello"
|
||||
# → returns bulk string payload: base64 ciphertext
|
||||
|
||||
# Decrypt using the stored secret
|
||||
redis-cli -p $PORT AGE DECRYPTNAME app1 "<ciphertext_b64>"
|
||||
# → "hello"
|
||||
```
|
||||
|
||||
2) Named signing keys
|
||||
|
||||
```bash
|
||||
# Create/persist a named signing keypair
|
||||
redis-cli -p $PORT AGE SIGNKEYGEN app1
|
||||
# → returns [verify_pub_b64, sign_secret_b64] and stores under name "app1"
|
||||
|
||||
> TODO: should not return sign_secret_b64 (for security, but there can be separate function to export it e.g. AGE EXPORTSIGNKEY app1)
|
||||
|
||||
# Sign using the stored secret
|
||||
redis-cli -p $PORT AGE SIGNNAME app1 "msg"
|
||||
# → returns "<signature_b64>"
|
||||
|
||||
# Verify using the stored public key
|
||||
redis-cli -p $PORT AGE VERIFYNAME app1 "msg" "<signature_b64>"
|
||||
# → 1 (valid) or 0 (invalid)
|
||||
```
|
||||
|
||||
3) List stored AGE keys
|
||||
|
||||
```bash
|
||||
redis-cli -p $PORT AGE LIST
|
||||
# Example output includes labels such as "encpub" and your key names (e.g., "app1")
|
||||
```
|
||||
|
||||
When to use
|
||||
- You want centralized key storage/rotation and fewer secrets on the client.
|
||||
- You need names/labels for workflows and can trust the server with secrets.
|
||||
- You want discoverability (AGE LIST) and simpler client commands.
|
||||
|
||||
Reference test: [rust.test_08_age_persistent_named_suite()](herodb/tests/usage_suite.rs:555)
|
||||
|
||||
## Choosing a mode
|
||||
|
||||
- Prefer Stateless when:
|
||||
- Minimizing server trust for secret material is the priority.
|
||||
- Clients already have a secure mechanism to store/distribute keys.
|
||||
- Prefer Key‑managed when:
|
||||
- Centralized lifecycle, naming, and discoverability are beneficial.
|
||||
- You plan to integrate rotation, ACLs, or auditability on the server side.
|
||||
|
||||
## Security notes
|
||||
|
||||
- Treat identities and signing secrets as sensitive; avoid logging them.
|
||||
- For key‑managed mode, ensure server storage (and backups) are protected.
|
||||
- AGE operations here are application‑level crypto and are distinct from database-at-rest encryption configured in the test harness.
|
||||
|
||||
## Repository pointers
|
||||
|
||||
- Stateless examples in tests: [rust.test_07_age_stateless_suite()](herodb/tests/usage_suite.rs:495)
|
||||
- Key‑managed examples in tests: [rust.test_08_age_persistent_named_suite()](herodb/tests/usage_suite.rs:555)
|
||||
- AGE implementation: [herodb/src/age.rs](herodb/src/age.rs)
|
||||
- Command dispatch: [herodb/src/cmd.rs](herodb/src/cmd.rs)
|
||||
- Bash demo: [herodb/examples/age_bash_demo.sh](herodb/examples/age_bash_demo.sh)
|
||||
- Rust persistent demo: [herodb/examples/age_persist_demo.rs](herodb/examples/age_persist_demo.rs)
|
||||
- Additional notes: [herodb/instructions/encrypt.md](herodb/instructions/encrypt.md)
|
herodb/docs/basics.md (new file, 623 lines)
@@ -0,0 +1,623 @@
|
||||
|
||||
# HeroDB Commands
|
||||
|
||||
HeroDB implements a subset of Redis commands over the Redis protocol. This document describes the available commands and their usage.
|
||||
|
||||
## String Commands
|
||||
|
||||
### PING
|
||||
Ping the server to test connectivity.
|
||||
```bash
|
||||
redis-cli -p $PORT PING
|
||||
# → PONG
|
||||
```
|
||||
|
||||
### ECHO
|
||||
Echo the given message.
|
||||
```bash
|
||||
redis-cli -p $PORT ECHO "hello"
|
||||
# → hello
|
||||
```
|
||||
|
||||
### SET
|
||||
Set a key to hold a string value.
|
||||
```bash
|
||||
redis-cli -p $PORT SET key value
|
||||
# → OK
|
||||
```
|
||||
|
||||
Options:
|
||||
- EX seconds: Set expiration in seconds
|
||||
- PX milliseconds: Set expiration in milliseconds
|
||||
- NX: Only set if key doesn't exist
|
||||
- XX: Only set if key exists
|
||||
- GET: Return old value
|
||||
|
||||
Examples:
|
||||
```bash
|
||||
redis-cli -p $PORT SET key value EX 60
|
||||
redis-cli -p $PORT SET key value PX 1000
|
||||
redis-cli -p $PORT SET key value NX
|
||||
redis-cli -p $PORT SET key value XX
|
||||
redis-cli -p $PORT SET key value GET
|
||||
```
|
||||
|
||||
### GET
|
||||
Get the value of a key.
|
||||
```bash
|
||||
redis-cli -p $PORT GET key
|
||||
# → value
|
||||
```
|
||||
|
||||
### MGET
|
||||
Get values of multiple keys.
|
||||
```bash
|
||||
redis-cli -p $PORT MGET key1 key2 key3
|
||||
# → 1) "value1"
|
||||
# 2) "value2"
|
||||
# 3) (nil)
|
||||
```
|
||||
|
||||
### MSET
|
||||
Set multiple key-value pairs.
|
||||
```bash
|
||||
redis-cli -p $PORT MSET key1 value1 key2 value2
|
||||
# → OK
|
||||
```
|
||||
|
||||
### INCR
|
||||
Increment the integer value of a key by 1.
|
||||
```bash
|
||||
redis-cli -p $PORT SET counter 10
|
||||
redis-cli -p $PORT INCR counter
|
||||
# → 11
|
||||
```
|
||||
|
||||
### DEL
|
||||
Delete a key.
|
||||
```bash
|
||||
redis-cli -p $PORT DEL key
|
||||
# → 1
|
||||
```
|
||||
|
||||
For multiple keys:
|
||||
```bash
|
||||
redis-cli -p $PORT DEL key1 key2 key3
|
||||
# → number of keys deleted
|
||||
```
|
||||
|
||||
### TYPE
|
||||
Determine the type of a key.
|
||||
```bash
|
||||
redis-cli -p $PORT TYPE key
|
||||
# → string
|
||||
```
|
||||
|
||||
### EXISTS
|
||||
Check if a key exists.
|
||||
```bash
|
||||
redis-cli -p $PORT EXISTS key
|
||||
# → 1 (exists) or 0 (doesn't exist)
|
||||
```
|
||||
|
||||
For multiple keys:
|
||||
```bash
|
||||
redis-cli -p $PORT EXISTS key1 key2 key3
|
||||
# → count of existing keys
|
||||
```
|
||||
|
||||
### EXPIRE / PEXPIRE
|
||||
Set expiration time for a key.
|
||||
```bash
|
||||
redis-cli -p $PORT EXPIRE key 60
|
||||
# → 1 (timeout set) or 0 (timeout not set)
|
||||
|
||||
redis-cli -p $PORT PEXPIRE key 1000
|
||||
# → 1 (timeout set) or 0 (timeout not set)
|
||||
```
|
||||
|
||||
### EXPIREAT / PEXPIREAT
|
||||
Set expiration timestamp for a key.
|
||||
```bash
|
||||
redis-cli -p $PORT EXPIREAT key 1672531200
|
||||
# → 1 (timeout set) or 0 (timeout not set)
|
||||
|
||||
redis-cli -p $PORT PEXPIREAT key 1672531200000
|
||||
# → 1 (timeout set) or 0 (timeout not set)
|
||||
```
|
||||
|
||||
### TTL
|
||||
Get the time to live for a key.
|
||||
```bash
|
||||
redis-cli -p $PORT TTL key
|
||||
# → remaining time in seconds
|
||||
```
|
||||
|
||||
### PERSIST
|
||||
Remove expiration from a key.
|
||||
```bash
|
||||
redis-cli -p $PORT PERSIST key
|
||||
# → 1 (timeout removed) or 0 (key has no timeout)
|
||||
```
|
||||
|
||||
## Hash Commands
|
||||
|
||||
### HSET
|
||||
Set field-value pairs in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HSET hashkey field1 value1 field2 value2
|
||||
# → number of fields added
|
||||
```
|
||||
|
||||
### HGET
|
||||
Get value of a field in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HGET hashkey field1
|
||||
# → value1
|
||||
```
|
||||
|
||||
### HGETALL
|
||||
Get all field-value pairs in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HGETALL hashkey
|
||||
# → 1) "field1"
|
||||
# 2) "value1"
|
||||
# 3) "field2"
|
||||
# 4) "value2"
|
||||
```
|
||||
|
||||
### HDEL
|
||||
Delete fields from a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HDEL hashkey field1 field2
|
||||
# → number of fields deleted
|
||||
```
|
||||
|
||||
### HEXISTS
|
||||
Check if a field exists in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HEXISTS hashkey field1
|
||||
# → 1 (exists) or 0 (doesn't exist)
|
||||
```
|
||||
|
||||
### HKEYS
|
||||
Get all field names in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HKEYS hashkey
|
||||
# → 1) "field1"
|
||||
# 2) "field2"
|
||||
```
|
||||
|
||||
### HVALS
|
||||
Get all values in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HVALS hashkey
|
||||
# → 1) "value1"
|
||||
# 2) "value2"
|
||||
```
|
||||
|
||||
### HLEN
|
||||
Get number of fields in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HLEN hashkey
|
||||
# → number of fields
|
||||
```
|
||||
|
||||
### HMGET
|
||||
Get values of multiple fields in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HMGET hashkey field1 field2 field3
|
||||
# → 1) "value1"
|
||||
# 2) "value2"
|
||||
# 3) (nil)
|
||||
```
|
||||
|
||||
### HSETNX
|
||||
Set field-value pair in hash only if field doesn't exist.
|
||||
```bash
|
||||
redis-cli -p $PORT HSETNX hashkey field1 value1
|
||||
# → 1 (field set) or 0 (field not set)
|
||||
```
|
||||
|
||||
### HINCRBY
|
||||
Increment integer value of a field in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HINCRBY hashkey field1 5
|
||||
# → new value
|
||||
```
|
||||
|
||||
### HINCRBYFLOAT
|
||||
Increment float value of a field in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HINCRBYFLOAT hashkey field1 3.14
|
||||
# → new value
|
||||
```
|
||||
|
||||
### HSCAN
|
||||
Incrementally iterate over fields in a hash.
|
||||
```bash
|
||||
redis-cli -p $PORT HSCAN hashkey 0
|
||||
# → 1) "next_cursor"
|
||||
# 2) 1) "field1"
|
||||
# 2) "value1"
|
||||
# 3) "field2"
|
||||
# 4) "value2"
|
||||
```
|
||||
|
||||
Options:
|
||||
- MATCH pattern: Filter fields by pattern
|
||||
- COUNT number: Suggest number of fields to return
|
||||
|
||||
Examples:
|
||||
```bash
|
||||
redis-cli -p $PORT HSCAN hashkey 0 MATCH f*
|
||||
redis-cli -p $PORT HSCAN hashkey 0 COUNT 10
|
||||
redis-cli -p $PORT HSCAN hashkey 0 MATCH f* COUNT 10
|
||||
```
|
||||
|
||||
## List Commands
|
||||
|
||||
### LPUSH
|
||||
Insert elements at the head of a list.
|
||||
```bash
|
||||
redis-cli -p $PORT LPUSH listkey element1 element2 element3
|
||||
# → number of elements in the list
|
||||
```
|
||||
|
||||
### RPUSH
|
||||
Insert elements at the tail of a list.
|
||||
```bash
|
||||
redis-cli -p $PORT RPUSH listkey element1 element2 element3
|
||||
# → number of elements in the list
|
||||
```
|
||||
|
||||
### LPOP
|
||||
Remove and return elements from the head of a list.
|
||||
```bash
|
||||
redis-cli -p $PORT LPOP listkey
|
||||
# → element1
|
||||
```
|
||||
|
||||
With count:
|
||||
```bash
|
||||
redis-cli -p $PORT LPOP listkey 2
|
||||
# → 1) "element1"
|
||||
# 2) "element2"
|
||||
```
|
||||
|
||||
### RPOP
|
||||
Remove and return elements from the tail of a list.
|
||||
```bash
|
||||
redis-cli -p $PORT RPOP listkey
|
||||
# → element3
|
||||
```
|
||||
|
||||
With count:
|
||||
```bash
|
||||
redis-cli -p $PORT RPOP listkey 2
|
||||
# → 1) "element3"
|
||||
# 2) "element2"
|
||||
```
|
||||
|
||||
### LLEN
|
||||
Get the length of a list.
|
||||
```bash
|
||||
redis-cli -p $PORT LLEN listkey
|
||||
# → number of elements in the list
|
||||
```
|
||||
|
||||
### LINDEX
|
||||
Get element at index in a list.
|
||||
```bash
|
||||
redis-cli -p $PORT LINDEX listkey 0
|
||||
# → first element
|
||||
```
|
||||
|
||||
Negative indices count from the end:
|
||||
```bash
|
||||
redis-cli -p $PORT LINDEX listkey -1
|
||||
# → last element
|
||||
```
|
||||
|
||||
### LRANGE
|
||||
Get a range of elements from a list.
|
||||
```bash
|
||||
redis-cli -p $PORT LRANGE listkey 0 -1
|
||||
# → 1) "element1"
|
||||
# 2) "element2"
|
||||
# 3) "element3"
|
||||
```
|
||||
|
||||
### LTRIM
|
||||
Trim a list to specified range.
|
||||
```bash
|
||||
redis-cli -p $PORT LTRIM listkey 0 1
|
||||
# → OK (list now contains only first 2 elements)
|
||||
```
|
||||
|
||||
### LREM
|
||||
Remove elements from a list.
|
||||
```bash
|
||||
redis-cli -p $PORT LREM listkey 2 element1
|
||||
# → number of elements removed
|
||||
```
|
||||
|
||||
Count values:
|
||||
- Positive: Remove from head
|
||||
- Negative: Remove from tail
|
||||
- Zero: Remove all
|
||||
|
||||
### LINSERT
|
||||
Insert element before or after pivot element.
|
||||
```bash
|
||||
redis-cli -p $PORT LINSERT listkey BEFORE pivot newelement
|
||||
# → number of elements in the list
|
||||
```
|
||||
|
||||
### BLPOP
|
||||
Blocking remove and return elements from the head of a list.
|
||||
```bash
|
||||
redis-cli -p $PORT BLPOP listkey1 listkey2 5
|
||||
# → 1) "listkey1"
|
||||
# 2) "element1"
|
||||
```
|
||||
|
||||
If no elements are available, blocks for specified timeout (in seconds) until an element is pushed to one of the lists.
|
||||
|
||||
### BRPOP
|
||||
Blocking remove and return elements from the tail of a list.
|
||||
```bash
|
||||
redis-cli -p $PORT BRPOP listkey1 listkey2 5
|
||||
# → 1) "listkey1"
|
||||
# 2) "element1"
|
||||
```
|
||||
|
||||
If no elements are available, blocks for specified timeout (in seconds) until an element is pushed to one of the lists.
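
One way such blocking semantics can be implemented on top of tokio (already a HeroDB dependency) is to wait on a notification with a deadline; a simplified sketch, not this project's actual code:

```rust
use std::collections::VecDeque;
use std::sync::Mutex;
use std::time::Duration;
use tokio::sync::Notify;

/// Shared list state: the elements plus a Notify used to wake blocked poppers.
struct BlockingList {
    items: Mutex<VecDeque<String>>,
    notify: Notify,
}

impl BlockingList {
    /// LPUSH-style push that wakes one blocked BLPOP waiter (or stores a permit).
    fn lpush(&self, v: String) {
        self.items.lock().unwrap().push_front(v);
        self.notify.notify_one();
    }

    /// BLPOP-style pop: wait up to `timeout` for an element to appear at the head.
    async fn blpop(&self, timeout: Duration) -> Option<String> {
        let deadline = tokio::time::Instant::now() + timeout;
        loop {
            if let Some(v) = self.items.lock().unwrap().pop_front() {
                return Some(v);
            }
            // Sleep until a push notifies us, or give up when the deadline passes (nil reply).
            if tokio::time::timeout_at(deadline, self.notify.notified()).await.is_err() {
                return None;
            }
        }
    }
}
```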
|
||||
|
||||
## Keyspace Commands
|
||||
|
||||
### KEYS
|
||||
Get all keys matching pattern.
|
||||
```bash
|
||||
redis-cli -p $PORT KEYS *
|
||||
# → 1) "key1"
|
||||
# 2) "key2"
|
||||
```
|
||||
|
||||
### SCAN
|
||||
Incrementally iterate over keys.
|
||||
```bash
|
||||
redis-cli -p $PORT SCAN 0
|
||||
# → 1) "next_cursor"
|
||||
# 2) 1) "key1"
|
||||
# 2) "key2"
|
||||
```
|
||||
|
||||
Options:
|
||||
- MATCH pattern: Filter keys by pattern
|
||||
- COUNT number: Suggest number of keys to return
|
||||
|
||||
Examples:
|
||||
```bash
|
||||
redis-cli -p $PORT SCAN 0 MATCH k*
|
||||
redis-cli -p $PORT SCAN 0 COUNT 10
|
||||
redis-cli -p $PORT SCAN 0 MATCH k* COUNT 10
|
||||
```
|
||||
|
||||
### DBSIZE
|
||||
Get number of keys in current database.
|
||||
```bash
|
||||
redis-cli -p $PORT DBSIZE
|
||||
# → number of keys
|
||||
```
|
||||
|
||||
### FLUSHDB
|
||||
Remove all keys from current database.
|
||||
```bash
|
||||
redis-cli -p $PORT FLUSHDB
|
||||
# → OK
|
||||
```
|
||||
|
||||
## Configuration Commands
|
||||
|
||||
### CONFIG GET
|
||||
Get configuration parameter.
|
||||
```bash
|
||||
redis-cli -p $PORT CONFIG GET dir
|
||||
# → 1) "dir"
|
||||
# 2) "/path/to/db"
|
||||
|
||||
redis-cli -p $PORT CONFIG GET dbfilename
|
||||
# → 1) "dbfilename"
|
||||
# 2) "0.db"
|
||||
```
|
||||
|
||||
## Client Commands
|
||||
|
||||
### CLIENT SETNAME
|
||||
Set current connection name.
|
||||
```bash
|
||||
redis-cli -p $PORT CLIENT SETNAME myconnection
|
||||
# → OK
|
||||
```
|
||||
|
||||
### CLIENT GETNAME
|
||||
Get current connection name.
|
||||
```bash
|
||||
redis-cli -p $PORT CLIENT GETNAME
|
||||
# → myconnection
|
||||
```
|
||||
|
||||
## Transaction Commands
|
||||
|
||||
### MULTI
|
||||
Start a transaction block.
|
||||
```bash
|
||||
redis-cli -p $PORT MULTI
|
||||
# → OK
|
||||
```
|
||||
|
||||
### EXEC
|
||||
Execute all commands in transaction block.
|
||||
```bash
|
||||
redis-cli -p $PORT MULTI
|
||||
redis-cli -p $PORT SET key1 value1
|
||||
redis-cli -p $PORT SET key2 value2
|
||||
redis-cli -p $PORT EXEC
|
||||
# → 1) OK
|
||||
# 2) OK
|
||||
```
|
||||
|
||||
### DISCARD
|
||||
Discard all commands in transaction block.
|
||||
```bash
|
||||
redis-cli -p $PORT MULTI
|
||||
redis-cli -p $PORT SET key1 value1
|
||||
redis-cli -p $PORT DISCARD
|
||||
# → OK
|
||||
```
|
||||
|
||||
## AGE Commands
|
||||
|
||||
### AGE GENENC
|
||||
Generate ephemeral encryption keypair.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE GENENC
|
||||
# → 1) "recipient_public_key"
|
||||
# 2) "identity_secret_key"
|
||||
```
|
||||
|
||||
### AGE ENCRYPT
|
||||
Encrypt message with recipient public key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE ENCRYPT recipient_public_key "message"
|
||||
# → base64_encoded_ciphertext
|
||||
```
|
||||
|
||||
### AGE DECRYPT
|
||||
Decrypt ciphertext with identity secret key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE DECRYPT identity_secret_key base64_encoded_ciphertext
|
||||
# → decrypted_message
|
||||
```
|
||||
|
||||
### AGE GENSIGN
|
||||
Generate ephemeral signing keypair.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE GENSIGN
|
||||
# → 1) "verify_public_key"
|
||||
# 2) "sign_secret_key"
|
||||
```
|
||||
|
||||
### AGE SIGN
|
||||
Sign message with signing secret key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE SIGN sign_secret_key "message"
|
||||
# → base64_encoded_signature
|
||||
```
|
||||
|
||||
### AGE VERIFY
|
||||
Verify signature with verify public key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE VERIFY verify_public_key "message" base64_encoded_signature
|
||||
# → 1 (valid) or 0 (invalid)
|
||||
```
|
||||
|
||||
### AGE KEYGEN
|
||||
Generate and persist named encryption keypair.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE KEYGEN keyname
|
||||
# → 1) "recipient_public_key"
|
||||
# 2) "identity_secret_key"
|
||||
```
|
||||
|
||||
### AGE SIGNKEYGEN
|
||||
Generate and persist named signing keypair.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE SIGNKEYGEN keyname
|
||||
# → 1) "verify_public_key"
|
||||
# 2) "sign_secret_key"
|
||||
```
|
||||
|
||||
### AGE ENCRYPTNAME
|
||||
Encrypt message with named key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE ENCRYPTNAME keyname "message"
|
||||
# → base64_encoded_ciphertext
|
||||
```
|
||||
|
||||
### AGE DECRYPTNAME
|
||||
Decrypt ciphertext with named key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE DECRYPTNAME keyname base64_encoded_ciphertext
|
||||
# → decrypted_message
|
||||
```
|
||||
|
||||
### AGE SIGNNAME
|
||||
Sign message with named signing key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE SIGNNAME keyname "message"
|
||||
# → base64_encoded_signature
|
||||
```
|
||||
|
||||
### AGE VERIFYNAME
|
||||
Verify signature with named verify key.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE VERIFYNAME keyname "message" base64_encoded_signature
|
||||
# → 1 (valid) or 0 (invalid)
|
||||
```
|
||||
|
||||
### AGE LIST
|
||||
List all stored AGE keys.
|
||||
```bash
|
||||
redis-cli -p $PORT AGE LIST
|
||||
# → 1) "keyname1"
|
||||
# 2) "keyname2"
|
||||
```
|
||||
|
||||
## Server Information Commands
|
||||
|
||||
### INFO
|
||||
Get server information.
|
||||
```bash
|
||||
redis-cli -p $PORT INFO
|
||||
# → Server information
|
||||
```
|
||||
|
||||
With section:
|
||||
```bash
|
||||
redis-cli -p $PORT INFO replication
|
||||
# → Replication information
|
||||
```
|
||||
|
||||
### COMMAND
|
||||
Get command information (stub implementation).
|
||||
```bash
|
||||
redis-cli -p $PORT COMMAND
|
||||
# → Empty array (stub)
|
||||
```
|
||||
|
||||
## Database Selection
|
||||
|
||||
### SELECT
|
||||
Select database by index.
|
||||
```bash
|
||||
redis-cli -p $PORT SELECT 0
|
||||
# → OK
|
||||
```
|
||||
|
||||
|
||||
|
||||
This documentation covers all the list commands implemented in cmd.rs:
|
||||
1. LPUSH - push elements to the left (head) of a list
|
||||
2. RPUSH - push elements to the right (tail) of a list
|
||||
3. LPOP - pop elements from the left (head) of a list
|
||||
4. RPOP - pop elements from the right (tail) of a list
|
||||
5. BLPOP - blocking pop from the left with timeout
|
||||
6. BRPOP - blocking pop from the right with timeout
|
||||
7. LLEN - get list length
|
||||
8. LREM - remove elements from list
|
||||
9. LTRIM - trim list to range
|
||||
10. LINDEX - get element by index
|
||||
11. LRANGE - get range of elements
|
||||
|
herodb/docs/cmds.md (new file, 227 lines)
@@ -0,0 +1,227 @@
|
||||
|
||||
# HeroDB Redis Protocol Support: Commands & Client Usage
|
||||
|
||||
HeroDB is a Redis-compatible database built using the `redb` database backend.
|
||||
|
||||
It supports a subset of Redis commands over the standard RESP (Redis Serialization Protocol) via TCP, allowing you to interact with it using standard Redis clients like `redis-cli`, Python's `redis-py`, Node.js's `ioredis`, etc.
|
||||
|
||||
This document provides:
|
||||
- A list of all currently supported Redis commands.
|
||||
- Example usage with standard Redis clients.
|
||||
- Bash and Rust test-inspired usage examples.
|
||||
|
||||
## Quick Start
|
||||
|
||||
Assuming the server is running on localhost at port `$PORT`:
|
||||
|
||||
```bash
|
||||
# Build HeroDB
|
||||
cargo build --release
|
||||
|
||||
# Start HeroDB server
|
||||
./target/release/herodb --dir /tmp/herodb_data --port 6381 --debug
|
||||
```
|
||||
|
||||
## Using Standard Redis Clients
|
||||
|
||||
### With `redis-cli`
|
||||
|
||||
```bash
|
||||
redis-cli -p 6381 SET mykey "hello"
|
||||
redis-cli -p 6381 GET mykey
|
||||
```
|
||||
|
||||
### With Python (`redis-py`)
|
||||
|
||||
```python
|
||||
import redis
|
||||
|
||||
r = redis.Redis(host='localhost', port=6381, db=0)
|
||||
r.set('mykey', 'hello')
|
||||
print(r.get('mykey').decode())
|
||||
```
|
||||
|
||||
### With Node.js (`ioredis`)
|
||||
|
||||
```js
|
||||
const Redis = require("ioredis");
|
||||
const redis = new Redis({ port: 6381, host: "localhost" });
|
||||
|
||||
await redis.set("mykey", "hello");
|
||||
const value = await redis.get("mykey");
|
||||
console.log(value); // "hello"
|
||||
```
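
### With Rust (`redis` crate)

A sketch using the `redis` crate (the same crate herodb's test suite pulls in as a dev-dependency); the synchronous API is the simplest way to talk to the server:

```rust
use redis::Commands;

fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6381/")?;
    let mut con = client.get_connection()?;

    let _: () = con.set("mykey", "hello")?;
    let value: String = con.get("mykey")?;
    println!("{value}"); // "hello"
    Ok(())
}
```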
|
||||
|
||||
## Supported Redis Commands
|
||||
|
||||
### String Commands
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `SET` | Set a key to a string value | `SET name "Alice"` |
|
||||
| `GET` | Get the value of a key | `GET name` |
|
||||
| `DEL` | Delete one or more keys | `DEL name age` |
|
||||
| `INCR` | Increment the integer value of a key | `INCR counter` |
|
||||
| `DECR` | Decrement the integer value of a key | `DECR counter` |
|
||||
| `INCRBY` | Increment key by a given integer | `INCRBY counter 5` |
|
||||
| `DECRBY` | Decrement key by a given integer | `DECRBY counter 3` |
|
||||
| `EXISTS` | Check if a key exists | `EXISTS name` |
|
||||
| `TYPE` | Return the type of a key | `TYPE name` |
|
||||
|
||||
### Hash Commands
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `HSET` | Set field in hash stored at key | `HSET user:1 name "Alice"` |
|
||||
| `HGET` | Get value of a field in hash | `HGET user:1 name` |
|
||||
| `HGETALL` | Get all fields and values in a hash | `HGETALL user:1` |
|
||||
| `HDEL` | Delete one or more fields from hash | `HDEL user:1 name age` |
|
||||
| `HEXISTS` | Check if field exists in hash | `HEXISTS user:1 name` |
|
||||
| `HKEYS` | Get all field names in a hash | `HKEYS user:1` |
|
||||
| `HVALS` | Get all values in a hash | `HVALS user:1` |
|
||||
| `HLEN` | Get number of fields in a hash | `HLEN user:1` |
|
||||
| `HMGET` | Get values of multiple fields | `HMGET user:1 name age` |
|
||||
| `HSETNX` | Set field only if it does not exist | `HSETNX user:1 email alice@example.com` |
|
||||
|
||||
### List Commands
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `LPUSH` | Insert elements at the head of a list | `LPUSH mylist "item1" "item2"` |
|
||||
| `RPUSH` | Insert elements at the tail of a list | `RPUSH mylist "item3" "item4"` |
|
||||
| `LPOP` | Remove and return element from head | `LPOP mylist` |
|
||||
| `RPOP` | Remove and return element from tail | `RPOP mylist` |
|
||||
| `BLPOP` | Blocking remove from head with timeout | `BLPOP mylist1 mylist2 5` |
|
||||
| `BRPOP` | Blocking remove from tail with timeout | `BRPOP mylist1 mylist2 5` |
|
||||
| `LLEN` | Get the length of a list | `LLEN mylist` |
|
||||
| `LREM` | Remove elements from list | `LREM mylist 2 "item"` |
|
||||
| `LTRIM` | Trim list to specified range | `LTRIM mylist 0 5` |
|
||||
| `LINDEX` | Get element by index | `LINDEX mylist 0` |
|
||||
| `LRANGE` | Get range of elements | `LRANGE mylist 0 -1` |
|
||||
|
||||
### Keys & Scanning
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `KEYS` | Find all keys matching a pattern | `KEYS user:*` |
|
||||
| `SCAN` | Incrementally iterate keys | `SCAN 0 MATCH user:* COUNT 10` |
|
||||
|
||||
### Expiration
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `EXPIRE` | Set a key's time to live in seconds | `EXPIRE tempkey 60` |
|
||||
| `TTL` | Get the time to live for a key | `TTL tempkey` |
|
||||
| `PERSIST` | Remove the expiration from a key | `PERSIST tempkey` |
|
||||
|
||||
### Transactions
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `MULTI` | Start a transaction block | `MULTI` |
|
||||
| `EXEC` | Execute all commands in a transaction | `EXEC` |
|
||||
| `DISCARD` | Discard all commands in a transaction | `DISCARD` |
|
||||
|
||||
### Configuration
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `CONFIG GET` | Get configuration parameters | `CONFIG GET dir` |
|
||||
| `CONFIG SET` | Set configuration parameters | `CONFIG SET maxmemory 100mb` |
|
||||
|
||||
### Info & Monitoring
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|---------------|------------------------------------------|-------------------------------------------|
|
||||
| `INFO` | Get information and statistics about server | `INFO` |
|
||||
| `PING` | Ping the server | `PING` |
|
||||
|
||||
### AGE Cryptography Commands
|
||||
|
||||
| Command | Description | Example Usage |
|
||||
|--------------------|-----------------------------------------------|-----------------------------------------------|
|
||||
| `AGE GENENC` | Generate ephemeral encryption keypair | `AGE GENENC` |
|
||||
| `AGE GENSIGN` | Generate ephemeral signing keypair | `AGE GENSIGN` |
|
||||
| `AGE ENCRYPT` | Encrypt a message using a public key | `AGE ENCRYPT <recipient> "msg"` |
|
||||
| `AGE DECRYPT` | Decrypt a message using a secret key | `AGE DECRYPT <identity> <ciphertext>` |
|
||||
| `AGE SIGN` | Sign a message using a secret key | `AGE SIGN <sign_secret> "msg"` |
|
||||
| `AGE VERIFY` | Verify a signature using a public key | `AGE VERIFY <pubkey> "msg" <signature>` |
|
||||
| `AGE KEYGEN` | Create and persist a named encryption key | `AGE KEYGEN app1` |
|
||||
| `AGE SIGNKEYGEN` | Create and persist a named signing key | `AGE SIGNKEYGEN app1` |
|
||||
| `AGE ENCRYPTNAME` | Encrypt using a named key | `AGE ENCRYPTNAME app1 "msg"` |
|
||||
| `AGE DECRYPTNAME` | Decrypt using a named key | `AGE DECRYPTNAME app1 <ciphertext>` |
|
||||
| `AGE SIGNNAME` | Sign using a named key | `AGE SIGNNAME app1 "msg"` |
|
||||
| `AGE VERIFYNAME` | Verify using a named key | `AGE VERIFYNAME app1 "msg" <signature>` |
|
||||
| `AGE LIST` | List all persisted named keys | `AGE LIST` |
|
||||
|
||||
> Note: AGE commands are not part of standard Redis. They are HeroDB-specific extensions for cryptographic operations.
|
||||
|
||||
## Example Usage
|
||||
|
||||
### Basic String Operations
|
||||
|
||||
```bash
|
||||
redis-cli -p 6381 SET greeting "Hello, HeroDB!"
|
||||
redis-cli -p 6381 GET greeting
|
||||
# → "Hello, HeroDB!"
|
||||
|
||||
redis-cli -p 6381 INCR visits
|
||||
redis-cli -p 6381 INCR visits
|
||||
redis-cli -p 6381 GET visits
|
||||
# → "2"
|
||||
```
|
||||
|
||||
### Hash Operations
|
||||
|
||||
```bash
|
||||
redis-cli -p 6381 HSET user:1000 name "Alice" age "30" city "NYC"
|
||||
redis-cli -p 6381 HGET user:1000 name
|
||||
# → "Alice"
|
||||
|
||||
redis-cli -p 6381 HGETALL user:1000
|
||||
# → 1) "name"
|
||||
# 2) "Alice"
|
||||
# 3) "age"
|
||||
# 4) "30"
|
||||
# 5) "city"
|
||||
# 6) "NYC"
|
||||
```
|
||||
|
||||
### Expiration
|
||||
|
||||
```bash
|
||||
redis-cli -p 6381 SET tempkey "temporary"
|
||||
redis-cli -p 6381 EXPIRE tempkey 5
|
||||
redis-cli -p 6381 TTL tempkey
|
||||
# → (integer) 4
|
||||
|
||||
# After 5 seconds:
|
||||
redis-cli -p 6381 GET tempkey
|
||||
# → (nil)
|
||||
```
|
||||
|
||||
### Transactions
|
||||
|
||||
```bash
|
||||
redis-cli -p 6381 MULTI
|
||||
redis-cli -p 6381 SET txkey1 "value1"
|
||||
redis-cli -p 6381 SET txkey2 "value2"
|
||||
redis-cli -p 6381 INCR counter
|
||||
redis-cli -p 6381 EXEC
|
||||
# → 1) OK
|
||||
# 2) OK
|
||||
# 3) (integer) 3
|
||||
```
|
||||
|
||||
### Scanning Keys
|
||||
|
||||
```bash
|
||||
redis-cli -p 6381 SET scankey1 "val1"
|
||||
redis-cli -p 6381 SET scankey2 "val2"
|
||||
redis-cli -p 6381 HSET scanhash field1 "val1"
|
||||
|
||||
redis-cli -p 6381 SCAN 0 MATCH scankey*
|
||||
# → 1) "0"
|
||||
# 2) 1) "scankey1"
|
||||
# 2) "scankey2"
|
||||
```
|
herodb/examples/age_bash_demo.sh (new executable file, 71 lines)
@@ -0,0 +1,71 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Start the herodb server in the background
|
||||
echo "Starting herodb server..."
|
||||
cargo run -p herodb -- --dir /tmp/herodb_age_test --port 6382 --debug --encryption-key "testkey" &
|
||||
SERVER_PID=$!
|
||||
sleep 2 # Give the server a moment to start
|
||||
|
||||
REDIS_CLI="redis-cli -p 6382"
|
||||
|
||||
echo "--- Generating and Storing Encryption Keys ---"
|
||||
# The new AGE commands are 'AGE KEYGEN <name>' etc., based on src/cmd.rs
|
||||
# This script uses older commands like 'AGE.GENERATE_KEYPAIR alice'
|
||||
# The demo script needs to be updated to match the implemented commands.
|
||||
# Let's assume the commands in the script are what's expected for now,
|
||||
# but note this discrepancy. The new commands are AGE KEYGEN etc.
|
||||
# The script here uses a different syntax not found in src/cmd.rs like 'AGE.GENERATE_KEYPAIR'.
|
||||
# For now, I will modify the script to fit the actual implementation.
|
||||
|
||||
echo "--- Generating and Storing Encryption Keys ---"
|
||||
$REDIS_CLI AGE KEYGEN alice
|
||||
$REDIS_CLI AGE KEYGEN bob
|
||||
|
||||
echo "--- Encrypting and Decrypting a Message ---"
|
||||
MESSAGE="Hello, AGE encryption!"
|
||||
# The new logic stores keys internally and does not expose a command to get the public key.
|
||||
# We will encrypt by name.
|
||||
ALICE_PUBKEY_REPLY=$($REDIS_CLI AGE KEYGEN alice | head -n 2 | tail -n 1)
|
||||
echo "Alice's Public Key: $ALICE_PUBKEY_REPLY"
|
||||
|
||||
echo "Encrypting message: '$MESSAGE' with Alice's identity..."
|
||||
# AGE.ENCRYPT recipient message. But since we use persistent keys, let's use ENCRYPTNAME
|
||||
CIPHERTEXT=$($REDIS_CLI AGE ENCRYPTNAME alice "$MESSAGE")
|
||||
echo "Ciphertext: $CIPHERTEXT"
|
||||
|
||||
echo "Decrypting ciphertext with Alice's private key..."
|
||||
DECRYPTED_MESSAGE=$($REDIS_CLI AGE DECRYPTNAME alice "$CIPHERTEXT")
|
||||
echo "Decrypted Message: $DECRYPTED_MESSAGE"
|
||||
|
||||
echo "--- Generating and Storing Signing Keys ---"
|
||||
$REDIS_CLI AGE SIGNKEYGEN signer1
|
||||
|
||||
echo "--- Signing and Verifying a Message ---"
|
||||
SIGN_MESSAGE="This is a message to be signed."
|
||||
# Similar to above, we don't have GET_SIGN_PUBKEY. We will verify by name.
|
||||
|
||||
echo "Signing message: '$SIGN_MESSAGE' with signer1's private key..."
|
||||
SIGNATURE=$($REDIS_CLI AGE SIGNNAME signer1 "$SIGN_MESSAGE")
|
||||
echo "Signature: $SIGNATURE"
|
||||
|
||||
echo "Verifying signature with signer1's public key..."
|
||||
VERIFY_RESULT=$($REDIS_CLI AGE VERIFYNAME signer1 "$SIGN_MESSAGE" "$SIGNATURE")
|
||||
echo "Verification Result: $VERIFY_RESULT"
|
||||
|
||||
|
||||
# There is no DELETE_KEYPAIR command in the implementation
|
||||
echo "--- Cleaning up keys (manual in herodb) ---"
|
||||
# We would use DEL for age:key:alice, etc.
|
||||
$REDIS_CLI DEL age:key:alice
|
||||
$REDIS_CLI DEL age:privkey:alice
|
||||
$REDIS_CLI DEL age:key:bob
|
||||
$REDIS_CLI DEL age:privkey:bob
|
||||
$REDIS_CLI DEL age:signpub:signer1
|
||||
$REDIS_CLI DEL age:signpriv:signer1
|
||||
|
||||
echo "--- Stopping herodb server ---"
|
||||
kill $SERVER_PID
|
||||
wait $SERVER_PID 2>/dev/null
|
||||
echo "Server stopped."
|
||||
|
||||
echo "Bash demo complete."
|
herodb/examples/age_persist_demo.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
use std::io::{Read, Write};
|
||||
use std::net::TcpStream;
|
||||
|
||||
// Minimal RESP helpers
|
||||
fn arr(parts: &[&str]) -> String {
|
||||
let mut out = format!("*{}\r\n", parts.len());
|
||||
for p in parts {
|
||||
out.push_str(&format!("${}\r\n{}\r\n", p.len(), p));
|
||||
}
|
||||
out
|
||||
}
|
||||
fn read_reply(s: &mut TcpStream) -> String {
|
||||
let mut buf = [0u8; 65536];
|
||||
let n = s.read(&mut buf).unwrap();
|
||||
String::from_utf8_lossy(&buf[..n]).to_string()
|
||||
}
|
||||
fn parse_two_bulk(reply: &str) -> Option<(String,String)> {
|
||||
let mut lines = reply.split("\r\n");
|
||||
if lines.next()? != "*2" { return None; }
|
||||
let _n = lines.next()?;
|
||||
let a = lines.next()?.to_string();
|
||||
let _m = lines.next()?;
|
||||
let b = lines.next()?.to_string();
|
||||
Some((a,b))
|
||||
}
|
||||
fn parse_bulk(reply: &str) -> Option<String> {
|
||||
let mut lines = reply.split("\r\n");
|
||||
let hdr = lines.next()?;
|
||||
if !hdr.starts_with('$') { return None; }
|
||||
Some(lines.next()?.to_string())
|
||||
}
|
||||
fn parse_simple(reply: &str) -> Option<String> {
|
||||
let mut lines = reply.split("\r\n");
|
||||
let hdr = lines.next()?;
|
||||
if !hdr.starts_with('+') { return None; }
|
||||
Some(hdr[1..].to_string())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let mut args = std::env::args().skip(1);
|
||||
let host = args.next().unwrap_or_else(|| "127.0.0.1".into());
|
||||
let port = args.next().unwrap_or_else(|| "6379".into());
|
||||
let addr = format!("{host}:{port}");
|
||||
println!("Connecting to {addr}...");
|
||||
let mut s = TcpStream::connect(addr).expect("connect");
|
||||
|
||||
// Generate & persist X25519 enc keys under name "alice"
|
||||
s.write_all(arr(&["age","keygen","alice"]).as_bytes()).unwrap();
|
||||
let (_alice_recip, _alice_ident) = parse_two_bulk(&read_reply(&mut s)).expect("gen enc");
|
||||
|
||||
// Generate & persist Ed25519 signing key under name "signer"
|
||||
s.write_all(arr(&["age","signkeygen","signer"]).as_bytes()).unwrap();
|
||||
let (_verify, _secret) = parse_two_bulk(&read_reply(&mut s)).expect("gen sign");
|
||||
|
||||
// Encrypt by name
|
||||
let msg = "hello from persistent keys";
|
||||
s.write_all(arr(&["age","encryptname","alice", msg]).as_bytes()).unwrap();
|
||||
let ct_b64 = parse_bulk(&read_reply(&mut s)).expect("ct b64");
|
||||
println!("ciphertext b64: {}", ct_b64);
|
||||
|
||||
// Decrypt by name
|
||||
s.write_all(arr(&["age","decryptname","alice", &ct_b64]).as_bytes()).unwrap();
|
||||
let pt = parse_bulk(&read_reply(&mut s)).expect("pt");
|
||||
assert_eq!(pt, msg);
|
||||
println!("decrypted ok");
|
||||
|
||||
// Sign by name
|
||||
s.write_all(arr(&["age","signname","signer", msg]).as_bytes()).unwrap();
|
||||
let sig_b64 = parse_bulk(&read_reply(&mut s)).expect("sig b64");
|
||||
|
||||
// Verify by name
|
||||
s.write_all(arr(&["age","verifyname","signer", msg, &sig_b64]).as_bytes()).unwrap();
|
||||
let ok = parse_simple(&read_reply(&mut s)).expect("verify");
|
||||
assert_eq!(ok, "1");
|
||||
println!("signature verified");
|
||||
|
||||
// List names
|
||||
s.write_all(arr(&["age","list"]).as_bytes()).unwrap();
|
||||
let list = read_reply(&mut s);
|
||||
println!("LIST -> {list}");
|
||||
|
||||
println!("✔ persistent AGE workflow complete.");
|
||||
}
|
24
herodb/run_tests.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "🧪 Running HeroDB Redis Compatibility Tests"
|
||||
echo "=========================================="
|
||||
|
||||
echo ""
|
||||
echo "1️⃣ Running Simple Redis Tests (4 tests)..."
|
||||
echo "----------------------------------------------"
|
||||
cargo test -p herodb --test simple_redis_test -- --nocapture
|
||||
|
||||
echo ""
|
||||
echo "2️⃣ Running Comprehensive Redis Integration Tests (13 tests)..."
|
||||
echo "----------------------------------------------------------------"
|
||||
cargo test -p herodb --test redis_integration_tests -- --nocapture
|
||||
cargo test -p herodb --test debug_hset -- --nocapture
|
||||
cargo test -p herodb --test debug_hset_simple -- --nocapture
|
||||
|
||||
echo ""
|
||||
echo "3️⃣ Running All Workspace Tests..."
|
||||
echo "--------------------------------"
|
||||
cargo test --workspace -- --nocapture
|
||||
|
||||
echo ""
|
||||
echo "✅ Test execution completed!"
|
99
herodb/specs/backgroundinfo/encrypt.md
Normal file
@@ -0,0 +1,99 @@
|
||||
|
||||
### Cargo.toml
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
chacha20poly1305 = { version = "0.10", features = ["xchacha20"] }
|
||||
rand = "0.8"
|
||||
sha2 = "0.10"
|
||||
```
|
||||
|
||||
### `crypto_factory.rs`
|
||||
|
||||
```rust
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit, OsRng},
|
||||
XChaCha20Poly1305, Key, XNonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
const VERSION: u8 = 1;
|
||||
const NONCE_LEN: usize = 24;
|
||||
const TAG_LEN: usize = 16;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum CryptoError {
|
||||
Format, // wrong length / header
|
||||
Version(u8), // unknown version
|
||||
Decrypt, // wrong key or corrupted data
|
||||
}
|
||||
|
||||
/// Super-simple factory: new(secret) + encrypt(bytes) + decrypt(bytes)
|
||||
pub struct CryptoFactory {
|
||||
key: Key<XChaCha20Poly1305>,
|
||||
}
|
||||
|
||||
impl CryptoFactory {
|
||||
/// Accepts any secret bytes; turns them into a 32-byte key (SHA-256).
|
||||
/// (If your secret is already 32 bytes, this is still fine.)
|
||||
pub fn new<S: AsRef<[u8]>>(secret: S) -> Self {
|
||||
let mut h = Sha256::new();
|
||||
h.update(b"xchacha20poly1305-factory:v1"); // domain separation
|
||||
h.update(secret.as_ref());
|
||||
let digest = h.finalize(); // 32 bytes
|
||||
let key = Key::<XChaCha20Poly1305>::from_slice(&digest).to_owned();
|
||||
Self { key }
|
||||
}
|
||||
|
||||
/// Output layout: [version:1][nonce:24][ciphertext||tag]
|
||||
pub fn encrypt(&self, plaintext: &[u8]) -> Vec<u8> {
|
||||
let cipher = XChaCha20Poly1305::new(&self.key);
|
||||
|
||||
let mut nonce_bytes = [0u8; NONCE_LEN];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
let nonce = XNonce::from_slice(&nonce_bytes);
|
||||
|
||||
let mut out = Vec::with_capacity(1 + NONCE_LEN + plaintext.len() + TAG_LEN);
|
||||
out.push(VERSION);
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
|
||||
let ct = cipher.encrypt(nonce, plaintext).expect("encrypt");
|
||||
out.extend_from_slice(&ct);
|
||||
out
|
||||
}
|
||||
|
||||
pub fn decrypt(&self, blob: &[u8]) -> Result<Vec<u8>, CryptoError> {
|
||||
if blob.len() < 1 + NONCE_LEN + TAG_LEN {
|
||||
return Err(CryptoError::Format);
|
||||
}
|
||||
let ver = blob[0];
|
||||
if ver != VERSION {
|
||||
return Err(CryptoError::Version(ver));
|
||||
}
|
||||
|
||||
let nonce = XNonce::from_slice(&blob[1..1 + NONCE_LEN]);
|
||||
let ct = &blob[1 + NONCE_LEN..];
|
||||
|
||||
let cipher = XChaCha20Poly1305::new(&self.key);
|
||||
cipher.decrypt(nonce, ct).map_err(|_| CryptoError::Decrypt)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Tiny usage example
|
||||
|
||||
```rust
|
||||
fn main() {
|
||||
let f = CryptoFactory::new(b"super-secret-key-material");
|
||||
let val = b"\x00\xFFbinary\x01\x02\x03";
|
||||
|
||||
let blob = f.encrypt(val);
|
||||
let roundtrip = f.decrypt(&blob).unwrap();
|
||||
|
||||
assert_eq!(roundtrip, val);
|
||||
}
|
||||
```
|
||||
|
||||
That’s it: `new(secret)`, `encrypt(bytes)`, `decrypt(bytes)`.
|
||||
You can stash the returned `blob` directly in your storage layer behind Redis.
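
A quick sanity check of the error paths (a sketch only, assuming the `CryptoFactory` and `CryptoError` defined above are in scope):

```rust
fn demo_errors() {
    let good = CryptoFactory::new(b"right-key");
    let bad = CryptoFactory::new(b"wrong-key");

    let blob = good.encrypt(b"payload");

    // Wrong key: AEAD tag verification fails -> CryptoError::Decrypt
    assert!(matches!(bad.decrypt(&blob), Err(CryptoError::Decrypt)));

    // Truncated blob: shorter than [version][nonce][tag] -> CryptoError::Format
    assert!(matches!(good.decrypt(&blob[..10]), Err(CryptoError::Format)));

    // Unknown leading version byte -> CryptoError::Version(9)
    let mut tampered = blob.clone();
    tampered[0] = 9;
    assert!(matches!(good.decrypt(&tampered), Err(CryptoError::Version(9))));
}
```

Truncation trips the length check, an unknown leading byte trips the version check, and a mismatched key surfaces as `Decrypt` from the AEAD tag verification.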
|
150
herodb/specs/backgroundinfo/redis_basic_client.md
Normal file
@@ -0,0 +1,150 @@
|
||||
]
|
||||
# INFO
|
||||
|
||||
**What it does**
|
||||
Returns server stats in a human-readable text block, optionally filtered by sections. Typical sections: `server`, `clients`, `memory`, `persistence`, `stats`, `replication`, `cpu`, `commandstats`, `latencystats`, `cluster`, `modules`, `keyspace`, `errorstats`. Special args: `all`, `default`, `everything`. The reply is a **Bulk String** with `# <Section>` headers and `key:value` lines. ([Redis][1])
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
INFO [section [section ...]]
|
||||
```
|
||||
|
||||
**Return (RESP2/RESP3)**: Bulk String. ([Redis][1])
|
||||
|
||||
**RESP request/response**
|
||||
|
||||
```
|
||||
# Request: whole default set
|
||||
*1\r\n$4\r\nINFO\r\n
|
||||
|
||||
# Request: a specific section, e.g., clients
|
||||
*2\r\n$4\r\nINFO\r\n$7\r\nclients\r\n
|
||||
|
||||
# Response (prefix shown; body is long)
|
||||
$1234\r\n# Server\r\nredis_version:7.4.0\r\n...\r\n# Clients\r\nconnected_clients:3\r\n...\r\n
|
||||
```
|
||||
|
||||
(Reply type/format per RESP spec and the INFO page.) ([Redis][2])
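
As a complementary sketch (client-side only, not part of the server): once the bulk body has been extracted from the `$<len>` framing, the `# Section` / `key:value` layout is easy to index, e.g.:

```rust
use std::collections::HashMap;

/// Parse an already-extracted INFO payload (the bulk-string body, without the
/// `$<len>\r\n` framing) into section -> key -> value maps.
fn parse_info(body: &str) -> HashMap<String, HashMap<String, String>> {
    let mut sections = HashMap::new();
    let mut current = String::from("default");
    for line in body.split("\r\n") {
        if line.is_empty() {
            continue;
        }
        if let Some(name) = line.strip_prefix("# ") {
            current = name.to_lowercase();
        } else if let Some((k, v)) = line.split_once(':') {
            sections
                .entry(current.clone())
                .or_insert_with(HashMap::new)
                .insert(k.to_string(), v.to_string());
        }
    }
    sections
}
```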
|
||||
|
||||
---
|
||||
|
||||
# Connection “name” (there is **no** top-level `NAME` command)
|
||||
|
||||
Redis doesn’t have a standalone `NAME` command. Connection names are handled via `CLIENT SETNAME` and retrieved via `CLIENT GETNAME`. ([Redis][3])
|
||||
|
||||
## CLIENT SETNAME
|
||||
|
||||
Assigns a human label to the current connection (shown in `CLIENT LIST`, logs, etc.). No spaces allowed in the name; empty string clears it. Length is limited by Redis string limits (practically huge). **Reply**: Simple String `OK`. ([Redis][4])
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
CLIENT SETNAME connection-name
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
# Set the name "myapp"
|
||||
*3\r\n$6\r\nCLIENT\r\n$7\r\nSETNAME\r\n$5\r\nmyapp\r\n
|
||||
|
||||
# Reply
|
||||
+OK\r\n
|
||||
```
|
||||
|
||||
## CLIENT GETNAME
|
||||
|
||||
Returns the current connection’s name or **Null Bulk String** if unset. ([Redis][5])
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
CLIENT GETNAME
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
# Before SETNAME:
|
||||
*2\r\n$6\r\nCLIENT\r\n$7\r\nGETNAME\r\n
|
||||
$-1\r\n # nil (no name)
|
||||
|
||||
# After SETNAME myapp:
|
||||
*2\r\n$6\r\nCLIENT\r\n$7\r\nGETNAME\r\n
|
||||
$5\r\nmyapp\r\n
|
||||
```
|
||||
|
||||
(Null/Bulk String encoding per RESP spec.) ([Redis][2])
|
||||
|
||||
---
|
||||
|
||||
# CLIENT (container command + key subcommands)
|
||||
|
||||
`CLIENT` is a **container**; use subcommands like `CLIENT LIST`, `CLIENT INFO`, `CLIENT ID`, `CLIENT KILL`, `CLIENT TRACKING`, etc. Call `CLIENT HELP` to enumerate them. ([Redis][3])
|
||||
|
||||
## CLIENT LIST
|
||||
|
||||
Shows all connections as a single **Bulk String**: one line per client with `field=value` pairs (includes `id`, `addr`, `name`, `db`, `user`, `resp`, and more). Filters: `TYPE` and `ID`. **Return**: Bulk String (RESP2/RESP3). ([Redis][6])
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
CLIENT LIST [TYPE <NORMAL|MASTER|REPLICA|PUBSUB>] [ID client-id ...]
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
*2\r\n$6\r\nCLIENT\r\n$4\r\nLIST\r\n
|
||||
|
||||
# Reply (single Bulk String; example with one line shown)
|
||||
$188\r\nid=7 addr=127.0.0.1:60840 laddr=127.0.0.1:6379 fd=8 name=myapp age=12 idle=3 flags=N db=0 ...\r\n
|
||||
```
|
||||
|
||||
## CLIENT INFO
|
||||
|
||||
Returns info for **this** connection only (same format/fields as a single line of `CLIENT LIST`). **Return**: Bulk String. Available since 6.2.0. ([Redis][7])
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
CLIENT INFO
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
*2\r\n$6\r\nCLIENT\r\n$4\r\nINFO\r\n
|
||||
|
||||
$160\r\nid=7 addr=127.0.0.1:60840 laddr=127.0.0.1:6379 fd=8 name=myapp db=0 user=default resp=2 ...\r\n
|
||||
```
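
Since `CLIENT LIST` and `CLIENT INFO` share the same `field=value` line format, a client-side helper can be tiny (a sketch, not part of herodb):

```rust
/// Parse one line of CLIENT LIST / CLIENT INFO output into field -> value pairs.
/// Fields are space-separated `key=value` tokens.
/// e.g. parse_client_line("id=7 addr=127.0.0.1:60840 name=myapp").get("name") -> Some(&"myapp")
fn parse_client_line(line: &str) -> std::collections::HashMap<&str, &str> {
    line.split_whitespace()
        .filter_map(|tok| tok.split_once('='))
        .collect()
}
```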
|
||||
|
||||
---
|
||||
|
||||
# RESP notes you’ll need for your parser
|
||||
|
||||
* **Requests** are Arrays: `*N\r\n` followed by `N` Bulk Strings for verb/args.
|
||||
* **Common replies here**: Simple String (`+OK\r\n`), Bulk String (`$<len>\r\n...\r\n`), and **Null Bulk String** (`$-1\r\n`). (These cover `INFO`, `CLIENT LIST/INFO`, `CLIENT GETNAME`, `CLIENT SETNAME`.) ([Redis][2])
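
A minimal classifier for just these three reply shapes might look like the sketch below (illustrative only; it assumes the full frame is already buffered):

```rust
/// Rough classification of the reply shapes listed above.
enum Reply {
    Simple(String), // +OK\r\n
    Bulk(String),   // $<len>\r\n...\r\n
    NullBulk,       // $-1\r\n
    Other,
}

fn classify(frame: &str) -> Reply {
    if let Some(rest) = frame.strip_prefix('+') {
        Reply::Simple(rest.trim_end_matches("\r\n").to_string())
    } else if frame.starts_with("$-1\r\n") {
        Reply::NullBulk
    } else if frame.starts_with('$') {
        // the body sits between the first and second CRLF
        frame
            .splitn(3, "\r\n")
            .nth(1)
            .map(|s| Reply::Bulk(s.to_string()))
            .unwrap_or(Reply::Other)
    } else {
        Reply::Other
    }
}
```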
|
||||
|
||||
---
|
||||
|
||||
## Sources (checked)
|
||||
|
||||
* INFO command (syntax, sections, behavior). ([Redis][1])
|
||||
* RESP spec (request/response framing, Bulk/Null Bulk Strings). ([Redis][2])
|
||||
* CLIENT container + subcommands index. ([Redis][3])
|
||||
* CLIENT LIST (fields, bulk-string return, filters). ([Redis][6])
|
||||
* CLIENT INFO (exists since 6.2, reply format). ([Redis][7])
|
||||
* CLIENT SETNAME (no spaces; clears with empty string; huge length OK). ([Redis][4])
|
||||
* CLIENT GETNAME (nil if unset). ([Redis][5])
|
||||
|
||||
If you want, I can fold this into a tiny Rust “command + RESP” test harness that exercises `INFO`, `CLIENT SETNAME/GETNAME`, `CLIENT LIST`, and `CLIENT INFO` against your in-mem RESP parser.
|
||||
|
||||
[1]: https://redis.io/docs/latest/commands/info/ "INFO | Docs"
|
||||
[2]: https://redis.io/docs/latest/develop/reference/protocol-spec/?utm_source=chatgpt.com "Redis serialization protocol specification | Docs"
|
||||
[3]: https://redis.io/docs/latest/commands/client/ "CLIENT | Docs"
|
||||
[4]: https://redis.io/docs/latest/commands/client-setname/?utm_source=chatgpt.com "CLIENT SETNAME | Docs"
|
||||
[5]: https://redis.io/docs/latest/commands/client-getname/?utm_source=chatgpt.com "CLIENT GETNAME | Docs"
|
||||
[6]: https://redis.io/docs/latest/commands/client-list/ "CLIENT LIST | Docs"
|
||||
[7]: https://redis.io/docs/latest/commands/client-info/?utm_source=chatgpt.com "CLIENT INFO | Docs"
|
@@ -248,3 +248,4 @@ Protocol and reply structure same as SCAN.
|
||||
| `SSCAN` | Iterate over set members | `[cursor, array]` |
|
||||
| `ZSCAN` | Iterate over sorted set | `[cursor, array]` |
|
||||
|
||||
##
|
259
herodb/specs/backgroundinfo/redis_lists.md
Normal file
@@ -0,0 +1,259 @@
|
||||
|
||||
# 1) Data model & basics
|
||||
|
||||
* A **queue** is a List at key `queue:<name>`.
|
||||
* Common patterns:
|
||||
|
||||
* **Producer**: `LPUSH queue item` (or `RPUSH`)
|
||||
* **Consumer (non-blocking)**: `RPOP queue` (or `LPOP`)
|
||||
* **Consumer (blocking)**: `BRPOP queue timeout` (or `BLPOP`)
|
||||
* If a key doesn’t exist, it’s treated as an **empty list**; push **creates** the list; when the **last element is popped, the key is deleted**. ([Redis][1])
|
||||
|
||||
---
|
||||
|
||||
# 2) Commands to implement (queues via Lists)
|
||||
|
||||
## LPUSH / RPUSH
|
||||
|
||||
Prepend/append one or more elements. Create the list if it doesn’t exist.
|
||||
**Return**: Integer = new length of the list.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
LPUSH key element [element ...]
|
||||
RPUSH key element [element ...]
|
||||
```
|
||||
|
||||
**RESP (example)**
|
||||
|
||||
```
|
||||
*3\r\n$5\r\nLPUSH\r\n$5\r\nqueue\r\n$5\r\njob-1\r\n
|
||||
:1\r\n
|
||||
```
|
||||
|
||||
Refs: semantics & multi-arg ordering. ([Redis][1])
|
||||
|
||||
### LPUSHX / RPUSHX (optional but useful)
|
||||
|
||||
Like LPUSH/RPUSH, **but only if the list exists**.
|
||||
**Return**: Integer = new length (0 if key didn’t exist).
|
||||
|
||||
```
|
||||
LPUSHX key element [element ...]
|
||||
RPUSHX key element [element ...]
|
||||
```
|
||||
|
||||
Refs: command index. ([Redis][2])
|
||||
|
||||
---
|
||||
|
||||
## LPOP / RPOP
|
||||
|
||||
Remove & return one element (default) or, since Redis 6.2, up to COUNT elements.
|
||||
If the list is empty or missing, **Null** is returned (a Null Bulk without COUNT, a Null Array when COUNT is given and the key is missing).
|
||||
**Return**:
|
||||
|
||||
* No COUNT: Bulk String or Null Bulk.
|
||||
* With COUNT: Array of Bulk Strings (possibly empty) or Null Array if key missing.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
LPOP key [count]
|
||||
RPOP key [count]
|
||||
```
|
||||
|
||||
**RESP (no COUNT)**
|
||||
|
||||
```
|
||||
*2\r\n$4\r\nRPOP\r\n$5\r\nqueue\r\n
|
||||
$5\r\njob-1\r\n # or $-1\r\n if empty
|
||||
```
|
||||
|
||||
**RESP (COUNT=2)**
|
||||
|
||||
```
|
||||
*3\r\n$4\r\nLPOP\r\n$5\r\nqueue\r\n$1\r\n2\r\n
|
||||
*2\r\n$5\r\njob-2\r\n$5\r\njob-3\r\n # or *-1\r\n if key missing
|
||||
```
|
||||
|
||||
Refs: LPOP w/ COUNT; general pop semantics. ([Redis][3])
|
||||
|
||||
---
|
||||
|
||||
## BLPOP / BRPOP (blocking consumers)
|
||||
|
||||
Block until an element is available in any of the given lists or until `timeout` (seconds, **double**, `0` = forever).
|
||||
**Return** on success: **Array \[key, element]**.
|
||||
**Return** on timeout: **Null Array**.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
BLPOP key [key ...] timeout
|
||||
BRPOP key [key ...] timeout
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
*3\r\n$5\r\nBRPOP\r\n$5\r\nqueue\r\n$1\r\n0\r\n # block forever
|
||||
|
||||
# Success reply
|
||||
*2\r\n$5\r\nqueue\r\n$5\r\njob-4\r\n
|
||||
|
||||
# Timeout reply
|
||||
*-1\r\n
|
||||
```
|
||||
|
||||
**Implementation notes**
|
||||
|
||||
* If any listed key is non-empty at call time, reply **immediately** from the first non-empty key **by the command’s key order**.
|
||||
* Otherwise, put the client into a **blocked state** (register per-key waiters). On any `LPUSH/RPUSH` to those keys, **wake the earliest waiter** and serve it atomically.
|
||||
* If timeout expires, return **Null Array** and clear the blocked state.
|
||||
Refs: timeout semantics and return shape. ([Redis][4])
|
||||
|
||||
---
|
||||
|
||||
## LMOVE / BLMOVE (atomic move; replaces RPOPLPUSH/BRPOPLPUSH)
|
||||
|
||||
Atomically **pop from one side** of `source` and **push to one side** of `destination`.
|
||||
|
||||
* Use for **reliable queues** (move to a *processing* list).
|
||||
* `BLMOVE` blocks like `BLPOP` when `source` is empty.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```
|
||||
LMOVE source destination LEFT|RIGHT LEFT|RIGHT
|
||||
BLMOVE source destination LEFT|RIGHT LEFT|RIGHT timeout
|
||||
```
|
||||
|
||||
**Return**: Bulk String with the moved element, or Null if `source` is empty (LMOVE); `BLMOVE` blocks instead and returns Null on timeout.
|
||||
|
||||
**RESP (LMOVE RIGHT->LEFT)**
|
||||
|
||||
```
|
||||
*5\r\n$5\r\nLMOVE\r\n$6\r\nsource\r\n$3\r\ndst\r\n$5\r\nRIGHT\r\n$4\r\nLEFT\r\n
|
||||
$5\r\njob-5\r\n
|
||||
```
|
||||
|
||||
**Notes**
|
||||
|
||||
* Prefer `LMOVE/BLMOVE` over deprecated `RPOPLPUSH/BRPOPLPUSH`.
|
||||
* Pattern: consumer `LMOVE queue processing RIGHT LEFT` → work → `LREM processing 1 <elem>` to ACK; a reaper can requeue stale items.
|
||||
Refs: LMOVE/BLMOVE behavior and reliable-queue pattern; deprecation of RPOPLPUSH. ([Redis][5])
|
||||
|
||||
*(Compat: you can still implement `RPOPLPUSH source dest` and `BRPOPLPUSH source dest timeout`, but mark them deprecated and map to LMOVE/BLMOVE.)* ([Redis][6])
|
||||
|
||||
---
|
||||
|
||||
## LLEN (length)
|
||||
|
||||
Useful for metrics/backpressure.
|
||||
|
||||
```
|
||||
LLEN key
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
*2\r\n$4\r\nLLEN\r\n$5\r\nqueue\r\n
|
||||
:3\r\n
|
||||
```
|
||||
|
||||
Refs: list overview mentioning LLEN. ([Redis][7])
|
||||
|
||||
---
|
||||
|
||||
## LREM (ack for “reliable” processing)
|
||||
|
||||
Remove occurrences of `element` from the list (head→tail scan).
|
||||
Use `count=1` to ACK a single processed item from `processing`.
|
||||
|
||||
```
|
||||
LREM key count element
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
*4\r\n$4\r\nLREM\r\n$10\r\nprocessing\r\n$1\r\n1\r\n$5\r\njob-5\r\n
|
||||
:1\r\n
|
||||
```
|
||||
|
||||
Refs: reliable pattern mentions LREM to ACK. ([Redis][5])
|
||||
|
||||
---
|
||||
|
||||
## LTRIM (bounded queues / retention)
|
||||
|
||||
Keep only `[start, stop]` range; everything else is dropped.
|
||||
Use to cap queue length after pushes.
|
||||
|
||||
```
|
||||
LTRIM key start stop
|
||||
```
|
||||
|
||||
**RESP**
|
||||
|
||||
```
|
||||
*4\r\n$5\r\nLTRIM\r\n$5\r\nqueue\r\n$1\r\n0\r\n$3\r\n999\r\n
|
||||
+OK\r\n
|
||||
```
|
||||
|
||||
Refs: list overview includes LTRIM for retention. ([Redis][7])
|
||||
|
||||
---
|
||||
|
||||
## LRANGE / LINDEX (debugging / peeking)
|
||||
|
||||
* `LRANGE key start stop` → Array of elements (non-destructive).
|
||||
* `LINDEX key index` → one element or Null.
|
||||
|
||||
These aren’t required for queue semantics, but handy. ([Redis][7])
|
||||
|
||||
---
|
||||
|
||||
# 3) Errors & types
|
||||
|
||||
* Wrong type: `-WRONGTYPE Operation against a key holding the wrong kind of value\r\n`
|
||||
* Non-existing key:
|
||||
|
||||
* Push: creates the list (returns new length).
|
||||
* Pop (non-blocking): returns **Null**.
|
||||
* Blocking pop: **Null Array** on timeout. ([Redis][1])
|
||||
|
||||
---
|
||||
|
||||
# 4) Blocking engine (implementation sketch)
|
||||
|
||||
1. **Call time**: scan keys in user order. If a non-empty list is found, pop & reply immediately.
|
||||
2. **Otherwise**: register the client as **blocked** on those keys with `deadline = now + timeout` (or infinite).
|
||||
3. **On push to any key**: if waiters exist, **wake one** (FIFO) and serve its pop **atomically** with the push result.
|
||||
4. **On timer**: for each blocked client whose deadline passed, reply `Null Array` and clear state.
|
||||
5. **Connection close**: remove from any wait queues.
|
||||
|
||||
Refs for timeout/block semantics. ([Redis][4])
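
A minimal shape for steps 2–3, assuming a tokio runtime (names are illustrative, not the herodb types):

```rust
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use tokio::sync::{oneshot, Mutex};

/// Per-key FIFO of blocked poppers.
type Waiters = Arc<Mutex<HashMap<String, VecDeque<oneshot::Sender<String>>>>>;

/// Step 2: register a blocked client; the caller awaits the receiver (with a timeout).
async fn block_on_key(waiters: &Waiters, key: &str) -> oneshot::Receiver<String> {
    let (tx, rx) = oneshot::channel();
    waiters
        .lock()
        .await
        .entry(key.to_string())
        .or_default()
        .push_back(tx);
    rx
}

/// Step 3: after a push, hand the element to the earliest live waiter, if any.
/// Returns the element back when nobody was waiting, so it stays in the list.
async fn wake_one(waiters: &Waiters, key: &str, element: String) -> Option<String> {
    let mut guard = waiters.lock().await;
    if let Some(q) = guard.get_mut(key) {
        while let Some(tx) = q.pop_front() {
            if tx.send(element.clone()).is_ok() {
                return None; // delivered
            }
            // receiver dropped (disconnect/timeout); try the next waiter in FIFO order
        }
    }
    Some(element)
}
```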
|
||||
|
||||
---
|
||||
|
||||
# 5) Reliable queue pattern (recommended)
|
||||
|
||||
* **Consume**: `LMOVE queue processing RIGHT LEFT` (or `BLMOVE ... 0`).
|
||||
* **Process** the job.
|
||||
* **ACK**: `LREM processing 1 <job>` when done.
|
||||
* **Reaper**: auxiliary task that detects stale jobs (e.g., track job IDs + timestamps in a ZSET) and requeues them. (Lists don’t include timestamps; pairing with a ZSET is standard practice.)
|
||||
Refs: LMOVE doc’s pattern. ([Redis][5])
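
A consumer-side sketch of this loop over raw RESP, assuming the LMOVE/LREM commands described above are available (same kind of `arr` helper as in `age_persist_demo.rs`; the LREM element below is a placeholder for whatever was parsed out of the LMOVE reply):

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

// RESP array-of-bulk-strings encoder.
fn arr(parts: &[&str]) -> String {
    let mut out = format!("*{}\r\n", parts.len());
    for p in parts {
        out.push_str(&format!("${}\r\n{}\r\n", p.len(), p));
    }
    out
}

fn roundtrip(s: &mut TcpStream, parts: &[&str]) -> std::io::Result<String> {
    s.write_all(arr(parts).as_bytes())?;
    let mut buf = [0u8; 4096];
    let n = s.read(&mut buf)?;
    Ok(String::from_utf8_lossy(&buf[..n]).to_string())
}

fn main() -> std::io::Result<()> {
    let mut s = TcpStream::connect("127.0.0.1:6379")?;
    // Consume: atomically claim one job into the processing list.
    let claimed = roundtrip(&mut s, &["LMOVE", "queue", "processing", "RIGHT", "LEFT"])?;
    println!("claimed: {claimed}");
    // ... do the work, then ACK by removing that element from processing.
    // "job-1" is a placeholder: use the element parsed from `claimed`.
    let _ = roundtrip(&mut s, &["LREM", "processing", "1", "job-1"])?;
    Ok(())
}
```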
|
||||
|
||||
---
|
||||
|
||||
# 6) Minimal test matrix
|
||||
|
||||
* Push/pop happy path (both ends), with/without COUNT.
|
||||
* Blocking pop: immediate availability, block + timeout, wake on push, multiple keys order, FIFO across multiple waiters.
|
||||
* LMOVE/BLMOVE: RIGHT→LEFT pipeline, block + wake, cross-list atomicity, ACK via LREM.
|
||||
* Type errors and key deletion on last pop.
|
||||
|
308
herodb/src/age.rs
Normal file
@@ -0,0 +1,308 @@
|
||||
//! age.rs — AGE (rage) helpers + persistent key management for your mini-Redis.
|
||||
//
|
||||
// Features:
|
||||
// - X25519 encryption/decryption (age style)
|
||||
// - Ed25519 detached signatures + verification
|
||||
// - Persistent named keys in DB (strings):
|
||||
// age:key:{name} -> X25519 recipient (public encryption key, "age1...")
|
||||
// age:privkey:{name} -> X25519 identity (secret encryption key, "AGE-SECRET-KEY-1...")
|
||||
// age:signpub:{name} -> Ed25519 verify pubkey (public, used to verify signatures)
|
||||
// age:signpriv:{name} -> Ed25519 signing secret key (private, used to sign)
|
||||
// - Base64 wrapping for ciphertext/signature binary blobs.
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
use secrecy::ExposeSecret;
|
||||
use age::{Decryptor, Encryptor};
|
||||
use age::x25519;
|
||||
|
||||
use ed25519_dalek::{Signature, Signer, Verifier, SigningKey, VerifyingKey};
|
||||
|
||||
use base64::{engine::general_purpose::STANDARD as B64, Engine as _};
|
||||
|
||||
use crate::protocol::Protocol;
|
||||
use crate::server::Server;
|
||||
use crate::error::DBError;
|
||||
|
||||
// ---------- Internal helpers ----------
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum AgeWireError {
|
||||
ParseKey,
|
||||
Crypto(String),
|
||||
Utf8,
|
||||
SignatureLen,
|
||||
NotFound(&'static str), // which kind of key was missing
|
||||
Storage(String),
|
||||
}
|
||||
|
||||
impl AgeWireError {
|
||||
fn to_protocol(self) -> Protocol {
|
||||
match self {
|
||||
AgeWireError::ParseKey => Protocol::err("ERR age: invalid key"),
|
||||
AgeWireError::Crypto(e) => Protocol::err(&format!("ERR age: {e}")),
|
||||
AgeWireError::Utf8 => Protocol::err("ERR age: invalid UTF-8 plaintext"),
|
||||
AgeWireError::SignatureLen => Protocol::err("ERR age: bad signature length"),
|
||||
AgeWireError::NotFound(w) => Protocol::err(&format!("ERR age: missing {w}")),
|
||||
AgeWireError::Storage(e) => Protocol::err(&format!("ERR storage: {e}")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_recipient(s: &str) -> Result<x25519::Recipient, AgeWireError> {
|
||||
x25519::Recipient::from_str(s).map_err(|_| AgeWireError::ParseKey)
|
||||
}
|
||||
fn parse_identity(s: &str) -> Result<x25519::Identity, AgeWireError> {
|
||||
x25519::Identity::from_str(s).map_err(|_| AgeWireError::ParseKey)
|
||||
}
|
||||
fn parse_ed25519_signing_key(s: &str) -> Result<SigningKey, AgeWireError> {
|
||||
// Parse base64-encoded signing key
|
||||
let bytes = B64.decode(s).map_err(|_| AgeWireError::ParseKey)?;
|
||||
if bytes.len() != 32 {
|
||||
return Err(AgeWireError::ParseKey);
|
||||
}
|
||||
let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AgeWireError::ParseKey)?;
|
||||
Ok(SigningKey::from_bytes(&key_bytes))
|
||||
}
|
||||
fn parse_ed25519_verifying_key(s: &str) -> Result<VerifyingKey, AgeWireError> {
|
||||
// Parse base64-encoded verifying key
|
||||
let bytes = B64.decode(s).map_err(|_| AgeWireError::ParseKey)?;
|
||||
if bytes.len() != 32 {
|
||||
return Err(AgeWireError::ParseKey);
|
||||
}
|
||||
let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AgeWireError::ParseKey)?;
|
||||
VerifyingKey::from_bytes(&key_bytes).map_err(|_| AgeWireError::ParseKey)
|
||||
}
|
||||
|
||||
// ---------- Stateless crypto helpers (string in/out) ----------
|
||||
|
||||
pub fn gen_enc_keypair() -> (String, String) {
|
||||
let id = x25519::Identity::generate();
|
||||
let pk = id.to_public();
|
||||
(pk.to_string(), id.to_string().expose_secret().to_string()) // (recipient, identity)
|
||||
}
|
||||
|
||||
pub fn gen_sign_keypair() -> (String, String) {
|
||||
use rand::RngCore;
|
||||
use rand::rngs::OsRng;
|
||||
|
||||
// Generate random 32 bytes for the signing key
|
||||
let mut secret_bytes = [0u8; 32];
|
||||
OsRng.fill_bytes(&mut secret_bytes);
|
||||
|
||||
let signing_key = SigningKey::from_bytes(&secret_bytes);
|
||||
let verifying_key = signing_key.verifying_key();
|
||||
|
||||
// Encode as base64 for storage
|
||||
let signing_key_b64 = B64.encode(signing_key.to_bytes());
|
||||
let verifying_key_b64 = B64.encode(verifying_key.to_bytes());
|
||||
|
||||
(verifying_key_b64, signing_key_b64) // (verify_pub, signing_secret)
|
||||
}
|
||||
|
||||
/// Encrypt `msg` for `recipient_str` (X25519). Returns base64(ciphertext).
|
||||
pub fn encrypt_b64(recipient_str: &str, msg: &str) -> Result<String, AgeWireError> {
|
||||
let recipient = parse_recipient(recipient_str)?;
|
||||
let enc = Encryptor::with_recipients(vec![Box::new(recipient)])
|
||||
.expect("failed to create encryptor"); // Handle Option<Encryptor>
|
||||
let mut out = Vec::new();
|
||||
{
|
||||
use std::io::Write;
|
||||
let mut w = enc.wrap_output(&mut out).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
|
||||
w.write_all(msg.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
|
||||
w.finish().map_err(|e| AgeWireError::Crypto(e.to_string()))?;
|
||||
}
|
||||
Ok(B64.encode(out))
|
||||
}
|
||||
|
||||
/// Decrypt base64(ciphertext) with `identity_str`. Returns plaintext String.
|
||||
pub fn decrypt_b64(identity_str: &str, ct_b64: &str) -> Result<String, AgeWireError> {
|
||||
let id = parse_identity(identity_str)?;
|
||||
let ct = B64.decode(ct_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
|
||||
let dec = Decryptor::new(&ct[..]).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
|
||||
|
||||
// The decrypt method returns a Result<StreamReader, DecryptError>
|
||||
let mut r = match dec {
|
||||
Decryptor::Recipients(d) => d.decrypt(std::iter::once(&id as &dyn age::Identity))
|
||||
.map_err(|e| AgeWireError::Crypto(e.to_string()))?,
|
||||
Decryptor::Passphrase(_) => return Err(AgeWireError::Crypto("Expected recipients, got passphrase".to_string())),
|
||||
};
|
||||
|
||||
let mut pt = Vec::new();
|
||||
use std::io::Read;
|
||||
r.read_to_end(&mut pt).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
|
||||
String::from_utf8(pt).map_err(|_| AgeWireError::Utf8)
|
||||
}
|
||||
|
||||
/// Sign bytes of `msg` (detached). Returns base64(signature bytes, 64 bytes).
|
||||
pub fn sign_b64(signing_secret_str: &str, msg: &str) -> Result<String, AgeWireError> {
|
||||
let signing_key = parse_ed25519_signing_key(signing_secret_str)?;
|
||||
let sig = signing_key.sign(msg.as_bytes());
|
||||
Ok(B64.encode(sig.to_bytes()))
|
||||
}
|
||||
|
||||
/// Verify detached signature (base64) for `msg` with pubkey.
|
||||
pub fn verify_b64(verify_pub_str: &str, msg: &str, sig_b64: &str) -> Result<bool, AgeWireError> {
|
||||
let verifying_key = parse_ed25519_verifying_key(verify_pub_str)?;
|
||||
let sig_bytes = B64.decode(sig_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
|
||||
if sig_bytes.len() != 64 {
|
||||
return Err(AgeWireError::SignatureLen);
|
||||
}
|
||||
let sig = Signature::from_bytes(sig_bytes[..].try_into().unwrap());
|
||||
Ok(verifying_key.verify(msg.as_bytes(), &sig).is_ok())
|
||||
}
|
||||
|
||||
// ---------- Storage helpers ----------
|
||||
|
||||
fn sget(server: &Server, key: &str) -> Result<Option<String>, AgeWireError> {
|
||||
let st = server.current_storage().map_err(|e| AgeWireError::Storage(e.0))?;
|
||||
st.get(key).map_err(|e| AgeWireError::Storage(e.0))
|
||||
}
|
||||
fn sset(server: &Server, key: &str, val: &str) -> Result<(), AgeWireError> {
|
||||
let st = server.current_storage().map_err(|e| AgeWireError::Storage(e.0))?;
|
||||
st.set(key.to_string(), val.to_string()).map_err(|e| AgeWireError::Storage(e.0))
|
||||
}
|
||||
|
||||
fn enc_pub_key_key(name: &str) -> String { format!("age:key:{name}") }
|
||||
fn enc_priv_key_key(name: &str) -> String { format!("age:privkey:{name}") }
|
||||
fn sign_pub_key_key(name: &str) -> String { format!("age:signpub:{name}") }
|
||||
fn sign_priv_key_key(name: &str) -> String { format!("age:signpriv:{name}") }
|
||||
|
||||
// ---------- Command handlers (RESP Protocol) ----------
|
||||
// Basic (stateless) ones kept for completeness
|
||||
|
||||
pub async fn cmd_age_genenc() -> Protocol {
|
||||
let (recip, ident) = gen_enc_keypair();
|
||||
Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)])
|
||||
}
|
||||
|
||||
pub async fn cmd_age_gensign() -> Protocol {
|
||||
let (verify, secret) = gen_sign_keypair();
|
||||
Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)])
|
||||
}
|
||||
|
||||
pub async fn cmd_age_encrypt(recipient: &str, message: &str) -> Protocol {
|
||||
match encrypt_b64(recipient, message) {
|
||||
Ok(b64) => Protocol::BulkString(b64),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_age_decrypt(identity: &str, ct_b64: &str) -> Protocol {
|
||||
match decrypt_b64(identity, ct_b64) {
|
||||
Ok(pt) => Protocol::BulkString(pt),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_age_sign(secret: &str, message: &str) -> Protocol {
|
||||
match sign_b64(secret, message) {
|
||||
Ok(b64sig) => Protocol::BulkString(b64sig),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_age_verify(verify_pub: &str, message: &str, sig_b64: &str) -> Protocol {
|
||||
match verify_b64(verify_pub, message, sig_b64) {
|
||||
Ok(true) => Protocol::SimpleString("1".to_string()),
|
||||
Ok(false) => Protocol::SimpleString("0".to_string()),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
// ---------- NEW: Persistent, named-key commands ----------
|
||||
|
||||
pub async fn cmd_age_keygen(server: &Server, name: &str) -> Protocol {
|
||||
let (recip, ident) = gen_enc_keypair();
|
||||
if let Err(e) = sset(server, &enc_pub_key_key(name), &recip) { return e.to_protocol(); }
|
||||
if let Err(e) = sset(server, &enc_priv_key_key(name), &ident) { return e.to_protocol(); }
|
||||
Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)])
|
||||
}
|
||||
|
||||
pub async fn cmd_age_signkeygen(server: &Server, name: &str) -> Protocol {
|
||||
let (verify, secret) = gen_sign_keypair();
|
||||
if let Err(e) = sset(server, &sign_pub_key_key(name), &verify) { return e.to_protocol(); }
|
||||
if let Err(e) = sset(server, &sign_priv_key_key(name), &secret) { return e.to_protocol(); }
|
||||
Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)])
|
||||
}
|
||||
|
||||
pub async fn cmd_age_encrypt_name(server: &Server, name: &str, message: &str) -> Protocol {
|
||||
let recip = match sget(server, &enc_pub_key_key(name)) {
|
||||
Ok(Some(v)) => v,
|
||||
Ok(None) => return AgeWireError::NotFound("recipient (age:key:{name})").to_protocol(),
|
||||
Err(e) => return e.to_protocol(),
|
||||
};
|
||||
match encrypt_b64(&recip, message) {
|
||||
Ok(ct) => Protocol::BulkString(ct),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_age_decrypt_name(server: &Server, name: &str, ct_b64: &str) -> Protocol {
|
||||
let ident = match sget(server, &enc_priv_key_key(name)) {
|
||||
Ok(Some(v)) => v,
|
||||
Ok(None) => return AgeWireError::NotFound("identity (age:privkey:{name})").to_protocol(),
|
||||
Err(e) => return e.to_protocol(),
|
||||
};
|
||||
match decrypt_b64(&ident, ct_b64) {
|
||||
Ok(pt) => Protocol::BulkString(pt),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_age_sign_name(server: &Server, name: &str, message: &str) -> Protocol {
|
||||
let sec = match sget(server, &sign_priv_key_key(name)) {
|
||||
Ok(Some(v)) => v,
|
||||
Ok(None) => return AgeWireError::NotFound("signing secret (age:signpriv:{name})").to_protocol(),
|
||||
Err(e) => return e.to_protocol(),
|
||||
};
|
||||
match sign_b64(&sec, message) {
|
||||
Ok(sig) => Protocol::BulkString(sig),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_age_verify_name(server: &Server, name: &str, message: &str, sig_b64: &str) -> Protocol {
|
||||
let pubk = match sget(server, &sign_pub_key_key(name)) {
|
||||
Ok(Some(v)) => v,
|
||||
Ok(None) => return AgeWireError::NotFound("verify pubkey (age:signpub:{name})").to_protocol(),
|
||||
Err(e) => return e.to_protocol(),
|
||||
};
|
||||
match verify_b64(&pubk, message, sig_b64) {
|
||||
Ok(true) => Protocol::SimpleString("1".to_string()),
|
||||
Ok(false) => Protocol::SimpleString("0".to_string()),
|
||||
Err(e) => e.to_protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_age_list(server: &Server) -> Protocol {
|
||||
// Returns 4 arrays: ["encpub", <names...>], ["encpriv", ...], ["signpub", ...], ["signpriv", ...]
|
||||
let st = match server.current_storage() { Ok(s) => s, Err(e) => return Protocol::err(&e.0) };
|
||||
|
||||
let pull = |pat: &str, prefix: &str| -> Result<Vec<String>, DBError> {
|
||||
let keys = st.keys(pat)?;
|
||||
let mut names: Vec<String> = keys.into_iter()
|
||||
.filter_map(|k| k.strip_prefix(prefix).map(|x| x.to_string()))
|
||||
.collect();
|
||||
names.sort();
|
||||
Ok(names)
|
||||
};
|
||||
|
||||
let encpub = match pull("age:key:*", "age:key:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
|
||||
let encpriv = match pull("age:privkey:*", "age:privkey:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
|
||||
let signpub = match pull("age:signpub:*", "age:signpub:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
|
||||
let signpriv= match pull("age:signpriv:*", "age:signpriv:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
|
||||
|
||||
let to_arr = |label: &str, v: Vec<String>| {
|
||||
let mut out = vec![Protocol::BulkString(label.to_string())];
|
||||
out.push(Protocol::Array(v.into_iter().map(Protocol::BulkString).collect()));
|
||||
Protocol::Array(out)
|
||||
};
|
||||
|
||||
Protocol::Array(vec![
|
||||
to_arr("encpub", encpub),
|
||||
to_arr("encpriv", encpriv),
|
||||
to_arr("signpub", signpub),
|
||||
to_arr("signpriv", signpriv),
|
||||
])
|
||||
}
|
1519
herodb/src/cmd.rs
Normal file
File diff suppressed because it is too large
Load Diff
73
herodb/src/crypto.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit, OsRng},
|
||||
XChaCha20Poly1305, XNonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
const VERSION: u8 = 1;
|
||||
const NONCE_LEN: usize = 24;
|
||||
const TAG_LEN: usize = 16;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum CryptoError {
|
||||
Format, // wrong length / header
|
||||
Version(u8), // unknown version
|
||||
Decrypt, // wrong key or corrupted data
|
||||
}
|
||||
|
||||
impl From<CryptoError> for crate::error::DBError {
|
||||
fn from(e: CryptoError) -> Self {
|
||||
crate::error::DBError(format!("Crypto error: {:?}", e))
|
||||
}
|
||||
}
|
||||
|
||||
/// Super-simple factory: new(secret) + encrypt(bytes) + decrypt(bytes)
|
||||
pub struct CryptoFactory {
|
||||
key: chacha20poly1305::Key,
|
||||
}
|
||||
|
||||
impl CryptoFactory {
|
||||
/// Accepts any secret bytes; turns them into a 32-byte key (SHA-256).
|
||||
pub fn new<S: AsRef<[u8]>>(secret: S) -> Self {
|
||||
let mut h = Sha256::new();
|
||||
h.update(b"xchacha20poly1305-factory:v1"); // domain separation
|
||||
h.update(secret.as_ref());
|
||||
let digest = h.finalize(); // 32 bytes
|
||||
let key = chacha20poly1305::Key::from_slice(&digest).to_owned();
|
||||
Self { key }
|
||||
}
|
||||
|
||||
/// Output layout: [version:1][nonce:24][ciphertext||tag]
|
||||
pub fn encrypt(&self, plaintext: &[u8]) -> Vec<u8> {
|
||||
let cipher = XChaCha20Poly1305::new(&self.key);
|
||||
|
||||
let mut nonce_bytes = [0u8; NONCE_LEN];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
let nonce = XNonce::from_slice(&nonce_bytes);
|
||||
|
||||
let mut out = Vec::with_capacity(1 + NONCE_LEN + plaintext.len() + TAG_LEN);
|
||||
out.push(VERSION);
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
|
||||
let ct = cipher.encrypt(nonce, plaintext).expect("encrypt");
|
||||
out.extend_from_slice(&ct);
|
||||
out
|
||||
}
|
||||
|
||||
pub fn decrypt(&self, blob: &[u8]) -> Result<Vec<u8>, CryptoError> {
|
||||
if blob.len() < 1 + NONCE_LEN + TAG_LEN {
|
||||
return Err(CryptoError::Format);
|
||||
}
|
||||
let ver = blob[0];
|
||||
if ver != VERSION {
|
||||
return Err(CryptoError::Version(ver));
|
||||
}
|
||||
|
||||
let nonce = XNonce::from_slice(&blob[1..1 + NONCE_LEN]);
|
||||
let ct = &blob[1 + NONCE_LEN..];
|
||||
|
||||
let cipher = XChaCha20Poly1305::new(&self.key);
|
||||
cipher.decrypt(nonce, ct).map_err(|_| CryptoError::Decrypt)
|
||||
}
|
||||
}
|
@@ -4,7 +4,6 @@ use tokio::sync::mpsc;
|
||||
use redb;
|
||||
use bincode;
|
||||
|
||||
use crate::protocol::Protocol;
|
||||
|
||||
// todo: more error types
|
||||
#[derive(Debug)]
|
||||
@@ -81,3 +80,15 @@ impl From<tokio::sync::mpsc::error::SendError<()>> for DBError {
|
||||
DBError(item.to_string().clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for DBError {
|
||||
fn from(item: serde_json::Error) -> Self {
|
||||
DBError(item.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<chacha20poly1305::Error> for DBError {
|
||||
fn from(item: chacha20poly1305::Error) -> Self {
|
||||
DBError(item.to_string())
|
||||
}
|
||||
}
|
8
herodb/src/lib.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
pub mod age; // NEW
|
||||
pub mod cmd;
|
||||
pub mod crypto;
|
||||
pub mod error;
|
||||
pub mod options;
|
||||
pub mod protocol;
|
||||
pub mod server;
|
||||
pub mod storage;
|
@@ -2,7 +2,7 @@
|
||||
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
use redis_rs::server;
|
||||
use herodb::server;
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
@@ -14,10 +14,22 @@ struct Args {
|
||||
#[arg(long)]
|
||||
dir: String,
|
||||
|
||||
|
||||
/// The port of the Redis server, default is 6379 if not specified
|
||||
#[arg(long)]
|
||||
port: Option<u16>,
|
||||
|
||||
/// Enable debug mode
|
||||
#[arg(long)]
|
||||
debug: bool,
|
||||
|
||||
|
||||
/// Master encryption key for encrypted databases
|
||||
#[arg(long)]
|
||||
encryption_key: Option<String>,
|
||||
|
||||
/// Encrypt the database
|
||||
#[arg(long)]
|
||||
encrypt: bool,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
@@ -33,14 +45,20 @@ async fn main() {
|
||||
.unwrap();
|
||||
|
||||
// new DB option
|
||||
let option = redis_rs::options::DBOption {
|
||||
let option = herodb::options::DBOption {
|
||||
dir: args.dir,
|
||||
port,
|
||||
debug: args.debug,
|
||||
encryption_key: args.encryption_key,
|
||||
encrypt: args.encrypt,
|
||||
};
|
||||
|
||||
// new server
|
||||
let server = server::Server::new(option).await;
|
||||
|
||||
// Add a small delay to ensure the port is ready
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
|
||||
// accept new connections
|
||||
loop {
|
||||
let stream = listener.accept().await;
|
8
herodb/src/options.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
#[derive(Clone)]
|
||||
pub struct DBOption {
|
||||
pub dir: String,
|
||||
pub port: u16,
|
||||
pub debug: bool,
|
||||
pub encrypt: bool,
|
||||
pub encryption_key: Option<String>, // Master encryption key
|
||||
}
|
171
herodb/src/protocol.rs
Normal file
@@ -0,0 +1,171 @@
|
||||
use core::fmt;
|
||||
|
||||
use crate::error::DBError;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Protocol {
|
||||
SimpleString(String),
|
||||
BulkString(String),
|
||||
Null,
|
||||
Array(Vec<Protocol>),
|
||||
Error(String), // NEW
|
||||
}
|
||||
|
||||
impl fmt::Display for Protocol {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", self.decode().as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl Protocol {
|
||||
pub fn from(protocol: &str) -> Result<(Self, &str), DBError> {
|
||||
if protocol.is_empty() {
|
||||
// Incomplete frame; caller should read more bytes
|
||||
return Err(DBError("[incomplete] empty".to_string()));
|
||||
}
|
||||
let ret = match protocol.chars().nth(0) {
|
||||
Some('+') => Self::parse_simple_string_sfx(&protocol[1..]),
|
||||
Some('$') => Self::parse_bulk_string_sfx(&protocol[1..]),
|
||||
Some('*') => Self::parse_array_sfx(&protocol[1..]),
|
||||
_ => Err(DBError(format!(
|
||||
"[from] unsupported protocol: {:?}",
|
||||
protocol
|
||||
))),
|
||||
};
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn from_vec(array: Vec<&str>) -> Self {
|
||||
let array = array
|
||||
.into_iter()
|
||||
.map(|x| Protocol::BulkString(x.to_string()))
|
||||
.collect();
|
||||
Protocol::Array(array)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn ok() -> Self {
|
||||
Protocol::SimpleString("ok".to_string())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn err(msg: &str) -> Self {
|
||||
Protocol::Error(msg.to_string())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn write_on_slave_err() -> Self {
|
||||
Self::err("DISALLOW WRITE ON SLAVE")
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn psync_on_slave_err() -> Self {
|
||||
Self::err("PSYNC ON SLAVE IS NOT ALLOWED")
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn none() -> Self {
|
||||
Self::SimpleString("none".to_string())
|
||||
}
|
||||
|
||||
pub fn decode(&self) -> String {
|
||||
match self {
|
||||
Protocol::SimpleString(s) => s.to_string(),
|
||||
Protocol::BulkString(s) => s.to_string(),
|
||||
Protocol::Null => "".to_string(),
|
||||
Protocol::Array(s) => s.iter().map(|x| x.decode()).collect::<Vec<_>>().join(" "),
|
||||
Protocol::Error(s) => s.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> String {
|
||||
match self {
|
||||
Protocol::SimpleString(s) => format!("+{}\r\n", s),
|
||||
Protocol::BulkString(s) => format!("${}\r\n{}\r\n", s.len(), s),
|
||||
Protocol::Array(ss) => {
|
||||
format!("*{}\r\n", ss.len()) + &ss.iter().map(|x| x.encode()).collect::<String>()
|
||||
}
|
||||
Protocol::Null => "$-1\r\n".to_string(),
|
||||
Protocol::Error(s) => format!("-{}\r\n", s), // proper RESP error
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_simple_string_sfx(protocol: &str) -> Result<(Self, &str), DBError> {
|
||||
match protocol.find("\r\n") {
|
||||
Some(x) => Ok((Self::SimpleString(protocol[..x].to_string()), &protocol[x + 2..])),
|
||||
_ => Err(DBError(format!(
|
||||
"[new simple string] unsupported protocol: {:?}",
|
||||
protocol
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_bulk_string_sfx(protocol: &str) -> Result<(Self, &str), DBError> {
|
||||
if let Some(len_end) = protocol.find("\r\n") {
|
||||
let size = Self::parse_usize(&protocol[..len_end])?;
|
||||
let data_start = len_end + 2;
|
||||
let data_end = data_start + size;
|
||||
|
||||
// If we don't yet have the full bulk payload + trailing CRLF, signal INCOMPLETE
|
||||
if protocol.len() < data_end + 2 {
|
||||
return Err(DBError("[incomplete] bulk body".to_string()));
|
||||
}
|
||||
if &protocol[data_end..data_end + 2] != "\r\n" {
|
||||
return Err(DBError("[incomplete] bulk terminator".to_string()));
|
||||
}
|
||||
|
||||
let s = Self::parse_string(&protocol[data_start..data_end])?;
|
||||
Ok((Protocol::BulkString(s), &protocol[data_end + 2..]))
|
||||
} else {
|
||||
// No CRLF after bulk length header yet
|
||||
Err(DBError("[incomplete] bulk header".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_array_sfx(s: &str) -> Result<(Self, &str), DBError> {
|
||||
if let Some(len_end) = s.find("\r\n") {
|
||||
let array_len = s[..len_end].parse::<usize>()?;
|
||||
let mut remaining = &s[len_end + 2..];
|
||||
let mut vec = vec![];
|
||||
for _ in 0..array_len {
|
||||
match Protocol::from(remaining) {
|
||||
Ok((p, rem)) => {
|
||||
vec.push(p);
|
||||
remaining = rem;
|
||||
}
|
||||
Err(e) => {
|
||||
// Propagate the error; "[incomplete]" errors tell the caller to read more bytes
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok((Protocol::Array(vec), remaining))
|
||||
} else {
|
||||
// No CRLF after array header yet
|
||||
Err(DBError("[incomplete] array header".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_usize(protocol: &str) -> Result<usize, DBError> {
|
||||
if protocol.is_empty() {
|
||||
Err(DBError("Cannot parse usize from empty string".to_string()))
|
||||
} else {
|
||||
protocol
|
||||
.parse::<usize>()
|
||||
.map_err(|_| DBError(format!("Failed to parse usize from: {}", protocol)))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_string(protocol: &str) -> Result<String, DBError> {
|
||||
if protocol.is_empty() {
|
||||
// Allow empty strings, but handle appropriately
|
||||
Ok("".to_string())
|
||||
} else {
|
||||
Ok(protocol.to_string())
|
||||
}
|
||||
}
|
||||
}
|
250
herodb/src/server.rs
Normal file
@@ -0,0 +1,250 @@
|
||||
use core::str;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::sync::{Mutex, oneshot};
|
||||
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
use crate::cmd::Cmd;
|
||||
use crate::error::DBError;
|
||||
use crate::options;
|
||||
use crate::protocol::Protocol;
|
||||
use crate::storage::Storage;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Server {
|
||||
pub db_cache: std::sync::Arc<std::sync::RwLock<HashMap<u64, Arc<Storage>>>>,
|
||||
pub option: options::DBOption,
|
||||
pub client_name: Option<String>,
|
||||
pub selected_db: u64, // Changed from usize to u64
|
||||
pub queued_cmd: Option<Vec<(Cmd, Protocol)>>,
|
||||
|
||||
// BLPOP waiter registry: per (db_index, key) FIFO of waiters
|
||||
pub list_waiters: Arc<Mutex<HashMap<u64, HashMap<String, Vec<Waiter>>>>>,
|
||||
pub waiter_seq: Arc<AtomicU64>,
|
||||
}
|
||||
|
||||
pub struct Waiter {
|
||||
pub id: u64,
|
||||
pub side: PopSide,
|
||||
pub tx: oneshot::Sender<(String, String)>, // (key, element)
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub enum PopSide {
|
||||
Left,
|
||||
Right,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub async fn new(option: options::DBOption) -> Self {
|
||||
Server {
|
||||
db_cache: Arc::new(std::sync::RwLock::new(HashMap::new())),
|
||||
option,
|
||||
client_name: None,
|
||||
selected_db: 0,
|
||||
queued_cmd: None,
|
||||
|
||||
list_waiters: Arc::new(Mutex::new(HashMap::new())),
|
||||
waiter_seq: Arc::new(AtomicU64::new(1)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn current_storage(&self) -> Result<Arc<Storage>, DBError> {
|
||||
let mut cache = self.db_cache.write().unwrap();
|
||||
|
||||
if let Some(storage) = cache.get(&self.selected_db) {
|
||||
return Ok(storage.clone());
|
||||
}
|
||||
|
||||
|
||||
// Create new database file
|
||||
let db_file_path = std::path::PathBuf::from(self.option.dir.clone())
|
||||
.join(format!("{}.db", self.selected_db));
|
||||
|
||||
// Ensure the directory exists before creating the database file
|
||||
if let Some(parent_dir) = db_file_path.parent() {
|
||||
std::fs::create_dir_all(parent_dir).map_err(|e| {
|
||||
DBError(format!("Failed to create directory {}: {}", parent_dir.display(), e))
|
||||
})?;
|
||||
}
|
||||
|
||||
println!("Creating new db file: {}", db_file_path.display());
|
||||
|
||||
let storage = Arc::new(Storage::new(
|
||||
db_file_path,
|
||||
self.should_encrypt_db(self.selected_db),
|
||||
self.option.encryption_key.as_deref()
|
||||
)?);
|
||||
|
||||
cache.insert(self.selected_db, storage.clone());
|
||||
Ok(storage)
|
||||
}
|
||||
|
||||
fn should_encrypt_db(&self, db_index: u64) -> bool {
|
||||
// DB 0-9 are non-encrypted, DB 10+ are encrypted
|
||||
self.option.encrypt && db_index >= 10
|
||||
}
|
||||
|
||||
// ----- BLPOP waiter helpers -----
|
||||
|
||||
pub async fn register_waiter(&self, db_index: u64, key: &str, side: PopSide) -> (u64, oneshot::Receiver<(String, String)>) {
|
||||
let id = self.waiter_seq.fetch_add(1, Ordering::Relaxed);
|
||||
let (tx, rx) = oneshot::channel::<(String, String)>();
|
||||
|
||||
let mut guard = self.list_waiters.lock().await;
|
||||
let per_db = guard.entry(db_index).or_insert_with(HashMap::new);
|
||||
let q = per_db.entry(key.to_string()).or_insert_with(Vec::new);
|
||||
q.push(Waiter { id, side, tx });
|
||||
(id, rx)
|
||||
}
|
||||
|
||||
pub async fn unregister_waiter(&self, db_index: u64, key: &str, id: u64) {
|
||||
let mut guard = self.list_waiters.lock().await;
|
||||
if let Some(per_db) = guard.get_mut(&db_index) {
|
||||
if let Some(q) = per_db.get_mut(key) {
|
||||
q.retain(|w| w.id != id);
|
||||
if q.is_empty() {
|
||||
per_db.remove(key);
|
||||
}
|
||||
}
|
||||
if per_db.is_empty() {
|
||||
guard.remove(&db_index);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Called after LPUSH/RPUSH to deliver to blocked BLPOP waiters.
|
||||
pub async fn drain_waiters_after_push(&self, key: &str) -> Result<(), DBError> {
|
||||
let db_index = self.selected_db;
|
||||
|
||||
loop {
|
||||
// Check if any waiter exists
|
||||
let maybe_waiter = {
|
||||
let mut guard = self.list_waiters.lock().await;
|
||||
if let Some(per_db) = guard.get_mut(&db_index) {
|
||||
if let Some(q) = per_db.get_mut(key) {
|
||||
if !q.is_empty() {
|
||||
// Pop FIFO
|
||||
Some(q.remove(0))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
let waiter = if let Some(w) = maybe_waiter { w } else { break };
|
||||
|
||||
// Pop one element depending on waiter side
|
||||
let elems = match waiter.side {
|
||||
PopSide::Left => self.current_storage()?.lpop(key, 1)?,
|
||||
PopSide::Right => self.current_storage()?.rpop(key, 1)?,
|
||||
};
|
||||
if elems.is_empty() {
|
||||
// Nothing to deliver; re-register waiter at the front to preserve order
|
||||
let mut guard = self.list_waiters.lock().await;
|
||||
let per_db = guard.entry(db_index).or_insert_with(HashMap::new);
|
||||
let q = per_db.entry(key.to_string()).or_insert_with(Vec::new);
|
||||
q.insert(0, waiter);
|
||||
break;
|
||||
} else {
|
||||
let elem = elems[0].clone();
|
||||
// Send to waiter; if receiver dropped, just continue
|
||||
let _ = waiter.tx.send((key.to_string(), elem));
|
||||
// Loop to try to satisfy more waiters if more elements remain
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn handle(
|
||||
&mut self,
|
||||
mut stream: tokio::net::TcpStream,
|
||||
) -> Result<(), DBError> {
|
||||
// Accumulate incoming bytes to handle partial RESP frames
|
||||
let mut acc = String::new();
|
||||
let mut buf = vec![0u8; 8192];
|
||||
|
||||
loop {
|
||||
let n = match stream.read(&mut buf).await {
|
||||
Ok(0) => {
|
||||
println!("[handle] connection closed");
|
||||
return Ok(());
|
||||
}
|
||||
Ok(n) => n,
|
||||
Err(e) => {
|
||||
println!("[handle] read error: {:?}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
// Append to accumulator. RESP for our usage is ASCII-safe.
|
||||
acc.push_str(str::from_utf8(&buf[..n])?);
|
||||
|
||||
// Try to parse as many complete commands as are available in 'acc'.
|
||||
loop {
|
||||
let parsed = Cmd::from(&acc);
|
||||
let (cmd, protocol, remaining) = match parsed {
|
||||
Ok((cmd, protocol, remaining)) => (cmd, protocol, remaining),
|
||||
Err(_e) => {
|
||||
// Incomplete or invalid frame; assume incomplete and wait for more data.
|
||||
// This avoids emitting spurious protocol_error for split frames.
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
// Advance the accumulator to the unparsed remainder
|
||||
acc = remaining.to_string();
|
||||
|
||||
if self.option.debug {
|
||||
println!("\x1b[34;1mgot command: {:?}, protocol: {:?}\x1b[0m", cmd, protocol);
|
||||
} else {
|
||||
println!("got command: {:?}, protocol: {:?}", cmd, protocol);
|
||||
}
|
||||
|
||||
// Check if this is a QUIT command before processing
|
||||
let is_quit = matches!(cmd, Cmd::Quit);
|
||||
|
||||
let res = match cmd.run(self).await {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
if self.option.debug {
|
||||
eprintln!("[run error] {:?}", e);
|
||||
}
|
||||
Protocol::err(&format!("ERR {}", e.0))
|
||||
}
|
||||
};
|
||||
|
||||
if self.option.debug {
|
||||
println!("\x1b[34;1mqueued cmd {:?}\x1b[0m", self.queued_cmd);
|
||||
println!("\x1b[32;1mgoing to send response {}\x1b[0m", res.encode());
|
||||
} else {
|
||||
print!("queued cmd {:?}", self.queued_cmd);
|
||||
println!("going to send response {}", res.encode());
|
||||
}
|
||||
|
||||
_ = stream.write(res.encode().as_bytes()).await?;
|
||||
|
||||
// If this was a QUIT command, close the connection
|
||||
if is_quit {
|
||||
println!("[handle] QUIT command received, closing connection");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Continue parsing any further complete commands already in 'acc'
|
||||
if acc.is_empty() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
126
herodb/src/storage/mod.rs
Normal file
@@ -0,0 +1,126 @@
|
||||
use std::{
|
||||
path::Path,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
use redb::{Database, TableDefinition};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::crypto::CryptoFactory;
|
||||
use crate::error::DBError;
|
||||
|
||||
// Re-export modules
|
||||
mod storage_basic;
|
||||
mod storage_hset;
|
||||
mod storage_lists;
|
||||
mod storage_extra;
|
||||
|
||||
// Re-export implementations
|
||||
// Note: These imports are used by the impl blocks in the submodules
|
||||
// The compiler shows them as unused because they're not directly used in this file
// but they're needed for the Storage struct methods to be available
pub use storage_extra::*;

// Table definitions for different Redis data types
const TYPES_TABLE: TableDefinition<&str, &str> = TableDefinition::new("types");
const STRINGS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("strings");
const HASHES_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("hashes");
const LISTS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("lists");
const STREAMS_META_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("streams_meta");
const STREAMS_DATA_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("streams_data");
const ENCRYPTED_TABLE: TableDefinition<&str, u8> = TableDefinition::new("encrypted");
const EXPIRATION_TABLE: TableDefinition<&str, u64> = TableDefinition::new("expiration");

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct StreamEntry {
    pub fields: Vec<(String, String)>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ListValue {
    pub elements: Vec<String>,
}

#[inline]
pub fn now_in_millis() -> u128 {
    let start = SystemTime::now();
    let duration_since_epoch = start.duration_since(UNIX_EPOCH).unwrap();
    duration_since_epoch.as_millis()
}

pub struct Storage {
    db: Database,
    crypto: Option<CryptoFactory>,
}

impl Storage {
    pub fn new(path: impl AsRef<Path>, should_encrypt: bool, master_key: Option<&str>) -> Result<Self, DBError> {
        let db = Database::create(path)?;

        // Create tables if they don't exist
        let write_txn = db.begin_write()?;
        {
            let _ = write_txn.open_table(TYPES_TABLE)?;
            let _ = write_txn.open_table(STRINGS_TABLE)?;
            let _ = write_txn.open_table(HASHES_TABLE)?;
            let _ = write_txn.open_table(LISTS_TABLE)?;
            let _ = write_txn.open_table(STREAMS_META_TABLE)?;
            let _ = write_txn.open_table(STREAMS_DATA_TABLE)?;
            let _ = write_txn.open_table(ENCRYPTED_TABLE)?;
            let _ = write_txn.open_table(EXPIRATION_TABLE)?;
        }
        write_txn.commit()?;

        // Check if database was previously encrypted
        let read_txn = db.begin_read()?;
        let encrypted_table = read_txn.open_table(ENCRYPTED_TABLE)?;
        let was_encrypted = encrypted_table.get("encrypted")?.map(|v| v.value() == 1).unwrap_or(false);
        drop(read_txn);

        let crypto = if should_encrypt || was_encrypted {
            if let Some(key) = master_key {
                Some(CryptoFactory::new(key.as_bytes()))
            } else {
                return Err(DBError("Encryption requested but no master key provided".to_string()));
            }
        } else {
            None
        };

        // If we're enabling encryption for the first time, mark it
        if should_encrypt && !was_encrypted {
            let write_txn = db.begin_write()?;
            {
                let mut encrypted_table = write_txn.open_table(ENCRYPTED_TABLE)?;
                encrypted_table.insert("encrypted", &1u8)?;
            }
            write_txn.commit()?;
        }

        Ok(Storage {
            db,
            crypto,
        })
    }

    pub fn is_encrypted(&self) -> bool {
        self.crypto.is_some()
    }

    // Helper methods for encryption
    fn encrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
        if let Some(crypto) = &self.crypto {
            Ok(crypto.encrypt(data))
        } else {
            Ok(data.to_vec())
        }
    }

    fn decrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
        if let Some(crypto) = &self.crypto {
            Ok(crypto.decrypt(data)?)
        } else {
            Ok(data.to_vec())
        }
    }
}
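Note (not part of the diff): because the `encrypted` marker is persisted in its own table, the decision to encrypt is sticky across reopens. A minimal usage sketch, assuming `Storage` and `DBError` are reachable as `herodb::storage::Storage` and `herodb::error::DBError` (the module paths are assumptions, not confirmed by this diff):

```rust
use herodb::storage::Storage; // assumed re-export path
use herodb::error::DBError;   // assumed re-export path

fn open_twice_example() -> Result<(), DBError> {
    // First open turns encryption on and records the "encrypted" marker.
    let storage = Storage::new("/tmp/example.db", true, Some("master-key"))?;
    assert!(storage.is_encrypted());
    drop(storage);

    // On reopen, `was_encrypted` is read back as true, so a missing key is an
    // error even though `should_encrypt` is false here.
    assert!(Storage::new("/tmp/example.db", false, None).is_err());
    Ok(())
}
```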
245 herodb/src/storage/storage_basic.rs (Normal file)
@@ -0,0 +1,245 @@
|
||||
use redb::{ReadableTable};
|
||||
use crate::error::DBError;
|
||||
use super::*;
|
||||
|
||||
impl Storage {
|
||||
pub fn flushdb(&self) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
let mut streams_meta_table = write_txn.open_table(STREAMS_META_TABLE)?;
|
||||
let mut streams_data_table = write_txn.open_table(STREAMS_DATA_TABLE)?;
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
|
||||
// inefficient, but there is no other way
|
||||
let keys: Vec<String> = types_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
types_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<String> = strings_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
strings_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<(String, String)> = hashes_table
|
||||
.iter()?
|
||||
.map(|item| {
|
||||
let binding = item.unwrap();
|
||||
let (k, f) = binding.0.value();
|
||||
(k.to_string(), f.to_string())
|
||||
})
|
||||
.collect();
|
||||
for (key, field) in keys {
|
||||
hashes_table.remove((key.as_str(), field.as_str()))?;
|
||||
}
|
||||
let keys: Vec<String> = lists_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
lists_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<String> = streams_meta_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
streams_meta_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<(String,String)> = streams_data_table.iter()?.map(|item| {
|
||||
let binding = item.unwrap();
|
||||
let (key, field) = binding.0.value();
|
||||
(key.to_string(), field.to_string())
|
||||
}).collect();
|
||||
for (key, field) in keys {
|
||||
streams_data_table.remove((key.as_str(), field.as_str()))?;
|
||||
}
|
||||
let keys: Vec<String> = expiration_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
expiration_table.remove(key.as_str())?;
|
||||
}
|
||||
}
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_key_type(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
// Before returning type, check for expiration
|
||||
if let Some(type_val) = table.get(key)? {
|
||||
if type_val.value() == "string" {
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
if let Some(expires_at) = expiration_table.get(key)? {
|
||||
if now_in_millis() > expires_at.value() as u128 {
|
||||
// The key is expired, so it effectively has no type
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Some(type_val.value().to_string()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted/decrypted
|
||||
pub fn get(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "string" => {
|
||||
// Check expiration first (unencrypted)
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
if let Some(expires_at) = expiration_table.get(key)? {
|
||||
if now_in_millis() > expires_at.value() as u128 {
|
||||
drop(read_txn);
|
||||
self.del(key.to_string())?;
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
// Get and decrypt value
|
||||
let strings_table = read_txn.open_table(STRINGS_TABLE)?;
|
||||
match strings_table.get(key)? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
Ok(Some(value))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted before storage
|
||||
pub fn set(&self, key: String, value: String) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.insert(key.as_str(), "string")?;
|
||||
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
// Only encrypt the value, not expiration
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
strings_table.insert(key.as_str(), encrypted.as_slice())?;
|
||||
|
||||
// Remove any existing expiration since this is a regular SET
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
expiration_table.remove(key.as_str())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted before storage
|
||||
pub fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.insert(key.as_str(), "string")?;
|
||||
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
// Only encrypt the value
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
strings_table.insert(key.as_str(), encrypted.as_slice())?;
|
||||
|
||||
// Store expiration separately (unencrypted)
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
let expires_at = expire_ms + now_in_millis();
|
||||
expiration_table.insert(key.as_str(), &(expires_at as u64))?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn del(&self, key: String) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let mut hashes_table: redb::Table<(&str, &str), &[u8]> = write_txn.open_table(HASHES_TABLE)?;
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
// Remove from type table
|
||||
types_table.remove(key.as_str())?;
|
||||
|
||||
// Remove from strings table
|
||||
strings_table.remove(key.as_str())?;
|
||||
|
||||
// Remove all hash fields for this key
|
||||
let mut to_remove = Vec::new();
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
if hash_key == key.as_str() {
|
||||
to_remove.push((hash_key.to_string(), field.to_string()));
|
||||
}
|
||||
}
|
||||
drop(iter);
|
||||
|
||||
for (hash_key, field) in to_remove {
|
||||
hashes_table.remove((hash_key.as_str(), field.as_str()))?;
|
||||
}
|
||||
|
||||
// Remove from lists table
|
||||
lists_table.remove(key.as_str())?;
|
||||
|
||||
// Also remove expiration
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
expiration_table.remove(key.as_str())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn keys(&self, pattern: &str) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let mut keys = Vec::new();
|
||||
let mut iter = table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let key = entry?.0.value().to_string();
|
||||
if pattern == "*" || super::storage_extra::glob_match(pattern, &key) {
|
||||
keys.push(key);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
}
|
||||
|
||||
impl Storage {
|
||||
pub fn dbsize(&self) -> Result<i64, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
|
||||
let mut count: i64 = 0;
|
||||
let mut iter = types_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let key = entry.0.value();
|
||||
let ty = entry.1.value();
|
||||
|
||||
if ty == "string" {
|
||||
if let Some(expires_at) = expiration_table.get(key)? {
|
||||
if now_in_millis() > expires_at.value() as u128 {
|
||||
// Skip logically expired string keys
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
count += 1;
|
||||
}
|
||||
Ok(count)
|
||||
}
|
||||
}
|
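A short sketch of the expiration contract implemented by `set`, `setx`, `get`, and `ttl` above. Key names are illustrative and the `herodb::storage::Storage` import path is an assumption, as in the earlier sketch:

```rust
use std::{thread, time::Duration};

// `herodb::storage::Storage` / `herodb::error::DBError` paths are assumed.
fn expiration_example(storage: &herodb::storage::Storage) -> Result<(), herodb::error::DBError> {
    storage.set("plain".into(), "value".into())?;       // plain SET clears any TTL
    storage.setx("temp".into(), "value".into(), 100)?;   // expires ~100 ms from now
    assert_eq!(storage.ttl("plain")?, -1);                // exists, no expiration

    thread::sleep(Duration::from_millis(150));
    assert_eq!(storage.get("temp")?, None);               // expired keys are deleted lazily on GET
    assert_eq!(storage.ttl("temp")?, -2);                 // and then report "does not exist"
    Ok(())
}
```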
278 herodb/src/storage/storage_extra.rs (Normal file)
@@ -0,0 +1,278 @@
|
||||
use redb::{ReadableTable};
|
||||
use crate::error::DBError;
|
||||
use super::*;
|
||||
|
||||
impl Storage {
|
||||
// ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval
|
||||
pub fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let strings_table = read_txn.open_table(STRINGS_TABLE)?;
|
||||
|
||||
let mut result = Vec::new();
|
||||
let mut current_cursor = 0u64;
|
||||
let limit = count.unwrap_or(10) as usize;
|
||||
|
||||
let mut iter = types_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let key = entry.0.value().to_string();
|
||||
let key_type = entry.1.value().to_string();
|
||||
|
||||
if current_cursor >= cursor {
|
||||
// Apply pattern matching if specified
|
||||
let matches = if let Some(pat) = pattern {
|
||||
glob_match(pat, &key)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if matches {
|
||||
// For scan, we return key-value pairs for string types
|
||||
if key_type == "string" {
|
||||
if let Some(data) = strings_table.get(key.as_str())? {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push((key, value));
|
||||
} else {
|
||||
result.push((key, String::new()));
|
||||
}
|
||||
} else {
|
||||
// For non-string types, just return the key with type as value
|
||||
result.push((key, key_type));
|
||||
}
|
||||
|
||||
if result.len() >= limit {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
current_cursor += 1;
|
||||
}
|
||||
|
||||
let next_cursor = if result.len() < limit { 0 } else { current_cursor };
|
||||
Ok((next_cursor, result))
|
||||
}
|
||||
|
||||
pub fn ttl(&self, key: &str) -> Result<i64, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "string" => {
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
match expiration_table.get(key)? {
|
||||
Some(expires_at) => {
|
||||
let now = now_in_millis();
|
||||
let expires_at_ms = expires_at.value() as u128;
|
||||
if now >= expires_at_ms {
|
||||
Ok(-2) // Key has expired
|
||||
} else {
|
||||
Ok(((expires_at_ms - now) / 1000) as i64) // TTL in seconds
|
||||
}
|
||||
}
|
||||
None => Ok(-1), // Key exists but has no expiration
|
||||
}
|
||||
}
|
||||
Some(_) => Ok(-1), // Key exists but is not a string (no expiration support for other types)
|
||||
None => Ok(-2), // Key does not exist
|
||||
}
|
||||
}
|
||||
|
||||
pub fn exists(&self, key: &str) -> Result<bool, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "string" => {
|
||||
// Check if string key has expired
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
if let Some(expires_at) = expiration_table.get(key)? {
|
||||
if now_in_millis() > expires_at.value() as u128 {
|
||||
return Ok(false); // Key has expired
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
Some(_) => Ok(true), // Key exists and is not a string
|
||||
None => Ok(false), // Key does not exist
|
||||
}
|
||||
}
|
||||
|
||||
// -------- Expiration helpers (string keys only, consistent with TTL/EXISTS) --------
|
||||
|
||||
// Set expiry in seconds; returns true if applied (key exists and is string), false otherwise
|
||||
pub fn expire_seconds(&self, key: &str, secs: u64) -> Result<bool, DBError> {
|
||||
// Determine eligibility first to avoid holding borrows across commit
|
||||
let mut applied = false;
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let is_string = types_table
|
||||
.get(key)?
|
||||
.map(|v| v.value() == "string")
|
||||
.unwrap_or(false);
|
||||
if is_string {
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
let expires_at = now_in_millis() + (secs as u128) * 1000;
|
||||
expiration_table.insert(key, &(expires_at as u64))?;
|
||||
applied = true;
|
||||
}
|
||||
}
|
||||
write_txn.commit()?;
|
||||
Ok(applied)
|
||||
}
|
||||
|
||||
// Set expiry in milliseconds; returns true if applied (key exists and is string), false otherwise
|
||||
pub fn pexpire_millis(&self, key: &str, ms: u128) -> Result<bool, DBError> {
|
||||
let mut applied = false;
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let is_string = types_table
|
||||
.get(key)?
|
||||
.map(|v| v.value() == "string")
|
||||
.unwrap_or(false);
|
||||
if is_string {
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
let expires_at = now_in_millis() + ms;
|
||||
expiration_table.insert(key, &(expires_at as u64))?;
|
||||
applied = true;
|
||||
}
|
||||
}
|
||||
write_txn.commit()?;
|
||||
Ok(applied)
|
||||
}
|
||||
|
||||
// Remove expiry if present; returns true if removed, false otherwise
|
||||
pub fn persist(&self, key: &str) -> Result<bool, DBError> {
|
||||
let mut removed = false;
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let is_string = types_table
|
||||
.get(key)?
|
||||
.map(|v| v.value() == "string")
|
||||
.unwrap_or(false);
|
||||
if is_string {
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
if expiration_table.remove(key)?.is_some() {
|
||||
removed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
write_txn.commit()?;
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
// Absolute EXPIREAT in seconds since epoch
|
||||
// Returns true if applied (key exists and is string), false otherwise
|
||||
pub fn expire_at_seconds(&self, key: &str, ts_secs: i64) -> Result<bool, DBError> {
|
||||
let mut applied = false;
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let is_string = types_table
|
||||
.get(key)?
|
||||
.map(|v| v.value() == "string")
|
||||
.unwrap_or(false);
|
||||
if is_string {
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
let expires_at_ms: u128 = if ts_secs <= 0 { 0 } else { (ts_secs as u128) * 1000 };
|
||||
expiration_table.insert(key, &((expires_at_ms as u64)))?;
|
||||
applied = true;
|
||||
}
|
||||
}
|
||||
write_txn.commit()?;
|
||||
Ok(applied)
|
||||
}
|
||||
|
||||
// Absolute PEXPIREAT in milliseconds since epoch
|
||||
// Returns true if applied (key exists and is string), false otherwise
|
||||
pub fn pexpire_at_millis(&self, key: &str, ts_ms: i64) -> Result<bool, DBError> {
|
||||
let mut applied = false;
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let is_string = types_table
|
||||
.get(key)?
|
||||
.map(|v| v.value() == "string")
|
||||
.unwrap_or(false);
|
||||
if is_string {
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
let expires_at_ms: u128 = if ts_ms <= 0 { 0 } else { ts_ms as u128 };
|
||||
expiration_table.insert(key, &((expires_at_ms as u64)))?;
|
||||
applied = true;
|
||||
}
|
||||
}
|
||||
write_txn.commit()?;
|
||||
Ok(applied)
|
||||
}
|
||||
}
|
||||
|
||||
// Utility function for glob pattern matching
|
||||
pub fn glob_match(pattern: &str, text: &str) -> bool {
|
||||
if pattern == "*" {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Simple glob matching - supports * and ? wildcards
|
||||
let pattern_chars: Vec<char> = pattern.chars().collect();
|
||||
let text_chars: Vec<char> = text.chars().collect();
|
||||
|
||||
fn match_recursive(pattern: &[char], text: &[char], pi: usize, ti: usize) -> bool {
|
||||
if pi >= pattern.len() {
|
||||
return ti >= text.len();
|
||||
}
|
||||
|
||||
if ti >= text.len() {
|
||||
// Check if remaining pattern is all '*'
|
||||
return pattern[pi..].iter().all(|&c| c == '*');
|
||||
}
|
||||
|
||||
match pattern[pi] {
|
||||
'*' => {
|
||||
// Try matching zero or more characters
|
||||
for i in ti..=text.len() {
|
||||
if match_recursive(pattern, text, pi + 1, i) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
'?' => {
|
||||
// Match exactly one character
|
||||
match_recursive(pattern, text, pi + 1, ti + 1)
|
||||
}
|
||||
c => {
|
||||
// Match exact character
|
||||
if text[ti] == c {
|
||||
match_recursive(pattern, text, pi + 1, ti + 1)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match_recursive(&pattern_chars, &text_chars, 0, 0)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_glob_match() {
|
||||
assert!(glob_match("*", "anything"));
|
||||
assert!(glob_match("hello", "hello"));
|
||||
assert!(!glob_match("hello", "world"));
|
||||
assert!(glob_match("h*o", "hello"));
|
||||
assert!(glob_match("h*o", "ho"));
|
||||
assert!(!glob_match("h*o", "hi"));
|
||||
assert!(glob_match("h?llo", "hello"));
|
||||
assert!(!glob_match("h?llo", "hllo"));
|
||||
assert!(glob_match("*test*", "this_is_a_test_string"));
|
||||
assert!(!glob_match("*test*", "this_is_a_string"));
|
||||
}
|
||||
}
|
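A sketch of driving the `scan` cursor above to completion. The cursor is a position in iteration order, and when a batch fills up the returned value is the index of the last emitted entry, so a follow-up call may see that entry again; pattern and count are illustrative, and the `Storage` import path is assumed as before:

```rust
// `herodb::storage::Storage` / `herodb::error::DBError` paths are assumed.
fn scan_all(storage: &herodb::storage::Storage) -> Result<Vec<(String, String)>, herodb::error::DBError> {
    let mut out = Vec::new();
    let mut cursor = 0u64;
    loop {
        let (next, mut batch) = storage.scan(cursor, Some("key*"), Some(100))?;
        out.append(&mut batch);
        if next == 0 {
            break; // 0 means the iteration is finished, mirroring Redis SCAN
        }
        cursor = next;
    }
    Ok(out)
}
```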
377 herodb/src/storage/storage_hset.rs (Normal file)
@@ -0,0 +1,377 @@
|
||||
use redb::{ReadableTable};
|
||||
use crate::error::DBError;
|
||||
use super::*;
|
||||
|
||||
impl Storage {
|
||||
// ✅ ENCRYPTION APPLIED: Values are encrypted before storage
|
||||
pub fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result<i64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut new_fields = 0i64;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") | None => { // Proceed if hash or new key
|
||||
// Set the type to hash (only if new key or existing hash)
|
||||
types_table.insert(key, "hash")?;
|
||||
|
||||
for (field, value) in pairs {
|
||||
// Check if field already exists
|
||||
let exists = hashes_table.get((key, field.as_str()))?.is_some();
|
||||
|
||||
// Encrypt the value before storing
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
hashes_table.insert((key, field.as_str()), encrypted.as_slice())?;
|
||||
|
||||
if !exists {
|
||||
new_fields += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(new_fields)
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is decrypted after retrieval
|
||||
pub fn hget(&self, key: &str, field: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let key_type = types_table.get(key)?.map(|v| v.value().to_string());
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
match hashes_table.get((key, field))? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
Ok(Some(value))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: All values are decrypted after retrieval
|
||||
pub fn hgetall(&self, key: &str) -> Result<Vec<(String, String)>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
if hash_key == key {
|
||||
let decrypted = self.decrypt_if_needed(entry.1.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push((field.to_string(), value));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hdel(&self, key: &str, fields: Vec<String>) -> Result<i64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut deleted = 0i64;
|
||||
|
||||
// First check if key exists and is a hash
|
||||
let key_type = {
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
for field in fields {
|
||||
if hashes_table.remove((key, field.as_str()))?.is_some() {
|
||||
deleted += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if hash is now empty and remove type if so
|
||||
let mut has_fields = false;
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, _) = entry.0.value();
|
||||
if hash_key == key {
|
||||
has_fields = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
drop(iter);
|
||||
|
||||
if !has_fields {
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.remove(key)?;
|
||||
}
|
||||
}
|
||||
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => {} // Key does not exist, nothing to delete, return 0 deleted
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(deleted)
|
||||
}
|
||||
|
||||
pub fn hexists(&self, key: &str, field: &str) -> Result<bool, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
Ok(hashes_table.get((key, field))?.is_some())
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hkeys(&self, key: &str) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
if hash_key == key {
|
||||
result.push(field.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: All values are decrypted after retrieval
|
||||
pub fn hvals(&self, key: &str) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, _) = entry.0.value();
|
||||
if hash_key == key {
|
||||
let decrypted = self.decrypt_if_needed(entry.1.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push(value);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hlen(&self, key: &str) -> Result<i64, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut count = 0i64;
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, _) = entry.0.value();
|
||||
if hash_key == key {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(count)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(0),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval
|
||||
pub fn hmget(&self, key: &str, fields: Vec<String>) -> Result<Vec<Option<String>>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
for field in fields {
|
||||
match hashes_table.get((key, field.as_str()))? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push(Some(value));
|
||||
}
|
||||
None => result.push(None),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(fields.into_iter().map(|_| None).collect()),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted before storage
|
||||
pub fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result<bool, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut result = false;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") | None => { // Proceed if hash or new key
|
||||
// Check if field already exists
|
||||
if hashes_table.get((key, field))?.is_none() {
|
||||
// Set the type to hash (only if new key or existing hash)
|
||||
types_table.insert(key, "hash")?;
|
||||
|
||||
// Encrypt the value before storing
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
hashes_table.insert((key, field), encrypted.as_slice())?;
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval
|
||||
pub fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
let mut current_cursor = 0u64;
|
||||
let limit = count.unwrap_or(10) as usize;
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
|
||||
if hash_key == key {
|
||||
if current_cursor >= cursor {
|
||||
let field_str = field.to_string();
|
||||
|
||||
// Apply pattern matching if specified
|
||||
let matches = if let Some(pat) = pattern {
|
||||
super::storage_extra::glob_match(pat, &field_str)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if matches {
|
||||
let decrypted = self.decrypt_if_needed(entry.1.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push((field_str, value));
|
||||
|
||||
if result.len() >= limit {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
current_cursor += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let next_cursor = if result.len() < limit { 0 } else { current_cursor };
|
||||
Ok((next_cursor, result))
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok((0, Vec::new())),
|
||||
}
|
||||
}
|
||||
}
|
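A sketch of the hash API above: multi-field `hset`, then `hscan` with a glob pattern. Key and field names are illustrative, and the `Storage` import path is assumed as before:

```rust
// `herodb::storage::Storage` / `herodb::error::DBError` paths are assumed.
fn hash_example(storage: &herodb::storage::Storage) -> Result<(), herodb::error::DBError> {
    let added = storage.hset("user:1", vec![
        ("name".to_string(), "alice".to_string()),
        ("email".to_string(), "alice@example.com".to_string()),
    ])?;
    assert_eq!(added, 2); // both fields were new

    // HSCAN filters fields with the same glob matcher used by KEYS/SCAN.
    let (_cursor, fields) = storage.hscan("user:1", 0, Some("na*"), Some(10))?;
    assert_eq!(fields, vec![("name".to_string(), "alice".to_string())]);
    Ok(())
}
```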
403 herodb/src/storage/storage_lists.rs (Normal file)
@@ -0,0 +1,403 @@
|
||||
use redb::{ReadableTable};
|
||||
use crate::error::DBError;
|
||||
use super::*;
|
||||
|
||||
impl Storage {
|
||||
// ✅ ENCRYPTION APPLIED: Elements are encrypted before storage
|
||||
pub fn lpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut _length = 0i64;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
// Set the type to list
|
||||
types_table.insert(key, "list")?;
|
||||
|
||||
// Get current list or create empty one
|
||||
let mut list: Vec<String> = match lists_table.get(key)? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
serde_json::from_slice(&decrypted)?
|
||||
}
|
||||
None => Vec::new(),
|
||||
};
|
||||
|
||||
// Add elements to the front (left)
|
||||
for element in elements.into_iter() {
|
||||
list.insert(0, element);
|
||||
}
|
||||
|
||||
_length = list.len() as i64;
|
||||
|
||||
// Encrypt and store the updated list
|
||||
let serialized = serde_json::to_vec(&list)?;
|
||||
let encrypted = self.encrypt_if_needed(&serialized)?;
|
||||
lists_table.insert(key, encrypted.as_slice())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(_length)
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Elements are encrypted before storage
|
||||
pub fn rpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut _length = 0i64;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
// Set the type to list
|
||||
types_table.insert(key, "list")?;
|
||||
|
||||
// Get current list or create empty one
|
||||
let mut list: Vec<String> = match lists_table.get(key)? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
serde_json::from_slice(&decrypted)?
|
||||
}
|
||||
None => Vec::new(),
|
||||
};
|
||||
|
||||
// Add elements to the end (right)
|
||||
list.extend(elements);
|
||||
_length = list.len() as i64;
|
||||
|
||||
// Encrypt and store the updated list
|
||||
let serialized = serde_json::to_vec(&list)?;
|
||||
let encrypted = self.encrypt_if_needed(&serialized)?;
|
||||
lists_table.insert(key, encrypted.as_slice())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(_length)
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
|
||||
pub fn lpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
// First check if key exists and is a list, and get the data
|
||||
let list_data = {
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
let result = match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "list" => {
|
||||
if let Some(data) = lists_table.get(key)? {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let list: Vec<String> = serde_json::from_slice(&decrypted)?;
|
||||
Some(list)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
result
|
||||
};
|
||||
|
||||
if let Some(mut list) = list_data {
|
||||
let pop_count = std::cmp::min(count as usize, list.len());
|
||||
for _ in 0..pop_count {
|
||||
if !list.is_empty() {
|
||||
result.push(list.remove(0));
|
||||
}
|
||||
}
|
||||
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
if list.is_empty() {
|
||||
// Remove the key if list is empty
|
||||
lists_table.remove(key)?;
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.remove(key)?;
|
||||
} else {
|
||||
// Encrypt and store the updated list
|
||||
let serialized = serde_json::to_vec(&list)?;
|
||||
let encrypted = self.encrypt_if_needed(&serialized)?;
|
||||
lists_table.insert(key, encrypted.as_slice())?;
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
|
||||
pub fn rpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
// First check if key exists and is a list, and get the data
|
||||
let list_data = {
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
let result = match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "list" => {
|
||||
if let Some(data) = lists_table.get(key)? {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let list: Vec<String> = serde_json::from_slice(&decrypted)?;
|
||||
Some(list)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
result
|
||||
};
|
||||
|
||||
if let Some(mut list) = list_data {
|
||||
let pop_count = std::cmp::min(count as usize, list.len());
|
||||
for _ in 0..pop_count {
|
||||
if !list.is_empty() {
|
||||
result.push(list.pop().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
if list.is_empty() {
|
||||
// Remove the key if list is empty
|
||||
lists_table.remove(key)?;
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.remove(key)?;
|
||||
} else {
|
||||
// Encrypt and store the updated list
|
||||
let serialized = serde_json::to_vec(&list)?;
|
||||
let encrypted = self.encrypt_if_needed(&serialized)?;
|
||||
lists_table.insert(key, encrypted.as_slice())?;
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn llen(&self, key: &str) -> Result<i64, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "list" => {
|
||||
let lists_table = read_txn.open_table(LISTS_TABLE)?;
|
||||
match lists_table.get(key)? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let list: Vec<String> = serde_json::from_slice(&decrypted)?;
|
||||
Ok(list.len() as i64)
|
||||
}
|
||||
None => Ok(0),
|
||||
}
|
||||
}
|
||||
_ => Ok(0),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Element is decrypted after retrieval
|
||||
pub fn lindex(&self, key: &str, index: i64) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "list" => {
|
||||
let lists_table = read_txn.open_table(LISTS_TABLE)?;
|
||||
match lists_table.get(key)? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let list: Vec<String> = serde_json::from_slice(&decrypted)?;
|
||||
|
||||
let actual_index = if index < 0 {
|
||||
list.len() as i64 + index
|
||||
} else {
|
||||
index
|
||||
};
|
||||
|
||||
if actual_index >= 0 && (actual_index as usize) < list.len() {
|
||||
Ok(Some(list[actual_index as usize].clone()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval
|
||||
pub fn lrange(&self, key: &str, start: i64, stop: i64) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "list" => {
|
||||
let lists_table = read_txn.open_table(LISTS_TABLE)?;
|
||||
match lists_table.get(key)? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let list: Vec<String> = serde_json::from_slice(&decrypted)?;
|
||||
|
||||
if list.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let len = list.len() as i64;
|
||||
let start_idx = if start < 0 { std::cmp::max(0, len + start) } else { std::cmp::min(start, len) };
|
||||
let stop_idx = if stop < 0 { std::cmp::max(-1, len + stop) } else { std::cmp::min(stop, len - 1) };
|
||||
|
||||
if start_idx > stop_idx || start_idx >= len {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let start_usize = start_idx as usize;
|
||||
let stop_usize = (stop_idx + 1) as usize;
|
||||
|
||||
Ok(list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec())
|
||||
}
|
||||
None => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
_ => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
|
||||
pub fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
// First check if key exists and is a list, and get the data
|
||||
let list_data = {
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
let result = match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "list" => {
|
||||
if let Some(data) = lists_table.get(key)? {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let list: Vec<String> = serde_json::from_slice(&decrypted)?;
|
||||
Some(list)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
result
|
||||
};
|
||||
|
||||
if let Some(list) = list_data {
|
||||
if list.is_empty() {
|
||||
write_txn.commit()?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let len = list.len() as i64;
|
||||
let start_idx = if start < 0 { std::cmp::max(0, len + start) } else { std::cmp::min(start, len) };
|
||||
let stop_idx = if stop < 0 { std::cmp::max(-1, len + stop) } else { std::cmp::min(stop, len - 1) };
|
||||
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
if start_idx > stop_idx || start_idx >= len {
|
||||
// Remove the entire list
|
||||
lists_table.remove(key)?;
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.remove(key)?;
|
||||
} else {
|
||||
let start_usize = start_idx as usize;
|
||||
let stop_usize = (stop_idx + 1) as usize;
|
||||
let trimmed = list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec();
|
||||
|
||||
if trimmed.is_empty() {
|
||||
lists_table.remove(key)?;
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.remove(key)?;
|
||||
} else {
|
||||
// Encrypt and store the trimmed list
|
||||
let serialized = serde_json::to_vec(&trimmed)?;
|
||||
let encrypted = self.encrypt_if_needed(&serialized)?;
|
||||
lists_table.insert(key, encrypted.as_slice())?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
|
||||
pub fn lrem(&self, key: &str, count: i64, element: &str) -> Result<i64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut removed = 0i64;
|
||||
|
||||
// First check if key exists and is a list, and get the data
|
||||
let list_data = {
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
let result = match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "list" => {
|
||||
if let Some(data) = lists_table.get(key)? {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let list: Vec<String> = serde_json::from_slice(&decrypted)?;
|
||||
Some(list)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
result
|
||||
};
|
||||
|
||||
if let Some(mut list) = list_data {
|
||||
if count == 0 {
|
||||
// Remove all occurrences
|
||||
let original_len = list.len();
|
||||
list.retain(|x| x != element);
|
||||
removed = (original_len - list.len()) as i64;
|
||||
} else if count > 0 {
|
||||
// Remove first count occurrences
|
||||
let mut to_remove = count as usize;
|
||||
list.retain(|x| {
|
||||
if x == element && to_remove > 0 {
|
||||
to_remove -= 1;
|
||||
removed += 1;
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// Remove last |count| occurrences
|
||||
let mut to_remove = (-count) as usize;
|
||||
for i in (0..list.len()).rev() {
|
||||
if list[i] == element && to_remove > 0 {
|
||||
list.remove(i);
|
||||
to_remove -= 1;
|
||||
removed += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
if list.is_empty() {
|
||||
lists_table.remove(key)?;
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.remove(key)?;
|
||||
} else {
|
||||
// Encrypt and store the updated list
|
||||
let serialized = serde_json::to_vec(&list)?;
|
||||
let encrypted = self.encrypt_if_needed(&serialized)?;
|
||||
lists_table.insert(key, encrypted.as_slice())?;
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(removed)
|
||||
}
|
||||
}
|
@@ -14,7 +14,7 @@ NC='\033[0m' # No Color

# Configuration
DB_DIR="./test_db"
-PORT=6379
+PORT=6381
SERVER_PID=""

# Function to print colored output
@@ -288,7 +288,7 @@ main() {

# Build the project
print_status "Building HeroDB..."
-if ! cargo build --release; then
+if ! cargo build -p herodb --release; then
print_error "Failed to build HeroDB"
exit 1
fi
@@ -298,7 +298,7 @@ main() {

# Start the server
print_status "Starting HeroDB server..."
-./target/release/redis-rs --dir "$DB_DIR" --port $PORT &
+../target/release/herodb --dir "$DB_DIR" --port $PORT &
SERVER_PID=$!

# Wait for server to start
@@ -352,4 +352,4 @@ check_dependencies() {

# Run dependency check and main function
check_dependencies
-main "$@"
+main "$@"
62 herodb/tests/debug_hset.rs (Normal file)
@@ -0,0 +1,62 @@
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::sleep;

// Helper function to send command and get response
async fn send_command(stream: &mut TcpStream, command: &str) -> String {
    stream.write_all(command.as_bytes()).await.unwrap();

    let mut buffer = [0; 1024];
    let n = stream.read(&mut buffer).await.unwrap();
    String::from_utf8_lossy(&buffer[..n]).to_string()
}

#[tokio::test]
async fn debug_hset_simple() {
    // Clean up any existing test database
    let test_dir = "/tmp/herodb_debug_hset";
    let _ = std::fs::remove_dir_all(test_dir);
    std::fs::create_dir_all(test_dir).unwrap();

    let port = 16500;
    let option = DBOption {
        dir: test_dir.to_string(),
        port,
        debug: false,
        encrypt: false,
        encryption_key: None,
    };

    let mut server = Server::new(option).await;

    // Start server in background
    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
            .await
            .unwrap();

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = server.handle(stream).await;
            }
        }
    });

    sleep(Duration::from_millis(200)).await;

    let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap();

    // Test simple HSET
    println!("Testing HSET...");
    let response = send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await;
    println!("HSET response: {}", response);
    assert!(response.contains("1"), "Expected '1' but got: {}", response);

    // Test HGET
    println!("Testing HGET...");
    let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await;
    println!("HGET response: {}", response);
    assert!(response.contains("value1"), "Expected 'value1' but got: {}", response);
}
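The test above hard-codes its RESP frames. As a reference, a small sketch (not part of the diff) of how that framing is built: `*<count>` is the array header and `$<len>` prefixes each bulk string:

```rust
fn resp_array(args: &[&str]) -> String {
    let mut frame = format!("*{}\r\n", args.len());
    for arg in args {
        frame.push_str(&format!("${}\r\n{}\r\n", arg.len(), arg));
    }
    frame
}

#[test]
fn resp_array_matches_hardcoded_frame() {
    assert_eq!(
        resp_array(&["HSET", "hash", "field1", "value1"]),
        "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n"
    );
}
```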
56 herodb/tests/debug_hset_simple.rs (Normal file)
@@ -0,0 +1,56 @@
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::sleep;

#[tokio::test]
async fn debug_hset_return_value() {
    let test_dir = "/tmp/herodb_debug_hset_return";

    // Clean up any existing test data
    let _ = std::fs::remove_dir_all(&test_dir);
    std::fs::create_dir_all(&test_dir).unwrap();

    let option = DBOption {
        dir: test_dir.to_string(),
        port: 16390,
        debug: false,
        encrypt: false,
        encryption_key: None,
    };

    let mut server = Server::new(option).await;

    // Start server in background
    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind("127.0.0.1:16390")
            .await
            .unwrap();

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = server.handle(stream).await;
            }
        }
    });

    sleep(Duration::from_millis(200)).await;

    // Connect and test HSET
    let mut stream = TcpStream::connect("127.0.0.1:16390").await.unwrap();

    // Send HSET command
    let cmd = "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n";
    stream.write_all(cmd.as_bytes()).await.unwrap();

    let mut buffer = [0; 1024];
    let n = stream.read(&mut buffer).await.unwrap();
    let response = String::from_utf8_lossy(&buffer[..n]);

    println!("HSET response: {}", response);
    println!("Response bytes: {:?}", &buffer[..n]);

    // Check if response contains "1"
    assert!(response.contains("1"), "Expected response to contain '1', got: {}", response);
}
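The single `read()` into a 1024-byte buffer above is usually enough for a short integer reply on localhost, but it is not guaranteed to return the whole frame. A slightly more careful sketch for single-line replies (`+OK`, `:1`, `-ERR ...`), reading until the trailing CRLF:

```rust
// Sketch only: reads one simple RESP line; not a full RESP parser.
async fn read_simple_reply(stream: &mut tokio::net::TcpStream) -> String {
    use tokio::io::AsyncReadExt;
    let mut buf = Vec::new();
    let mut byte = [0u8; 1];
    while !buf.ends_with(b"\r\n") {
        let n = stream.read(&mut byte).await.unwrap();
        if n == 0 {
            break; // connection closed
        }
        buf.push(byte[0]);
    }
    String::from_utf8_lossy(&buf).to_string()
}
```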
35 herodb/tests/debug_protocol.rs (Normal file)
@@ -0,0 +1,35 @@
use herodb::protocol::Protocol;
use herodb::cmd::Cmd;

#[test]
fn test_protocol_parsing() {
    // Test TYPE command parsing
    let type_cmd = "*2\r\n$4\r\nTYPE\r\n$7\r\nnoexist\r\n";
    println!("Parsing TYPE command: {}", type_cmd.replace("\r\n", "\\r\\n"));

    match Protocol::from(type_cmd) {
        Ok((protocol, _)) => {
            println!("Protocol parsed successfully: {:?}", protocol);
            match Cmd::from(type_cmd) {
                Ok((cmd, _, _)) => println!("Command parsed successfully: {:?}", cmd),
                Err(e) => println!("Command parsing failed: {:?}", e),
            }
        }
        Err(e) => println!("Protocol parsing failed: {:?}", e),
    }

    // Test HEXISTS command parsing
    let hexists_cmd = "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n";
    println!("\nParsing HEXISTS command: {}", hexists_cmd.replace("\r\n", "\\r\\n"));

    match Protocol::from(hexists_cmd) {
        Ok((protocol, _)) => {
            println!("Protocol parsed successfully: {:?}", protocol);
            match Cmd::from(hexists_cmd) {
                Ok((cmd, _, _)) => println!("Command parsed successfully: {:?}", cmd),
                Err(e) => println!("Command parsing failed: {:?}", e),
            }
        }
        Err(e) => println!("Protocol parsing failed: {:?}", e),
    }
}
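Since the test above only prints parse results, here is a hedged sketch of an asserting variant, using only the `Protocol::from` and `Cmd::from` calls and result shapes already shown:

```rust
use herodb::protocol::Protocol;
use herodb::cmd::Cmd;

#[test]
fn commands_parse_without_error() {
    let frames = [
        "*2\r\n$4\r\nTYPE\r\n$7\r\nnoexist\r\n",
        "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n",
    ];
    for frame in frames {
        assert!(Protocol::from(frame).is_ok(), "protocol failed for {:?}", frame);
        assert!(Cmd::from(frame).is_ok(), "cmd failed for {:?}", frame);
    }
}
```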
317 herodb/tests/redis_integration_tests.rs (Normal file)
@@ -0,0 +1,317 @@
|
||||
use redis::{Client, Commands, Connection, RedisResult};
|
||||
use std::process::{Child, Command};
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
// Helper function to get Redis connection, retrying until successful
|
||||
fn get_redis_connection(port: u16) -> Connection {
|
||||
let connection_info = format!("redis://127.0.0.1:{}", port);
|
||||
let client = Client::open(connection_info).unwrap();
|
||||
let mut attempts = 0;
|
||||
loop {
|
||||
match client.get_connection() {
|
||||
Ok(mut conn) => {
|
||||
if redis::cmd("PING").query::<String>(&mut conn).is_ok() {
|
||||
return conn;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
if attempts >= 120 {
|
||||
panic!(
|
||||
"Failed to connect to Redis server after 120 attempts: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
attempts += 1;
|
||||
std::thread::sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
|
||||
// A guard to ensure the server process is killed when it goes out of scope
|
||||
struct ServerProcessGuard {
|
||||
process: Child,
|
||||
test_dir: String,
|
||||
}
|
||||
|
||||
impl Drop for ServerProcessGuard {
|
||||
fn drop(&mut self) {
|
||||
println!("Killing server process (pid: {})...", self.process.id());
|
||||
if let Err(e) = self.process.kill() {
|
||||
eprintln!("Failed to kill server process: {}", e);
|
||||
}
|
||||
match self.process.wait() {
|
||||
Ok(status) => println!("Server process exited with: {}", status),
|
||||
Err(e) => eprintln!("Failed to wait on server process: {}", e),
|
||||
}
|
||||
|
||||
// Clean up the specific test directory
|
||||
println!("Cleaning up test directory: {}", self.test_dir);
|
||||
if let Err(e) = std::fs::remove_dir_all(&self.test_dir) {
|
||||
eprintln!("Failed to clean up test directory: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to set up the server and return a connection
|
||||
fn setup_server() -> (ServerProcessGuard, u16) {
|
||||
use std::sync::atomic::{AtomicU16, Ordering};
|
||||
static PORT_COUNTER: AtomicU16 = AtomicU16::new(16400);
|
||||
let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
let test_dir = format!("/tmp/herodb_test_{}", port);
|
||||
|
||||
// Clean up previous test data
|
||||
if std::path::Path::new(&test_dir).exists() {
|
||||
let _ = std::fs::remove_dir_all(&test_dir);
|
||||
}
|
||||
std::fs::create_dir_all(&test_dir).unwrap();
|
||||
|
||||
// Start the server in a subprocess
|
||||
let child = Command::new("cargo")
|
||||
.args(&[
|
||||
"run",
|
||||
"--",
|
||||
"--dir",
|
||||
&test_dir,
|
||||
"--port",
|
||||
&port.to_string(),
|
||||
"--debug",
|
||||
])
|
||||
.spawn()
|
||||
.expect("Failed to start server process");
|
||||
|
||||
// Create a new guard that also owns the test directory path
|
||||
let guard = ServerProcessGuard {
|
||||
process: child,
|
||||
test_dir,
|
||||
};
|
||||
|
||||
// Give the server time to build and start (cargo run may compile first)
|
||||
std::thread::sleep(Duration::from_millis(2500));
|
||||
|
||||
(guard, port)
|
||||
}
|
||||
|
||||
async fn cleanup_keys(conn: &mut Connection) {
|
||||
let keys: Vec<String> = redis::cmd("KEYS").arg("*").query(conn).unwrap();
|
||||
if !keys.is_empty() {
|
||||
for key in keys {
|
||||
let _: () = redis::cmd("DEL").arg(key).query(conn).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn all_tests() {
|
||||
let (_server_guard, port) = setup_server();
|
||||
let mut conn = get_redis_connection(port);
|
||||
|
||||
// Run all tests using the same connection
|
||||
test_basic_ping(&mut conn).await;
|
||||
test_string_operations(&mut conn).await;
|
||||
test_incr_operations(&mut conn).await;
|
||||
test_hash_operations(&mut conn).await;
|
||||
test_expiration(&mut conn).await;
|
||||
test_scan_operations(&mut conn).await;
|
||||
test_scan_with_count(&mut conn).await;
|
||||
test_hscan_operations(&mut conn).await;
|
||||
test_transaction_operations(&mut conn).await;
|
||||
test_discard_transaction(&mut conn).await;
|
||||
test_type_command(&mut conn).await;
|
||||
test_info_command(&mut conn).await;
|
||||
}
|
||||
|
||||
async fn test_basic_ping(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let result: String = redis::cmd("PING").query(conn).unwrap();
|
||||
assert_eq!(result, "PONG");
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_string_operations(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let _: () = conn.set("key", "value").unwrap();
|
||||
let result: String = conn.get("key").unwrap();
|
||||
assert_eq!(result, "value");
|
||||
let result: Option<String> = conn.get("noexist").unwrap();
|
||||
assert_eq!(result, None);
|
||||
let deleted: i32 = conn.del("key").unwrap();
|
||||
assert_eq!(deleted, 1);
|
||||
let result: Option<String> = conn.get("key").unwrap();
|
||||
assert_eq!(result, None);
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_incr_operations(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let result: i32 = redis::cmd("INCR").arg("counter").query(conn).unwrap();
|
||||
assert_eq!(result, 1);
|
||||
let result: i32 = redis::cmd("INCR").arg("counter").query(conn).unwrap();
|
||||
assert_eq!(result, 2);
|
||||
let _: () = conn.set("string", "hello").unwrap();
|
||||
let result: RedisResult<i32> = redis::cmd("INCR").arg("string").query(conn);
|
||||
assert!(result.is_err());
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_hash_operations(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let result: i32 = conn.hset("hash", "field1", "value1").unwrap();
|
||||
assert_eq!(result, 1);
|
||||
let result: String = conn.hget("hash", "field1").unwrap();
|
||||
assert_eq!(result, "value1");
|
||||
let _: () = conn.hset("hash", "field2", "value2").unwrap();
|
||||
let _: () = conn.hset("hash", "field3", "value3").unwrap();
|
||||
let result: std::collections::HashMap<String, String> = conn.hgetall("hash").unwrap();
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(result.get("field1").unwrap(), "value1");
|
||||
assert_eq!(result.get("field2").unwrap(), "value2");
|
||||
assert_eq!(result.get("field3").unwrap(), "value3");
|
||||
let result: i32 = conn.hlen("hash").unwrap();
|
||||
assert_eq!(result, 3);
|
||||
let result: bool = conn.hexists("hash", "field1").unwrap();
|
||||
assert_eq!(result, true);
|
||||
let result: bool = conn.hexists("hash", "noexist").unwrap();
|
||||
assert_eq!(result, false);
|
||||
let result: i32 = conn.hdel("hash", "field1").unwrap();
|
||||
assert_eq!(result, 1);
|
||||
let mut result: Vec<String> = conn.hkeys("hash").unwrap();
|
||||
result.sort();
|
||||
assert_eq!(result, vec!["field2", "field3"]);
|
||||
let mut result: Vec<String> = conn.hvals("hash").unwrap();
|
||||
result.sort();
|
||||
assert_eq!(result, vec!["value2", "value3"]);
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_expiration(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let _: () = conn.set_ex("expkey", "value", 1).unwrap();
|
||||
let result: i32 = conn.ttl("expkey").unwrap();
|
||||
assert!(result == 1 || result == 0);
|
||||
let result: bool = conn.exists("expkey").unwrap();
|
||||
assert_eq!(result, true);
|
||||
sleep(Duration::from_millis(1100)).await;
|
||||
let result: Option<String> = conn.get("expkey").unwrap();
|
||||
assert_eq!(result, None);
|
||||
let result: i32 = conn.ttl("expkey").unwrap();
|
||||
assert_eq!(result, -2);
|
||||
let result: bool = conn.exists("expkey").unwrap();
|
||||
assert_eq!(result, false);
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_scan_operations(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
for i in 0..5 {
|
||||
let _: () = conn.set(format!("key{}", i), format!("value{}", i)).unwrap();
|
||||
}
|
||||
let result: (u64, Vec<String>) = redis::cmd("SCAN")
|
||||
.arg(0)
|
||||
.arg("MATCH")
|
||||
.arg("key*")
|
||||
.arg("COUNT")
|
||||
.arg(10)
|
||||
.query(conn)
|
||||
.unwrap();
|
||||
let (cursor, keys) = result;
|
||||
assert_eq!(cursor, 0);
|
||||
assert_eq!(keys.len(), 5);
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_scan_with_count(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
for i in 0..15 {
|
||||
let _: () = conn.set(format!("scan_key{}", i), i).unwrap();
|
||||
}
|
||||
let mut cursor = 0;
|
||||
let mut all_keys = std::collections::HashSet::new();
|
||||
loop {
|
||||
let (next_cursor, keys): (u64, Vec<String>) = redis::cmd("SCAN")
|
||||
.arg(cursor)
|
||||
.arg("MATCH")
|
||||
.arg("scan_key*")
|
||||
.arg("COUNT")
|
||||
.arg(5)
|
||||
.query(conn)
|
||||
.unwrap();
|
||||
for key in keys {
|
||||
all_keys.insert(key);
|
||||
}
|
||||
cursor = next_cursor;
|
||||
if cursor == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert_eq!(all_keys.len(), 15);
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_hscan_operations(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
for i in 0..3 {
|
||||
let _: () = conn.hset("testhash", format!("field{}", i), format!("value{}", i)).unwrap();
|
||||
}
|
||||
let result: (u64, Vec<String>) = redis::cmd("HSCAN")
|
||||
.arg("testhash")
|
||||
.arg(0)
|
||||
.arg("MATCH")
|
||||
.arg("*")
|
||||
.arg("COUNT")
|
||||
.arg(10)
|
||||
.query(conn)
|
||||
.unwrap();
|
||||
let (cursor, fields) = result;
|
||||
assert_eq!(cursor, 0);
|
||||
assert_eq!(fields.len(), 6);
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_transaction_operations(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let _: () = redis::cmd("MULTI").query(conn).unwrap();
|
||||
let _: () = redis::cmd("SET").arg("key1").arg("value1").query(conn).unwrap();
|
||||
let _: () = redis::cmd("SET").arg("key2").arg("value2").query(conn).unwrap();
|
||||
let _: Vec<String> = redis::cmd("EXEC").query(conn).unwrap();
|
||||
let result: String = conn.get("key1").unwrap();
|
||||
assert_eq!(result, "value1");
|
||||
let result: String = conn.get("key2").unwrap();
|
||||
assert_eq!(result, "value2");
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_discard_transaction(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let _: () = redis::cmd("MULTI").query(conn).unwrap();
|
||||
let _: () = redis::cmd("SET").arg("discard").arg("value").query(conn).unwrap();
|
||||
let _: () = redis::cmd("DISCARD").query(conn).unwrap();
|
||||
let result: Option<String> = conn.get("discard").unwrap();
|
||||
assert_eq!(result, None);
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
async fn test_type_command(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let _: () = conn.set("string", "value").unwrap();
|
||||
let result: String = redis::cmd("TYPE").arg("string").query(conn).unwrap();
|
||||
assert_eq!(result, "string");
|
||||
let _: () = conn.hset("hash", "field", "value").unwrap();
|
||||
let result: String = redis::cmd("TYPE").arg("hash").query(conn).unwrap();
|
||||
assert_eq!(result, "hash");
|
||||
let result: String = redis::cmd("TYPE").arg("noexist").query(conn).unwrap();
|
||||
assert_eq!(result, "none");
|
||||
cleanup_keys(conn).await;
|
||||
}
|
||||
|
||||
|
||||
async fn test_info_command(conn: &mut Connection) {
|
||||
cleanup_keys(conn).await;
|
||||
let result: String = redis::cmd("INFO").query(conn).unwrap();
|
||||
assert!(result.contains("redis_version"));
|
||||
let result: String = redis::cmd("INFO").arg("replication").query(conn).unwrap();
|
||||
assert!(result.contains("role:master"));
|
||||
cleanup_keys(conn).await;
|
||||
}
|
608
herodb/tests/redis_tests.rs
Normal file
@@ -0,0 +1,608 @@
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::sleep;

// Helper function to start a test server
async fn start_test_server(test_name: &str) -> (Server, u16) {
    use std::sync::atomic::{AtomicU16, Ordering};
    static PORT_COUNTER: AtomicU16 = AtomicU16::new(16379);

    let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
    let test_dir = format!("/tmp/herodb_test_{}", test_name);

    // Clean up and create test directory
    let _ = std::fs::remove_dir_all(&test_dir);
    std::fs::create_dir_all(&test_dir).unwrap();

    let option = DBOption {
        dir: test_dir,
        port,
        debug: true,
        encrypt: false,
        encryption_key: None,
    };

    let server = Server::new(option).await;
    (server, port)
}

// Helper function to connect to the test server
async fn connect_to_server(port: u16) -> TcpStream {
    let mut attempts = 0;
    loop {
        match TcpStream::connect(format!("127.0.0.1:{}", port)).await {
            Ok(stream) => return stream,
            Err(_) if attempts < 10 => {
                attempts += 1;
                sleep(Duration::from_millis(100)).await;
            }
            Err(e) => panic!("Failed to connect to test server: {}", e),
        }
    }
}

// Helper function to send command and get response
async fn send_command(stream: &mut TcpStream, command: &str) -> String {
    stream.write_all(command.as_bytes()).await.unwrap();

    let mut buffer = [0; 1024];
    let n = stream.read(&mut buffer).await.unwrap();
    String::from_utf8_lossy(&buffer[..n]).to_string()
}

#[tokio::test]
async fn test_basic_ping() {
    let (mut server, port) = start_test_server("ping").await;

    // Start server in background
    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
            .await
            .unwrap();

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = server.handle(stream).await;
            }
        }
    });

    sleep(Duration::from_millis(100)).await;

    let mut stream = connect_to_server(port).await;
    let response = send_command(&mut stream, "*1\r\n$4\r\nPING\r\n").await;
    assert!(response.contains("PONG"));
}

#[tokio::test]
|
||||
async fn test_string_operations() {
|
||||
let (mut server, port) = start_test_server("string").await;
|
||||
|
||||
// Start server in background
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test SET
|
||||
let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n").await;
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Test GET
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n").await;
|
||||
assert!(response.contains("value"));
|
||||
|
||||
// Test GET non-existent key
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$7\r\nnoexist\r\n").await;
|
||||
assert!(response.contains("$-1")); // NULL response
|
||||
|
||||
// Test DEL
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nDEL\r\n$3\r\nkey\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test GET after DEL
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n").await;
|
||||
assert!(response.contains("$-1")); // NULL response
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_incr_operations() {
|
||||
let (mut server, port) = start_test_server("incr").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test INCR on non-existent key
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nINCR\r\n$7\r\ncounter\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test INCR on existing key
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nINCR\r\n$7\r\ncounter\r\n").await;
|
||||
assert!(response.contains("2"));
|
||||
|
||||
// Test INCR on string value (should fail)
|
||||
send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nhello\r\n").await;
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nINCR\r\n$6\r\nstring\r\n").await;
|
||||
assert!(response.contains("ERR"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hash_operations() {
|
||||
let (mut server, port) = start_test_server("hash").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test HSET
|
||||
let response = send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await;
|
||||
assert!(response.contains("1")); // 1 new field
|
||||
|
||||
// Test HGET
|
||||
let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await;
|
||||
assert!(response.contains("value1"));
|
||||
|
||||
// Test HSET multiple fields
|
||||
let response = send_command(&mut stream, "*6\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield2\r\n$6\r\nvalue2\r\n$6\r\nfield3\r\n$6\r\nvalue3\r\n").await;
|
||||
assert!(response.contains("2")); // 2 new fields
|
||||
|
||||
// Test HGETALL
|
||||
let response = send_command(&mut stream, "*2\r\n$7\r\nHGETALL\r\n$4\r\nhash\r\n").await;
|
||||
assert!(response.contains("field1"));
|
||||
assert!(response.contains("value1"));
|
||||
assert!(response.contains("field2"));
|
||||
assert!(response.contains("value2"));
|
||||
|
||||
// Test HLEN
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nHLEN\r\n$4\r\nhash\r\n").await;
|
||||
assert!(response.contains("3"));
|
||||
|
||||
// Test HEXISTS
|
||||
let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n").await;
|
||||
assert!(response.contains("0"));
|
||||
|
||||
// Test HDEL
|
||||
let response = send_command(&mut stream, "*3\r\n$4\r\nHDEL\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test HKEYS
|
||||
let response = send_command(&mut stream, "*2\r\n$5\r\nHKEYS\r\n$4\r\nhash\r\n").await;
|
||||
assert!(response.contains("field2"));
|
||||
assert!(response.contains("field3"));
|
||||
assert!(!response.contains("field1")); // Should be deleted
|
||||
|
||||
// Test HVALS
|
||||
let response = send_command(&mut stream, "*2\r\n$5\r\nHVALS\r\n$4\r\nhash\r\n").await;
|
||||
assert!(response.contains("value2"));
|
||||
assert!(response.contains("value3"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_expiration() {
|
||||
let (mut server, port) = start_test_server("expiration").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test SETEX (expire in 1 second)
|
||||
let response = send_command(&mut stream, "*5\r\n$3\r\nSET\r\n$6\r\nexpkey\r\n$5\r\nvalue\r\n$2\r\nEX\r\n$1\r\n1\r\n").await;
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Test TTL
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nTTL\r\n$6\r\nexpkey\r\n").await;
|
||||
assert!(response.contains("1") || response.contains("0")); // Should be 1 or 0 seconds
|
||||
|
||||
// Test EXISTS
|
||||
let response = send_command(&mut stream, "*2\r\n$6\r\nEXISTS\r\n$6\r\nexpkey\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Wait for expiration
|
||||
sleep(Duration::from_millis(1100)).await;
|
||||
|
||||
// Test GET after expiration
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$6\r\nexpkey\r\n").await;
|
||||
assert!(response.contains("$-1")); // Should be NULL
|
||||
|
||||
// Test TTL after expiration
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nTTL\r\n$6\r\nexpkey\r\n").await;
|
||||
assert!(response.contains("-2")); // Key doesn't exist
|
||||
|
||||
// Test EXISTS after expiration
|
||||
let response = send_command(&mut stream, "*2\r\n$6\r\nEXISTS\r\n$6\r\nexpkey\r\n").await;
|
||||
assert!(response.contains("0"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_scan_operations() {
|
||||
let (mut server, port) = start_test_server("scan").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Set up test data
|
||||
for i in 0..5 {
|
||||
let cmd = format!("*3\r\n$3\r\nSET\r\n$4\r\nkey{}\r\n$6\r\nvalue{}\r\n", i, i);
|
||||
send_command(&mut stream, &cmd).await;
|
||||
}
|
||||
|
||||
// Test SCAN
|
||||
let response = send_command(&mut stream, "*6\r\n$4\r\nSCAN\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await;
|
||||
assert!(response.contains("key"));
|
||||
|
||||
// Test KEYS
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nKEYS\r\n$1\r\n*\r\n").await;
|
||||
assert!(response.contains("key0"));
|
||||
assert!(response.contains("key1"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hscan_operations() {
|
||||
let (mut server, port) = start_test_server("hscan").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Set up hash data
|
||||
for i in 0..3 {
|
||||
let cmd = format!("*4\r\n$4\r\nHSET\r\n$8\r\ntesthash\r\n$6\r\nfield{}\r\n$6\r\nvalue{}\r\n", i, i);
|
||||
send_command(&mut stream, &cmd).await;
|
||||
}
|
||||
|
||||
// Test HSCAN
|
||||
let response = send_command(&mut stream, "*7\r\n$5\r\nHSCAN\r\n$8\r\ntesthash\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await;
|
||||
assert!(response.contains("field"));
|
||||
assert!(response.contains("value"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_transaction_operations() {
|
||||
let (mut server, port) = start_test_server("transaction").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test MULTI
|
||||
let response = send_command(&mut stream, "*1\r\n$5\r\nMULTI\r\n").await;
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Test queued commands
|
||||
let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n").await;
|
||||
assert!(response.contains("QUEUED"));
|
||||
|
||||
let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n").await;
|
||||
assert!(response.contains("QUEUED"));
|
||||
|
||||
// Test EXEC
|
||||
let response = send_command(&mut stream, "*1\r\n$4\r\nEXEC\r\n").await;
|
||||
assert!(response.contains("OK")); // Should contain results of executed commands
|
||||
|
||||
// Verify commands were executed
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n").await;
|
||||
assert!(response.contains("value1"));
|
||||
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$4\r\nkey2\r\n").await;
|
||||
assert!(response.contains("value2"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_discard_transaction() {
|
||||
let (mut server, port) = start_test_server("discard").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test MULTI
|
||||
let response = send_command(&mut stream, "*1\r\n$5\r\nMULTI\r\n").await;
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Test queued command
|
||||
let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$7\r\ndiscard\r\n$5\r\nvalue\r\n").await;
|
||||
assert!(response.contains("QUEUED"));
|
||||
|
||||
// Test DISCARD
|
||||
let response = send_command(&mut stream, "*1\r\n$7\r\nDISCARD\r\n").await;
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Verify command was not executed
|
||||
let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$7\r\ndiscard\r\n").await;
|
||||
assert!(response.contains("$-1")); // Should be NULL
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_type_command() {
|
||||
let (mut server, port) = start_test_server("type").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test string type
|
||||
send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n").await;
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$6\r\nstring\r\n").await;
|
||||
assert!(response.contains("string"));
|
||||
|
||||
// Test hash type
|
||||
send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n").await;
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$4\r\nhash\r\n").await;
|
||||
assert!(response.contains("hash"));
|
||||
|
||||
// Test non-existent key
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$7\r\nnoexist\r\n").await;
|
||||
assert!(response.contains("none"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_config_commands() {
|
||||
let (mut server, port) = start_test_server("config").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test CONFIG GET databases
|
||||
let response = send_command(&mut stream, "*3\r\n$6\r\nCONFIG\r\n$3\r\nGET\r\n$9\r\ndatabases\r\n").await;
|
||||
assert!(response.contains("databases"));
|
||||
assert!(response.contains("16"));
|
||||
|
||||
// Test CONFIG GET dir
|
||||
let response = send_command(&mut stream, "*3\r\n$6\r\nCONFIG\r\n$3\r\nGET\r\n$3\r\ndir\r\n").await;
|
||||
assert!(response.contains("dir"));
|
||||
assert!(response.contains("/tmp/herodb_test_config"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_info_command() {
|
||||
let (mut server, port) = start_test_server("info").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test INFO
|
||||
let response = send_command(&mut stream, "*1\r\n$4\r\nINFO\r\n").await;
|
||||
assert!(response.contains("redis_version"));
|
||||
|
||||
// Test INFO replication
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nINFO\r\n$11\r\nreplication\r\n").await;
|
||||
assert!(response.contains("role:master"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_error_handling() {
|
||||
let (mut server, port) = start_test_server("error").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test WRONGTYPE error - try to use hash command on string
|
||||
send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n").await;
|
||||
let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$6\r\nstring\r\n$5\r\nfield\r\n").await;
|
||||
assert!(response.contains("WRONGTYPE"));
|
||||
|
||||
// Test unknown command
|
||||
let response = send_command(&mut stream, "*1\r\n$7\r\nUNKNOWN\r\n").await;
|
||||
assert!(response.contains("unknown cmd") || response.contains("ERR"));
|
||||
|
||||
// Test EXEC without MULTI
|
||||
let response = send_command(&mut stream, "*1\r\n$4\r\nEXEC\r\n").await;
|
||||
assert!(response.contains("ERR"));
|
||||
|
||||
// Test DISCARD without MULTI
|
||||
let response = send_command(&mut stream, "*1\r\n$7\r\nDISCARD\r\n").await;
|
||||
assert!(response.contains("ERR"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_list_operations() {
|
||||
let (mut server, port) = start_test_server("list").await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test LPUSH
|
||||
let response = send_command(&mut stream, "*4\r\n$5\r\nLPUSH\r\n$4\r\nlist\r\n$1\r\na\r\n$1\r\nb\r\n").await;
|
||||
assert!(response.contains("2")); // 2 elements
|
||||
|
||||
// Test RPUSH
|
||||
let response = send_command(&mut stream, "*4\r\n$5\r\nRPUSH\r\n$4\r\nlist\r\n$1\r\nc\r\n$1\r\nd\r\n").await;
|
||||
assert!(response.contains("4")); // 4 elements
|
||||
|
||||
// Test LLEN
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nLLEN\r\n$4\r\nlist\r\n").await;
|
||||
assert!(response.contains("4"));
|
||||
|
||||
// Test LRANGE
|
||||
let response = send_command(&mut stream, "*4\r\n$6\r\nLRANGE\r\n$4\r\nlist\r\n$1\r\n0\r\n$2\r\n-1\r\n").await;
|
||||
assert_eq!(response, "*4\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n$1\r\nd\r\n");
|
||||
|
||||
// Test LINDEX
|
||||
let response = send_command(&mut stream, "*3\r\n$6\r\nLINDEX\r\n$4\r\nlist\r\n$1\r\n0\r\n").await;
|
||||
assert_eq!(response, "$1\r\nb\r\n");
|
||||
|
||||
// Test LPOP
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nLPOP\r\n$4\r\nlist\r\n").await;
|
||||
assert_eq!(response, "$1\r\nb\r\n");
|
||||
|
||||
// Test RPOP
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nRPOP\r\n$4\r\nlist\r\n").await;
|
||||
assert_eq!(response, "$1\r\nd\r\n");
|
||||
|
||||
// Test LREM
|
||||
send_command(&mut stream, "*3\r\n$5\r\nLPUSH\r\n$4\r\nlist\r\n$1\r\na\r\n").await; // list is now a, c, a
|
||||
let response = send_command(&mut stream, "*4\r\n$4\r\nLREM\r\n$4\r\nlist\r\n$1\r\n1\r\n$1\r\na\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test LTRIM
|
||||
let response = send_command(&mut stream, "*4\r\n$5\r\nLTRIM\r\n$4\r\nlist\r\n$1\r\n0\r\n$1\r\n0\r\n").await;
|
||||
assert!(response.contains("OK"));
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nLLEN\r\n$4\r\nlist\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
}
|
228
herodb/tests/simple_integration_test.rs
Normal file
@@ -0,0 +1,228 @@
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::time::sleep;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

// Helper function to start a test server with clean data directory
async fn start_test_server(test_name: &str) -> (Server, u16) {
    use std::sync::atomic::{AtomicU16, Ordering};
    static PORT_COUNTER: AtomicU16 = AtomicU16::new(17000);

    // Get a unique port for this test
    let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);

    let test_dir = format!("/tmp/herodb_test_{}", test_name);

    // Clean up any existing test data
    let _ = std::fs::remove_dir_all(&test_dir);
    std::fs::create_dir_all(&test_dir).unwrap();

    let option = DBOption {
        dir: test_dir,
        port,
        debug: true,
        encrypt: false,
        encryption_key: None,
    };

    let server = Server::new(option).await;
    (server, port)
}

// Helper function to send Redis command and get response
async fn send_redis_command(port: u16, command: &str) -> String {
    let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap();
    stream.write_all(command.as_bytes()).await.unwrap();

    let mut buffer = [0; 1024];
    let n = stream.read(&mut buffer).await.unwrap();
    String::from_utf8_lossy(&buffer[..n]).to_string()
}

#[tokio::test]
|
||||
async fn test_basic_redis_functionality() {
|
||||
let (mut server, port) = start_test_server("basic").await;
|
||||
|
||||
// Start server in background with timeout
|
||||
let server_handle = tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Accept only a few connections for testing
|
||||
for _ in 0..10 {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Test PING
|
||||
let response = send_redis_command(port, "*1\r\n$4\r\nPING\r\n").await;
|
||||
assert!(response.contains("PONG"));
|
||||
|
||||
// Test SET
|
||||
let response = send_redis_command(port, "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n").await;
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Test GET
|
||||
let response = send_redis_command(port, "*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n").await;
|
||||
assert!(response.contains("value"));
|
||||
|
||||
// Test HSET
|
||||
let response = send_redis_command(port, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test HGET
|
||||
let response = send_redis_command(port, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$5\r\nfield\r\n").await;
|
||||
assert!(response.contains("value"));
|
||||
|
||||
// Test EXISTS
|
||||
let response = send_redis_command(port, "*2\r\n$6\r\nEXISTS\r\n$3\r\nkey\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test TTL
|
||||
let response = send_redis_command(port, "*2\r\n$3\r\nTTL\r\n$3\r\nkey\r\n").await;
|
||||
assert!(response.contains("-1")); // No expiration
|
||||
|
||||
// Test TYPE
|
||||
let response = send_redis_command(port, "*2\r\n$4\r\nTYPE\r\n$3\r\nkey\r\n").await;
|
||||
assert!(response.contains("string"));
|
||||
|
||||
// Test QUIT to close connection gracefully
|
||||
let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap();
|
||||
stream.write_all("*1\r\n$4\r\nQUIT\r\n".as_bytes()).await.unwrap();
|
||||
let mut buffer = [0; 1024];
|
||||
let n = stream.read(&mut buffer).await.unwrap();
|
||||
let response = String::from_utf8_lossy(&buffer[..n]);
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Ensure the stream is closed
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// Stop the server
|
||||
server_handle.abort();
|
||||
|
||||
println!("✅ All basic Redis functionality tests passed!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hash_operations() {
|
||||
let (mut server, port) = start_test_server("hash_ops").await;
|
||||
|
||||
// Start server in background with timeout
|
||||
let server_handle = tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Accept only a few connections for testing
|
||||
for _ in 0..5 {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Test HSET multiple fields
|
||||
let response = send_redis_command(port, "*6\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n$6\r\nfield2\r\n$6\r\nvalue2\r\n").await;
|
||||
assert!(response.contains("2")); // 2 new fields
|
||||
|
||||
// Test HGETALL
|
||||
let response = send_redis_command(port, "*2\r\n$7\r\nHGETALL\r\n$4\r\nhash\r\n").await;
|
||||
assert!(response.contains("field1"));
|
||||
assert!(response.contains("value1"));
|
||||
assert!(response.contains("field2"));
|
||||
assert!(response.contains("value2"));
|
||||
|
||||
// Test HEXISTS
|
||||
let response = send_redis_command(port, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await;
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test HLEN
|
||||
let response = send_redis_command(port, "*2\r\n$4\r\nHLEN\r\n$4\r\nhash\r\n").await;
|
||||
assert!(response.contains("2"));
|
||||
|
||||
// Test HSCAN
|
||||
let response = send_redis_command(port, "*7\r\n$5\r\nHSCAN\r\n$4\r\nhash\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await;
|
||||
assert!(response.contains("field1"));
|
||||
assert!(response.contains("value1"));
|
||||
assert!(response.contains("field2"));
|
||||
assert!(response.contains("value2"));
|
||||
|
||||
// Stop the server
|
||||
// For hash operations, we don't have a persistent stream, so we'll just abort the server.
|
||||
// The server should handle closing its connections.
|
||||
server_handle.abort();
|
||||
|
||||
println!("✅ All hash operations tests passed!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_transaction_operations() {
|
||||
let (mut server, port) = start_test_server("transactions").await;
|
||||
|
||||
// Start server in background with timeout
|
||||
let server_handle = tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Accept only a few connections for testing
|
||||
for _ in 0..5 {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Use a single connection for the transaction
|
||||
let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap();
|
||||
|
||||
// Test MULTI
|
||||
stream.write_all("*1\r\n$5\r\nMULTI\r\n".as_bytes()).await.unwrap();
|
||||
let mut buffer = [0; 1024];
|
||||
let n = stream.read(&mut buffer).await.unwrap();
|
||||
let response = String::from_utf8_lossy(&buffer[..n]);
|
||||
assert!(response.contains("OK"));
|
||||
|
||||
// Test queued commands
|
||||
stream.write_all("*3\r\n$3\r\nSET\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n".as_bytes()).await.unwrap();
|
||||
let n = stream.read(&mut buffer).await.unwrap();
|
||||
let response = String::from_utf8_lossy(&buffer[..n]);
|
||||
assert!(response.contains("QUEUED"));
|
||||
|
||||
stream.write_all("*3\r\n$3\r\nSET\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n".as_bytes()).await.unwrap();
|
||||
let n = stream.read(&mut buffer).await.unwrap();
|
||||
let response = String::from_utf8_lossy(&buffer[..n]);
|
||||
assert!(response.contains("QUEUED"));
|
||||
|
||||
// Test EXEC
|
||||
stream.write_all("*1\r\n$4\r\nEXEC\r\n".as_bytes()).await.unwrap();
|
||||
let n = stream.read(&mut buffer).await.unwrap();
|
||||
let response = String::from_utf8_lossy(&buffer[..n]);
|
||||
assert!(response.contains("OK")); // Should contain array of OK responses
|
||||
|
||||
// Verify commands were executed
|
||||
stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n".as_bytes()).await.unwrap();
|
||||
let n = stream.read(&mut buffer).await.unwrap();
|
||||
let response = String::from_utf8_lossy(&buffer[..n]);
|
||||
assert!(response.contains("value1"));
|
||||
|
||||
stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey2\r\n".as_bytes()).await.unwrap();
|
||||
let n = stream.read(&mut buffer).await.unwrap();
|
||||
let response = String::from_utf8_lossy(&buffer[..n]);
|
||||
assert!(response.contains("value2"));
|
||||
|
||||
// Stop the server
|
||||
server_handle.abort();
|
||||
|
||||
println!("✅ All transaction operations tests passed!");
|
||||
}
|
183
herodb/tests/simple_redis_test.rs
Normal file
@@ -0,0 +1,183 @@
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::sleep;

// Helper function to start a test server with clean data directory
async fn start_test_server(test_name: &str) -> (Server, u16) {
    use std::sync::atomic::{AtomicU16, Ordering};
    static PORT_COUNTER: AtomicU16 = AtomicU16::new(16500);

    let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
    let test_dir = format!("/tmp/herodb_simple_test_{}", test_name);

    // Clean up any existing test data
    let _ = std::fs::remove_dir_all(&test_dir);
    std::fs::create_dir_all(&test_dir).unwrap();

    let option = DBOption {
        dir: test_dir,
        port,
        debug: false,
        encrypt: false,
        encryption_key: None,
    };

    let server = Server::new(option).await;
    (server, port)
}

// Helper function to send command and get response
async fn send_command(stream: &mut TcpStream, command: &str) -> String {
    stream.write_all(command.as_bytes()).await.unwrap();

    let mut buffer = [0; 1024];
    let n = stream.read(&mut buffer).await.unwrap();
    String::from_utf8_lossy(&buffer[..n]).to_string()
}

// Helper function to connect to the test server
async fn connect_to_server(port: u16) -> TcpStream {
    let mut attempts = 0;
    loop {
        match TcpStream::connect(format!("127.0.0.1:{}", port)).await {
            Ok(stream) => return stream,
            Err(_) if attempts < 10 => {
                attempts += 1;
                sleep(Duration::from_millis(100)).await;
            }
            Err(e) => panic!("Failed to connect to test server: {}", e),
        }
    }
}

#[tokio::test]
|
||||
async fn test_basic_ping_simple() {
|
||||
let (mut server, port) = start_test_server("ping").await;
|
||||
|
||||
// Start server in background
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
let response = send_command(&mut stream, "*1\r\n$4\r\nPING\r\n").await;
|
||||
assert!(response.contains("PONG"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hset_clean_db() {
|
||||
let (mut server, port) = start_test_server("hset_clean").await;
|
||||
|
||||
// Start server in background
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test HSET - should return 1 for new field
|
||||
let response = send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await;
|
||||
println!("HSET response: {}", response);
|
||||
assert!(response.contains("1"), "Expected HSET to return 1, got: {}", response);
|
||||
|
||||
// Test HGET
|
||||
let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await;
|
||||
println!("HGET response: {}", response);
|
||||
assert!(response.contains("value1"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_type_command_simple() {
|
||||
let (mut server, port) = start_test_server("type").await;
|
||||
|
||||
// Start server in background
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Test string type
|
||||
send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n").await;
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$6\r\nstring\r\n").await;
|
||||
println!("TYPE string response: {}", response);
|
||||
assert!(response.contains("string"));
|
||||
|
||||
// Test hash type
|
||||
send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n").await;
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$4\r\nhash\r\n").await;
|
||||
println!("TYPE hash response: {}", response);
|
||||
assert!(response.contains("hash"));
|
||||
|
||||
// Test non-existent key
|
||||
let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$7\r\nnoexist\r\n").await;
|
||||
println!("TYPE noexist response: {}", response);
|
||||
assert!(response.contains("none"), "Expected 'none' for non-existent key, got: {}", response);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hexists_simple() {
|
||||
let (mut server, port) = start_test_server("hexists").await;
|
||||
|
||||
// Start server in background
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
if let Ok((stream, _)) = listener.accept().await {
|
||||
let _ = server.handle(stream).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let mut stream = connect_to_server(port).await;
|
||||
|
||||
// Set up hash
|
||||
send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await;
|
||||
|
||||
// Test HEXISTS for existing field
|
||||
let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await;
|
||||
println!("HEXISTS existing field response: {}", response);
|
||||
assert!(response.contains("1"));
|
||||
|
||||
// Test HEXISTS for non-existent field
|
||||
let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n").await;
|
||||
println!("HEXISTS non-existent field response: {}", response);
|
||||
assert!(response.contains("0"), "Expected HEXISTS to return 0 for non-existent field, got: {}", response);
|
||||
}
|
892
herodb/tests/usage_suite.rs
Normal file
@@ -0,0 +1,892 @@
|
||||
use herodb::{options::DBOption, server::Server};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::time::{sleep, Duration};
|
||||
|
||||
// =========================
|
||||
// Helpers
|
||||
// =========================
|
||||
|
||||
async fn start_test_server(test_name: &str) -> (Server, u16) {
|
||||
use std::sync::atomic::{AtomicU16, Ordering};
|
||||
static PORT_COUNTER: AtomicU16 = AtomicU16::new(17100);
|
||||
let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
let test_dir = format!("/tmp/herodb_usage_suite_{}", test_name);
|
||||
let _ = std::fs::remove_dir_all(&test_dir);
|
||||
std::fs::create_dir_all(&test_dir).unwrap();
|
||||
|
||||
let option = DBOption {
|
||||
dir: test_dir,
|
||||
port,
|
||||
debug: false,
|
||||
encrypt: false,
|
||||
encryption_key: None,
|
||||
};
|
||||
|
||||
let server = Server::new(option).await;
|
||||
(server, port)
|
||||
}
|
||||
|
||||
async fn spawn_listener(server: Server, port: u16) {
|
||||
tokio::spawn(async move {
|
||||
let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
|
||||
.await
|
||||
.expect("bind listener");
|
||||
loop {
|
||||
match listener.accept().await {
|
||||
Ok((stream, _)) => {
|
||||
let mut s_clone = server.clone();
|
||||
tokio::spawn(async move {
|
||||
let _ = s_clone.handle(stream).await;
|
||||
});
|
||||
}
|
||||
Err(_e) => break,
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// Build RESP array for args ["PING"] -> "*1\r\n$4\r\nPING\r\n"
|
||||
fn build_resp(args: &[&str]) -> String {
|
||||
let mut s = format!("*{}\r\n", args.len());
|
||||
for a in args {
|
||||
s.push_str(&format!("${}\r\n{}\r\n", a.len(), a));
|
||||
}
|
||||
s
|
||||
}
|
||||
|
||||
async fn connect(port: u16) -> TcpStream {
|
||||
let mut attempts = 0;
|
||||
loop {
|
||||
match TcpStream::connect(format!("127.0.0.1:{}", port)).await {
|
||||
Ok(s) => return s,
|
||||
Err(_) if attempts < 30 => {
|
||||
attempts += 1;
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
Err(e) => panic!("Failed to connect: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_crlf(buf: &[u8], start: usize) -> Option<usize> {
|
||||
let mut i = start;
|
||||
while i + 1 < buf.len() {
|
||||
if buf[i] == b'\r' && buf[i + 1] == b'\n' {
|
||||
return Some(i);
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn parse_number_i64(buf: &[u8], start: usize, end: usize) -> Option<i64> {
|
||||
let s = std::str::from_utf8(&buf[start..end]).ok()?;
|
||||
s.parse::<i64>().ok()
|
||||
}
|
||||
|
||||
// Return number of bytes that make up a complete RESP element starting at 'i', or None if incomplete.
|
||||
fn parse_elem(buf: &[u8], i: usize) -> Option<usize> {
|
||||
if i >= buf.len() {
|
||||
return None;
|
||||
}
|
||||
match buf[i] {
|
||||
b'+' | b'-' | b':' => {
|
||||
let end = find_crlf(buf, i + 1)?;
|
||||
Some(end + 2 - i)
|
||||
}
|
||||
b'$' => {
|
||||
let hdr_end = find_crlf(buf, i + 1)?;
|
||||
let n = parse_number_i64(buf, i + 1, hdr_end)?;
|
||||
if n < 0 {
|
||||
// Null bulk string: only header
|
||||
Some(hdr_end + 2 - i)
|
||||
} else {
|
||||
let need = hdr_end + 2 + (n as usize) + 2;
|
||||
if need <= buf.len() {
|
||||
Some(need - i)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
b'*' => {
|
||||
let hdr_end = find_crlf(buf, i + 1)?;
|
||||
let n = parse_number_i64(buf, i + 1, hdr_end)?;
|
||||
if n < 0 {
|
||||
// Null array: only header
|
||||
Some(hdr_end + 2 - i)
|
||||
} else {
|
||||
let mut j = hdr_end + 2;
|
||||
for _ in 0..(n as usize) {
|
||||
let consumed = parse_elem(buf, j)?;
|
||||
j += consumed;
|
||||
}
|
||||
Some(j - i)
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn resp_frame_len(buf: &[u8]) -> Option<usize> {
|
||||
parse_elem(buf, 0)
|
||||
}
|
||||
|
||||
async fn read_full_resp(stream: &mut TcpStream) -> String {
|
||||
let mut buf: Vec<u8> = Vec::with_capacity(8192);
|
||||
let mut tmp = vec![0u8; 4096];
|
||||
|
||||
loop {
|
||||
if let Some(total) = resp_frame_len(&buf) {
|
||||
if buf.len() >= total {
|
||||
return String::from_utf8_lossy(&buf[..total]).to_string();
|
||||
}
|
||||
}
|
||||
|
||||
match tokio::time::timeout(Duration::from_secs(2), stream.read(&mut tmp)).await {
|
||||
Ok(Ok(n)) => {
|
||||
if n == 0 {
|
||||
if let Some(total) = resp_frame_len(&buf) {
|
||||
if buf.len() >= total {
|
||||
return String::from_utf8_lossy(&buf[..total]).to_string();
|
||||
}
|
||||
}
|
||||
return String::from_utf8_lossy(&buf).to_string();
|
||||
}
|
||||
buf.extend_from_slice(&tmp[..n]);
|
||||
}
|
||||
Ok(Err(e)) => panic!("read error: {}", e),
|
||||
Err(_) => panic!("timeout waiting for reply"),
|
||||
}
|
||||
|
||||
if buf.len() > 8 * 1024 * 1024 {
|
||||
panic!("reply too large");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_cmd(stream: &mut TcpStream, args: &[&str]) -> String {
|
||||
let req = build_resp(args);
|
||||
stream.write_all(req.as_bytes()).await.unwrap();
|
||||
read_full_resp(stream).await
|
||||
}
|
||||
|
||||
// Assert helpers with clearer output
|
||||
fn assert_contains(haystack: &str, needle: &str, ctx: &str) {
|
||||
assert!(
|
||||
haystack.contains(needle),
|
||||
"ASSERT CONTAINS failed: '{}' not found in response.\nContext: {}\nResponse:\n{}",
|
||||
needle,
|
||||
ctx,
|
||||
haystack
|
||||
);
|
||||
}
|
||||
|
||||
fn assert_eq_resp(actual: &str, expected: &str, ctx: &str) {
|
||||
assert!(
|
||||
actual == expected,
|
||||
"ASSERT EQUAL failed.\nContext: {}\nExpected:\n{:?}\nActual:\n{:?}",
|
||||
ctx,
|
||||
expected,
|
||||
actual
|
||||
);
|
||||
}
|
||||
|
||||
/// Extract the payload of a single RESP Bulk String reply.
|
||||
/// Example input:
|
||||
/// "$5\r\nhello\r\n" -> Some("hello".to_string())
|
||||
fn extract_bulk_payload(resp: &str) -> Option<String> {
|
||||
// find first CRLF after "$len"
|
||||
let first = resp.find("\r\n")?;
|
||||
let after = &resp[(first + 2)..];
|
||||
// find next CRLF ending payload
|
||||
let second = after.find("\r\n")?;
|
||||
Some(after[..second].to_string())
|
||||
}
|
||||
|
||||
// =========================
|
||||
// Test suites
|
||||
// =========================
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_01_connection_and_info() {
|
||||
let (server, port) = start_test_server("conn_info").await;
|
||||
spawn_listener(server, port).await;
|
||||
sleep(Duration::from_millis(150)).await;
|
||||
|
||||
let mut s = connect(port).await;
|
||||
|
||||
// redis-cli may send COMMAND DOCS, our server replies empty array; harmless.
|
||||
let pong = send_cmd(&mut s, &["PING"]).await;
|
||||
assert_contains(&pong, "PONG", "PING should return PONG");
|
||||
|
||||
let echo = send_cmd(&mut s, &["ECHO", "hello"]).await;
|
||||
assert_contains(&echo, "hello", "ECHO hello");
|
||||
|
||||
// INFO (general)
|
||||
let info = send_cmd(&mut s, &["INFO"]).await;
|
||||
assert_contains(&info, "redis_version", "INFO should include redis_version");
|
||||
|
||||
// INFO REPLICATION (static stub)
|
||||
let repl = send_cmd(&mut s, &["INFO", "replication"]).await;
|
||||
assert_contains(&repl, "role:master", "INFO replication role");
|
||||
|
||||
// CONFIG GET subset
|
||||
let cfg = send_cmd(&mut s, &["CONFIG", "GET", "databases"]).await;
|
||||
assert_contains(&cfg, "databases", "CONFIG GET databases");
|
||||
assert_contains(&cfg, "16", "CONFIG GET databases value");
|
||||
|
||||
// CLIENT name
|
||||
let setname = send_cmd(&mut s, &["CLIENT", "SETNAME", "myapp"]).await;
|
||||
assert_contains(&setname, "OK", "CLIENT SETNAME");
|
||||
|
||||
let getname = send_cmd(&mut s, &["CLIENT", "GETNAME"]).await;
|
||||
assert_contains(&getname, "myapp", "CLIENT GETNAME");
|
||||
|
||||
// SELECT db
|
||||
let sel = send_cmd(&mut s, &["SELECT", "0"]).await;
|
||||
assert_contains(&sel, "OK", "SELECT 0");
|
||||
|
||||
// QUIT should close connection after sending OK
|
||||
let quit = send_cmd(&mut s, &["QUIT"]).await;
|
||||
assert_contains(&quit, "OK", "QUIT should return OK");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_02_strings_and_expiry() {
|
||||
let (server, port) = start_test_server("strings").await;
|
||||
spawn_listener(server, port).await;
|
||||
sleep(Duration::from_millis(150)).await;
|
||||
|
||||
let mut s = connect(port).await;
|
||||
|
||||
// SET / GET
|
||||
let set = send_cmd(&mut s, &["SET", "user:1", "alice"]).await;
|
||||
assert_contains(&set, "OK", "SET user:1 alice");
|
||||
|
||||
let get = send_cmd(&mut s, &["GET", "user:1"]).await;
|
||||
assert_contains(&get, "alice", "GET user:1");
|
||||
|
||||
// EXISTS / DEL
|
||||
let ex1 = send_cmd(&mut s, &["EXISTS", "user:1"]).await;
|
||||
assert_contains(&ex1, "1", "EXISTS user:1");
|
||||
|
||||
let del = send_cmd(&mut s, &["DEL", "user:1"]).await;
|
||||
assert_contains(&del, "1", "DEL user:1");
|
||||
|
||||
let ex0 = send_cmd(&mut s, &["EXISTS", "user:1"]).await;
|
||||
assert_contains(&ex0, "0", "EXISTS after DEL");
|
||||
|
||||
// INCR behavior
|
||||
let i1 = send_cmd(&mut s, &["INCR", "count"]).await;
|
||||
assert_contains(&i1, "1", "INCR new key -> 1");
|
||||
let i2 = send_cmd(&mut s, &["INCR", "count"]).await;
|
||||
assert_contains(&i2, "2", "INCR existing -> 2");
|
||||
let _ = send_cmd(&mut s, &["SET", "notnum", "abc"]).await;
|
||||
let ierr = send_cmd(&mut s, &["INCR", "notnum"]).await;
|
||||
assert_contains(&ierr, "ERR", "INCR on non-numeric should ERR");
|
||||
|
||||
// Expiration via SET EX
|
||||
let setex = send_cmd(&mut s, &["SET", "tmp:1", "boom", "EX", "1"]).await;
|
||||
assert_contains(&setex, "OK", "SET tmp:1 EX 1");
|
||||
|
||||
let g_immediate = send_cmd(&mut s, &["GET", "tmp:1"]).await;
|
||||
assert_contains(&g_immediate, "boom", "GET tmp:1 immediately");
|
||||
|
||||
let ttl = send_cmd(&mut s, &["TTL", "tmp:1"]).await;
|
||||
// Implementation returns a SimpleString, accept any numeric content
|
||||
assert!(
|
||||
ttl.contains("1") || ttl.contains("0"),
|
||||
"TTL should be 1 or 0, got: {}",
|
||||
ttl
|
||||
);
|
||||
|
||||
sleep(Duration::from_millis(1100)).await;
|
||||
let g_after = send_cmd(&mut s, &["GET", "tmp:1"]).await;
|
||||
assert_contains(&g_after, "$-1", "GET tmp:1 after expiry -> Null");
|
||||
|
||||
// TYPE
|
||||
let _ = send_cmd(&mut s, &["SET", "t", "v"]).await;
|
||||
let ty = send_cmd(&mut s, &["TYPE", "t"]).await;
|
||||
assert_contains(&ty, "string", "TYPE string key");
|
||||
let ty_none = send_cmd(&mut s, &["TYPE", "noexist"]).await;
|
||||
assert_contains(&ty_none, "none", "TYPE nonexistent");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_03_scan_and_keys() {
|
||||
let (server, port) = start_test_server("scan").await;
|
||||
spawn_listener(server, port).await;
|
||||
sleep(Duration::from_millis(150)).await;
|
||||
|
||||
let mut s = connect(port).await;
|
||||
|
||||
for i in 0..5 {
|
||||
let _ = send_cmd(&mut s, &["SET", &format!("key{}", i), &format!("value{}", i)]).await;
|
||||
}
|
||||
|
||||
let scan = send_cmd(&mut s, &["SCAN", "0", "MATCH", "key*", "COUNT", "10"]).await;
|
||||
assert_contains(&scan, "key0", "SCAN should return keys with MATCH");
|
||||
assert_contains(&scan, "key4", "SCAN should return last key");
|
||||
|
||||
let keys = send_cmd(&mut s, &["KEYS", "*"]).await;
|
||||
assert_contains(&keys, "key0", "KEYS * includes key0");
|
||||
assert_contains(&keys, "key4", "KEYS * includes key4");
|
||||
}
|
||||
|
||||
#[tokio::test]
async fn test_04_hashes_suite() {
    let (server, port) = start_test_server("hashes").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // HSET (single, returns number of new fields)
    let h1 = send_cmd(&mut s, &["HSET", "profile:1", "name", "alice"]).await;
    assert_contains(&h1, "1", "HSET new field -> 1");

    // HGET
    let hg = send_cmd(&mut s, &["HGET", "profile:1", "name"]).await;
    assert_contains(&hg, "alice", "HGET existing field");

    // HSET multiple
    let h2 = send_cmd(&mut s, &["HSET", "profile:1", "age", "30", "city", "paris"]).await;
    assert_contains(&h2, "2", "HSET added 2 new fields");

    // HMGET
    let hmg = send_cmd(&mut s, &["HMGET", "profile:1", "name", "age", "city", "nope"]).await;
    assert_contains(&hmg, "alice", "HMGET name");
    assert_contains(&hmg, "30", "HMGET age");
    assert_contains(&hmg, "paris", "HMGET city");
    assert_contains(&hmg, "$-1", "HMGET non-existent -> Null");

    // HGETALL
    let hga = send_cmd(&mut s, &["HGETALL", "profile:1"]).await;
    assert_contains(&hga, "name", "HGETALL contains name");
    assert_contains(&hga, "alice", "HGETALL contains alice");

    // HLEN
    let hlen = send_cmd(&mut s, &["HLEN", "profile:1"]).await;
    assert_contains(&hlen, "3", "HLEN is 3");

    // HEXISTS
    let hex1 = send_cmd(&mut s, &["HEXISTS", "profile:1", "age"]).await;
    assert_contains(&hex1, "1", "HEXISTS age true");
    let hex0 = send_cmd(&mut s, &["HEXISTS", "profile:1", "nope"]).await;
    assert_contains(&hex0, "0", "HEXISTS nope false");

    // HKEYS / HVALS
    let hkeys = send_cmd(&mut s, &["HKEYS", "profile:1"]).await;
    assert_contains(&hkeys, "name", "HKEYS includes name");
    let hvals = send_cmd(&mut s, &["HVALS", "profile:1"]).await;
    assert_contains(&hvals, "alice", "HVALS includes alice");

    // HSETNX
    let hnx0 = send_cmd(&mut s, &["HSETNX", "profile:1", "name", "bob"]).await;
    assert_contains(&hnx0, "0", "HSETNX existing field -> 0");
    let hnx1 = send_cmd(&mut s, &["HSETNX", "profile:1", "nickname", "ali"]).await;
    assert_contains(&hnx1, "1", "HSETNX new field -> 1");

    // HSCAN
    let hscan = send_cmd(&mut s, &["HSCAN", "profile:1", "0", "MATCH", "n*", "COUNT", "10"]).await;
    assert_contains(&hscan, "name", "HSCAN matches fields starting with n");
    assert_contains(&hscan, "nickname", "HSCAN nickname present");

    // HDEL
    let hdel = send_cmd(&mut s, &["HDEL", "profile:1", "city", "age"]).await;
    assert_contains(&hdel, "2", "HDEL removed two fields");
}

#[tokio::test]
async fn test_05_lists_suite_including_blpop() {
    let (server, port) = start_test_server("lists").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut a = connect(port).await;

    // LPUSH / RPUSH / LLEN
    let lp = send_cmd(&mut a, &["LPUSH", "q:jobs", "a", "b"]).await;
    assert_contains(&lp, "2", "LPUSH added 2, length 2");

    let rp = send_cmd(&mut a, &["RPUSH", "q:jobs", "c"]).await;
    assert_contains(&rp, "3", "RPUSH now length 3");

    let llen = send_cmd(&mut a, &["LLEN", "q:jobs"]).await;
    assert_contains(&llen, "3", "LLEN 3");

    // LINDEX / LRANGE
    let lidx = send_cmd(&mut a, &["LINDEX", "q:jobs", "0"]).await;
    assert_eq_resp(&lidx, "$1\r\nb\r\n", "LINDEX q:jobs 0 should be b");

    let lr = send_cmd(&mut a, &["LRANGE", "q:jobs", "0", "-1"]).await;
    assert_eq_resp(&lr, "*3\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n", "LRANGE q:jobs 0 -1 should be [b,a,c]");

    // LTRIM
    let ltrim = send_cmd(&mut a, &["LTRIM", "q:jobs", "0", "1"]).await;
    assert_contains(&ltrim, "OK", "LTRIM OK");
    let lr_post = send_cmd(&mut a, &["LRANGE", "q:jobs", "0", "-1"]).await;
    assert_eq_resp(&lr_post, "*2\r\n$1\r\nb\r\n$1\r\na\r\n", "After LTRIM, list [b,a]");

    // LREM remove first occurrence of b
    let lrem = send_cmd(&mut a, &["LREM", "q:jobs", "1", "b"]).await;
    assert_contains(&lrem, "1", "LREM removed 1");

    // LPOP and RPOP
    let lpop1 = send_cmd(&mut a, &["LPOP", "q:jobs"]).await;
    assert_contains(&lpop1, "$1\r\na\r\n", "LPOP returns a");
    let rpop_empty = send_cmd(&mut a, &["RPOP", "q:jobs"]).await; // empty now
    assert_contains(&rpop_empty, "$-1", "RPOP on empty -> Null");

    // LPOP with count on empty -> []
    let lpop0 = send_cmd(&mut a, &["LPOP", "q:jobs", "2"]).await;
    assert_eq_resp(&lpop0, "*0\r\n", "LPOP with count on empty returns empty array");

    // BLPOP: block on one client, push from another
    let c1 = connect(port).await;
    let mut c2 = connect(port).await;

    // Start BLPOP on c1
    let blpop_task = tokio::spawn(async move {
        let mut c1_local = c1;
        send_cmd(&mut c1_local, &["BLPOP", "q:block", "5"]).await
    });

    // Give it time to register waiter
    sleep(Duration::from_millis(150)).await;

    // Push from c2 to wake BLPOP
    let _ = send_cmd(&mut c2, &["LPUSH", "q:block", "x"]).await;

    // Await BLPOP result
    let blpop_res = blpop_task.await.expect("BLPOP task join");
    assert_contains(&blpop_res, "q:block", "BLPOP returned key");
    assert_contains(&blpop_res, "x", "BLPOP returned element");
}

#[tokio::test]
async fn test_06_flushdb_suite() {
    let (server, port) = start_test_server("flushdb").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    let _ = send_cmd(&mut s, &["SET", "k1", "v1"]).await;
    let _ = send_cmd(&mut s, &["HSET", "h1", "f", "v"]).await;
    let _ = send_cmd(&mut s, &["LPUSH", "l1", "a"]).await;

    let keys_before = send_cmd(&mut s, &["KEYS", "*"]).await;
    assert_contains(&keys_before, "k1", "have string key before FLUSHDB");
    assert_contains(&keys_before, "h1", "have hash key before FLUSHDB");
    assert_contains(&keys_before, "l1", "have list key before FLUSHDB");

    let fl = send_cmd(&mut s, &["FLUSHDB"]).await;
    assert_contains(&fl, "OK", "FLUSHDB OK");

    let keys_after = send_cmd(&mut s, &["KEYS", "*"]).await;
    assert_eq_resp(&keys_after, "*0\r\n", "DB should be empty after FLUSHDB");
}

#[tokio::test]
async fn test_07_age_stateless_suite() {
    let (server, port) = start_test_server("age_stateless").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // GENENC -> [recipient, identity]
    let gen = send_cmd(&mut s, &["AGE", "GENENC"]).await;
    assert!(
        gen.starts_with("*2\r\n$"),
        "AGE GENENC should return array [recipient, identity], got:\n{}",
        gen
    );

    // Parse simple RESP array of two bulk strings to extract keys
    fn parse_two_bulk_array(resp: &str) -> (String, String) {
        // naive parse for tests
        let mut lines = resp.lines();
        let _ = lines.next(); // *2
        // $len
        let _ = lines.next();
        let recip = lines.next().unwrap_or("").to_string();
        let _ = lines.next();
        let ident = lines.next().unwrap_or("").to_string();
        (recip, ident)
    }
    let (recipient, identity) = parse_two_bulk_array(&gen);
    assert!(
        recipient.starts_with("age1") && identity.starts_with("AGE-SECRET-KEY-1"),
        "Unexpected AGE key formats.\nrecipient: {}\nidentity: {}",
        recipient,
        identity
    );

    // ENCRYPT / DECRYPT
    let ct = send_cmd(&mut s, &["AGE", "ENCRYPT", &recipient, "hello world"]).await;
    let ct_b64 = extract_bulk_payload(&ct).expect("Failed to parse bulk payload from ENCRYPT");
    let pt = send_cmd(&mut s, &["AGE", "DECRYPT", &identity, &ct_b64]).await;
    assert_contains(&pt, "hello world", "AGE DECRYPT round-trip");

    // GENSIGN -> [verify_pub_b64, sign_secret_b64]
    let gensign = send_cmd(&mut s, &["AGE", "GENSIGN"]).await;
    let (verify_pub, sign_secret) = parse_two_bulk_array(&gensign);
    assert!(
        !verify_pub.is_empty() && !sign_secret.is_empty(),
        "GENSIGN returned empty keys"
    );

    // SIGN / VERIFY
    let sig = send_cmd(&mut s, &["AGE", "SIGN", &sign_secret, "msg"]).await;
    let sig_b64 = extract_bulk_payload(&sig).expect("Failed to parse bulk payload from SIGN");
    let v_ok = send_cmd(&mut s, &["AGE", "VERIFY", &verify_pub, "msg", &sig_b64]).await;
    assert_contains(&v_ok, "1", "VERIFY should be 1 for valid signature");

    let v_bad = send_cmd(&mut s, &["AGE", "VERIFY", &verify_pub, "tampered", &sig_b64]).await;
    assert_contains(&v_bad, "0", "VERIFY should be 0 for invalid message/signature");
}

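// Note (added for clarity; not part of the original test file): parse_two_bulk_array
// above assumes the reply is framed exactly as a two-element array of bulk strings,
//   *2\r\n$<len>\r\n<recipient>\r\n$<len>\r\n<identity>\r\n
// and simply walks it line by line (*2, $len, payload, $len, payload). It is a
// test-only shortcut, not a general RESP parser.
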
#[tokio::test]
async fn test_08_age_persistent_named_suite() {
    let (server, port) = start_test_server("age_persistent").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // KEYGEN + ENCRYPTNAME/DECRYPTNAME
    let kg = send_cmd(&mut s, &["AGE", "KEYGEN", "app1"]).await;
    assert!(
        kg.starts_with("*2\r\n"),
        "AGE KEYGEN should return [recipient, identity], got:\n{}",
        kg
    );

    let ct = send_cmd(&mut s, &["AGE", "ENCRYPTNAME", "app1", "hello"]).await;
    let ct_b64 = extract_bulk_payload(&ct).expect("Failed to parse bulk payload from ENCRYPTNAME");
    let pt = send_cmd(&mut s, &["AGE", "DECRYPTNAME", "app1", &ct_b64]).await;
    assert_contains(&pt, "hello", "DECRYPTNAME round-trip");

    // SIGNKEYGEN + SIGNNAME/VERIFYNAME
    let skg = send_cmd(&mut s, &["AGE", "SIGNKEYGEN", "app1"]).await;
    assert!(
        skg.starts_with("*2\r\n"),
        "AGE SIGNKEYGEN should return [verify_pub, sign_secret], got:\n{}",
        skg
    );

    let sig = send_cmd(&mut s, &["AGE", "SIGNNAME", "app1", "m"]).await;
    let sig_b64 = extract_bulk_payload(&sig).expect("Failed to parse bulk payload from SIGNNAME");
    let v1 = send_cmd(&mut s, &["AGE", "VERIFYNAME", "app1", "m", &sig_b64]).await;
    assert_contains(&v1, "1", "VERIFYNAME valid => 1");

    let v0 = send_cmd(&mut s, &["AGE", "VERIFYNAME", "app1", "bad", &sig_b64]).await;
    assert_contains(&v0, "0", "VERIFYNAME invalid => 0");

    // AGE LIST
    let lst = send_cmd(&mut s, &["AGE", "LIST"]).await;
    assert_contains(&lst, "encpub", "AGE LIST label encpub");
    assert_contains(&lst, "app1", "AGE LIST includes app1");
}

#[tokio::test]
async fn test_10_expire_pexpire_persist() {
    let (server, port) = start_test_server("expire_suite").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // EXPIRE: seconds
    let _ = send_cmd(&mut s, &["SET", "exp:s", "v"]).await;
    let ex = send_cmd(&mut s, &["EXPIRE", "exp:s", "1"]).await;
    assert_contains(&ex, "1", "EXPIRE exp:s 1 -> 1 (applied)");
    let ttl1 = send_cmd(&mut s, &["TTL", "exp:s"]).await;
    assert!(
        ttl1.contains("1") || ttl1.contains("0"),
        "TTL exp:s should be 1 or 0, got: {}",
        ttl1
    );
    sleep(Duration::from_millis(1100)).await;
    let get_after = send_cmd(&mut s, &["GET", "exp:s"]).await;
    assert_contains(&get_after, "$-1", "GET after expiry should be Null");
    let ttl_after = send_cmd(&mut s, &["TTL", "exp:s"]).await;
    assert_contains(&ttl_after, "-2", "TTL after expiry -> -2");
    let exists_after = send_cmd(&mut s, &["EXISTS", "exp:s"]).await;
    assert_contains(&exists_after, "0", "EXISTS after expiry -> 0");

    // PEXPIRE: milliseconds
    let _ = send_cmd(&mut s, &["SET", "exp:ms", "v"]).await;
    let pex = send_cmd(&mut s, &["PEXPIRE", "exp:ms", "1500"]).await;
    assert_contains(&pex, "1", "PEXPIRE exp:ms 1500 -> 1 (applied)");
    let ttl_ms1 = send_cmd(&mut s, &["TTL", "exp:ms"]).await;
    assert!(
        ttl_ms1.contains("1") || ttl_ms1.contains("0"),
        "TTL exp:ms should be 1 or 0 soon after PEXPIRE, got: {}",
        ttl_ms1
    );
    sleep(Duration::from_millis(1600)).await;
    let exists_ms_after = send_cmd(&mut s, &["EXISTS", "exp:ms"]).await;
    assert_contains(&exists_ms_after, "0", "EXISTS exp:ms after ms expiry -> 0");

    // PERSIST: remove expiration
    let _ = send_cmd(&mut s, &["SET", "exp:persist", "v"]).await;
    let _ = send_cmd(&mut s, &["EXPIRE", "exp:persist", "5"]).await;
    let ttl_pre = send_cmd(&mut s, &["TTL", "exp:persist"]).await;
    assert!(
        ttl_pre.contains("5") || ttl_pre.contains("4") || ttl_pre.contains("3") || ttl_pre.contains("2") || ttl_pre.contains("1") || ttl_pre.contains("0"),
        "TTL exp:persist should be >=0 before persist, got: {}",
        ttl_pre
    );
    let persist1 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await;
    assert_contains(&persist1, "1", "PERSIST should remove expiration");
    let ttl_post = send_cmd(&mut s, &["TTL", "exp:persist"]).await;
    assert_contains(&ttl_post, "-1", "TTL after PERSIST -> -1 (no expiration)");
    // Second persist should return 0 (nothing to remove)
    let persist2 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await;
    assert_contains(&persist2, "0", "PERSIST again -> 0 (no expiration to remove)");
}

#[tokio::test]
async fn test_11_set_with_options() {
    let (server, port) = start_test_server("set_opts").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // SET with GET on non-existing key -> returns Null, sets value
    let set_get1 = send_cmd(&mut s, &["SET", "s1", "v1", "GET"]).await;
    assert_contains(&set_get1, "$-1", "SET s1 v1 GET returns Null when key didn't exist");
    let g1 = send_cmd(&mut s, &["GET", "s1"]).await;
    assert_contains(&g1, "v1", "GET s1 after first SET");

    // SET with GET should return old value, then set to new
    let set_get2 = send_cmd(&mut s, &["SET", "s1", "v2", "GET"]).await;
    assert_contains(&set_get2, "v1", "SET s1 v2 GET returns previous value v1");
    let g2 = send_cmd(&mut s, &["GET", "s1"]).await;
    assert_contains(&g2, "v2", "GET s1 now v2");

    // NX prevents update when key exists; with GET should return Null and not change
    let set_nx = send_cmd(&mut s, &["SET", "s1", "v3", "NX", "GET"]).await;
    assert_contains(&set_nx, "$-1", "SET s1 v3 NX GET returns Null when not set");
    let g3 = send_cmd(&mut s, &["GET", "s1"]).await;
    assert_contains(&g3, "v2", "GET s1 remains v2 after NX prevented write");

    // NX allows set when key does not exist
    let set_nx2 = send_cmd(&mut s, &["SET", "s2", "v10", "NX"]).await;
    assert_contains(&set_nx2, "OK", "SET s2 v10 NX -> OK for new key");
    let g4 = send_cmd(&mut s, &["GET", "s2"]).await;
    assert_contains(&g4, "v10", "GET s2 is v10");

    // XX requires existing key; with GET returns old value and sets new
    let set_xx = send_cmd(&mut s, &["SET", "s2", "v11", "XX", "GET"]).await;
    assert_contains(&set_xx, "v10", "SET s2 v11 XX GET returns previous v10");
    let g5 = send_cmd(&mut s, &["GET", "s2"]).await;
    assert_contains(&g5, "v11", "GET s2 is now v11");

    // PX expiration path via SET options
    let set_px = send_cmd(&mut s, &["SET", "s3", "vpx", "PX", "500"]).await;
    assert_contains(&set_px, "OK", "SET s3 vpx PX 500 -> OK");
    let ttl_px1 = send_cmd(&mut s, &["TTL", "s3"]).await;
    assert!(
        ttl_px1.contains("0") || ttl_px1.contains("1"),
        "TTL s3 immediately after PX should be 1 or 0, got: {}",
        ttl_px1
    );
    sleep(Duration::from_millis(650)).await;
    let g6 = send_cmd(&mut s, &["GET", "s3"]).await;
    assert_contains(&g6, "$-1", "GET s3 after PX expiry -> Null");
}

#[tokio::test]
async fn test_09_mget_mset_and_variadic_exists_del() {
    let (server, port) = start_test_server("mget_mset_variadic").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // MSET multiple keys
    let mset = send_cmd(&mut s, &["MSET", "k1", "v1", "k2", "v2", "k3", "v3"]).await;
    assert_contains(&mset, "OK", "MSET k1 v1 k2 v2 k3 v3 -> OK");

    // MGET should return values and Null for missing
    let mget = send_cmd(&mut s, &["MGET", "k1", "k2", "nope", "k3"]).await;
    // Expect an array with 4 entries; verify payloads
    assert_contains(&mget, "v1", "MGET k1");
    assert_contains(&mget, "v2", "MGET k2");
    assert_contains(&mget, "v3", "MGET k3");
    assert_contains(&mget, "$-1", "MGET missing returns Null");

    // EXISTS variadic: count how many exist
    let exists_multi = send_cmd(&mut s, &["EXISTS", "k1", "nope", "k3"]).await;
    // Server returns SimpleString numeric, e.g. +2
    assert_contains(&exists_multi, "2", "EXISTS k1 nope k3 -> 2");

    // DEL variadic: delete multiple keys, return count deleted
    let del_multi = send_cmd(&mut s, &["DEL", "k1", "k3", "nope"]).await;
    assert_contains(&del_multi, "2", "DEL k1 k3 nope -> 2");

    // Verify deletion
    let exists_after = send_cmd(&mut s, &["EXISTS", "k1", "k3"]).await;
    assert_contains(&exists_after, "0", "EXISTS k1 k3 after DEL -> 0");

    // MGET after deletion should include Nulls for deleted keys
    let mget_after = send_cmd(&mut s, &["MGET", "k1", "k2", "k3"]).await;
    assert_contains(&mget_after, "$-1", "MGET k1 after DEL -> Null");
    assert_contains(&mget_after, "v2", "MGET k2 remains");
    assert_contains(&mget_after, "$-1", "MGET k3 after DEL -> Null");
}

#[tokio::test]
async fn test_12_hash_incr() {
    let (server, port) = start_test_server("hash_incr").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // Integer increments
    let _ = send_cmd(&mut s, &["HSET", "hinc", "a", "1"]).await;
    let r1 = send_cmd(&mut s, &["HINCRBY", "hinc", "a", "2"]).await;
    assert_contains(&r1, "3", "HINCRBY hinc a 2 -> 3");

    let r2 = send_cmd(&mut s, &["HINCRBY", "hinc", "a", "-1"]).await;
    assert_contains(&r2, "2", "HINCRBY hinc a -1 -> 2");

    let r3 = send_cmd(&mut s, &["HINCRBY", "hinc", "b", "5"]).await;
    assert_contains(&r3, "5", "HINCRBY hinc b 5 -> 5");

    // HINCRBY error on non-integer field
    let _ = send_cmd(&mut s, &["HSET", "hinc", "s", "x"]).await;
    let r_err = send_cmd(&mut s, &["HINCRBY", "hinc", "s", "1"]).await;
    assert_contains(&r_err, "ERR", "HINCRBY on non-integer field should ERR");

    // Float increments
    let r4 = send_cmd(&mut s, &["HINCRBYFLOAT", "hinc", "f", "1.5"]).await;
    assert_contains(&r4, "1.5", "HINCRBYFLOAT hinc f 1.5 -> 1.5");

    let r5 = send_cmd(&mut s, &["HINCRBYFLOAT", "hinc", "f", "2.5"]).await;
    // Could be "4", "4.0", or "4.000000", accept "4" substring
    assert_contains(&r5, "4", "HINCRBYFLOAT hinc f 2.5 -> 4");

    // HINCRBYFLOAT error on non-float field
    let _ = send_cmd(&mut s, &["HSET", "hinc", "notf", "abc"]).await;
    let r6 = send_cmd(&mut s, &["HINCRBYFLOAT", "hinc", "notf", "1"]).await;
    assert_contains(&r6, "ERR", "HINCRBYFLOAT on non-float field should ERR");
}

#[tokio::test]
async fn test_05b_brpop_suite() {
    let (server, port) = start_test_server("lists_brpop").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut a = connect(port).await;

    // RPUSH some initial data, BRPOP should take from the right
    let _ = send_cmd(&mut a, &["RPUSH", "q:rjobs", "1", "2"]).await;
    let br_nonblock = send_cmd(&mut a, &["BRPOP", "q:rjobs", "0"]).await;
    // Should pop the rightmost element "2"
    assert_contains(&br_nonblock, "q:rjobs", "BRPOP returns key");
    assert_contains(&br_nonblock, "2", "BRPOP returns rightmost element");

    // Now test blocking BRPOP: start blocked client, then RPUSH from another client
    let c1 = connect(port).await;
    let mut c2 = connect(port).await;

    // Start BRPOP on c1
    let brpop_task = tokio::spawn(async move {
        let mut c1_local = c1;
        send_cmd(&mut c1_local, &["BRPOP", "q:blockr", "5"]).await
    });

    // Give it time to register waiter
    sleep(Duration::from_millis(150)).await;

    // Push from right to wake BRPOP
    let _ = send_cmd(&mut c2, &["RPUSH", "q:blockr", "X"]).await;

    // Await BRPOP result
    let brpop_res = brpop_task.await.expect("BRPOP task join");
    assert_contains(&brpop_res, "q:blockr", "BRPOP returned key");
    assert_contains(&brpop_res, "X", "BRPOP returned element");
}

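// Note (added for clarity; not part of the original test file): the BLPOP/BRPOP
// replies above are checked only by substring. The expected shape is a two-element
// array [key, element]; e.g. BRPOP on q:blockr yielding "X" would be framed as
//   *2\r\n$8\r\nq:blockr\r\n$1\r\nX\r\n
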
#[tokio::test]
async fn test_13_dbsize() {
    let (server, port) = start_test_server("dbsize").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // Initially empty
    let n0 = send_cmd(&mut s, &["DBSIZE"]).await;
    assert_contains(&n0, "0", "DBSIZE initial should be 0");

    // Add a string, a hash, and a list -> dbsize = 3
    let _ = send_cmd(&mut s, &["SET", "s", "v"]).await;
    let _ = send_cmd(&mut s, &["HSET", "h", "f", "v"]).await;
    let _ = send_cmd(&mut s, &["LPUSH", "l", "a", "b"]).await;

    let n3 = send_cmd(&mut s, &["DBSIZE"]).await;
    assert_contains(&n3, "3", "DBSIZE after adding s,h,l should be 3");

    // Expire the string and wait, dbsize should drop to 2
    let _ = send_cmd(&mut s, &["PEXPIRE", "s", "400"]).await;
    sleep(Duration::from_millis(500)).await;

    let n2 = send_cmd(&mut s, &["DBSIZE"]).await;
    assert_contains(&n2, "2", "DBSIZE after string expiry should be 2");

    // Delete remaining keys and confirm 0
    let _ = send_cmd(&mut s, &["DEL", "h"]).await;
    let _ = send_cmd(&mut s, &["DEL", "l"]).await;

    let n_final = send_cmd(&mut s, &["DBSIZE"]).await;
    assert_contains(&n_final, "0", "DBSIZE after deleting all keys should be 0");
}

#[tokio::test]
async fn test_14_expireat_pexpireat() {
    use std::time::{SystemTime, UNIX_EPOCH};

    let (server, port) = start_test_server("expireat_suite").await;
    spawn_listener(server, port).await;
    sleep(Duration::from_millis(150)).await;

    let mut s = connect(port).await;

    // EXPIREAT: seconds since epoch
    let now_secs = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as i64;
    let _ = send_cmd(&mut s, &["SET", "exp:at:s", "v"]).await;
    let exat = send_cmd(&mut s, &["EXPIREAT", "exp:at:s", &format!("{}", now_secs + 1)]).await;
    assert_contains(&exat, "1", "EXPIREAT exp:at:s now+1s -> 1 (applied)");
    let ttl1 = send_cmd(&mut s, &["TTL", "exp:at:s"]).await;
    assert!(
        ttl1.contains("1") || ttl1.contains("0"),
        "TTL exp:at:s should be 1 or 0 shortly after EXPIREAT, got: {}",
        ttl1
    );
    sleep(Duration::from_millis(1200)).await;
    let exists_after_exat = send_cmd(&mut s, &["EXISTS", "exp:at:s"]).await;
    assert_contains(&exists_after_exat, "0", "EXISTS exp:at:s after EXPIREAT expiry -> 0");

    // PEXPIREAT: milliseconds since epoch
    let now_ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as i64;
    let _ = send_cmd(&mut s, &["SET", "exp:at:ms", "v"]).await;
    let pexat = send_cmd(&mut s, &["PEXPIREAT", "exp:at:ms", &format!("{}", now_ms + 450)]).await;
    assert_contains(&pexat, "1", "PEXPIREAT exp:at:ms now+450ms -> 1 (applied)");
    let ttl2 = send_cmd(&mut s, &["TTL", "exp:at:ms"]).await;
    assert!(
        ttl2.contains("0") || ttl2.contains("1"),
        "TTL exp:at:ms should be 0..1 soon after PEXPIREAT, got: {}",
        ttl2
    );
    sleep(Duration::from_millis(600)).await;
    let exists_after_pexat = send_cmd(&mut s, &["EXISTS", "exp:at:ms"]).await;
    assert_contains(&exists_after_pexat, "0", "EXISTS exp:at:ms after PEXPIREAT expiry -> 0");
}

499 src/cmd.rs
@@ -1,499 +0,0 @@
|
||||
use crate::{error::DBError, protocol::Protocol, server::Server};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Cmd {
|
||||
Ping,
|
||||
Echo(String),
|
||||
Get(String),
|
||||
Set(String, String),
|
||||
SetPx(String, String, u128),
|
||||
SetEx(String, String, u128),
|
||||
Keys,
|
||||
ConfigGet(String),
|
||||
Info(Option<String>),
|
||||
Del(String),
|
||||
Type(String),
|
||||
Incr(String),
|
||||
Multi,
|
||||
Exec,
|
||||
Discard,
|
||||
// Hash commands
|
||||
HSet(String, Vec<(String, String)>),
|
||||
HGet(String, String),
|
||||
HGetAll(String),
|
||||
HDel(String, Vec<String>),
|
||||
HExists(String, String),
|
||||
HKeys(String),
|
||||
HVals(String),
|
||||
HLen(String),
|
||||
HMGet(String, Vec<String>),
|
||||
HSetNx(String, String, String),
|
||||
Scan(u64, Option<String>, Option<u64>), // cursor, pattern, count
|
||||
Unknow,
|
||||
}
|
||||
|
||||
impl Cmd {
|
||||
pub fn from(s: &str) -> Result<(Self, Protocol), DBError> {
|
||||
let protocol = Protocol::from(s)?;
|
||||
match protocol.clone().0 {
|
||||
Protocol::Array(p) => {
|
||||
let cmd = p.into_iter().map(|x| x.decode()).collect::<Vec<_>>();
|
||||
if cmd.is_empty() {
|
||||
return Err(DBError("cmd length is 0".to_string()));
|
||||
}
|
||||
Ok((
|
||||
match cmd[0].to_lowercase().as_str() {
|
||||
"echo" => Cmd::Echo(cmd[1].clone()),
|
||||
"ping" => Cmd::Ping,
|
||||
"get" => Cmd::Get(cmd[1].clone()),
|
||||
"set" => {
|
||||
if cmd.len() == 5 && cmd[3].to_lowercase() == "px" {
|
||||
Cmd::SetPx(cmd[1].clone(), cmd[2].clone(), cmd[4].parse().unwrap())
|
||||
} else if cmd.len() == 5 && cmd[3].to_lowercase() == "ex" {
|
||||
Cmd::SetEx(cmd[1].clone(), cmd[2].clone(), cmd[4].parse().unwrap())
|
||||
} else if cmd.len() == 3 {
|
||||
Cmd::Set(cmd[1].clone(), cmd[2].clone())
|
||||
} else {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
}
|
||||
}
|
||||
"config" => {
|
||||
if cmd.len() != 3 || cmd[1].to_lowercase() != "get" {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
} else {
|
||||
Cmd::ConfigGet(cmd[2].clone())
|
||||
}
|
||||
}
|
||||
"keys" => {
|
||||
if cmd.len() != 2 || cmd[1] != "*" {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
} else {
|
||||
Cmd::Keys
|
||||
}
|
||||
}
|
||||
"info" => {
|
||||
let section = if cmd.len() == 2 {
|
||||
Some(cmd[1].clone())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
Cmd::Info(section)
|
||||
}
|
||||
"del" => {
|
||||
if cmd.len() != 2 {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
}
|
||||
Cmd::Del(cmd[1].clone())
|
||||
}
|
||||
"type" => {
|
||||
if cmd.len() != 2 {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
}
|
||||
Cmd::Type(cmd[1].clone())
|
||||
}
|
||||
"incr" => {
|
||||
if cmd.len() != 2 {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
}
|
||||
Cmd::Incr(cmd[1].clone())
|
||||
}
|
||||
"multi" => {
|
||||
if cmd.len() != 1 {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
}
|
||||
Cmd::Multi
|
||||
}
|
||||
"exec" => {
|
||||
if cmd.len() != 1 {
|
||||
return Err(DBError(format!("unsupported cmd {:?}", cmd)));
|
||||
}
|
||||
Cmd::Exec
|
||||
}
|
||||
"discard" => Cmd::Discard,
|
||||
// Hash commands
|
||||
"hset" => {
|
||||
if cmd.len() < 4 || (cmd.len() - 2) % 2 != 0 {
|
||||
return Err(DBError(format!("wrong number of arguments for HSET command")));
|
||||
}
|
||||
let mut pairs = Vec::new();
|
||||
let mut i = 2;
|
||||
while i < cmd.len() - 1 {
|
||||
pairs.push((cmd[i].clone(), cmd[i + 1].clone()));
|
||||
i += 2;
|
||||
}
|
||||
Cmd::HSet(cmd[1].clone(), pairs)
|
||||
}
|
||||
"hget" => {
|
||||
if cmd.len() != 3 {
|
||||
return Err(DBError(format!("wrong number of arguments for HGET command")));
|
||||
}
|
||||
Cmd::HGet(cmd[1].clone(), cmd[2].clone())
|
||||
}
|
||||
"hgetall" => {
|
||||
if cmd.len() != 2 {
|
||||
return Err(DBError(format!("wrong number of arguments for HGETALL command")));
|
||||
}
|
||||
Cmd::HGetAll(cmd[1].clone())
|
||||
}
|
||||
"hdel" => {
|
||||
if cmd.len() < 3 {
|
||||
return Err(DBError(format!("wrong number of arguments for HDEL command")));
|
||||
}
|
||||
Cmd::HDel(cmd[1].clone(), cmd[2..].to_vec())
|
||||
}
|
||||
"hexists" => {
|
||||
if cmd.len() != 3 {
|
||||
return Err(DBError(format!("wrong number of arguments for HEXISTS command")));
|
||||
}
|
||||
Cmd::HExists(cmd[1].clone(), cmd[2].clone())
|
||||
}
|
||||
"hkeys" => {
|
||||
if cmd.len() != 2 {
|
||||
return Err(DBError(format!("wrong number of arguments for HKEYS command")));
|
||||
}
|
||||
Cmd::HKeys(cmd[1].clone())
|
||||
}
|
||||
"hvals" => {
|
||||
if cmd.len() != 2 {
|
||||
return Err(DBError(format!("wrong number of arguments for HVALS command")));
|
||||
}
|
||||
Cmd::HVals(cmd[1].clone())
|
||||
}
|
||||
"hlen" => {
|
||||
if cmd.len() != 2 {
|
||||
return Err(DBError(format!("wrong number of arguments for HLEN command")));
|
||||
}
|
||||
Cmd::HLen(cmd[1].clone())
|
||||
}
|
||||
"hmget" => {
|
||||
if cmd.len() < 3 {
|
||||
return Err(DBError(format!("wrong number of arguments for HMGET command")));
|
||||
}
|
||||
Cmd::HMGet(cmd[1].clone(), cmd[2..].to_vec())
|
||||
}
|
||||
"hsetnx" => {
|
||||
if cmd.len() != 4 {
|
||||
return Err(DBError(format!("wrong number of arguments for HSETNX command")));
|
||||
}
|
||||
Cmd::HSetNx(cmd[1].clone(), cmd[2].clone(), cmd[3].clone())
|
||||
}
|
||||
"scan" => {
|
||||
if cmd.len() < 2 {
|
||||
return Err(DBError(format!("wrong number of arguments for SCAN command")));
|
||||
}
|
||||
|
||||
let cursor = cmd[1].parse::<u64>().map_err(|_|
|
||||
DBError("ERR invalid cursor".to_string()))?;
|
||||
|
||||
let mut pattern = None;
|
||||
let mut count = None;
|
||||
let mut i = 2;
|
||||
|
||||
while i < cmd.len() {
|
||||
match cmd[i].to_lowercase().as_str() {
|
||||
"match" => {
|
||||
if i + 1 >= cmd.len() {
|
||||
return Err(DBError("ERR syntax error".to_string()));
|
||||
}
|
||||
pattern = Some(cmd[i + 1].clone());
|
||||
i += 2;
|
||||
}
|
||||
"count" => {
|
||||
if i + 1 >= cmd.len() {
|
||||
return Err(DBError("ERR syntax error".to_string()));
|
||||
}
|
||||
count = Some(cmd[i + 1].parse::<u64>().map_err(|_|
|
||||
DBError("ERR value is not an integer or out of range".to_string()))?);
|
||||
i += 2;
|
||||
}
|
||||
_ => {
|
||||
return Err(DBError(format!("ERR syntax error")));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Cmd::Scan(cursor, pattern, count)
|
||||
}
|
||||
_ => Cmd::Unknow,
|
||||
},
|
||||
protocol.0,
|
||||
))
|
||||
}
|
||||
_ => Err(DBError(format!(
|
||||
"fail to parse as cmd for {:?}",
|
||||
protocol.0
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run(
|
||||
&self,
|
||||
server: &Server,
|
||||
protocol: Protocol,
|
||||
queued_cmd: &mut Option<Vec<(Cmd, Protocol)>>,
|
||||
) -> Result<Protocol, DBError> {
|
||||
// Handle queued commands for transactions
|
||||
if queued_cmd.is_some()
|
||||
&& !matches!(self, Cmd::Exec)
|
||||
&& !matches!(self, Cmd::Multi)
|
||||
&& !matches!(self, Cmd::Discard)
|
||||
{
|
||||
queued_cmd
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.push((self.clone(), protocol.clone()));
|
||||
return Ok(Protocol::SimpleString("QUEUED".to_string()));
|
||||
}
|
||||
|
||||
match self {
|
||||
Cmd::Ping => Ok(Protocol::SimpleString("PONG".to_string())),
|
||||
Cmd::Echo(s) => Ok(Protocol::SimpleString(s.clone())),
|
||||
Cmd::Get(k) => get_cmd(server, k).await,
|
||||
Cmd::Set(k, v) => set_cmd(server, k, v).await,
|
||||
Cmd::SetPx(k, v, x) => set_px_cmd(server, k, v, x).await,
|
||||
Cmd::SetEx(k, v, x) => set_ex_cmd(server, k, v, x).await,
|
||||
Cmd::Del(k) => del_cmd(server, k).await,
|
||||
Cmd::ConfigGet(name) => config_get_cmd(name, server),
|
||||
Cmd::Keys => keys_cmd(server).await,
|
||||
Cmd::Info(section) => info_cmd(section),
|
||||
Cmd::Type(k) => type_cmd(server, k).await,
|
||||
Cmd::Incr(key) => incr_cmd(server, key).await,
|
||||
Cmd::Multi => {
|
||||
*queued_cmd = Some(Vec::<(Cmd, Protocol)>::new());
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
}
|
||||
Cmd::Exec => exec_cmd(queued_cmd, server).await,
|
||||
Cmd::Discard => {
|
||||
if queued_cmd.is_some() {
|
||||
*queued_cmd = None;
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
} else {
|
||||
Ok(Protocol::err("ERR DISCARD without MULTI"))
|
||||
}
|
||||
}
|
||||
// Hash commands
|
||||
Cmd::HSet(key, pairs) => hset_cmd(server, key, pairs).await,
|
||||
Cmd::HGet(key, field) => hget_cmd(server, key, field).await,
|
||||
Cmd::HGetAll(key) => hgetall_cmd(server, key).await,
|
||||
Cmd::HDel(key, fields) => hdel_cmd(server, key, fields).await,
|
||||
Cmd::HExists(key, field) => hexists_cmd(server, key, field).await,
|
||||
Cmd::HKeys(key) => hkeys_cmd(server, key).await,
|
||||
Cmd::HVals(key) => hvals_cmd(server, key).await,
|
||||
Cmd::HLen(key) => hlen_cmd(server, key).await,
|
||||
Cmd::HMGet(key, fields) => hmget_cmd(server, key, fields).await,
|
||||
Cmd::HSetNx(key, field, value) => hsetnx_cmd(server, key, field, value).await,
|
||||
Cmd::Scan(cursor, pattern, count) => scan_cmd(server, cursor, pattern.as_deref(), count).await,
|
||||
Cmd::Unknow => Ok(Protocol::err("unknown cmd")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn exec_cmd(
|
||||
queued_cmd: &mut Option<Vec<(Cmd, Protocol)>>,
|
||||
server: &Server,
|
||||
) -> Result<Protocol, DBError> {
|
||||
if queued_cmd.is_some() {
|
||||
let mut vec = Vec::new();
|
||||
for (cmd, protocol) in queued_cmd.as_ref().unwrap() {
|
||||
let res = Box::pin(cmd.run(server, protocol.clone(), &mut None)).await?;
|
||||
vec.push(res);
|
||||
}
|
||||
*queued_cmd = None;
|
||||
Ok(Protocol::Array(vec))
|
||||
} else {
|
||||
Ok(Protocol::err("ERR EXEC without MULTI"))
|
||||
}
|
||||
}
|
||||
|
||||
async fn incr_cmd(server: &Server, key: &String) -> Result<Protocol, DBError> {
|
||||
let current_value = server.storage.get(key)?;
|
||||
|
||||
let new_value = match current_value {
|
||||
Some(v) => {
|
||||
match v.parse::<i64>() {
|
||||
Ok(num) => num + 1,
|
||||
Err(_) => return Ok(Protocol::err("ERR value is not an integer or out of range")),
|
||||
}
|
||||
}
|
||||
None => 1,
|
||||
};
|
||||
|
||||
server.storage.set(key.clone(), new_value.to_string())?;
|
||||
Ok(Protocol::SimpleString(new_value.to_string()))
|
||||
}
|
||||
|
||||
fn config_get_cmd(name: &String, server: &Server) -> Result<Protocol, DBError> {
|
||||
match name.as_str() {
|
||||
"dir" => Ok(Protocol::Array(vec![
|
||||
Protocol::BulkString(name.clone()),
|
||||
Protocol::BulkString(server.option.dir.clone()),
|
||||
])),
|
||||
"dbfilename" => Ok(Protocol::Array(vec![
|
||||
Protocol::BulkString(name.clone()),
|
||||
Protocol::BulkString("herodb.redb".to_string()),
|
||||
])),
|
||||
_ => Err(DBError(format!("unsupported config {:?}", name))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn keys_cmd(server: &Server) -> Result<Protocol, DBError> {
|
||||
let keys = server.storage.keys("*")?;
|
||||
Ok(Protocol::Array(
|
||||
keys.into_iter().map(Protocol::BulkString).collect(),
|
||||
))
|
||||
}
|
||||
|
||||
fn info_cmd(section: &Option<String>) -> Result<Protocol, DBError> {
|
||||
match section {
|
||||
Some(s) => match s.as_str() {
|
||||
"replication" => Ok(Protocol::BulkString(
|
||||
"role:master\nmaster_replid:8371b4fb1155b71f4a04d3e1bc3e18c4a990aeea\nmaster_repl_offset:0\n".to_string()
|
||||
)),
|
||||
_ => Err(DBError(format!("unsupported section {:?}", s))),
|
||||
},
|
||||
None => Ok(Protocol::BulkString("# Server\nredis_version:7.0.0\n".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn type_cmd(server: &Server, k: &String) -> Result<Protocol, DBError> {
|
||||
match server.storage.get_key_type(k)? {
|
||||
Some(type_str) => Ok(Protocol::SimpleString(type_str)),
|
||||
None => Ok(Protocol::SimpleString("none".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn del_cmd(server: &Server, k: &str) -> Result<Protocol, DBError> {
|
||||
server.storage.del(k.to_string())?;
|
||||
Ok(Protocol::SimpleString("1".to_string()))
|
||||
}
|
||||
|
||||
async fn set_ex_cmd(
|
||||
server: &Server,
|
||||
k: &str,
|
||||
v: &str,
|
||||
x: &u128,
|
||||
) -> Result<Protocol, DBError> {
|
||||
server.storage.setx(k.to_string(), v.to_string(), *x * 1000)?;
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
}
|
||||
|
||||
async fn set_px_cmd(
|
||||
server: &Server,
|
||||
k: &str,
|
||||
v: &str,
|
||||
x: &u128,
|
||||
) -> Result<Protocol, DBError> {
|
||||
server.storage.setx(k.to_string(), v.to_string(), *x)?;
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
}
|
||||
|
||||
async fn set_cmd(server: &Server, k: &str, v: &str) -> Result<Protocol, DBError> {
|
||||
server.storage.set(k.to_string(), v.to_string())?;
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
}
|
||||
|
||||
async fn get_cmd(server: &Server, k: &str) -> Result<Protocol, DBError> {
|
||||
let v = server.storage.get(k)?;
|
||||
Ok(v.map_or(Protocol::Null, Protocol::BulkString))
|
||||
}
|
||||
|
||||
// Hash command implementations
|
||||
async fn hset_cmd(server: &Server, key: &str, pairs: &[(String, String)]) -> Result<Protocol, DBError> {
|
||||
let new_fields = server.storage.hset(key, pairs)?;
|
||||
Ok(Protocol::SimpleString(new_fields.to_string()))
|
||||
}
|
||||
|
||||
async fn hget_cmd(server: &Server, key: &str, field: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hget(key, field) {
|
||||
Ok(Some(value)) => Ok(Protocol::BulkString(value)),
|
||||
Ok(None) => Ok(Protocol::Null),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hgetall_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hgetall(key) {
|
||||
Ok(pairs) => {
|
||||
let mut result = Vec::new();
|
||||
for (field, value) in pairs {
|
||||
result.push(Protocol::BulkString(field));
|
||||
result.push(Protocol::BulkString(value));
|
||||
}
|
||||
Ok(Protocol::Array(result))
|
||||
}
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hdel_cmd(server: &Server, key: &str, fields: &[String]) -> Result<Protocol, DBError> {
|
||||
match server.storage.hdel(key, fields) {
|
||||
Ok(deleted) => Ok(Protocol::SimpleString(deleted.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hexists_cmd(server: &Server, key: &str, field: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hexists(key, field) {
|
||||
Ok(exists) => Ok(Protocol::SimpleString(if exists { "1" } else { "0" }.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hkeys_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hkeys(key) {
|
||||
Ok(keys) => Ok(Protocol::Array(
|
||||
keys.into_iter().map(Protocol::BulkString).collect(),
|
||||
)),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hvals_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hvals(key) {
|
||||
Ok(values) => Ok(Protocol::Array(
|
||||
values.into_iter().map(Protocol::BulkString).collect(),
|
||||
)),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hlen_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hlen(key) {
|
||||
Ok(len) => Ok(Protocol::SimpleString(len.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hmget_cmd(server: &Server, key: &str, fields: &[String]) -> Result<Protocol, DBError> {
|
||||
match server.storage.hmget(key, fields) {
|
||||
Ok(values) => {
|
||||
let result: Vec<Protocol> = values
|
||||
.into_iter()
|
||||
.map(|v| v.map_or(Protocol::Null, Protocol::BulkString))
|
||||
.collect();
|
||||
Ok(Protocol::Array(result))
|
||||
}
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hsetnx_cmd(server: &Server, key: &str, field: &str, value: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hsetnx(key, field, value) {
|
||||
Ok(was_set) => Ok(Protocol::SimpleString(if was_set { "1" } else { "0" }.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn scan_cmd(server: &Server, cursor: &u64, pattern: Option<&str>, count: &Option<u64>) -> Result<Protocol, DBError> {
|
||||
match server.storage.scan(*cursor, pattern, *count) {
|
||||
Ok((next_cursor, keys)) => {
|
||||
let mut result = Vec::new();
|
||||
result.push(Protocol::BulkString(next_cursor.to_string()));
|
||||
result.push(Protocol::Array(
|
||||
keys.into_iter().map(Protocol::BulkString).collect(),
|
||||
));
|
||||
Ok(Protocol::Array(result))
|
||||
}
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
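// Example (added for clarity; reply shape derived from scan_cmd above): a call like
// SCAN 0 MATCH key* COUNT 10 is answered as a two-element array, cursor first, then
// the matching keys, e.g.
//   *2\r\n$1\r\n0\r\n*2\r\n$4\r\nkey0\r\n$4\r\nkey1\r\n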
|
@@ -1,6 +0,0 @@
mod cmd;
pub mod error;
pub mod options;
mod protocol;
pub mod server;
mod storage;
@@ -1,5 +0,0 @@
#[derive(Clone)]
pub struct DBOption {
    pub dir: String,
    pub port: u16,
}
176 src/protocol.rs
@@ -1,176 +0,0 @@
|
||||
use core::fmt;
|
||||
|
||||
use crate::error::DBError;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Protocol {
|
||||
SimpleString(String),
|
||||
BulkString(String),
|
||||
Null,
|
||||
Array(Vec<Protocol>),
|
||||
}
|
||||
|
||||
impl fmt::Display for Protocol {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", self.decode().as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl Protocol {
|
||||
pub fn from(protocol: &str) -> Result<(Self, usize), DBError> {
|
||||
let ret = match protocol.chars().nth(0) {
|
||||
Some('+') => Self::parse_simple_string_sfx(&protocol[1..]),
|
||||
Some('$') => Self::parse_bulk_string_sfx(&protocol[1..]),
|
||||
Some('*') => Self::parse_array_sfx(&protocol[1..]),
|
||||
_ => Err(DBError(format!(
|
||||
"[from] unsupported protocol: {:?}",
|
||||
protocol
|
||||
))),
|
||||
};
|
||||
match ret {
|
||||
Ok((p, s)) => Ok((p, s + 1)),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_vec(array: Vec<&str>) -> Self {
|
||||
let array = array
|
||||
.into_iter()
|
||||
.map(|x| Protocol::BulkString(x.to_string()))
|
||||
.collect();
|
||||
Protocol::Array(array)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn ok() -> Self {
|
||||
Protocol::SimpleString("ok".to_string())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn err(msg: &str) -> Self {
|
||||
Protocol::SimpleString(msg.to_string())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn write_on_slave_err() -> Self {
|
||||
Self::err("DISALLOW WRITE ON SLAVE")
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn psync_on_slave_err() -> Self {
|
||||
Self::err("PSYNC ON SLAVE IS NOT ALLOWED")
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn none() -> Self {
|
||||
Self::SimpleString("none".to_string())
|
||||
}
|
||||
|
||||
pub fn decode(&self) -> String {
|
||||
match self {
|
||||
Protocol::SimpleString(s) => s.to_string(),
|
||||
Protocol::BulkString(s) => s.to_string(),
|
||||
Protocol::Null => "".to_string(),
|
||||
Protocol::Array(s) => s.iter().map(|x| x.decode()).collect::<Vec<_>>().join(" "),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> String {
|
||||
match self {
|
||||
Protocol::SimpleString(s) => format!("+{}\r\n", s),
|
||||
Protocol::BulkString(s) => format!("${}\r\n{}\r\n", s.len(), s),
|
||||
Protocol::Array(ss) => {
|
||||
format!("*{}\r\n", ss.len())
|
||||
+ ss.iter()
|
||||
.map(|x| x.encode())
|
||||
.collect::<Vec<_>>()
|
||||
.join("")
|
||||
.as_str()
|
||||
}
|
||||
Protocol::Null => "$-1\r\n".to_string(),
|
||||
}
|
||||
}
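// Encoding examples (added for clarity; derived from the match arms above):
//   SimpleString("OK")                   -> "+OK\r\n"
//   BulkString("hey")                    -> "$3\r\nhey\r\n"
//   Array(vec![BulkString("a".into())])  -> "*1\r\n$1\r\na\r\n"
//   Null                                 -> "$-1\r\n"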
|
||||
|
||||
fn parse_simple_string_sfx(protocol: &str) -> Result<(Self, usize), DBError> {
|
||||
match protocol.find("\r\n") {
|
||||
Some(x) => Ok((Self::SimpleString(protocol[..x].to_string()), x + 2)),
|
||||
_ => Err(DBError(format!(
|
||||
"[new simple string] unsupported protocol: {:?}",
|
||||
protocol
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_bulk_string_sfx(protocol: &str) -> Result<(Self, usize), DBError> {
|
||||
if let Some(len) = protocol.find("\r\n") {
|
||||
let size = Self::parse_usize(&protocol[..len])?;
|
||||
if let Some(data_len) = protocol[len + 2..].find("\r\n") {
|
||||
let s = Self::parse_string(&protocol[len + 2..len + 2 + data_len])?;
|
||||
if size != s.len() {
|
||||
Err(DBError(format!(
|
||||
"[new bulk string] unmatched string length in prototocl {:?}",
|
||||
protocol,
|
||||
)))
|
||||
} else {
|
||||
Ok((
|
||||
Protocol::BulkString(s.to_lowercase()),
|
||||
len + 2 + data_len + 2,
|
||||
))
|
||||
}
|
||||
} else {
|
||||
Err(DBError(format!(
|
||||
"[new bulk string] unsupported protocol: {:?}",
|
||||
protocol
|
||||
)))
|
||||
}
|
||||
} else {
|
||||
Err(DBError(format!(
|
||||
"[new bulk string] unsupported protocol: {:?}",
|
||||
protocol
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_array_sfx(s: &str) -> Result<(Self, usize), DBError> {
|
||||
let mut offset = 0;
|
||||
match s.find("\r\n") {
|
||||
Some(x) => {
|
||||
let array_len = s[..x].parse::<usize>()?;
|
||||
offset += x + 2;
|
||||
let mut vec = vec![];
|
||||
for _ in 0..array_len {
|
||||
match Protocol::from(&s[offset..]) {
|
||||
Ok((p, len)) => {
|
||||
offset += len;
|
||||
vec.push(p);
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok((Protocol::Array(vec), offset))
|
||||
}
|
||||
_ => Err(DBError(format!(
|
||||
"[new array] unsupported protocol: {:?}",
|
||||
s
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_usize(protocol: &str) -> Result<usize, DBError> {
|
||||
match protocol.len() {
|
||||
0 => Err(DBError(format!("parse usize error: {:?}", protocol))),
|
||||
_ => Ok(protocol
|
||||
.parse::<usize>()
|
||||
.map_err(|_| DBError(format!("parse usize error: {}", protocol)))?),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_string(protocol: &str) -> Result<String, DBError> {
|
||||
match protocol.len() {
|
||||
0 => Err(DBError(format!("parse string error: {:?}", protocol))),
|
||||
_ => Ok(protocol.to_string()),
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,68 +0,0 @@
|
||||
use core::str;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
use crate::cmd::Cmd;
|
||||
use crate::error::DBError;
|
||||
use crate::options;
|
||||
use crate::protocol::Protocol;
|
||||
use crate::storage::Storage;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Server {
|
||||
pub storage: Arc<Storage>,
|
||||
pub option: options::DBOption,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub async fn new(option: options::DBOption) -> Self {
|
||||
// Create database file path with fixed filename
|
||||
let db_file_path = PathBuf::from(option.dir.clone()).join("herodb.redb");
|
||||
println!("will open db file path: {}", db_file_path.display());
|
||||
|
||||
// Initialize storage with redb
|
||||
let storage = Storage::new(db_file_path).expect("Failed to initialize storage");
|
||||
|
||||
Server {
|
||||
storage: Arc::new(storage),
|
||||
option,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle(
|
||||
&mut self,
|
||||
mut stream: tokio::net::TcpStream,
|
||||
) -> Result<(), DBError> {
|
||||
let mut buf = [0; 512];
|
||||
let mut queued_cmd: Option<Vec<(Cmd, Protocol)>> = None;
|
||||
|
||||
loop {
|
||||
if let Ok(len) = stream.read(&mut buf).await {
|
||||
if len == 0 {
|
||||
println!("[handle] connection closed");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let s = str::from_utf8(&buf[..len])?;
|
||||
let (cmd, protocol) =
|
||||
Cmd::from(s).unwrap_or((Cmd::Unknow, Protocol::err("unknow cmd")));
|
||||
println!("got command: {:?}, protocol: {:?}", cmd, protocol);
|
||||
|
||||
let res = cmd
|
||||
.run(self, protocol, &mut queued_cmd)
|
||||
.await
|
||||
.unwrap_or(Protocol::err("unknow cmd"));
|
||||
print!("queued cmd {:?}", queued_cmd);
|
||||
|
||||
println!("going to send response {}", res.encode());
|
||||
_ = stream.write(res.encode().as_bytes()).await?;
|
||||
} else {
|
||||
println!("[handle] going to break");
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
509 src/storage.rs
@@ -1,509 +0,0 @@
|
||||
use std::{
|
||||
path::Path,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
use redb::{Database, Error, ReadableTable, Table, TableDefinition, WriteTransaction, ReadTransaction};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::DBError;
|
||||
|
||||
// Table definitions for different Redis data types
|
||||
const TYPES_TABLE: TableDefinition<&str, &str> = TableDefinition::new("types");
|
||||
const STRINGS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("strings");
|
||||
const HASHES_TABLE: TableDefinition<(&str, &str), &str> = TableDefinition::new("hashes");
|
||||
const STREAMS_META_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("streams_meta");
|
||||
const STREAMS_DATA_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("streams_data");
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct StringValue {
|
||||
pub value: String,
|
||||
pub expires_at_ms: Option<u128>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct StreamEntry {
|
||||
pub fields: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn now_in_millis() -> u128 {
|
||||
let start = SystemTime::now();
|
||||
let duration_since_epoch = start.duration_since(UNIX_EPOCH).unwrap();
|
||||
duration_since_epoch.as_millis()
|
||||
}
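// Expiry bookkeeping (comment added for clarity): setx() below stores
// expires_at_ms = now_in_millis() + expire_ms inside the serialized StringValue,
// and get() treats a key as expired (and lazily deletes it) once
// now_in_millis() > expires_at_ms.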
|
||||
|
||||
pub struct Storage {
|
||||
db: Database,
|
||||
}
|
||||
|
||||
impl Storage {
|
||||
pub fn new(path: impl AsRef<Path>) -> Result<Self, DBError> {
|
||||
let db = Database::create(path)?;
|
||||
|
||||
// Create tables if they don't exist
|
||||
let write_txn = db.begin_write()?;
|
||||
{
|
||||
let _ = write_txn.open_table(TYPES_TABLE)?;
|
||||
let _ = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let _ = write_txn.open_table(HASHES_TABLE)?;
|
||||
let _ = write_txn.open_table(STREAMS_META_TABLE)?;
|
||||
let _ = write_txn.open_table(STREAMS_DATA_TABLE)?;
|
||||
}
|
||||
write_txn.commit()?;
|
||||
|
||||
Ok(Storage { db })
|
||||
}
|
||||
|
||||
pub fn get_key_type(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match table.get(key)? {
|
||||
Some(type_val) => Ok(Some(type_val.value().to_string())),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
|
||||
// Check if key exists and is of string type
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "string" => {
|
||||
let strings_table = read_txn.open_table(STRINGS_TABLE)?;
|
||||
match strings_table.get(key)? {
|
||||
Some(data) => {
|
||||
let string_value: StringValue = bincode::deserialize(data.value())?;
|
||||
|
||||
// Check if expired
|
||||
if let Some(expires_at) = string_value.expires_at_ms {
|
||||
if now_in_millis() > expires_at {
|
||||
// Key expired, remove it
|
||||
drop(read_txn);
|
||||
self.del(key.to_string())?;
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(string_value.value))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set(&self, key: String, value: String) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.insert(key.as_str(), "string")?;
|
||||
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let string_value = StringValue {
|
||||
value,
|
||||
expires_at_ms: None,
|
||||
};
|
||||
let serialized = bincode::serialize(&string_value)?;
|
||||
strings_table.insert(key.as_str(), serialized.as_slice())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.insert(key.as_str(), "string")?;
|
||||
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let string_value = StringValue {
|
||||
value,
|
||||
expires_at_ms: Some(expire_ms + now_in_millis()),
|
||||
};
|
||||
let serialized = bincode::serialize(&string_value)?;
|
||||
strings_table.insert(key.as_str(), serialized.as_slice())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn del(&self, key: String) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
// Remove from type table
|
||||
types_table.remove(key.as_str())?;
|
||||
|
||||
// Remove from strings table
|
||||
strings_table.remove(key.as_str())?;
|
||||
|
||||
// Remove all hash fields for this key
|
||||
let mut to_remove = Vec::new();
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
if hash_key == key.as_str() {
|
||||
to_remove.push((hash_key.to_string(), field.to_string()));
|
||||
}
|
||||
}
|
||||
drop(iter);
|
||||
|
||||
for (hash_key, field) in to_remove {
|
||||
hashes_table.remove((hash_key.as_str(), field.as_str()))?;
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn keys(&self, pattern: &str) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let mut keys = Vec::new();
|
||||
let mut iter = table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let key = entry?.0.value().to_string();
|
||||
if pattern == "*" || key.contains(pattern) {
|
||||
keys.push(key);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
// Hash operations
|
||||
pub fn hset(&self, key: &str, pairs: &[(String, String)]) -> Result<u64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut new_fields = 0u64;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
// Check if key exists and is of correct type
|
||||
let existing_type = match types_table.get(key)? {
|
||||
Some(type_val) => Some(type_val.value().to_string()),
|
||||
None => None,
|
||||
};
|
||||
|
||||
match existing_type {
|
||||
Some(ref type_str) if type_str != "hash" => {
|
||||
return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string()));
|
||||
}
|
||||
None => {
|
||||
// Set type to hash
|
||||
types_table.insert(key, "hash")?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
for (field, value) in pairs {
|
||||
let existed = hashes_table.get((key, field.as_str()))?.is_some();
|
||||
hashes_table.insert((key, field.as_str()), value.as_str())?;
|
||||
if !existed {
|
||||
new_fields += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(new_fields)
|
||||
}
|
||||
|
||||
    pub fn hget(&self, key: &str, field: &str) -> Result<Option<String>, DBError> {
        let read_txn = self.db.begin_read()?;

        // Check type
        let types_table = read_txn.open_table(TYPES_TABLE)?;
        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "hash" => {
                let hashes_table = read_txn.open_table(HASHES_TABLE)?;
                match hashes_table.get((key, field))? {
                    Some(value) => Ok(Some(value.value().to_string())),
                    None => Ok(None),
                }
            }
            Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
            None => Ok(None),
        }
    }

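    /// Return every field/value pair stored under a hash key, in iteration order
    /// of the underlying table. Missing keys yield an empty Vec.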
    pub fn hgetall(&self, key: &str) -> Result<Vec<(String, String)>, DBError> {
        let read_txn = self.db.begin_read()?;

        // Check type
        let types_table = read_txn.open_table(TYPES_TABLE)?;
        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "hash" => {
                let hashes_table = read_txn.open_table(HASHES_TABLE)?;
                let mut result = Vec::new();

                let mut iter = hashes_table.iter()?;
                while let Some(entry) = iter.next() {
                    let entry = entry?;
                    let (hash_key, field) = entry.0.value();
                    let value = entry.1.value();
                    if hash_key == key {
                        result.push((field.to_string(), value.to_string()));
                    }
                }

                Ok(result)
            }
            Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
            None => Ok(Vec::new()),
        }
    }

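    /// Remove the given fields from a hash key and return how many were actually
    /// deleted. Missing keys delete nothing; non-hash keys are a WRONGTYPE error.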
    pub fn hdel(&self, key: &str, fields: &[String]) -> Result<u64, DBError> {
        let write_txn = self.db.begin_write()?;
        let mut deleted = 0u64;

        {
            let types_table = write_txn.open_table(TYPES_TABLE)?;
            let key_type = types_table.get(key)?;
            match key_type {
                Some(type_val) if type_val.value() == "hash" => {
                    let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;

                    for field in fields {
                        if hashes_table.remove((key, field.as_str()))?.is_some() {
                            deleted += 1;
                        }
                    }
                }
                Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
                None => {}
            }
        }

        write_txn.commit()?;
        Ok(deleted)
    }

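    /// Check whether a field exists on a hash key. Missing keys report `false`;
    /// keys of another type are a WRONGTYPE error.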
    pub fn hexists(&self, key: &str, field: &str) -> Result<bool, DBError> {
        let read_txn = self.db.begin_read()?;

        let types_table = read_txn.open_table(TYPES_TABLE)?;
        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "hash" => {
                let hashes_table = read_txn.open_table(HASHES_TABLE)?;
                Ok(hashes_table.get((key, field))?.is_some())
            }
            Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
            None => Ok(false),
        }
    }

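    /// List the field names of a hash key by scanning the hashes table and keeping
    /// entries whose composite key matches. Missing keys yield an empty Vec.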
    pub fn hkeys(&self, key: &str) -> Result<Vec<String>, DBError> {
        let read_txn = self.db.begin_read()?;

        let types_table = read_txn.open_table(TYPES_TABLE)?;
        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "hash" => {
                let hashes_table = read_txn.open_table(HASHES_TABLE)?;
                let mut result = Vec::new();

                let mut iter = hashes_table.iter()?;
                while let Some(entry) = iter.next() {
                    let entry = entry?;
                    let (hash_key, field) = entry.0.value();
                    if hash_key == key {
                        result.push(field.to_string());
                    }
                }

                Ok(result)
            }
            Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
            None => Ok(Vec::new()),
        }
    }

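    /// List the values of a hash key, in the same table order as `hkeys`.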
    pub fn hvals(&self, key: &str) -> Result<Vec<String>, DBError> {
        let read_txn = self.db.begin_read()?;

        let types_table = read_txn.open_table(TYPES_TABLE)?;
        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "hash" => {
                let hashes_table = read_txn.open_table(HASHES_TABLE)?;
                let mut result = Vec::new();

                let mut iter = hashes_table.iter()?;
                while let Some(entry) = iter.next() {
                    let entry = entry?;
                    let (hash_key, _) = entry.0.value();
                    let value = entry.1.value();
                    if hash_key == key {
                        result.push(value.to_string());
                    }
                }

                Ok(result)
            }
            Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
            None => Ok(Vec::new()),
        }
    }

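    /// Count the fields of a hash key by walking the hashes table; missing keys count as 0.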
    pub fn hlen(&self, key: &str) -> Result<u64, DBError> {
        let read_txn = self.db.begin_read()?;

        let types_table = read_txn.open_table(TYPES_TABLE)?;
        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "hash" => {
                let hashes_table = read_txn.open_table(HASHES_TABLE)?;
                let mut count = 0u64;

                let mut iter = hashes_table.iter()?;
                while let Some(entry) = iter.next() {
                    let entry = entry?;
                    let (hash_key, _) = entry.0.value();
                    if hash_key == key {
                        count += 1;
                    }
                }

                Ok(count)
            }
            Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
            None => Ok(0),
        }
    }

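    /// Fetch several fields at once; each position in the result is `Some(value)`
    /// or `None`, and a missing key returns all `None`s, mirroring Redis HMGET.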
    pub fn hmget(&self, key: &str, fields: &[String]) -> Result<Vec<Option<String>>, DBError> {
        let read_txn = self.db.begin_read()?;

        let types_table = read_txn.open_table(TYPES_TABLE)?;
        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "hash" => {
                let hashes_table = read_txn.open_table(HASHES_TABLE)?;
                let mut result = Vec::new();

                for field in fields {
                    match hashes_table.get((key, field.as_str()))? {
                        Some(value) => result.push(Some(value.value().to_string())),
                        None => result.push(None),
                    }
                }

                Ok(result)
            }
            Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
            None => Ok(fields.iter().map(|_| None).collect()),
        }
    }

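    /// Set a field only if it is not already present. Returns `true` when the field
    /// was written, `false` when it already existed, mirroring Redis HSETNX.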
    pub fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result<bool, DBError> {
        let write_txn = self.db.begin_write()?;
        let mut result = false;

        {
            let mut types_table = write_txn.open_table(TYPES_TABLE)?;
            let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;

            // Check if key exists and is of correct type
            let existing_type = match types_table.get(key)? {
                Some(type_val) => Some(type_val.value().to_string()),
                None => None,
            };

            match existing_type {
                Some(ref type_str) if type_str != "hash" => {
                    return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string()));
                }
                None => {
                    // Set type to hash
                    types_table.insert(key, "hash")?;
                }
                _ => {}
            }

            // Check if field already exists
            if hashes_table.get((key, field))?.is_none() {
                hashes_table.insert((key, field), value)?;
                result = true;
            }
        }

        write_txn.commit()?;
        Ok(result)
    }

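    /// Cursor-based key iteration over the type table. The cursor is the number of
    /// entries already visited; a returned cursor of `0` means the scan is complete.
    /// Patterns with a single `*` are matched as prefix/suffix; otherwise substring
    /// matching is used. `count` defaults to 10 per call.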
    pub fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<String>), DBError> {
        let read_txn = self.db.begin_read()?;
        let table = read_txn.open_table(TYPES_TABLE)?;

        let count = count.unwrap_or(10); // Default count is 10
        let mut keys = Vec::new();
        let mut current_cursor = 0u64;
        let mut returned_keys = 0u64;

        let mut iter = table.iter()?;
        while let Some(entry) = iter.next() {
            let key = entry?.0.value().to_string();

            // Skip keys until we reach the cursor position
            if current_cursor < cursor {
                current_cursor += 1;
                continue;
            }

            // Check if key matches pattern
            let matches = match pattern {
                Some(pat) => {
                    if pat == "*" {
                        true
                    } else if pat.contains('*') {
                        // Simple glob pattern matching
                        let pattern_parts: Vec<&str> = pat.split('*').collect();
                        if pattern_parts.len() == 2 {
                            let prefix = pattern_parts[0];
                            let suffix = pattern_parts[1];
                            key.starts_with(prefix) && key.ends_with(suffix)
                        } else {
                            key.contains(&pat.replace('*', ""))
                        }
                    } else {
                        key.contains(pat)
                    }
                }
                None => true,
            };

            if matches {
                keys.push(key);
                returned_keys += 1;

                // Stop if we've returned enough keys
                if returned_keys >= count {
                    current_cursor += 1;
                    break;
                }
            }

            current_cursor += 1;
        }

        // If we've reached the end of iteration, return cursor 0 to indicate completion
        let next_cursor = if returned_keys < count { 0 } else { current_cursor };

        Ok((next_cursor, keys))
    }
}