Compare commits
1 Commits
main
...
453e86edd2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
453e86edd2 |
1860
Cargo.lock
generated
1860
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,6 +0,0 @@
|
|||||||
[workspace]
|
|
||||||
members = [
|
|
||||||
"heromodels",
|
|
||||||
"heromodels_core",
|
|
||||||
"heromodels-derive",
|
|
||||||
]
|
|
||||||
246
do.sql
246
do.sql
@@ -1,246 +0,0 @@
|
|||||||
-- --------------------------------------------------------------
-- do.sql – create tables for HeroLedger models (PostgreSQL)
-- --------------------------------------------------------------
-- All DDL below runs inside a single transaction so that a failure
-- at any point leaves the database unchanged.
-- Convention used by every table:
--   id       SERIAL PRIMARY KEY  – surrogate key
--   created  / updated           – BIGINT epoch timestamps (units not
--                                  specified in the source model – confirm)
--   deleted                      – soft-delete flag
--   version                      – model version counter
--   data     JSONB NOT NULL      – full serialized model payload
BEGIN;
-- 1. DNSZONE
-- One row per DNS zone; frequently queried fields are denormalized
-- into typed columns, the full model lives in `data`.
CREATE TABLE dnszone (
    id             SERIAL PRIMARY KEY,
    created        BIGINT,
    updated        BIGINT,
    deleted        BOOLEAN,
    version        INTEGER,
    domain         TEXT,           -- marked @[index] in the V spec
    administrators INTEGER[],      -- array of user ids (not enforceable as a SQL FK)
    status         TEXT,
    metadata       JSONB,
    soarecord      JSONB,          -- array of SOARecord structs stored as JSONB
    data           JSONB NOT NULL  -- full serialized model
);
-- Lookup zones by domain name.
CREATE INDEX idx_dnszone_domain ON dnszone (domain);
-- 2. DNSRECORD
-- Individual DNS records (A, CNAME, MX, …) belonging to a zone.
CREATE TABLE dnsrecord (
    id          SERIAL PRIMARY KEY,
    created     BIGINT,
    updated     BIGINT,
    deleted     BOOLEAN,
    version     INTEGER,
    subdomain   TEXT,
    record_type TEXT,            -- e.g. A / AAAA / CNAME / MX (not constrained here)
    value       TEXT,
    priority    INTEGER,         -- meaningful for MX/SRV-style records
    ttl         INTEGER,
    is_active   BOOLEAN,
    cat         TEXT,
    is_wildcard BOOLEAN,
    data        JSONB NOT NULL   -- full serialized model
);
-- No explicit index required – rarely queried alone
-- 3. GROUP
-- "group" is an SQL reserved word, hence the quoted identifier.
CREATE TABLE "group" (
    id             SERIAL PRIMARY KEY,
    created        BIGINT,
    updated        BIGINT,
    deleted        BOOLEAN,
    version        INTEGER,
    name           TEXT NOT NULL,
    description    TEXT,
    dnsrecords     INTEGER[],      -- FK → dnsrecord.id (array; not enforceable as a SQL FK)
    administrators INTEGER[],      -- array of user ids
    config         JSONB,          -- embedded GroupConfig struct
    status         TEXT,
    visibility     TEXT,
    created_ts     BIGINT,         -- model-level timestamps, distinct from created/updated
    updated_ts     BIGINT,
    data           JSONB NOT NULL  -- full serialized model
);
-- Group names must be unique.
CREATE UNIQUE INDEX idx_group_name ON "group" (name);
-- 4. USER_GROUP_MEMBERSHIP
-- Maps one user to the set of groups they belong to.
CREATE TABLE user_group_membership (
    id        SERIAL PRIMARY KEY,
    created   BIGINT,
    updated   BIGINT,
    deleted   BOOLEAN,
    version   INTEGER,
    user_id   INTEGER NOT NULL,
    group_ids INTEGER[],      -- array of group ids
    data      JSONB NOT NULL  -- full serialized model
);
CREATE INDEX idx_ugm_user_id ON user_group_membership (user_id);
-- GIN index supports containment queries such as: group_ids @> ARRAY[42]
CREATE INDEX idx_ugm_group_ids ON user_group_membership USING GIN (group_ids);
-- 5. MEMBER (circle/member.v)
-- A user's membership in a circle, with role and permission info.
CREATE TABLE member (
    id          SERIAL PRIMARY KEY,
    created     BIGINT,
    updated     BIGINT,
    deleted     BOOLEAN,
    version     INTEGER,
    user_id     INTEGER NOT NULL,
    role        TEXT,
    status      TEXT,
    joined_at   BIGINT,
    invited_by  INTEGER,        -- id of the inviting user (no FK declared in source)
    permissions TEXT[],
    data        JSONB NOT NULL  -- full serialized model
);
CREATE INDEX idx_member_user_id ON member (user_id);
-- 6. ACCOUNT
-- Ledger account. NOTE(review): `balance` is DOUBLE PRECISION per the
-- source model – floating point for monetary values is lossy; NUMERIC
-- would be safer, but changing it would break the model mapping.
CREATE TABLE account (
    id             SERIAL PRIMARY KEY,
    created        BIGINT,
    updated        BIGINT,
    deleted        BOOLEAN,
    version        INTEGER,
    owner_id       INTEGER,
    address        TEXT NOT NULL,
    balance        DOUBLE PRECISION,
    currency       TEXT,
    assetid        INTEGER,
    last_activity  BIGINT,
    administrators INTEGER[],      -- array of user ids
    accountpolicy  INTEGER,        -- id of the account_policy row (no FK declared in source)
    data           JSONB NOT NULL  -- full serialized model
);
-- One account per address.
CREATE UNIQUE INDEX idx_account_address ON account (address);
CREATE INDEX idx_account_assetid ON account (assetid);
-- 7. ASSET
-- Asset/token definition; `supply` kept as DOUBLE PRECISION to match
-- the source model (see the NUMERIC note on account.balance).
CREATE TABLE asset (
    id             SERIAL PRIMARY KEY,
    created        BIGINT,
    updated        BIGINT,
    deleted        BOOLEAN,
    version        INTEGER,
    address        TEXT NOT NULL,
    assetid        INTEGER NOT NULL,
    asset_type     TEXT,
    issuer         INTEGER,          -- id of the issuing user (no FK declared in source)
    supply         DOUBLE PRECISION,
    decimals       SMALLINT,
    is_frozen      BOOLEAN,
    metadata       JSONB,
    administrators INTEGER[],        -- array of user ids
    min_signatures INTEGER,          -- signatures required for multi-sig operations
    data           JSONB NOT NULL    -- full serialized model
);
-- Both the on-chain address and the numeric asset id are unique handles.
CREATE UNIQUE INDEX idx_asset_address ON asset (address);
CREATE UNIQUE INDEX idx_asset_assetid ON asset (assetid);
CREATE INDEX idx_asset_issuer ON asset (issuer);
-- 8. ACCOUNT_POLICY (holds AccountPolicyItem JSONB blobs)
-- Each policy column stores a serialized AccountPolicyItem struct.
CREATE TABLE account_policy (
    id             SERIAL PRIMARY KEY,
    created        BIGINT,
    updated        BIGINT,
    deleted        BOOLEAN,
    version        INTEGER,
    transferpolicy JSONB,
    adminpolicy    JSONB,
    clawbackpolicy JSONB,
    freezepolicy   JSONB,
    data           JSONB NOT NULL  -- full serialized model
);

-- 9. ACCOUNT_POLICY_ITEM (stand-alone if you ever need a table)
-- (optional – we store it as JSONB inside account_policy, so not created)
-- 10. TRANSACTION
-- Ledger transfer between two accounts. `transaction` is a non-reserved
-- keyword in PostgreSQL, so the unquoted name is accepted.
CREATE TABLE transaction (
    id          SERIAL PRIMARY KEY,
    created     BIGINT,
    updated     BIGINT,
    deleted     BOOLEAN,
    version     INTEGER,
    txid        INTEGER NOT NULL,
    source      INTEGER,          -- source account id (no FK declared in source)
    destination INTEGER,          -- destination account id (no FK declared in source)
    assetid     INTEGER,
    amount      DOUBLE PRECISION, -- matches the source model; see NUMERIC note on account
    timestamp   BIGINT,
    status      TEXT,
    memo        TEXT,
    tx_type     TEXT,
    signatures  JSONB,            -- array of Signature JSON objects
    data        JSONB NOT NULL    -- full serialized model
);
-- txid is the external transaction identifier and must be unique.
CREATE UNIQUE INDEX idx_transaction_txid ON transaction (txid);
CREATE INDEX idx_transaction_source ON transaction (source);
CREATE INDEX idx_transaction_destination ON transaction (destination);
CREATE INDEX idx_transaction_assetid ON transaction (assetid);
-- 11. SIGNATURE
-- A user's signature over an arbitrary object (objectid/objecttype pair).
CREATE TABLE signature (
    id           SERIAL PRIMARY KEY,
    created      BIGINT,
    updated      BIGINT,
    deleted      BOOLEAN,
    version      INTEGER,
    signature_id INTEGER NOT NULL,
    user_id      INTEGER NOT NULL,
    value        TEXT,            -- the signature bytes, serialized as text
    objectid     INTEGER,         -- id of the signed object
    objecttype   TEXT,            -- type discriminator for objectid
    status       TEXT,
    timestamp    BIGINT,
    data         JSONB NOT NULL   -- full serialized model
);
CREATE INDEX idx_signature_signature_id ON signature (signature_id);
CREATE INDEX idx_signature_user_id ON signature (user_id);
CREATE INDEX idx_signature_objectid ON signature (objectid);
-- 12. USER_KVS
-- A named key-value store owned by a single user; items live in
-- user_kvs_item and point back here via userkvs_id.
CREATE TABLE user_kvs (
    id      SERIAL PRIMARY KEY,
    created BIGINT,
    updated BIGINT,
    deleted BOOLEAN,
    version INTEGER,
    userid  INTEGER NOT NULL,
    name    TEXT,
    data    JSONB NOT NULL  -- full serialized model
);
CREATE INDEX idx_userkvs_userid ON user_kvs (userid);
-- 13. USER_KVS_ITEM
-- One entry inside a user_kvs store. `key` is non-reserved in
-- PostgreSQL, so the unquoted column name is accepted.
CREATE TABLE user_kvs_item (
    id         SERIAL PRIMARY KEY,
    created    BIGINT,
    updated    BIGINT,
    deleted    BOOLEAN,
    version    INTEGER,
    userkvs_id INTEGER NOT NULL,  -- id of the owning user_kvs row (no FK declared in source)
    key        TEXT NOT NULL,
    value      TEXT,
    secretbox  JSONB,             -- encrypted payload, stored as JSONB
    timestamp  BIGINT,
    data       JSONB NOT NULL     -- full serialized model
);
CREATE INDEX idx_userkvs_item_userkvs_id ON user_kvs_item (userkvs_id);
CREATE INDEX idx_userkvs_item_key ON user_kvs_item (key);
-- 14. USER
-- "user" is an SQL reserved word, hence the quoted identifier.
CREATE TABLE "user" (
    id          SERIAL PRIMARY KEY,
    created     BIGINT,
    updated     BIGINT,
    deleted     BOOLEAN,
    version     INTEGER,
    username    TEXT NOT NULL,
    pubkey      TEXT NOT NULL,
    email       TEXT[] NOT NULL,  -- a user may register several addresses
    status      TEXT,
    userprofile JSONB,            -- embedded profile struct
    kyc         JSONB,            -- embedded KYC struct
    data        JSONB NOT NULL    -- full serialized model
);
CREATE UNIQUE INDEX idx_user_username ON "user" (username);
CREATE UNIQUE INDEX idx_user_pubkey ON "user" (pubkey);
-- Email array index – use GIN for fast containment queries
-- (e.g. email @> ARRAY['someone@example.com'])
CREATE INDEX idx_user_email ON "user" USING GIN (email);
-- End of schema – commit the transaction opened at the top of the file.
COMMIT;
48
heromodels/Cargo.lock
generated
48
heromodels/Cargo.lock
generated
@@ -60,7 +60,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -233,14 +233,6 @@ dependencies = [
|
|||||||
"typenum",
|
"typenum",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "derive"
|
|
||||||
version = "0.1.0"
|
|
||||||
dependencies = [
|
|
||||||
"quote",
|
|
||||||
"syn 1.0.109",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "digest"
|
name = "digest"
|
||||||
version = "0.10.7"
|
version = "0.10.7"
|
||||||
@@ -300,7 +292,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -387,7 +379,6 @@ version = "0.1.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"bincode",
|
"bincode",
|
||||||
"chrono",
|
"chrono",
|
||||||
"derive",
|
|
||||||
"heromodels-derive",
|
"heromodels-derive",
|
||||||
"heromodels_core",
|
"heromodels_core",
|
||||||
"jsonb",
|
"jsonb",
|
||||||
@@ -411,7 +402,7 @@ version = "0.1.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -514,7 +505,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -952,7 +943,7 @@ checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1015,7 +1006,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1145,7 +1136,7 @@ dependencies = [
|
|||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"rustversion",
|
"rustversion",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1154,17 +1145,6 @@ version = "2.6.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
|
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "syn"
|
|
||||||
version = "1.0.109"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
|
|
||||||
dependencies = [
|
|
||||||
"proc-macro2",
|
|
||||||
"quote",
|
|
||||||
"unicode-ident",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "syn"
|
name = "syn"
|
||||||
version = "2.0.104"
|
version = "2.0.104"
|
||||||
@@ -1199,7 +1179,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1254,7 +1234,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1409,7 +1389,7 @@ dependencies = [
|
|||||||
"log",
|
"log",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
"wasm-bindgen-shared",
|
"wasm-bindgen-shared",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -1431,7 +1411,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
"wasm-bindgen-backend",
|
"wasm-bindgen-backend",
|
||||||
"wasm-bindgen-shared",
|
"wasm-bindgen-shared",
|
||||||
]
|
]
|
||||||
@@ -1487,7 +1467,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1498,7 +1478,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1633,5 +1613,5 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn 2.0.104",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -10,18 +10,16 @@ serde = { version = "1.0", features = ["derive"] }
|
|||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
bincode = { version = "2", features = ["serde"] }
|
bincode = { version = "2", features = ["serde"] }
|
||||||
chrono = { version = "0.4", features = ["serde"] }
|
chrono = { version = "0.4", features = ["serde"] }
|
||||||
ourdb = { path = "../../herolib_rust/packages/data/ourdb" }
|
ourdb = { path = "../ourdb" }
|
||||||
tst = { path = "../../herolib_rust/packages/data/tst" }
|
tst = { path = "../tst" }
|
||||||
heromodels-derive = { path = "../heromodels-derive" }
|
heromodels-derive = { path = "../heromodels-derive" }
|
||||||
heromodels_core = { path = "../heromodels_core" }
|
heromodels_core = { path = "../heromodels_core" }
|
||||||
rhailib-macros = { path = "../../herolib_rust/rhailib/src/macros" }
|
|
||||||
rhai = { version = "1.21.0", features = [
|
rhai = { version = "1.21.0", features = [
|
||||||
"std",
|
"std",
|
||||||
"sync",
|
"sync",
|
||||||
"decimal",
|
"decimal",
|
||||||
"internals",
|
"internals",
|
||||||
] } # Added "decimal" feature, sync for Arc<Mutex<>>
|
] } # Added "decimal" feature, sync for Arc<Mutex<>>
|
||||||
rust_decimal = { version = "1.36", features = ["serde"] }
|
|
||||||
strum = "0.26"
|
strum = "0.26"
|
||||||
strum_macros = "0.26"
|
strum_macros = "0.26"
|
||||||
uuid = { version = "1.17.0", features = ["v4"] }
|
uuid = { version = "1.17.0", features = ["v4"] }
|
||||||
|
|||||||
318
heromodels/docs/payment_usage.md
Normal file
318
heromodels/docs/payment_usage.md
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
# Payment Model Usage Guide
|
||||||
|
|
||||||
|
This document provides comprehensive instructions for AI assistants on how to use the Payment model in the heromodels repository.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Payment model represents a payment transaction in the system, typically associated with company registration or subscription payments. It integrates with Stripe for payment processing and maintains comprehensive status tracking.
|
||||||
|
|
||||||
|
## Model Structure
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct Payment {
|
||||||
|
pub base_data: BaseModelData, // Auto-managed ID, timestamps, comments
|
||||||
|
pub payment_intent_id: String, // Stripe payment intent ID
|
||||||
|
pub company_id: u32, // Foreign key to Company
|
||||||
|
pub payment_plan: String, // "monthly", "yearly", "two_year"
|
||||||
|
pub setup_fee: f64, // One-time setup fee
|
||||||
|
pub monthly_fee: f64, // Recurring monthly fee
|
||||||
|
pub total_amount: f64, // Total amount paid
|
||||||
|
pub currency: String, // Currency code (defaults to "usd")
|
||||||
|
pub status: PaymentStatus, // Current payment status
|
||||||
|
pub stripe_customer_id: Option<String>, // Stripe customer ID (set on completion)
|
||||||
|
pub created_at: i64, // Payment creation timestamp
|
||||||
|
pub completed_at: Option<i64>, // Payment completion timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
pub enum PaymentStatus {
|
||||||
|
Pending, // Initial state - payment created but not processed
|
||||||
|
Processing, // Payment is being processed by Stripe
|
||||||
|
Completed, // Payment successfully completed
|
||||||
|
Failed, // Payment processing failed
|
||||||
|
Refunded, // Payment was refunded
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Basic Usage
|
||||||
|
|
||||||
|
### 1. Creating a New Payment
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use heromodels::models::biz::{Payment, PaymentStatus};
|
||||||
|
|
||||||
|
// Create a new payment with required fields
|
||||||
|
let payment = Payment::new(
|
||||||
|
"pi_1234567890".to_string(), // Stripe payment intent ID
|
||||||
|
company_id, // Company ID from database
|
||||||
|
"monthly".to_string(), // Payment plan
|
||||||
|
100.0, // Setup fee
|
||||||
|
49.99, // Monthly fee
|
||||||
|
149.99, // Total amount
|
||||||
|
);
|
||||||
|
|
||||||
|
// Payment defaults:
|
||||||
|
// - status: PaymentStatus::Pending
|
||||||
|
// - currency: "usd"
|
||||||
|
// - stripe_customer_id: None
|
||||||
|
// - created_at: current timestamp
|
||||||
|
// - completed_at: None
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Using Builder Pattern
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let payment = Payment::new(
|
||||||
|
"pi_1234567890".to_string(),
|
||||||
|
company_id,
|
||||||
|
"yearly".to_string(),
|
||||||
|
500.0,
|
||||||
|
99.99,
|
||||||
|
1699.88,
|
||||||
|
)
|
||||||
|
.currency("eur".to_string())
|
||||||
|
.stripe_customer_id(Some("cus_existing_customer".to_string()));
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Database Operations
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use heromodels::db::Collection;
|
||||||
|
|
||||||
|
// Save payment to database
|
||||||
|
let db = get_db()?;
|
||||||
|
let (payment_id, saved_payment) = db.set(&payment)?;
|
||||||
|
|
||||||
|
// Retrieve payment by ID
|
||||||
|
let retrieved_payment: Payment = db.get_by_id(payment_id)?.unwrap();
|
||||||
|
|
||||||
|
// Update payment
|
||||||
|
let updated_payment = saved_payment.complete_payment(Some("cus_new_customer".to_string()));
|
||||||
|
let (_, final_payment) = db.set(&updated_payment)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Payment Status Management
|
||||||
|
|
||||||
|
### Status Transitions
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// 1. Start with Pending status (default)
|
||||||
|
let payment = Payment::new(/* ... */);
|
||||||
|
assert!(payment.is_pending());
|
||||||
|
|
||||||
|
// 2. Mark as processing when Stripe starts processing
|
||||||
|
let processing_payment = payment.process_payment();
|
||||||
|
assert!(processing_payment.is_processing());
|
||||||
|
|
||||||
|
// 3. Complete payment when Stripe confirms success
|
||||||
|
let completed_payment = processing_payment.complete_payment(Some("cus_123".to_string()));
|
||||||
|
assert!(completed_payment.is_completed());
|
||||||
|
assert!(completed_payment.completed_at.is_some());
|
||||||
|
|
||||||
|
// 4. Handle failure if payment fails
|
||||||
|
let failed_payment = processing_payment.fail_payment();
|
||||||
|
assert!(failed_payment.has_failed());
|
||||||
|
|
||||||
|
// 5. Refund if needed
|
||||||
|
let refunded_payment = completed_payment.refund_payment();
|
||||||
|
assert!(refunded_payment.is_refunded());
|
||||||
|
```
|
||||||
|
|
||||||
|
### Status Check Methods
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Check current status
|
||||||
|
if payment.is_pending() {
|
||||||
|
// Show "Payment Pending" UI
|
||||||
|
} else if payment.is_processing() {
|
||||||
|
// Show "Processing Payment" UI
|
||||||
|
} else if payment.is_completed() {
|
||||||
|
// Show "Payment Successful" UI
|
||||||
|
// Enable company features
|
||||||
|
} else if payment.has_failed() {
|
||||||
|
// Show "Payment Failed" UI
|
||||||
|
// Offer retry option
|
||||||
|
} else if payment.is_refunded() {
|
||||||
|
// Show "Payment Refunded" UI
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integration with Company Model
|
||||||
|
|
||||||
|
### Complete Payment Flow
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use heromodels::models::biz::{Company, CompanyStatus, Payment, PaymentStatus};
|
||||||
|
|
||||||
|
// 1. Create company with pending payment status
|
||||||
|
let company = Company::new(
|
||||||
|
"TechStart Inc.".to_string(),
|
||||||
|
"REG-TS-2024-001".to_string(),
|
||||||
|
chrono::Utc::now().timestamp(),
|
||||||
|
)
|
||||||
|
.email("contact@techstart.com".to_string())
|
||||||
|
.status(CompanyStatus::PendingPayment);
|
||||||
|
|
||||||
|
let (company_id, company) = db.set(&company)?;
|
||||||
|
|
||||||
|
// 2. Create payment for the company
|
||||||
|
let payment = Payment::new(
|
||||||
|
stripe_payment_intent_id,
|
||||||
|
company_id,
|
||||||
|
"yearly".to_string(),
|
||||||
|
500.0, // Setup fee
|
||||||
|
99.0, // Monthly fee
|
||||||
|
1688.0, // Total (setup + 12 months)
|
||||||
|
);
|
||||||
|
|
||||||
|
let (payment_id, payment) = db.set(&payment)?;
|
||||||
|
|
||||||
|
// 3. Process payment through Stripe
|
||||||
|
let processing_payment = payment.process_payment();
|
||||||
|
let (_, processing_payment) = db.set(&processing_payment)?;
|
||||||
|
|
||||||
|
// 4. On successful Stripe webhook
|
||||||
|
let completed_payment = processing_payment.complete_payment(Some(stripe_customer_id));
|
||||||
|
let (_, completed_payment) = db.set(&completed_payment)?;
|
||||||
|
|
||||||
|
// 5. Activate company
|
||||||
|
let active_company = company.status(CompanyStatus::Active);
|
||||||
|
let (_, active_company) = db.set(&active_company)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Indexing
|
||||||
|
|
||||||
|
The Payment model provides custom indexes for efficient querying:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Indexed fields for fast lookups:
|
||||||
|
// - payment_intent_id: Find payment by Stripe intent ID
|
||||||
|
// - company_id: Find all payments for a company
|
||||||
|
// - status: Find payments by status
|
||||||
|
|
||||||
|
// Example queries (conceptual - actual implementation depends on your query layer)
|
||||||
|
// let pending_payments = db.find_by_index("status", "Pending")?;
|
||||||
|
// let company_payments = db.find_by_index("company_id", company_id.to_string())?;
|
||||||
|
// let stripe_payment = db.find_by_index("payment_intent_id", "pi_1234567890")?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling Best Practices
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use heromodels::db::DbError;
|
||||||
|
|
||||||
|
fn process_payment_flow(payment_intent_id: String, company_id: u32) -> Result<Payment, DbError> {
|
||||||
|
let db = get_db()?;
|
||||||
|
|
||||||
|
// Create payment
|
||||||
|
let payment = Payment::new(
|
||||||
|
payment_intent_id,
|
||||||
|
company_id,
|
||||||
|
"monthly".to_string(),
|
||||||
|
100.0,
|
||||||
|
49.99,
|
||||||
|
149.99,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Save to database
|
||||||
|
let (payment_id, payment) = db.set(&payment)?;
|
||||||
|
|
||||||
|
// Process through Stripe (external API call)
|
||||||
|
match process_stripe_payment(&payment.payment_intent_id) {
|
||||||
|
Ok(stripe_customer_id) => {
|
||||||
|
// Success: complete payment
|
||||||
|
let completed_payment = payment.complete_payment(Some(stripe_customer_id));
|
||||||
|
let (_, final_payment) = db.set(&completed_payment)?;
|
||||||
|
Ok(final_payment)
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
// Failure: mark as failed
|
||||||
|
let failed_payment = payment.fail_payment();
|
||||||
|
let (_, final_payment) = db.set(&failed_payment)?;
|
||||||
|
Ok(final_payment)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
The Payment model includes comprehensive tests in `tests/payment.rs`. When working with payments:
|
||||||
|
|
||||||
|
1. **Always test status transitions**
|
||||||
|
2. **Verify timestamp handling**
|
||||||
|
3. **Test database persistence**
|
||||||
|
4. **Test integration with Company model**
|
||||||
|
5. **Test builder pattern methods**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run payment tests
|
||||||
|
cargo test payment
|
||||||
|
|
||||||
|
# Run specific test
|
||||||
|
cargo test test_payment_completion
|
||||||
|
```
|
||||||
|
|
||||||
|
## Common Patterns
|
||||||
|
|
||||||
|
### 1. Payment Retry Logic
|
||||||
|
```rust
|
||||||
|
fn retry_failed_payment(payment: Payment) -> Payment {
|
||||||
|
if payment.has_failed() {
|
||||||
|
// Reset to pending for retry
|
||||||
|
Payment::new(
|
||||||
|
payment.payment_intent_id,
|
||||||
|
payment.company_id,
|
||||||
|
payment.payment_plan,
|
||||||
|
payment.setup_fee,
|
||||||
|
payment.monthly_fee,
|
||||||
|
payment.total_amount,
|
||||||
|
)
|
||||||
|
.currency(payment.currency)
|
||||||
|
} else {
|
||||||
|
payment
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Payment Summary
|
||||||
|
```rust
|
||||||
|
fn get_payment_summary(payment: &Payment) -> String {
|
||||||
|
format!(
|
||||||
|
"Payment {} for company {}: {} {} ({})",
|
||||||
|
payment.payment_intent_id,
|
||||||
|
payment.company_id,
|
||||||
|
payment.total_amount,
|
||||||
|
payment.currency.to_uppercase(),
|
||||||
|
payment.status
|
||||||
|
)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Payment Validation
|
||||||
|
```rust
|
||||||
|
fn validate_payment(payment: &Payment) -> Result<(), String> {
|
||||||
|
if payment.total_amount <= 0.0 {
|
||||||
|
return Err("Total amount must be positive".to_string());
|
||||||
|
}
|
||||||
|
if payment.payment_intent_id.is_empty() {
|
||||||
|
return Err("Payment intent ID is required".to_string());
|
||||||
|
}
|
||||||
|
if payment.company_id == 0 {
|
||||||
|
return Err("Valid company ID is required".to_string());
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Points for AI Assistants
|
||||||
|
|
||||||
|
1. **Always use auto-generated IDs** - Don't manually set IDs, let OurDB handle them
|
||||||
|
2. **Follow status flow** - Pending → Processing → Completed/Failed → (optionally) Refunded
|
||||||
|
3. **Update timestamps** - `completed_at` is automatically set when calling `complete_payment()`
|
||||||
|
4. **Use builder pattern** - For optional fields and cleaner code
|
||||||
|
5. **Test thoroughly** - Payment logic is critical, always verify with tests
|
||||||
|
6. **Handle errors gracefully** - Payment failures should be tracked, not ignored
|
||||||
|
7. **Integrate with Company** - Payments typically affect company status
|
||||||
|
8. **Use proper indexing** - Leverage indexed fields for efficient queries
|
||||||
|
|
||||||
|
This model follows the heromodels patterns and integrates seamlessly with the existing codebase architecture.
|
||||||
@@ -1,300 +0,0 @@
|
|||||||
# AI Prompt: Convert V Language Specs to Rust Hero Models
|
|
||||||
|
|
||||||
## Objective
|
|
||||||
Convert V language model specifications (`.v` files) to Rust hero models that integrate with the heromodels framework. The generated Rust models should follow the established patterns for base data embedding, indexing, fluent builder APIs, and Rhai scripting integration.
|
|
||||||
|
|
||||||
## V Language Input Structure Analysis
|
|
||||||
|
|
||||||
### V Spec Patterns to Recognize:
|
|
||||||
1. **Module Declaration**: `module circle` or `module group`
|
|
||||||
2. **Base Embedding**: `core.Base` - represents the base model data
|
|
||||||
3. **Index Fields**: Fields marked with `@[index]` comments
|
|
||||||
4. **Mutability**: Fields declared with `pub mut:`
|
|
||||||
5. **Enums**: `pub enum Status { active, inactive, suspended }`
|
|
||||||
6. **Nested Structs**: Embedded configuration or related data structures
|
|
||||||
7. **Collections**: `[]u32`, `[]string`, `map[string]string`
|
|
||||||
8. **References**: `u32` fields typically represent foreign key references
|
|
||||||
|
|
||||||
### Example V Spec Structure:
|
|
||||||
```v
|
|
||||||
module circle
|
|
||||||
|
|
||||||
import freeflowuniverse.herolib.hero.models.core
|
|
||||||
|
|
||||||
pub struct User {
|
|
||||||
core.Base
|
|
||||||
pub mut:
|
|
||||||
username string @[index] // Unique username
|
|
||||||
email []string @[index] // Multiple email addresses
|
|
||||||
status UserStatus // Enum reference
|
|
||||||
profile UserProfile // Nested struct
|
|
||||||
metadata map[string]string // Key-value pairs
|
|
||||||
}
|
|
||||||
|
|
||||||
pub enum UserStatus {
|
|
||||||
active
|
|
||||||
inactive
|
|
||||||
suspended
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct UserProfile {
|
|
||||||
pub mut:
|
|
||||||
full_name string
|
|
||||||
bio string
|
|
||||||
links map[string]string
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Rust Hero Model Conversion Rules
|
|
||||||
|
|
||||||
### 1. File Structure and Imports
|
|
||||||
```rust
|
|
||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use rhai::CustomType;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Base Data Embedding
|
|
||||||
- **V**: `core.Base`
|
|
||||||
- **Rust**: `pub base_data: BaseModelData,`
|
|
||||||
|
|
||||||
### 3. Index Field Conversion
|
|
||||||
- **V**: `field_name string @[index]`
|
|
||||||
- **Rust**: `#[index] pub field_name: String,`
|
|
||||||
|
|
||||||
### 4. Type Mappings
|
|
||||||
| V Type | Rust Type |
|
|
||||||
|--------|-----------|
|
|
||||||
| `string` | `String` |
|
|
||||||
| `[]string` | `Vec<String>` |
|
|
||||||
| `[]u32` | `Vec<u32>` |
|
|
||||||
| `u32` | `u32` |
|
|
||||||
| `u64` | `u64` |
|
|
||||||
| `f64` | `f64` |
|
|
||||||
| `bool` | `bool` |
|
|
||||||
| `map[string]string` | `std::collections::HashMap<String, String>` |
|
|
||||||
|
|
||||||
### 5. Struct Declaration Pattern
|
|
||||||
```rust
|
|
||||||
/// Documentation comment describing the model
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType, Default, RhaiApi)]
|
|
||||||
pub struct ModelName {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
#[index]
|
|
||||||
pub indexed_field: String,
|
|
||||||
pub regular_field: String,
|
|
||||||
pub optional_field: Option<String>,
|
|
||||||
pub nested_struct: NestedType,
|
|
||||||
pub collection: Vec<u32>,
|
|
||||||
pub metadata: std::collections::HashMap<String, String>,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 6. Enum Conversion
|
|
||||||
```rust
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub enum UserStatus {
|
|
||||||
Active,
|
|
||||||
Inactive,
|
|
||||||
Suspended,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 7. Fluent Builder Implementation
|
|
||||||
Every model must implement a fluent builder pattern:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
impl ModelName {
|
|
||||||
/// Create a new instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
Self {
|
|
||||||
base_data: BaseModelData::new(id),
|
|
||||||
indexed_field: String::new(),
|
|
||||||
regular_field: String::new(),
|
|
||||||
optional_field: None,
|
|
||||||
nested_struct: NestedType::new(),
|
|
||||||
collection: Vec::new(),
|
|
||||||
metadata: std::collections::HashMap::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set indexed field (fluent)
|
|
||||||
pub fn indexed_field(mut self, value: impl ToString) -> Self {
|
|
||||||
self.indexed_field = value.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set regular field (fluent)
|
|
||||||
pub fn regular_field(mut self, value: impl ToString) -> Self {
|
|
||||||
self.regular_field = value.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set optional field (fluent)
|
|
||||||
pub fn optional_field(mut self, value: impl ToString) -> Self {
|
|
||||||
self.optional_field = Some(value.to_string());
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set nested struct (fluent)
|
|
||||||
pub fn nested_struct(mut self, value: NestedType) -> Self {
|
|
||||||
self.nested_struct = value;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add to collection (fluent)
|
|
||||||
pub fn add_to_collection(mut self, value: u32) -> Self {
|
|
||||||
self.collection.push(value);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set entire collection (fluent)
|
|
||||||
pub fn collection(mut self, value: Vec<u32>) -> Self {
|
|
||||||
self.collection = value;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add metadata entry (fluent)
|
|
||||||
pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self {
|
|
||||||
self.metadata.insert(key.to_string(), value.to_string());
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 8. Model Trait Implementation
|
|
||||||
```rust
|
|
||||||
impl Model for ModelName {
|
|
||||||
fn db_prefix() -> &'static str {
|
|
||||||
"modelname"
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_id(&self) -> u32 {
|
|
||||||
self.base_data.id
|
|
||||||
}
|
|
||||||
|
|
||||||
fn base_data_mut(&mut self) -> &mut BaseModelData {
|
|
||||||
&mut self.base_data
|
|
||||||
}
|
|
||||||
|
|
||||||
fn db_keys(&self) -> Vec<IndexKey> {
|
|
||||||
let mut keys = Vec::new();
|
|
||||||
|
|
||||||
// Add index keys for fields marked with #[index]
|
|
||||||
keys.push(IndexKey::new("indexed_field", &self.indexed_field));
|
|
||||||
|
|
||||||
// Add additional index keys as needed
|
|
||||||
keys
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 9. Nested Struct Builder Pattern
|
|
||||||
For embedded types, implement similar builder patterns:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub struct NestedType {
|
|
||||||
pub field1: String,
|
|
||||||
pub field2: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NestedType {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
field1: String::new(),
|
|
||||||
field2: String::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn field1(mut self, value: impl ToString) -> Self {
|
|
||||||
self.field1 = value.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn field2(mut self, value: impl ToString) -> Self {
|
|
||||||
self.field2 = value.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Conversion Steps
|
|
||||||
|
|
||||||
1. **Analyze V Spec Structure**
|
|
||||||
- Identify the module name and main structs
|
|
||||||
- Note which fields are marked with `@[index]`
|
|
||||||
- Identify nested structs and enums
|
|
||||||
- Map field types from V to Rust
|
|
||||||
|
|
||||||
2. **Create Rust File Structure**
|
|
||||||
- Add appropriate imports
|
|
||||||
- Convert enums first (they're often referenced by structs)
|
|
||||||
- Convert nested structs before main structs
|
|
||||||
|
|
||||||
3. **Implement Main Struct**
|
|
||||||
- Add `#[model]` macro and derives
|
|
||||||
- Embed `BaseModelData` as `base_data`
|
|
||||||
- Mark indexed fields with `#[index]`
|
|
||||||
- Convert field types according to mapping table
|
|
||||||
|
|
||||||
4. **Implement Builder Pattern**
|
|
||||||
- Add `new(id: u32)` constructor
|
|
||||||
- Add fluent setter methods for each field
|
|
||||||
- Handle optional fields appropriately
|
|
||||||
- Add collection manipulation methods
|
|
||||||
|
|
||||||
5. **Implement Model Trait**
|
|
||||||
- Define appropriate `db_prefix`
|
|
||||||
- Implement required trait methods
|
|
||||||
- Add index keys for searchable fields
|
|
||||||
|
|
||||||
6. **Add Documentation**
|
|
||||||
- Document the struct and its purpose
|
|
||||||
- Document each field's meaning
|
|
||||||
- Add usage examples in comments
|
|
||||||
|
|
||||||
## Example Usage After Conversion
|
|
||||||
|
|
||||||
```rust
|
|
||||||
let user = User::new(1)
|
|
||||||
.username("john_doe")
|
|
||||||
.add_email("john@example.com")
|
|
||||||
.add_email("john.doe@company.com")
|
|
||||||
.status(UserStatus::Active)
|
|
||||||
.profile(
|
|
||||||
UserProfile::new()
|
|
||||||
.full_name("John Doe")
|
|
||||||
.bio("Software developer")
|
|
||||||
.build()
|
|
||||||
)
|
|
||||||
.add_metadata("department", "engineering")
|
|
||||||
.build();
|
|
||||||
```
|
|
||||||
|
|
||||||
## Notes and Best Practices
|
|
||||||
|
|
||||||
1. **Field Naming**: Convert V snake_case to Rust snake_case (usually no change needed)
|
|
||||||
2. **Optional Fields**: Use `Option<T>` for fields that may be empty in V
|
|
||||||
3. **Collections**: Always provide both `add_item` and `set_collection` methods
|
|
||||||
4. **Error Handling**: Builder methods should not panic; use appropriate defaults
|
|
||||||
5. **Documentation**: Include comprehensive documentation for public APIs
|
|
||||||
6. **Testing**: Consider adding unit tests for builder patterns
|
|
||||||
7. **Validation**: Add validation logic in builder methods if needed
|
|
||||||
|
|
||||||
## File Organization
|
|
||||||
|
|
||||||
Place the converted Rust models in the appropriate subdirectory under `heromodels/src/models/` based on the domain (e.g., `user/`, `finance/`, `governance/`, etc.).
|
|
||||||
@@ -1,53 +0,0 @@
|
|||||||
// heroledger.rhai - Demonstration of HeroLedger models in Rhai
// Builds one record of each model with the fluent builder API registered by
// the host, persists it via the save_* call, then echoes the stored fields.

print("=== HeroLedger Models Demo ===");

// --- User ---
print("\n--- Creating User ---");
let user = new_user()
    .name("Alice Johnson")
    .email("alice@herocode.com")
    .pubkey("0x1234567890abcdef")
    .status("Active")
    .save_user();

print("Created user: " + user.get_name());
print("User ID: " + user.get_id());
print("User email: " + user.get_email());
print("User pubkey: " + user.get_pubkey());

// --- Group ---
print("\n--- Creating Group ---");
let group = new_group()
    .name("HeroCode Developers")
    .description("A group for HeroCode development team members")
    .visibility("Public")
    .save_group();

print("Created group: " + group.get_name());
print("Group ID: " + group.get_id());
print("Group description: " + group.get_description());

// --- Account ---
print("\n--- Creating Account ---");
let account = new_account()
    .name("Alice's Main Account")
    .description("Primary account for Alice Johnson")
    .currency("USD")
    .save_account();

print("Created account: " + account.get_name());
print("Account ID: " + account.get_id());
print("Account currency: " + account.get_currency());

// --- DNS Zone ---
print("\n--- Creating DNS Zone ---");
let zone = new_dns_zone()
    .name("herocode.com")
    .description("Main domain for HeroCode")
    .save_dns_zone();

print("Created DNS zone: " + zone.get_name());
print("DNS zone ID: " + zone.get_id());

print("\n=== Demo Complete ===");
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
use heromodels_core::db::hero::OurDB;
use rhai::{Dynamic, Engine};
use heromodels::models::heroledger::rhai::register_heroledger_rhai_modules;
use std::sync::Arc;
use std::{fs, path::Path};

// Caller identity handed to the registered Rhai functions through the
// engine's default tag (see `main`).
const CALLER_ID: &str = "example_caller";
|
|
||||||
|
|
||||||
/// Example entry point: boots a Rhai engine, registers the HeroLedger model
/// modules, runs `examples/heroledger/heroledger.rhai` against a throwaway
/// OurDB directory, and removes that directory afterwards.
///
/// Returns any I/O error from the pre-run cleanup, reading the script, or the
/// final cleanup; script evaluation errors are only printed, not propagated.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Initialize Rhai engine
    let mut engine = Engine::new();

    // Initialize database with OurDB
    let db_path = "temp_heroledger_db";
    // Clean up previous database file if it exists
    if Path::new(db_path).exists() {
        fs::remove_dir_all(db_path)?;
    }
    // Held for the duration of `main`; the registered Rhai functions
    // presumably open the DB themselves via DB_PATH below — TODO confirm.
    let _db = Arc::new(OurDB::new(db_path, true).expect("Failed to create database"));

    // Register the heroledger modules with Rhai
    register_heroledger_rhai_modules(&mut engine);

    // The default tag rides along with every evaluation; the registered
    // functions read DB_PATH / CALLER_ID / CONTEXT_ID from it.
    let mut db_config = rhai::Map::new();
    db_config.insert("DB_PATH".into(), db_path.into());
    db_config.insert("CALLER_ID".into(), CALLER_ID.into());
    db_config.insert("CONTEXT_ID".into(), CALLER_ID.into());
    engine.set_default_tag(Dynamic::from(db_config)); // Or pass via CallFnOptions

    // Load and evaluate the Rhai script
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let script_path = Path::new(manifest_dir)
        .join("examples")
        .join("heroledger")
        .join("heroledger.rhai");
    println!("Script path: {}", script_path.display());
    let script = fs::read_to_string(&script_path)?;

    println!("--- Running HeroLedger Rhai Script ---");
    // Evaluation failures are reported but do not abort the cleanup below.
    match engine.eval::<()>(&script) {
        Ok(_) => println!("\n--- Script executed successfully! ---"),
        Err(e) => eprintln!("\n--- Script execution failed: {} ---", e),
    }

    // Clean up the database file
    fs::remove_dir_all(db_path)?;
    println!("--- Cleaned up temporary database. ---");

    Ok(())
}
|
|
||||||
@@ -73,7 +73,7 @@ fn main() {
|
|||||||
|
|
||||||
// The `#[model]` derive handles `created_at` and `updated_at` in `base_data`.
|
// The `#[model]` derive handles `created_at` and `updated_at` in `base_data`.
|
||||||
// `base_data.touch()` might be called internally by setters or needs explicit call if fields are set directly.
|
// `base_data.touch()` might be called internally by setters or needs explicit call if fields are set directly.
|
||||||
// For builder pattern, the final state of `base_data.modified_at` reflects the time of the last builder call if `touch()` is implicit.
|
// For builder pattern, the final state of `base_data.updated_at` reflects the time of the last builder call if `touch()` is implicit.
|
||||||
// If not, one might call `contract.base_data.touch()` after building.
|
// If not, one might call `contract.base_data.touch()` after building.
|
||||||
|
|
||||||
println!("\n--- Initial Contract Details ---");
|
println!("\n--- Initial Contract Details ---");
|
||||||
|
|||||||
@@ -1,148 +0,0 @@
|
|||||||
// Rhai bindings for the `Access` model: builder-style setters, getters, and
// macro-generated authorized CRUD registration.
use crate::db::Db;
use rhailib_macros::{
    register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
    register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use std::sync::Arc;

use heromodels::models::access::Access;
// Alias so the exported functions read naturally; identical to `Access`.
type RhaiAccess = Access;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
|
|
||||||
|
|
||||||
/// Rhai bindings for the `Access` model. Every setter follows a
/// take-build-replace pattern: move the value out of the `&mut` slot with
/// `mem::take`, apply the model's consuming builder method, write the result
/// back, and return a clone so scripts can chain calls fluently
/// (e.g. `new_access().object_id(1).expires_at(ts)`).
///
/// NOTE(review): `Access`, `EvalAltResult` and `macros::id_from_i64_to_u32`
/// are referenced unqualified inside this inner module while the file imports
/// from `rhailib_macros` — confirm these names resolve here (a `use super::*;`
/// may be intended).
#[export_module]
mod rhai_access_module {
    // --- Access Functions ---
    /// Creates an empty `Access` record for the builder chain.
    #[rhai_fn(name = "new_access", return_raw)]
    pub fn new_access() -> Result<RhaiAccess, Box<EvalAltResult>> {
        let access = Access::new();
        Ok(access)
    }

    /// Sets the access object_id
    /// (converts the Rhai integer to u32, erroring on out-of-range values).
    #[rhai_fn(name = "object_id", return_raw)]
    pub fn set_object_id(
        access: &mut RhaiAccess,
        object_id: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        let id = macros::id_from_i64_to_u32(object_id)?;
        let owned_access = std::mem::take(access);
        *access = owned_access.object_id(id);
        Ok(access.clone())
    }

    /// Sets the circle public key
    #[rhai_fn(name = "circle_public_key", return_raw)]
    pub fn set_circle_pk(
        access: &mut RhaiAccess,
        circle_pk: String,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        let owned_access = std::mem::take(access);
        *access = owned_access.circle_pk(circle_pk);
        Ok(access.clone())
    }

    /// Sets the group id
    #[rhai_fn(name = "group_id", return_raw)]
    pub fn set_group_id(
        access: &mut RhaiAccess,
        group_id: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        let id = macros::id_from_i64_to_u32(group_id)?;
        let owned_access = std::mem::take(access);
        *access = owned_access.group_id(id);
        Ok(access.clone())
    }

    /// Sets the contact id
    #[rhai_fn(name = "contact_id", return_raw)]
    pub fn set_contact_id(
        access: &mut RhaiAccess,
        contact_id: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        let id = macros::id_from_i64_to_u32(contact_id)?;
        let owned_access = std::mem::take(access);
        *access = owned_access.contact_id(id);
        Ok(access.clone())
    }

    /// Sets the expiration time
    /// (passed through unchanged; presumably an epoch timestamp — confirm
    /// against the model definition).
    #[rhai_fn(name = "expires_at", return_raw)]
    pub fn set_expires_at(
        access: &mut RhaiAccess,
        expires_at: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        let owned_access = std::mem::take(access);
        *access = owned_access.expires_at(expires_at);
        Ok(access.clone())
    }

    // Access Getters
    // All ids are widened to i64 because Rhai's default integer type is i64.
    #[rhai_fn(name = "get_access_id")]
    pub fn get_access_id(access: &mut RhaiAccess) -> i64 {
        access.base.id as i64
    }

    #[rhai_fn(name = "get_access_object_id")]
    pub fn get_access_object_id(access: &mut RhaiAccess) -> i64 {
        access.object_id as i64
    }

    #[rhai_fn(name = "get_access_circle_pk")]
    pub fn get_access_circle_pk(access: &mut RhaiAccess) -> String {
        access.circle_pk.clone()
    }

    #[rhai_fn(name = "get_access_group_id")]
    pub fn get_access_group_id(access: &mut RhaiAccess) -> i64 {
        access.group_id as i64
    }

    #[rhai_fn(name = "get_access_contact_id")]
    pub fn get_access_contact_id(access: &mut RhaiAccess) -> i64 {
        access.contact_id as i64
    }

    #[rhai_fn(name = "get_access_expires_at")]
    pub fn get_access_expires_at(access: &mut RhaiAccess) -> i64 {
        access.expires_at
    }

    #[rhai_fn(name = "get_access_created_at")]
    pub fn get_access_created_at(access: &mut RhaiAccess) -> i64 {
        access.base.created_at
    }

    #[rhai_fn(name = "get_access_modified_at")]
    pub fn get_access_modified_at(access: &mut RhaiAccess) -> i64 {
        access.base.modified_at
    }
}
|
|
||||||
|
|
||||||
/// Registers the Access Rhai module on the engine, together with the
/// macro-generated authorized CRUD entry points `save_access`, `get_access`
/// and `delete_access` (all operating on `heromodels::models::access::Access`).
pub fn register_access_rhai_module(engine: &mut Engine) {
    let mut module = exported_module!(rhai_access_module);

    // The register_authorized_* macros add wrapper functions to `module`;
    // `resource_type_str` presumably names the resource for the
    // authorization check — confirm in the macro definitions.
    register_authorized_create_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "save_access",
        resource_type_str: "Access",
        rhai_return_rust_type: heromodels::models::access::Access
    );
    register_authorized_get_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "get_access",
        resource_type_str: "Access",
        rhai_return_rust_type: heromodels::models::access::Access
    );
    register_authorized_delete_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "delete_access",
        resource_type_str: "Access",
        rhai_return_rust_type: heromodels::models::access::Access
    );

    engine.register_global_module(module.into());
}
|
|
||||||
@@ -1,422 +0,0 @@
|
|||||||
// Rhai bindings for the biz models (Product, Company, Sale, Shareholder):
// builder-style setters, getters, and authorized CRUD registration.
use heromodels::db::Db;
use macros::{
    register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
    register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Engine, EvalAltResult, Module, Position, FLOAT, INT};
use std::mem;
use std::sync::Arc;

use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
use heromodels::models::biz::product::{Product, ProductComponent, ProductStatus, ProductType};
use heromodels::models::biz::company::{BusinessType, Company, CompanyStatus};
use heromodels::models::biz::sale::{Sale, SaleItem, SaleStatus};
use heromodels::models::biz::shareholder::{Shareholder, ShareholderType};

// Aliases used by the exported modules below; each is identical to the
// underlying heromodels type.
type RhaiProduct = Product;
type RhaiProductComponent = ProductComponent;
type RhaiCompany = Company;
type RhaiSale = Sale;
type RhaiSaleItem = SaleItem;
// NOTE(review): RhaiShareholder is declared but no shareholder module is
// visible in this chunk — confirm it is used further down the file.
type RhaiShareholder = Shareholder;
|
|
||||||
|
|
||||||
/// Rhai bindings for `ProductComponent`: fluent setters using the
/// take-build-replace pattern (`mem::take` + consuming builder + write-back,
/// returning a clone so scripts can chain calls) and plain getters.
///
/// NOTE(review): `ProductComponent` and `EvalAltResult` are referenced
/// unqualified but are not in this module's `use super::{...}` list —
/// confirm they resolve here (a `use super::*;` may be intended).
#[export_module]
mod rhai_product_component_module {
    use super::{RhaiProductComponent, INT};

    /// Creates an empty component for the builder chain.
    #[rhai_fn(name = "new_product_component", return_raw)]
    pub fn new_product_component() -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        Ok(ProductComponent::new())
    }

    /// Sets the component name (fluent).
    #[rhai_fn(name = "name", return_raw)]
    pub fn set_name(
        component: &mut RhaiProductComponent,
        name: String,
    ) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        let owned = std::mem::take(component);
        *component = owned.name(name);
        Ok(component.clone())
    }

    /// Sets the component description (fluent).
    #[rhai_fn(name = "description", return_raw)]
    pub fn set_description(
        component: &mut RhaiProductComponent,
        description: String,
    ) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        let owned = std::mem::take(component);
        *component = owned.description(description);
        Ok(component.clone())
    }

    /// Sets the quantity (fluent); the script integer is truncated to u32.
    #[rhai_fn(name = "quantity", return_raw)]
    pub fn set_quantity(
        component: &mut RhaiProductComponent,
        quantity: INT,
    ) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        let owned = std::mem::take(component);
        *component = owned.quantity(quantity as u32);
        Ok(component.clone())
    }

    // --- Getters ---
    #[rhai_fn(name = "get_name")]
    pub fn get_name(c: &mut RhaiProductComponent) -> String {
        c.name.clone()
    }
    #[rhai_fn(name = "get_description")]
    pub fn get_description(c: &mut RhaiProductComponent) -> String {
        c.description.clone()
    }
    #[rhai_fn(name = "get_quantity")]
    pub fn get_quantity(c: &mut RhaiProductComponent) -> INT {
        c.quantity as INT
    }
}
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_product_module {
|
|
||||||
use super::{Array, ProductStatus, ProductType, RhaiProduct, RhaiProductComponent, FLOAT, INT};
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_product", return_raw)]
|
|
||||||
pub fn new_product() -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
Ok(Product::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Setters ---
|
|
||||||
#[rhai_fn(name = "name", return_raw)]
|
|
||||||
pub fn set_name(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
name: String,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.name(name);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "description", return_raw)]
|
|
||||||
pub fn set_description(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
description: String,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.description(description);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "price", return_raw)]
|
|
||||||
pub fn set_price(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
price: FLOAT,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.price(price);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "category", return_raw)]
|
|
||||||
pub fn set_category(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
category: String,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.category(category);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "max_amount", return_raw)]
|
|
||||||
pub fn set_max_amount(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
max_amount: INT,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.max_amount(max_amount as u32);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "purchase_till", return_raw)]
|
|
||||||
pub fn set_purchase_till(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
purchase_till: INT,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.purchase_till(purchase_till);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "active_till", return_raw)]
|
|
||||||
pub fn set_active_till(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
active_till: INT,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.active_till(active_till);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "type", return_raw)]
|
|
||||||
pub fn set_type(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
type_str: String,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let product_type = match type_str.to_lowercase().as_str() {
|
|
||||||
"physical" => ProductType::Physical,
|
|
||||||
"digital" => ProductType::Digital,
|
|
||||||
"service" => ProductType::Service,
|
|
||||||
"subscription" => ProductType::Subscription,
|
|
||||||
_ => {
|
|
||||||
return Err(EvalAltResult::ErrorSystem(
|
|
||||||
"Invalid ProductType".to_string(),
|
|
||||||
"Must be one of: Physical, Digital, Service, Subscription".into(),
|
|
||||||
)
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.product_type(product_type);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "status", return_raw)]
|
|
||||||
pub fn set_status(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
status_str: String,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let status = match status_str.to_lowercase().as_str() {
|
|
||||||
"active" => ProductStatus::Active,
|
|
||||||
"inactive" => ProductStatus::Inactive,
|
|
||||||
"discontinued" => ProductStatus::Discontinued,
|
|
||||||
_ => {
|
|
||||||
return Err(EvalAltResult::ErrorSystem(
|
|
||||||
"Invalid ProductStatus".to_string(),
|
|
||||||
"Must be one of: Active, Inactive, Discontinued".into(),
|
|
||||||
)
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.status(status);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "add_component", return_raw)]
|
|
||||||
pub fn add_component(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
component: RhaiProductComponent,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.add_component(component);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "set_components", return_raw)]
|
|
||||||
pub fn set_components(
|
|
||||||
product: &mut RhaiProduct,
|
|
||||||
components: Array,
|
|
||||||
) -> Result<RhaiProduct, Box<EvalAltResult>> {
|
|
||||||
let mut product_components = Vec::new();
|
|
||||||
for component_dynamic in components {
|
|
||||||
if let Ok(component) = component_dynamic.try_cast::<RhaiProductComponent>() {
|
|
||||||
product_components.push(component);
|
|
||||||
} else {
|
|
||||||
return Err(EvalAltResult::ErrorSystem(
|
|
||||||
"Invalid component type".to_string(),
|
|
||||||
"All components must be ProductComponent objects".into(),
|
|
||||||
)
|
|
||||||
.into());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let owned = std::mem::take(product);
|
|
||||||
*product = owned.components(product_components);
|
|
||||||
Ok(product.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Getters ---
|
|
||||||
#[rhai_fn(name = "get_id")]
|
|
||||||
pub fn get_id(p: &mut RhaiProduct) -> i64 {
|
|
||||||
p.base.id as i64
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_name")]
|
|
||||||
pub fn get_name(p: &mut RhaiProduct) -> String {
|
|
||||||
p.name.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_description")]
|
|
||||||
pub fn get_description(p: &mut RhaiProduct) -> String {
|
|
||||||
p.description.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_price")]
|
|
||||||
pub fn get_price(p: &mut RhaiProduct) -> FLOAT {
|
|
||||||
p.price
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_category")]
|
|
||||||
pub fn get_category(p: &mut RhaiProduct) -> String {
|
|
||||||
p.category.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_max_amount")]
|
|
||||||
pub fn get_max_amount(p: &mut RhaiProduct) -> INT {
|
|
||||||
p.max_amount as INT
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_purchase_till")]
|
|
||||||
pub fn get_purchase_till(p: &mut RhaiProduct) -> INT {
|
|
||||||
p.purchase_till
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_active_till")]
|
|
||||||
pub fn get_active_till(p: &mut RhaiProduct) -> INT {
|
|
||||||
p.active_till
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_type")]
|
|
||||||
pub fn get_type(p: &mut RhaiProduct) -> String {
|
|
||||||
format!("{:?}", p.product_type)
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_status")]
|
|
||||||
pub fn get_status(p: &mut RhaiProduct) -> String {
|
|
||||||
format!("{:?}", p.status)
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_components")]
|
|
||||||
pub fn get_components(p: &mut RhaiProduct) -> Array {
|
|
||||||
p.components
|
|
||||||
.iter()
|
|
||||||
.map(|c| rhai::Dynamic::from(c.clone()))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_product_rhai_module(engine: &mut Engine) {
|
|
||||||
let mut product_module = exported_module!(rhai_product_module);
|
|
||||||
let mut component_module = exported_module!(rhai_product_component_module);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
product_module: &mut product_module,
|
|
||||||
rhai_fn_name: "save_product",
|
|
||||||
resource_type_str: "Product",
|
|
||||||
rhai_return_rust_type: heromodels::models::biz::product::Product
|
|
||||||
);
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
product_module: &mut product_module,
|
|
||||||
rhai_fn_name: "get_product",
|
|
||||||
resource_type_str: "Product",
|
|
||||||
rhai_return_rust_type: heromodels::models::biz::product::Product
|
|
||||||
);
|
|
||||||
register_authorized_delete_by_id_fn!(
|
|
||||||
product_module: &mut product_module,
|
|
||||||
rhai_fn_name: "delete_product",
|
|
||||||
resource_type_str: "Product",
|
|
||||||
rhai_return_rust_type: heromodels::models::biz::product::Product
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(product_module.into());
|
|
||||||
engine.register_global_module(component_module.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Company Rhai wrapper functions
|
|
||||||
// Company Rhai wrapper functions
/// Rhai bindings for the `Company` model: a constructor, one fluent setter
/// (take-build-replace via `mem::take`), and id/name getters.
///
/// NOTE(review): `Company` and `EvalAltResult` are referenced unqualified but
/// are not in this module's `use super::{...}` list, and `BusinessType` /
/// `CompanyStatus` are imported but unused in the visible code — confirm.
#[export_module]
mod rhai_company_module {
    use super::{BusinessType, CompanyStatus, RhaiCompany};

    /// Creates an empty `Company` for the builder chain.
    #[rhai_fn(name = "new_company", return_raw)]
    pub fn new_company() -> Result<RhaiCompany, Box<EvalAltResult>> {
        Ok(Company::new())
    }

    /// Sets the company name (fluent).
    #[rhai_fn(name = "name", return_raw)]
    pub fn set_name(
        company: &mut RhaiCompany,
        name: String,
    ) -> Result<RhaiCompany, Box<EvalAltResult>> {
        let owned = std::mem::take(company);
        *company = owned.name(name);
        Ok(company.clone())
    }

    /// Returns the company's database id (widened to Rhai's i64).
    #[rhai_fn(name = "get_company_id")]
    pub fn get_company_id(company: &mut RhaiCompany) -> i64 {
        company.id() as i64
    }

    /// Returns the company name.
    #[rhai_fn(name = "get_company_name")]
    pub fn get_company_name(company: &mut RhaiCompany) -> String {
        company.name().clone()
    }
}
|
|
||||||
|
|
||||||
/// Registers the Company Rhai module plus the authorized `save_company` and
/// `get_company` wrappers on the given engine.
///
/// NOTE(review): unlike the Access/Product registrations, no delete wrapper
/// is registered here — confirm this is intentional.
pub fn register_company_rhai_module(engine: &mut Engine) {
    let mut module = exported_module!(rhai_company_module);

    register_authorized_create_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "save_company",
        resource_type_str: "Company",
        rhai_return_rust_type: heromodels::models::biz::company::Company
    );

    register_authorized_get_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "get_company",
        resource_type_str: "Company",
        rhai_return_rust_type: heromodels::models::biz::company::Company
    );

    engine.register_global_module(module.into());
}
|
|
||||||
|
|
||||||
// Sale Rhai wrapper functions
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_sale_module {
|
|
||||||
use super::{RhaiSale, RhaiSaleItem, SaleStatus};
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_sale", return_raw)]
|
|
||||||
pub fn new_sale() -> Result<RhaiSale, Box<EvalAltResult>> {
|
|
||||||
Ok(Sale::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_sale_item", return_raw)]
|
|
||||||
pub fn new_sale_item() -> Result<RhaiSaleItem, Box<EvalAltResult>> {
|
|
||||||
Ok(SaleItem::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "company_id", return_raw)]
|
|
||||||
pub fn set_sale_company_id(sale: &mut RhaiSale, company_id: i64) -> Result<RhaiSale, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(sale);
|
|
||||||
*sale = owned.company_id(company_id as u32);
|
|
||||||
Ok(sale.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "total_amount", return_raw)]
|
|
||||||
pub fn set_sale_total_amount(sale: &mut RhaiSale, total_amount: f64) -> Result<RhaiSale, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(sale);
|
|
||||||
*sale = owned.total_amount(total_amount);
|
|
||||||
Ok(sale.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_sale_id")]
|
|
||||||
pub fn get_sale_id(sale: &mut RhaiSale) -> i64 {
|
|
||||||
sale.id() as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_sale_total_amount")]
|
|
||||||
pub fn get_sale_total_amount(sale: &mut RhaiSale) -> f64 {
|
|
||||||
sale.total_amount()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_sale_rhai_module(engine: &mut Engine) {
|
|
||||||
let mut module = exported_module!(rhai_sale_module);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_sale",
|
|
||||||
resource_type_str: "Sale",
|
|
||||||
rhai_return_rust_type: heromodels::models::biz::sale::Sale
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_sale",
|
|
||||||
resource_type_str: "Sale",
|
|
||||||
rhai_return_rust_type: heromodels::models::biz::sale::Sale
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
}
|
|
||||||
@@ -1,246 +0,0 @@
|
|||||||
use crate::db::Db;
|
|
||||||
use rhailib_macros::{
|
|
||||||
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
|
|
||||||
register_authorized_get_by_id_fn,
|
|
||||||
};
|
|
||||||
use rhai::plugin::*;
|
|
||||||
use rhai::{Array, Dynamic, Engine, EvalAltResult, Module};
|
|
||||||
use std::mem;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use crate::models::calendar::{AttendanceStatus, Attendee, Calendar, Event};
|
|
||||||
type RhaiCalendar = Calendar;
|
|
||||||
type RhaiEvent = Event;
|
|
||||||
type RhaiAttendee = Attendee;
|
|
||||||
use crate::db::hero::OurDB;
|
|
||||||
use crate::db::Collection;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_calendar_module {
|
|
||||||
use super::{AttendanceStatus, RhaiAttendee, RhaiCalendar, RhaiEvent};
|
|
||||||
|
|
||||||
// --- Attendee Builder ---
|
|
||||||
#[rhai_fn(name = "new_attendee", return_raw)]
|
|
||||||
pub fn new_attendee(contact_id: i64) -> Result<RhaiAttendee, Box<EvalAltResult>> {
|
|
||||||
Ok(Attendee::new(contact_id as u32))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "status", return_raw)]
|
|
||||||
pub fn set_attendee_status(
|
|
||||||
attendee: &mut RhaiAttendee,
|
|
||||||
status_str: String,
|
|
||||||
) -> Result<RhaiAttendee, Box<EvalAltResult>> {
|
|
||||||
let status = match status_str.to_lowercase().as_str() {
|
|
||||||
"accepted" => AttendanceStatus::Accepted,
|
|
||||||
"declined" => AttendanceStatus::Declined,
|
|
||||||
"tentative" => AttendanceStatus::Tentative,
|
|
||||||
"noresponse" => AttendanceStatus::NoResponse,
|
|
||||||
_ => {
|
|
||||||
return Err(EvalAltResult::ErrorSystem(
|
|
||||||
"Invalid Status".to_string(),
|
|
||||||
"Must be one of: Accepted, Declined, Tentative, NoResponse".into(),
|
|
||||||
)
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let owned = std::mem::take(attendee);
|
|
||||||
*attendee = owned.status(status);
|
|
||||||
Ok(attendee.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Event Builder ---
|
|
||||||
#[rhai_fn(name = "new_event", return_raw)]
|
|
||||||
pub fn new_event() -> Result<RhaiEvent, Box<EvalAltResult>> {
|
|
||||||
Ok(Event::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "title", return_raw)]
|
|
||||||
pub fn set_event_title(
|
|
||||||
event: &mut RhaiEvent,
|
|
||||||
title: String,
|
|
||||||
) -> Result<RhaiEvent, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(event);
|
|
||||||
*event = owned.title(title);
|
|
||||||
Ok(event.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "description", return_raw)]
|
|
||||||
pub fn set_event_description(
|
|
||||||
event: &mut RhaiEvent,
|
|
||||||
description: String,
|
|
||||||
) -> Result<RhaiEvent, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(event);
|
|
||||||
*event = owned.description(description);
|
|
||||||
Ok(event.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "location", return_raw)]
|
|
||||||
pub fn set_event_location(
|
|
||||||
event: &mut RhaiEvent,
|
|
||||||
location: String,
|
|
||||||
) -> Result<RhaiEvent, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(event);
|
|
||||||
*event = owned.location(location);
|
|
||||||
Ok(event.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "add_attendee", return_raw)]
|
|
||||||
pub fn add_event_attendee(
|
|
||||||
event: &mut RhaiEvent,
|
|
||||||
attendee: RhaiAttendee,
|
|
||||||
) -> Result<RhaiEvent, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(event);
|
|
||||||
*event = owned.add_attendee(attendee);
|
|
||||||
Ok(event.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "reschedule", return_raw)]
|
|
||||||
pub fn reschedule_event(
|
|
||||||
event: &mut RhaiEvent,
|
|
||||||
start_time: i64,
|
|
||||||
end_time: i64,
|
|
||||||
) -> Result<RhaiEvent, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(event);
|
|
||||||
*event = owned.reschedule(start_time, end_time);
|
|
||||||
Ok(event.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Calendar Builder ---
|
|
||||||
#[rhai_fn(name = "new_calendar", return_raw)]
|
|
||||||
pub fn new_calendar(name: String) -> Result<RhaiCalendar, Box<EvalAltResult>> {
|
|
||||||
Ok(Calendar::new().name(name))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "calendar_name", return_raw)]
|
|
||||||
pub fn set_calendar_name(
|
|
||||||
calendar: &mut RhaiCalendar,
|
|
||||||
name: String,
|
|
||||||
) -> Result<RhaiCalendar, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(calendar);
|
|
||||||
*calendar = owned.name(name);
|
|
||||||
Ok(calendar.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "calendar_description", return_raw)]
|
|
||||||
pub fn set_calendar_description(
|
|
||||||
calendar: &mut RhaiCalendar,
|
|
||||||
description: String,
|
|
||||||
) -> Result<RhaiCalendar, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(calendar);
|
|
||||||
*calendar = owned.description(description);
|
|
||||||
Ok(calendar.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "add_event", return_raw)]
|
|
||||||
pub fn add_calendar_event(
|
|
||||||
calendar: &mut RhaiCalendar,
|
|
||||||
event_id: i64,
|
|
||||||
) -> Result<RhaiCalendar, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(calendar);
|
|
||||||
*calendar = owned.add_event(event_id as u32);
|
|
||||||
Ok(calendar.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Getters ---
|
|
||||||
// Calendar
|
|
||||||
#[rhai_fn(name = "get_calendar_id")]
|
|
||||||
pub fn get_calendar_id(c: &mut RhaiCalendar) -> i64 {
|
|
||||||
c.base.id as i64
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_calendar_name")]
|
|
||||||
pub fn get_calendar_name(c: &mut RhaiCalendar) -> String {
|
|
||||||
c.name.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_calendar_description")]
|
|
||||||
pub fn get_calendar_description(c: &mut RhaiCalendar) -> Option<String> {
|
|
||||||
c.description.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_calendar_events")]
|
|
||||||
pub fn get_calendar_events(c: &mut RhaiCalendar) -> Array {
|
|
||||||
c.events.iter().map(|id| Dynamic::from(*id as i64)).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Event
|
|
||||||
#[rhai_fn(name = "get_event_id")]
|
|
||||||
pub fn get_event_id(e: &mut RhaiEvent) -> i64 {
|
|
||||||
e.base.id as i64
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_event_title")]
|
|
||||||
pub fn get_event_title(e: &mut RhaiEvent) -> String {
|
|
||||||
e.title.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_event_description")]
|
|
||||||
pub fn get_event_description(e: &mut RhaiEvent) -> Option<String> {
|
|
||||||
e.description.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_event_start_time")]
|
|
||||||
pub fn get_event_start_time(e: &mut RhaiEvent) -> i64 {
|
|
||||||
e.start_time
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_event_end_time")]
|
|
||||||
pub fn get_event_end_time(e: &mut RhaiEvent) -> i64 {
|
|
||||||
e.end_time
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_event_attendees")]
|
|
||||||
pub fn get_event_attendees(e: &mut RhaiEvent) -> Array {
|
|
||||||
e.attendees.iter().map(|a| Dynamic::from(a.clone())).collect()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_event_location")]
|
|
||||||
pub fn get_event_location(e: &mut RhaiEvent) -> Option<String> {
|
|
||||||
e.location.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attendee
|
|
||||||
#[rhai_fn(name = "get_attendee_contact_id")]
|
|
||||||
pub fn get_attendee_contact_id(a: &mut RhaiAttendee) -> i64 {
|
|
||||||
a.contact_id as i64
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_attendee_status")]
|
|
||||||
pub fn get_attendee_status(a: &mut RhaiAttendee) -> String {
|
|
||||||
format!("{:?}", a.status)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_calendar_rhai_module(engine: &mut Engine) {
|
|
||||||
let mut module = exported_module!(rhai_calendar_module);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_calendar",
|
|
||||||
resource_type_str: "Calendar",
|
|
||||||
rhai_return_rust_type: heromodels::models::calendar::Calendar
|
|
||||||
);
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_calendar",
|
|
||||||
resource_type_str: "Calendar",
|
|
||||||
rhai_return_rust_type: heromodels::models::calendar::Calendar
|
|
||||||
);
|
|
||||||
register_authorized_delete_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "delete_calendar",
|
|
||||||
resource_type_str: "Calendar",
|
|
||||||
rhai_return_rust_type: heromodels::models::calendar::Calendar
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_event",
|
|
||||||
resource_type_str: "Event",
|
|
||||||
rhai_return_rust_type: heromodels::models::calendar::Event
|
|
||||||
);
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_event",
|
|
||||||
resource_type_str: "Event",
|
|
||||||
rhai_return_rust_type: heromodels::models::calendar::Event
|
|
||||||
);
|
|
||||||
register_authorized_delete_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "delete_event",
|
|
||||||
resource_type_str: "Event",
|
|
||||||
rhai_return_rust_type: heromodels::models::calendar::Event
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
}
|
|
||||||
@@ -1,155 +1,412 @@
|
|||||||
use crate::db::Db;
|
use crate::db::Db;
|
||||||
use rhailib_macros::{
|
|
||||||
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn, register_authorized_get_by_id_fn,
|
|
||||||
};
|
|
||||||
use rhai::plugin::*;
|
use rhai::plugin::*;
|
||||||
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module};
|
use rhai::{Array, CustomType, Dynamic, Engine, EvalAltResult, INT, Module, Position};
|
||||||
use std::collections::HashMap;
|
use std::mem;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::models::circle::Circle;
|
use super::circle::{Circle, ThemeData};
|
||||||
type RhaiCircle = Circle;
|
type RhaiCircle = Circle;
|
||||||
use crate::db::hero::OurDB;
|
type RhaiThemeData = ThemeData;
|
||||||
|
|
||||||
use crate::db::Collection;
|
use crate::db::Collection;
|
||||||
use crate::models::circle::ThemeData;
|
use crate::db::hero::OurDB;
|
||||||
|
use serde::Serialize;
|
||||||
|
use serde_json;
|
||||||
|
|
||||||
|
/// Registers a `.json()` method for any type `T` that implements the required traits.
|
||||||
|
fn register_json_method<T>(engine: &mut Engine)
|
||||||
|
where
|
||||||
|
T: CustomType + Clone + Serialize,
|
||||||
|
{
|
||||||
|
let to_json_fn = |obj: &mut T| -> Result<String, Box<EvalAltResult>> {
|
||||||
|
serde_json::to_string(obj).map_err(|e| e.to_string().into())
|
||||||
|
};
|
||||||
|
engine.build_type::<T>().register_fn("json", to_json_fn);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper to convert i64 from Rhai to u32 for IDs
|
||||||
|
fn id_from_i64_to_u32(id_i64: i64) -> Result<u32, Box<EvalAltResult>> {
|
||||||
|
u32::try_from(id_i64).map_err(|_| {
|
||||||
|
Box::new(EvalAltResult::ErrorArithmetic(
|
||||||
|
format!("Failed to convert ID '{}' to u32", id_i64).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[export_module]
|
||||||
|
mod rhai_theme_data_module {
|
||||||
|
#[rhai_fn(name = "new_theme_data")]
|
||||||
|
pub fn new_theme_data() -> RhaiThemeData {
|
||||||
|
ThemeData::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Setters for ThemeData ---
|
||||||
|
#[rhai_fn(name = "primary_color", return_raw, global, pure)]
|
||||||
|
pub fn set_primary_color(
|
||||||
|
theme: &mut RhaiThemeData,
|
||||||
|
color: String,
|
||||||
|
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
|
||||||
|
let mut owned_theme = mem::take(theme);
|
||||||
|
owned_theme.primary_color = color;
|
||||||
|
*theme = owned_theme;
|
||||||
|
Ok(theme.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "background_color", return_raw, global, pure)]
|
||||||
|
pub fn set_background_color(
|
||||||
|
theme: &mut RhaiThemeData,
|
||||||
|
color: String,
|
||||||
|
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
|
||||||
|
let mut owned_theme = mem::take(theme);
|
||||||
|
owned_theme.background_color = color;
|
||||||
|
*theme = owned_theme;
|
||||||
|
Ok(theme.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "background_pattern", return_raw, global, pure)]
|
||||||
|
pub fn set_background_pattern(
|
||||||
|
theme: &mut RhaiThemeData,
|
||||||
|
pattern: String,
|
||||||
|
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
|
||||||
|
let mut owned_theme = mem::take(theme);
|
||||||
|
owned_theme.background_pattern = pattern;
|
||||||
|
*theme = owned_theme;
|
||||||
|
Ok(theme.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "logo_symbol", return_raw, global, pure)]
|
||||||
|
pub fn set_logo_symbol(
|
||||||
|
theme: &mut RhaiThemeData,
|
||||||
|
symbol: String,
|
||||||
|
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
|
||||||
|
let mut owned_theme = mem::take(theme);
|
||||||
|
owned_theme.logo_symbol = symbol;
|
||||||
|
*theme = owned_theme;
|
||||||
|
Ok(theme.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "logo_url", return_raw, global, pure)]
|
||||||
|
pub fn set_logo_url(
|
||||||
|
theme: &mut RhaiThemeData,
|
||||||
|
url: String,
|
||||||
|
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
|
||||||
|
let mut owned_theme = mem::take(theme);
|
||||||
|
owned_theme.logo_url = url;
|
||||||
|
*theme = owned_theme;
|
||||||
|
Ok(theme.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "nav_dashboard_visible", return_raw, global, pure)]
|
||||||
|
pub fn set_nav_dashboard_visible(
|
||||||
|
theme: &mut RhaiThemeData,
|
||||||
|
visible: bool,
|
||||||
|
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
|
||||||
|
let mut owned_theme = mem::take(theme);
|
||||||
|
owned_theme.nav_dashboard_visible = visible;
|
||||||
|
*theme = owned_theme;
|
||||||
|
Ok(theme.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "nav_timeline_visible", return_raw, global, pure)]
|
||||||
|
pub fn set_nav_timeline_visible(
|
||||||
|
theme: &mut RhaiThemeData,
|
||||||
|
visible: bool,
|
||||||
|
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
|
||||||
|
let mut owned_theme = mem::take(theme);
|
||||||
|
owned_theme.nav_timeline_visible = visible;
|
||||||
|
*theme = owned_theme;
|
||||||
|
Ok(theme.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Getters for ThemeData ---
|
||||||
|
#[rhai_fn(name = "get_primary_color", pure)]
|
||||||
|
pub fn get_primary_color(theme: &mut RhaiThemeData) -> String {
|
||||||
|
theme.primary_color.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "get_background_color", pure)]
|
||||||
|
pub fn get_background_color(theme: &mut RhaiThemeData) -> String {
|
||||||
|
theme.background_color.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "get_background_pattern", pure)]
|
||||||
|
pub fn get_background_pattern(theme: &mut RhaiThemeData) -> String {
|
||||||
|
theme.background_pattern.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "get_logo_symbol", pure)]
|
||||||
|
pub fn get_logo_symbol(theme: &mut RhaiThemeData) -> String {
|
||||||
|
theme.logo_symbol.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "get_logo_url", pure)]
|
||||||
|
pub fn get_logo_url(theme: &mut RhaiThemeData) -> String {
|
||||||
|
theme.logo_url.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "get_nav_dashboard_visible", pure)]
|
||||||
|
pub fn get_nav_dashboard_visible(theme: &mut RhaiThemeData) -> bool {
|
||||||
|
theme.nav_dashboard_visible
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rhai_fn(name = "get_nav_timeline_visible", pure)]
|
||||||
|
pub fn get_nav_timeline_visible(theme: &mut RhaiThemeData) -> bool {
|
||||||
|
theme.nav_timeline_visible
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[export_module]
|
#[export_module]
|
||||||
mod rhai_circle_module {
|
mod rhai_circle_module {
|
||||||
use super::RhaiCircle;
|
// --- Circle Functions ---
|
||||||
|
#[rhai_fn(name = "new_circle")]
|
||||||
// this one configures the users own circle
|
pub fn new_circle() -> RhaiCircle {
|
||||||
#[rhai_fn(name = "configure", return_raw)]
|
Circle::new()
|
||||||
pub fn configure() -> Result<RhaiCircle, Box<EvalAltResult>> {
|
|
||||||
Ok(Circle::new())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rhai_fn(name = "new_circle", return_raw)]
|
/// Sets the circle title
|
||||||
pub fn new_circle() -> Result<RhaiCircle, Box<EvalAltResult>> {
|
#[rhai_fn(name = "title", return_raw, global, pure)]
|
||||||
Ok(Circle::new())
|
pub fn circle_title(
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "set_title", return_raw)]
|
|
||||||
pub fn set_title(
|
|
||||||
circle: &mut RhaiCircle,
|
circle: &mut RhaiCircle,
|
||||||
title: String,
|
title: String,
|
||||||
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
let owned = std::mem::take(circle);
|
let owned_circle = mem::take(circle);
|
||||||
*circle = owned.title(title);
|
*circle = owned_circle.title(title);
|
||||||
Ok(circle.clone())
|
Ok(circle.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rhai_fn(name = "set_ws_url", return_raw)]
|
/// Sets the circle ws_url
|
||||||
pub fn set_ws_url(
|
#[rhai_fn(name = "ws_url", return_raw, global, pure)]
|
||||||
|
pub fn circle_ws_url(
|
||||||
circle: &mut RhaiCircle,
|
circle: &mut RhaiCircle,
|
||||||
ws_url: String,
|
ws_url: String,
|
||||||
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
let owned = std::mem::take(circle);
|
let owned_circle = mem::take(circle);
|
||||||
*circle = owned.ws_url(ws_url);
|
*circle = owned_circle.ws_url(ws_url);
|
||||||
Ok(circle.clone())
|
Ok(circle.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rhai_fn(name = "set_description", return_raw)]
|
/// Sets the circle description
|
||||||
pub fn set_description(
|
#[rhai_fn(name = "description", return_raw, global, pure)]
|
||||||
|
pub fn circle_description(
|
||||||
circle: &mut RhaiCircle,
|
circle: &mut RhaiCircle,
|
||||||
description: String,
|
description: String,
|
||||||
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
let owned = std::mem::take(circle);
|
let owned_circle = mem::take(circle);
|
||||||
*circle = owned.description(description);
|
*circle = owned_circle.description(description);
|
||||||
Ok(circle.clone())
|
Ok(circle.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rhai_fn(name = "set_logo", return_raw)]
|
/// Sets the circle logo
|
||||||
pub fn set_logo(
|
#[rhai_fn(name = "logo", return_raw, global, pure)]
|
||||||
|
pub fn circle_logo(
|
||||||
circle: &mut RhaiCircle,
|
circle: &mut RhaiCircle,
|
||||||
logo: String,
|
logo: String,
|
||||||
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
let owned = std::mem::take(circle);
|
let owned_circle = mem::take(circle);
|
||||||
*circle = owned.logo(logo);
|
*circle = owned_circle.logo(logo);
|
||||||
Ok(circle.clone())
|
Ok(circle.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rhai_fn(name = "set_theme", return_raw)]
|
/// Sets the circle theme
|
||||||
pub fn set_theme(
|
#[rhai_fn(name = "theme", return_raw, global, pure)]
|
||||||
|
pub fn circle_theme(
|
||||||
circle: &mut RhaiCircle,
|
circle: &mut RhaiCircle,
|
||||||
theme: ThemeData,
|
theme: RhaiThemeData,
|
||||||
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
let owned = std::mem::take(circle);
|
let owned_circle = mem::take(circle);
|
||||||
*circle = owned.theme(theme);
|
*circle = owned_circle.theme(theme);
|
||||||
Ok(circle.clone())
|
Ok(circle.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rhai_fn(name = "add_circle", return_raw)]
|
/// Adds an attendee to the circle
|
||||||
pub fn add_circle(
|
#[rhai_fn(name = "add_circle", return_raw, global, pure)]
|
||||||
|
pub fn circle_add_circle(
|
||||||
circle: &mut RhaiCircle,
|
circle: &mut RhaiCircle,
|
||||||
new_circle: String,
|
added_circle: String,
|
||||||
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
let owned = std::mem::take(circle);
|
let owned_circle = mem::take(circle);
|
||||||
*circle = owned.add_circle(new_circle);
|
*circle = owned_circle.add_circle(added_circle);
|
||||||
Ok(circle.clone())
|
Ok(circle.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rhai_fn(name = "add_member", return_raw)]
|
/// Adds an attendee to the circle
|
||||||
pub fn add_member(
|
#[rhai_fn(name = "add_member", return_raw, global, pure)]
|
||||||
|
pub fn circle_add_member(
|
||||||
circle: &mut RhaiCircle,
|
circle: &mut RhaiCircle,
|
||||||
member: String,
|
added_member: String,
|
||||||
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
) -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
let owned = std::mem::take(circle);
|
let owned_circle = mem::take(circle);
|
||||||
*circle = owned.add_member(member);
|
*circle = owned_circle.add_member(added_member);
|
||||||
Ok(circle.clone())
|
Ok(circle.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Getters ---
|
// Circle Getters
|
||||||
#[rhai_fn(name = "get_id")]
|
#[rhai_fn(name = "get_id", pure)]
|
||||||
pub fn get_id(c: &mut RhaiCircle) -> i64 {
|
pub fn get_circle_id(circle: &mut RhaiCircle) -> i64 {
|
||||||
c.base_data.id as i64
|
circle.base_data.id as i64
|
||||||
}
|
}
|
||||||
#[rhai_fn(name = "get_title")]
|
#[rhai_fn(name = "get_created_at", pure)]
|
||||||
pub fn get_title(c: &mut RhaiCircle) -> String {
|
pub fn get_circle_created_at(circle: &mut RhaiCircle) -> i64 {
|
||||||
c.title.clone()
|
circle.base_data.created_at
|
||||||
}
|
}
|
||||||
#[rhai_fn(name = "get_ws_url")]
|
#[rhai_fn(name = "get_modified_at", pure)]
|
||||||
pub fn get_ws_url(c: &mut RhaiCircle) -> String {
|
pub fn get_circle_modified_at(circle: &mut RhaiCircle) -> i64 {
|
||||||
c.ws_url.clone()
|
circle.base_data.modified_at
|
||||||
}
|
}
|
||||||
#[rhai_fn(name = "get_description")]
|
|
||||||
pub fn get_description(c: &mut RhaiCircle) -> Option<String> {
|
#[rhai_fn(name = "get_title", pure)]
|
||||||
c.description.clone()
|
pub fn get_circle_title(circle: &mut RhaiCircle) -> String {
|
||||||
|
circle.title.clone()
|
||||||
}
|
}
|
||||||
#[rhai_fn(name = "get_logo")]
|
#[rhai_fn(name = "get_description", pure)]
|
||||||
pub fn get_logo(c: &mut RhaiCircle) -> Option<String> {
|
pub fn get_circle_description(circle: &mut RhaiCircle) -> Option<String> {
|
||||||
c.logo.clone()
|
circle.description.clone()
|
||||||
}
|
}
|
||||||
#[rhai_fn(name = "get_circles")]
|
#[rhai_fn(name = "get_circles", pure)]
|
||||||
pub fn get_circles(c: &mut RhaiCircle) -> Array {
|
pub fn get_circle_circles(circle: &mut RhaiCircle) -> Vec<String> {
|
||||||
c.circles.iter().map(|s| Dynamic::from(s.clone())).collect()
|
circle.circles.clone()
|
||||||
}
|
}
|
||||||
#[rhai_fn(name = "get_members")]
|
#[rhai_fn(name = "get_ws_url", pure)]
|
||||||
pub fn get_members(c: &mut RhaiCircle) -> Array {
|
pub fn get_circle_ws_url(circle: &mut RhaiCircle) -> String {
|
||||||
c.members.iter().map(|s| Dynamic::from(s.clone())).collect()
|
circle.ws_url.clone()
|
||||||
|
}
|
||||||
|
#[rhai_fn(name = "get_logo", pure)]
|
||||||
|
pub fn get_circle_logo(circle: &mut RhaiCircle) -> Option<String> {
|
||||||
|
circle.logo.clone()
|
||||||
|
}
|
||||||
|
#[rhai_fn(name = "get_theme", pure)]
|
||||||
|
pub fn get_circle_theme(circle: &mut RhaiCircle) -> RhaiThemeData {
|
||||||
|
circle.theme.clone()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn register_circle_rhai_module(engine: &mut Engine) {
|
pub fn register_circle_rhai_module(engine: &mut Engine, db: Arc<OurDB>) {
|
||||||
let mut module = exported_module!(rhai_circle_module);
|
engine.build_type::<RhaiCircle>();
|
||||||
|
engine.build_type::<RhaiThemeData>();
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
let mut db_module = Module::new();
|
||||||
module: &mut module,
|
let circle_module = exported_module!(rhai_circle_module);
|
||||||
rhai_fn_name: "save_circle",
|
let theme_data_module = exported_module!(rhai_theme_data_module);
|
||||||
resource_type_str: "Circle",
|
|
||||||
rhai_return_rust_type: crate::models::circle::Circle
|
engine.register_global_module(circle_module.into());
|
||||||
);
|
engine.register_global_module(theme_data_module.into());
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
register_json_method::<Circle>(engine);
|
||||||
rhai_fn_name: "get_circle",
|
register_json_method::<ThemeData>(engine);
|
||||||
resource_type_str: "Circle",
|
|
||||||
rhai_return_rust_type: crate::models::circle::Circle
|
// Manually register database functions as they need to capture 'db'
|
||||||
);
|
let db_clone_set_circle = db.clone();
|
||||||
register_authorized_delete_by_id_fn!(
|
db_module.set_native_fn(
|
||||||
module: &mut module,
|
"save_circle",
|
||||||
rhai_fn_name: "delete_circle",
|
move |circle: Circle| -> Result<Circle, Box<EvalAltResult>> {
|
||||||
resource_type_str: "Circle",
|
let result = db_clone_set_circle.set(&circle).map_err(|e| {
|
||||||
rhai_return_rust_type: crate::models::circle::Circle
|
Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
format!("DB Error set_circle: {}", e).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
Ok(result.1)
|
||||||
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
let db_clone_delete_circle = db.clone();
|
||||||
|
db_module.set_native_fn(
|
||||||
|
"delete_circle",
|
||||||
|
move |circle: Circle| -> Result<(), Box<EvalAltResult>> {
|
||||||
|
let result = db_clone_delete_circle
|
||||||
|
.collection::<Circle>()
|
||||||
|
.expect("can open circle collection")
|
||||||
|
.delete_by_id(circle.base_data.id)
|
||||||
|
.expect("can delete circle");
|
||||||
|
Ok(result)
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let db_clone_get_circle = db.clone();
|
||||||
|
db_module.set_native_fn(
|
||||||
|
"get_circle",
|
||||||
|
move || -> Result<Circle, Box<EvalAltResult>> {
|
||||||
|
let all_circles: Vec<Circle> = db_clone_get_circle.get_all().map_err(|e| {
|
||||||
|
Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
format!("DB Error get_circle: {}", e).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
if let Some(first_circle) = all_circles.first() {
|
||||||
|
Ok(first_circle.clone())
|
||||||
|
} else {
|
||||||
|
Err(Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
"Circle not found".into(),
|
||||||
|
Position::NONE,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
// --- Collection DB Functions ---
|
||||||
|
let db_clone = db.clone();
|
||||||
|
db_module.set_native_fn(
|
||||||
|
"save_circle",
|
||||||
|
move |circle: RhaiCircle| -> Result<RhaiCircle, Box<EvalAltResult>> {
|
||||||
|
let result = db_clone.set(&circle).map_err(|e| {
|
||||||
|
Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
format!("DB Error: {:?}", e).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
Ok(result.1)
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let db_clone_get_circle_by_id = db.clone();
|
||||||
|
db_module.set_native_fn(
|
||||||
|
"get_circle_by_id",
|
||||||
|
move |id_i64: INT| -> Result<Circle, Box<EvalAltResult>> {
|
||||||
|
let id_u32 = id_from_i64_to_u32(id_i64)?;
|
||||||
|
db_clone_get_circle_by_id
|
||||||
|
.get_by_id(id_u32)
|
||||||
|
.map_err(|e| {
|
||||||
|
Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
format!("DB Error get_circle_by_id: {}", e).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})?
|
||||||
|
.ok_or_else(|| {
|
||||||
|
Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
format!("Circle with ID {} not found", id_u32).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let db_clone_list_circles = db.clone();
|
||||||
|
db_module.set_native_fn(
|
||||||
|
"list_circles",
|
||||||
|
move || -> Result<Dynamic, Box<EvalAltResult>> {
|
||||||
|
let collection = db_clone_list_circles.collection::<Circle>().map_err(|e| {
|
||||||
|
Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
format!("Failed to get circle collection: {:?}", e).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let circles = collection.get_all().map_err(|e| {
|
||||||
|
Box::new(EvalAltResult::ErrorRuntime(
|
||||||
|
format!("Failed to get all circles: {:?}", e).into(),
|
||||||
|
Position::NONE,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let mut array = Array::new();
|
||||||
|
for circle in circles {
|
||||||
|
array.push(Dynamic::from(circle));
|
||||||
|
}
|
||||||
|
Ok(Dynamic::from(array))
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
engine.register_global_module(db_module.into());
|
||||||
|
|
||||||
|
println!("Successfully registered circle Rhai module using export_module approach.");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,232 +0,0 @@
|
|||||||
use crate::db::Db;
|
|
||||||
use rhailib_macros::{
|
|
||||||
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
|
|
||||||
register_authorized_get_by_id_fn,
|
|
||||||
};
|
|
||||||
use rhai::plugin::*;
|
|
||||||
use rhai::{Array, Dynamic, Engine, EvalAltResult, Module};
|
|
||||||
use std::mem;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use crate::models::contact::{Contact, Group};
|
|
||||||
type RhaiContact = Contact;
|
|
||||||
type RhaiGroup = Group;
|
|
||||||
use crate::db::hero::OurDB;
|
|
||||||
use crate::db::Collection;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_contact_module {
|
|
||||||
use super::{RhaiContact, RhaiGroup};
|
|
||||||
|
|
||||||
// --- Contact Builder ---
|
|
||||||
#[rhai_fn(name = "new_contact", return_raw)]
|
|
||||||
pub fn new_contact() -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
Ok(Contact::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "name", return_raw)]
|
|
||||||
pub fn set_contact_name(
|
|
||||||
contact: &mut RhaiContact,
|
|
||||||
name: String,
|
|
||||||
) -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(contact);
|
|
||||||
*contact = owned.name(name);
|
|
||||||
Ok(contact.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "description", return_raw)]
|
|
||||||
pub fn set_contact_description(
|
|
||||||
contact: &mut RhaiContact,
|
|
||||||
description: String,
|
|
||||||
) -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(contact);
|
|
||||||
*contact = owned.description(description);
|
|
||||||
Ok(contact.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "address", return_raw)]
|
|
||||||
pub fn set_contact_address(
|
|
||||||
contact: &mut RhaiContact,
|
|
||||||
address: String,
|
|
||||||
) -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(contact);
|
|
||||||
*contact = owned.address(address);
|
|
||||||
Ok(contact.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "phone", return_raw)]
|
|
||||||
pub fn set_contact_phone(
|
|
||||||
contact: &mut RhaiContact,
|
|
||||||
phone: String,
|
|
||||||
) -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(contact);
|
|
||||||
*contact = owned.phone(phone);
|
|
||||||
Ok(contact.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "email", return_raw)]
|
|
||||||
pub fn set_contact_email(
|
|
||||||
contact: &mut RhaiContact,
|
|
||||||
email: String,
|
|
||||||
) -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(contact);
|
|
||||||
*contact = owned.email(email);
|
|
||||||
Ok(contact.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "notes", return_raw)]
|
|
||||||
pub fn set_contact_notes(
|
|
||||||
contact: &mut RhaiContact,
|
|
||||||
notes: String,
|
|
||||||
) -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(contact);
|
|
||||||
*contact = owned.notes(notes);
|
|
||||||
Ok(contact.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "circle", return_raw)]
|
|
||||||
pub fn set_contact_circle(
|
|
||||||
contact: &mut RhaiContact,
|
|
||||||
circle: String,
|
|
||||||
) -> Result<RhaiContact, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(contact);
|
|
||||||
*contact = owned.circle(circle);
|
|
||||||
Ok(contact.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Group Builder ---
|
|
||||||
#[rhai_fn(name = "new_group", return_raw)]
|
|
||||||
pub fn new_group() -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
Ok(Group::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "group_name", return_raw)]
|
|
||||||
pub fn set_group_name(
|
|
||||||
group: &mut RhaiGroup,
|
|
||||||
name: String,
|
|
||||||
) -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(group);
|
|
||||||
*group = owned.name(name);
|
|
||||||
Ok(group.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "group_description", return_raw)]
|
|
||||||
pub fn set_group_description(
|
|
||||||
group: &mut RhaiGroup,
|
|
||||||
description: String,
|
|
||||||
) -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(group);
|
|
||||||
*group = owned.description(description);
|
|
||||||
Ok(group.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "add_contact", return_raw)]
|
|
||||||
pub fn add_group_contact(
|
|
||||||
group: &mut RhaiGroup,
|
|
||||||
contact_id: i64,
|
|
||||||
) -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(group);
|
|
||||||
*group = owned.add_contact(contact_id as u32);
|
|
||||||
Ok(group.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Getters ---
|
|
||||||
// Contact
|
|
||||||
#[rhai_fn(name = "get_contact_id")]
|
|
||||||
pub fn get_contact_id(c: &mut RhaiContact) -> i64 {
|
|
||||||
c.base.id as i64
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_contact_name")]
|
|
||||||
pub fn get_contact_name(c: &mut RhaiContact) -> String {
|
|
||||||
c.name.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_contact_description")]
|
|
||||||
pub fn get_contact_description(c: &mut RhaiContact) -> Option<String> {
|
|
||||||
c.description.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_contact_address")]
|
|
||||||
pub fn get_contact_address(c: &mut RhaiContact) -> String {
|
|
||||||
c.address.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_contact_phone")]
|
|
||||||
pub fn get_contact_phone(c: &mut RhaiContact) -> String {
|
|
||||||
c.phone.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_contact_email")]
|
|
||||||
pub fn get_contact_email(c: &mut RhaiContact) -> String {
|
|
||||||
c.email.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_contact_notes")]
|
|
||||||
pub fn get_contact_notes(c: &mut RhaiContact) -> Option<String> {
|
|
||||||
c.notes.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_contact_circle")]
|
|
||||||
pub fn get_contact_circle(c: &mut RhaiContact) -> String {
|
|
||||||
c.circle.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Group
|
|
||||||
#[rhai_fn(name = "get_group_id")]
|
|
||||||
pub fn get_group_id(g: &mut RhaiGroup) -> i64 {
|
|
||||||
g.base.id as i64
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_group_name")]
|
|
||||||
pub fn get_group_name(g: &mut RhaiGroup) -> String {
|
|
||||||
g.name.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_group_description")]
|
|
||||||
pub fn get_group_description(g: &mut RhaiGroup) -> Option<String> {
|
|
||||||
g.description.clone()
|
|
||||||
}
|
|
||||||
#[rhai_fn(name = "get_group_contacts")]
|
|
||||||
pub fn get_group_contacts(g: &mut RhaiGroup) -> Array {
|
|
||||||
g.contacts
|
|
||||||
.iter()
|
|
||||||
.map(|id| Dynamic::from(*id as i64))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_contact_rhai_module(engine: &mut Engine) {
|
|
||||||
let mut module = exported_module!(rhai_contact_module);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_contact",
|
|
||||||
resource_type_str: "Contact",
|
|
||||||
rhai_return_rust_type: heromodels::models::contact::Contact
|
|
||||||
);
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_contact",
|
|
||||||
resource_type_str: "Contact",
|
|
||||||
rhai_return_rust_type: heromodels::models::contact::Contact
|
|
||||||
);
|
|
||||||
register_authorized_delete_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "delete_contact",
|
|
||||||
resource_type_str: "Contact",
|
|
||||||
rhai_return_rust_type: heromodels::models::contact::Contact
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_group",
|
|
||||||
resource_type_str: "Group",
|
|
||||||
rhai_return_rust_type: heromodels::models::contact::Group
|
|
||||||
);
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_group",
|
|
||||||
resource_type_str: "Group",
|
|
||||||
rhai_return_rust_type: heromodels::models::contact::Group
|
|
||||||
);
|
|
||||||
register_authorized_delete_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "delete_group",
|
|
||||||
resource_type_str: "Group",
|
|
||||||
rhai_return_rust_type: heromodels::models::contact::Group
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
}
|
|
||||||
@@ -1,86 +0,0 @@
|
|||||||
use heromodels::db::Db;
|
|
||||||
use macros::{
|
|
||||||
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
|
|
||||||
register_authorized_get_by_id_fn,
|
|
||||||
};
|
|
||||||
use rhai::plugin::*;
|
|
||||||
use rhai::{Engine, EvalAltResult, Module, INT};
|
|
||||||
use std::mem;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use heromodels::models::core::comment::Comment;
|
|
||||||
type RhaiComment = Comment;
|
|
||||||
use heromodels::db::hero::OurDB;
|
|
||||||
use heromodels::db::Collection;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_comment_module {
|
|
||||||
use super::{RhaiComment, INT};
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_comment", return_raw)]
|
|
||||||
pub fn new_comment() -> Result<RhaiComment, Box<EvalAltResult>> {
|
|
||||||
Ok(Comment::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "user_id", return_raw)]
|
|
||||||
pub fn set_user_id(
|
|
||||||
comment: &mut RhaiComment,
|
|
||||||
user_id: i64,
|
|
||||||
) -> Result<RhaiComment, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(comment);
|
|
||||||
*comment = owned.user_id(user_id as u32);
|
|
||||||
Ok(comment.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "content", return_raw)]
|
|
||||||
pub fn set_content(
|
|
||||||
comment: &mut RhaiComment,
|
|
||||||
content: String,
|
|
||||||
) -> Result<RhaiComment, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(comment);
|
|
||||||
*comment = owned.content(content);
|
|
||||||
Ok(comment.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_comment_id")]
|
|
||||||
pub fn get_comment_id(comment: &mut RhaiComment) -> i64 {
|
|
||||||
comment.id() as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_comment_user_id")]
|
|
||||||
pub fn get_comment_user_id(comment: &mut RhaiComment) -> i64 {
|
|
||||||
comment.user_id() as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_comment_content")]
|
|
||||||
pub fn get_comment_content(comment: &mut RhaiComment) -> String {
|
|
||||||
comment.content().clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_comment_rhai_module(engine: &mut Engine) {
|
|
||||||
let mut module = exported_module!(rhai_comment_module);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_comment",
|
|
||||||
resource_type_str: "Comment",
|
|
||||||
rhai_return_rust_type: heromodels::models::core::comment::Comment
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_comment",
|
|
||||||
resource_type_str: "Comment",
|
|
||||||
rhai_return_rust_type: heromodels::models::core::comment::Comment
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_delete_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "delete_comment",
|
|
||||||
resource_type_str: "Comment",
|
|
||||||
rhai_return_rust_type: heromodels::models::core::comment::Comment
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
}
|
|
||||||
@@ -1,80 +0,0 @@
|
|||||||
use heromodels::db::Db;
|
|
||||||
use macros::{
|
|
||||||
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
|
|
||||||
register_authorized_get_by_id_fn,
|
|
||||||
};
|
|
||||||
use rhai::plugin::*;
|
|
||||||
use rhai::{Array, Engine, EvalAltResult, Module, INT};
|
|
||||||
use std::mem;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use heromodels::db::hero::OurDB;
|
|
||||||
use heromodels::db::Collection;
|
|
||||||
use heromodels::models::finance::account::Account;
|
|
||||||
|
|
||||||
type RhaiAccount = Account;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_account_module {
|
|
||||||
use super::{Array, RhaiAccount, INT};
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_account", return_raw)]
|
|
||||||
pub fn new_account() -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
Ok(Account::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "name", return_raw)]
|
|
||||||
pub fn set_name(
|
|
||||||
account: &mut RhaiAccount,
|
|
||||||
name: String,
|
|
||||||
) -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(account);
|
|
||||||
*account = owned.name(name);
|
|
||||||
Ok(account.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "user_id", return_raw)]
|
|
||||||
pub fn set_user_id(
|
|
||||||
account: &mut RhaiAccount,
|
|
||||||
user_id: INT,
|
|
||||||
) -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(account);
|
|
||||||
*account = owned.user_id(user_id as u32);
|
|
||||||
Ok(account.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_account_id")]
|
|
||||||
pub fn get_account_id(account: &mut RhaiAccount) -> i64 {
|
|
||||||
account.id() as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_account_name")]
|
|
||||||
pub fn get_account_name(account: &mut RhaiAccount) -> String {
|
|
||||||
account.name().clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_account_user_id")]
|
|
||||||
pub fn get_account_user_id(account: &mut RhaiAccount) -> INT {
|
|
||||||
account.user_id() as INT
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_account_rhai_module(engine: &mut Engine) {
|
|
||||||
let mut module = exported_module!(rhai_account_module);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_account",
|
|
||||||
resource_type_str: "Account",
|
|
||||||
rhai_return_rust_type: heromodels::models::finance::account::Account
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_account",
|
|
||||||
resource_type_str: "Account",
|
|
||||||
rhai_return_rust_type: heromodels::models::finance::account::Account
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
pub mod node;
|
|
||||||
|
|
||||||
pub use node::{
|
|
||||||
Node,
|
|
||||||
DeviceInfo,
|
|
||||||
StorageDevice,
|
|
||||||
MemoryDevice,
|
|
||||||
CPUDevice,
|
|
||||||
GPUDevice,
|
|
||||||
NetworkDevice,
|
|
||||||
NodeCapacity,
|
|
||||||
ComputeSlice,
|
|
||||||
StorageSlice,
|
|
||||||
PricingPolicy,
|
|
||||||
SLAPolicy,
|
|
||||||
};
|
|
||||||
@@ -1,265 +0,0 @@
|
|||||||
use heromodels_core::BaseModelData;
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use rhai::CustomType;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Storage device information
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct StorageDevice {
|
|
||||||
/// can be used in node
|
|
||||||
pub id: String,
|
|
||||||
/// Size of the storage device in gigabytes
|
|
||||||
pub size_gb: f64,
|
|
||||||
/// Description of the storage device
|
|
||||||
pub description: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Memory device information
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct MemoryDevice {
|
|
||||||
/// can be used in node
|
|
||||||
pub id: String,
|
|
||||||
/// Size of the memory device in gigabytes
|
|
||||||
pub size_gb: f64,
|
|
||||||
/// Description of the memory device
|
|
||||||
pub description: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// CPU device information
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct CPUDevice {
|
|
||||||
/// can be used in node
|
|
||||||
pub id: String,
|
|
||||||
/// Number of CPU cores
|
|
||||||
pub cores: i32,
|
|
||||||
/// Passmark score
|
|
||||||
pub passmark: i32,
|
|
||||||
/// Description of the CPU
|
|
||||||
pub description: String,
|
|
||||||
/// Brand of the CPU
|
|
||||||
pub cpu_brand: String,
|
|
||||||
/// Version of the CPU
|
|
||||||
pub cpu_version: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// GPU device information
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct GPUDevice {
|
|
||||||
/// can be used in node
|
|
||||||
pub id: String,
|
|
||||||
/// Number of GPU cores
|
|
||||||
pub cores: i32,
|
|
||||||
/// Size of the GPU memory in gigabytes
|
|
||||||
pub memory_gb: f64,
|
|
||||||
/// Description of the GPU
|
|
||||||
pub description: String,
|
|
||||||
pub gpu_brand: String,
|
|
||||||
pub gpu_version: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Network device information
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct NetworkDevice {
|
|
||||||
/// can be used in node
|
|
||||||
pub id: String,
|
|
||||||
/// Network speed in Mbps
|
|
||||||
pub speed_mbps: i32,
|
|
||||||
/// Description of the network device
|
|
||||||
pub description: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Aggregated device info for a node
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct DeviceInfo {
|
|
||||||
pub vendor: String,
|
|
||||||
pub storage: Vec<StorageDevice>,
|
|
||||||
pub memory: Vec<MemoryDevice>,
|
|
||||||
pub cpu: Vec<CPUDevice>,
|
|
||||||
pub gpu: Vec<GPUDevice>,
|
|
||||||
pub network: Vec<NetworkDevice>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// NodeCapacity represents the hardware capacity details of a node.
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct NodeCapacity {
|
|
||||||
/// Total storage in gigabytes
|
|
||||||
pub storage_gb: f64,
|
|
||||||
/// Total memory in gigabytes
|
|
||||||
pub mem_gb: f64,
|
|
||||||
/// Total GPU memory in gigabytes
|
|
||||||
pub mem_gb_gpu: f64,
|
|
||||||
/// Passmark score for the node
|
|
||||||
pub passmark: i32,
|
|
||||||
/// Total virtual cores
|
|
||||||
pub vcores: i32,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Pricing policy for slices (minimal version until full spec available)
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct PricingPolicy {
|
|
||||||
/// Human friendly policy name (e.g. "fixed", "market")
|
|
||||||
pub name: String,
|
|
||||||
/// Optional free-form details as JSON-encoded string
|
|
||||||
pub details: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// SLA policy for slices (minimal version until full spec available)
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct SLAPolicy {
|
|
||||||
/// Uptime in percentage (0..100)
|
|
||||||
pub uptime: f32,
|
|
||||||
/// Max response time in ms
|
|
||||||
pub max_response_time_ms: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compute slice (typically represents a base unit of compute)
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct ComputeSlice {
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
/// the node in the grid, there is an object describing the node
|
|
||||||
#[index]
|
|
||||||
pub nodeid: u32,
|
|
||||||
/// the id of the slice in the node
|
|
||||||
#[index]
|
|
||||||
pub id: i32,
|
|
||||||
pub mem_gb: f64,
|
|
||||||
pub storage_gb: f64,
|
|
||||||
pub passmark: i32,
|
|
||||||
pub vcores: i32,
|
|
||||||
pub cpu_oversubscription: i32,
|
|
||||||
pub storage_oversubscription: i32,
|
|
||||||
/// Min/max allowed price range for validation
|
|
||||||
#[serde(default)]
|
|
||||||
pub price_range: Vec<f64>,
|
|
||||||
/// nr of GPU's see node to know what GPU's are
|
|
||||||
pub gpus: u8,
|
|
||||||
/// price per slice (even if the grouped one)
|
|
||||||
pub price_cc: f64,
|
|
||||||
pub pricing_policy: PricingPolicy,
|
|
||||||
pub sla_policy: SLAPolicy,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ComputeSlice {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
base_data: BaseModelData::new(),
|
|
||||||
nodeid: 0,
|
|
||||||
id: 0,
|
|
||||||
mem_gb: 0.0,
|
|
||||||
storage_gb: 0.0,
|
|
||||||
passmark: 0,
|
|
||||||
vcores: 0,
|
|
||||||
cpu_oversubscription: 0,
|
|
||||||
storage_oversubscription: 0,
|
|
||||||
price_range: vec![0.0, 0.0],
|
|
||||||
gpus: 0,
|
|
||||||
price_cc: 0.0,
|
|
||||||
pricing_policy: PricingPolicy::default(),
|
|
||||||
sla_policy: SLAPolicy::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self }
|
|
||||||
pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self }
|
|
||||||
pub fn mem_gb(mut self, v: f64) -> Self { self.mem_gb = v; self }
|
|
||||||
pub fn storage_gb(mut self, v: f64) -> Self { self.storage_gb = v; self }
|
|
||||||
pub fn passmark(mut self, v: i32) -> Self { self.passmark = v; self }
|
|
||||||
pub fn vcores(mut self, v: i32) -> Self { self.vcores = v; self }
|
|
||||||
pub fn cpu_oversubscription(mut self, v: i32) -> Self { self.cpu_oversubscription = v; self }
|
|
||||||
pub fn storage_oversubscription(mut self, v: i32) -> Self { self.storage_oversubscription = v; self }
|
|
||||||
pub fn price_range(mut self, min_max: Vec<f64>) -> Self { self.price_range = min_max; self }
|
|
||||||
pub fn gpus(mut self, v: u8) -> Self { self.gpus = v; self }
|
|
||||||
pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self }
|
|
||||||
pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self }
|
|
||||||
pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Storage slice (typically 1GB of storage)
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct StorageSlice {
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
/// the node in the grid
|
|
||||||
#[index]
|
|
||||||
pub nodeid: u32,
|
|
||||||
/// the id of the slice in the node, are tracked in the node itself
|
|
||||||
#[index]
|
|
||||||
pub id: i32,
|
|
||||||
/// price per slice (even if the grouped one)
|
|
||||||
pub price_cc: f64,
|
|
||||||
pub pricing_policy: PricingPolicy,
|
|
||||||
pub sla_policy: SLAPolicy,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StorageSlice {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
base_data: BaseModelData::new(),
|
|
||||||
nodeid: 0,
|
|
||||||
id: 0,
|
|
||||||
price_cc: 0.0,
|
|
||||||
pricing_policy: PricingPolicy::default(),
|
|
||||||
sla_policy: SLAPolicy::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self }
|
|
||||||
pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self }
|
|
||||||
pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self }
|
|
||||||
pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self }
|
|
||||||
pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Grid4 Node model
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
|
|
||||||
pub struct Node {
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
/// Link to node group
|
|
||||||
#[index]
|
|
||||||
pub nodegroupid: i32,
|
|
||||||
/// Uptime percentage 0..100
|
|
||||||
pub uptime: i32,
|
|
||||||
pub computeslices: Vec<ComputeSlice>,
|
|
||||||
pub storageslices: Vec<StorageSlice>,
|
|
||||||
pub devices: DeviceInfo,
|
|
||||||
/// 2 letter code
|
|
||||||
#[index]
|
|
||||||
pub country: String,
|
|
||||||
/// Hardware capacity details
|
|
||||||
pub capacity: NodeCapacity,
|
|
||||||
/// lets keep it simple and compatible
|
|
||||||
pub provisiontime: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Node {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
base_data: BaseModelData::new(),
|
|
||||||
nodegroupid: 0,
|
|
||||||
uptime: 0,
|
|
||||||
computeslices: Vec::new(),
|
|
||||||
storageslices: Vec::new(),
|
|
||||||
devices: DeviceInfo::default(),
|
|
||||||
country: String::new(),
|
|
||||||
capacity: NodeCapacity::default(),
|
|
||||||
provisiontime: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn nodegroupid(mut self, v: i32) -> Self { self.nodegroupid = v; self }
|
|
||||||
pub fn uptime(mut self, v: i32) -> Self { self.uptime = v; self }
|
|
||||||
pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self { self.computeslices.push(s); self }
|
|
||||||
pub fn add_storage_slice(mut self, s: StorageSlice) -> Self { self.storageslices.push(s); self }
|
|
||||||
pub fn devices(mut self, d: DeviceInfo) -> Self { self.devices = d; self }
|
|
||||||
pub fn country(mut self, c: impl ToString) -> Self { self.country = c.to_string(); self }
|
|
||||||
pub fn capacity(mut self, c: NodeCapacity) -> Self { self.capacity = c; self }
|
|
||||||
pub fn provisiontime(mut self, t: u32) -> Self { self.provisiontime = t; self }
|
|
||||||
|
|
||||||
/// Placeholder for capacity recalculation out of the devices on the Node
|
|
||||||
pub fn recalc_capacity(mut self) -> Self {
|
|
||||||
// TODO: calculate NodeCapacity out of the devices on the Node
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,194 +0,0 @@
|
|||||||
|
|
||||||
# Grid4 Data Model
|
|
||||||
|
|
||||||
This module defines data models for nodes, groups, and slices in a cloud/grid infrastructure. Each root object is marked with `@[heap]` and can be indexed for efficient querying.
|
|
||||||
|
|
||||||
## Root Objects Overview
|
|
||||||
|
|
||||||
| Object | Description | Index Fields |
|
|
||||||
| ----------- | --------------------------------------------- | ------------------------------ |
|
|
||||||
| `Node` | Represents a single node in the grid | `id`, `nodegroupid`, `country` |
|
|
||||||
| `NodeGroup` | Represents a group of nodes owned by a farmer | `id`, `farmerid` |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Node
|
|
||||||
|
|
||||||
Represents a single node in the grid with slices, devices, and capacity.
|
|
||||||
|
|
||||||
| Field | Type | Description | Indexed |
|
|
||||||
| --------------- | ---------------- | -------------------------------------------- | ------- |
|
|
||||||
| `id` | `int` | Unique node ID | ✅ |
|
|
||||||
| `nodegroupid` | `int` | ID of the owning node group | ✅ |
|
|
||||||
| `uptime` | `int` | Uptime percentage (0-100) | ✅ |
|
|
||||||
| `computeslices` | `[]ComputeSlice` | List of compute slices | ❌ |
|
|
||||||
| `storageslices` | `[]StorageSlice` | List of storage slices | ❌ |
|
|
||||||
| `devices` | `DeviceInfo` | Hardware device info (storage, memory, etc.) | ❌ |
|
|
||||||
| `country` | `string` | 2-letter country code | ✅ |
|
|
||||||
| `capacity` | `NodeCapacity` | Aggregated hardware capacity | ❌ |
|
|
||||||
| `provisiontime` | `u32` | Provisioning time (simple/compatible format) | ✅ |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## NodeGroup
|
|
||||||
|
|
||||||
Represents a group of nodes owned by a farmer, with policies.
|
|
||||||
|
|
||||||
| Field | Type | Description | Indexed |
|
|
||||||
| ------------------------------------- | --------------- | ---------------------------------------------- | ------- |
|
|
||||||
| `id` | `u32` | Unique group ID | ✅ |
|
|
||||||
| `farmerid` | `u32` | Farmer/user ID | ✅ |
|
|
||||||
| `secret` | `string` | Encrypted secret for booting nodes | ❌ |
|
|
||||||
| `description` | `string` | Group description | ❌ |
|
|
||||||
| `slapolicy` | `SLAPolicy` | SLA policy details | ❌ |
|
|
||||||
| `pricingpolicy` | `PricingPolicy` | Pricing policy details | ❌ |
|
|
||||||
| `compute_slice_normalized_pricing_cc` | `f64` | Pricing per 2GB compute slice in cloud credits | ❌ |
|
|
||||||
| `storage_slice_normalized_pricing_cc` | `f64` | Pricing per 1GB storage slice in cloud credits | ❌ |
|
|
||||||
| `reputation` | `int` | Reputation (0-100) | ✅ |
|
|
||||||
| `uptime` | `int` | Uptime (0-100) | ✅ |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## ComputeSlice
|
|
||||||
|
|
||||||
Represents a compute slice (e.g., 1GB memory unit).
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| -------------------------- | --------------- | -------------------------------- |
|
|
||||||
| `nodeid` | `u32` | Owning node ID |
|
|
||||||
| `id` | `int` | Slice ID in node |
|
|
||||||
| `mem_gb` | `f64` | Memory in GB |
|
|
||||||
| `storage_gb` | `f64` | Storage in GB |
|
|
||||||
| `passmark` | `int` | Passmark score |
|
|
||||||
| `vcores` | `int` | Virtual cores |
|
|
||||||
| `cpu_oversubscription` | `int` | CPU oversubscription ratio |
|
|
||||||
| `storage_oversubscription` | `int` | Storage oversubscription ratio |
|
|
||||||
| `price_range` | `[]f64` | Price range [min, max] |
|
|
||||||
| `gpus` | `u8` | Number of GPUs |
|
|
||||||
| `price_cc` | `f64` | Price per slice in cloud credits |
|
|
||||||
| `pricing_policy` | `PricingPolicy` | Pricing policy |
|
|
||||||
| `sla_policy` | `SLAPolicy` | SLA policy |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## StorageSlice
|
|
||||||
|
|
||||||
Represents a 1GB storage slice.
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ---------------- | --------------- | -------------------------------- |
|
|
||||||
| `nodeid` | `u32` | Owning node ID |
|
|
||||||
| `id` | `int` | Slice ID in node |
|
|
||||||
| `price_cc` | `f64` | Price per slice in cloud credits |
|
|
||||||
| `pricing_policy` | `PricingPolicy` | Pricing policy |
|
|
||||||
| `sla_policy` | `SLAPolicy` | SLA policy |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## DeviceInfo
|
|
||||||
|
|
||||||
Hardware device information for a node.
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| --------- | ----------------- | ----------------------- |
|
|
||||||
| `vendor` | `string` | Vendor of the node |
|
|
||||||
| `storage` | `[]StorageDevice` | List of storage devices |
|
|
||||||
| `memory` | `[]MemoryDevice` | List of memory devices |
|
|
||||||
| `cpu` | `[]CPUDevice` | List of CPU devices |
|
|
||||||
| `gpu` | `[]GPUDevice` | List of GPU devices |
|
|
||||||
| `network` | `[]NetworkDevice` | List of network devices |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## StorageDevice
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ------------- | -------- | --------------------- |
|
|
||||||
| `id` | `string` | Unique ID for device |
|
|
||||||
| `size_gb` | `f64` | Size in GB |
|
|
||||||
| `description` | `string` | Description of device |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## MemoryDevice
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ------------- | -------- | --------------------- |
|
|
||||||
| `id` | `string` | Unique ID for device |
|
|
||||||
| `size_gb` | `f64` | Size in GB |
|
|
||||||
| `description` | `string` | Description of device |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## CPUDevice
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ------------- | -------- | ------------------------ |
|
|
||||||
| `id` | `string` | Unique ID for device |
|
|
||||||
| `cores` | `int` | Number of CPU cores |
|
|
||||||
| `passmark` | `int` | Passmark benchmark score |
|
|
||||||
| `description` | `string` | Description of device |
|
|
||||||
| `cpu_brand` | `string` | Brand of the CPU |
|
|
||||||
| `cpu_version` | `string` | Version of the CPU |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## GPUDevice
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ------------- | -------- | --------------------- |
|
|
||||||
| `id` | `string` | Unique ID for device |
|
|
||||||
| `cores` | `int` | Number of GPU cores |
|
|
||||||
| `memory_gb` | `f64` | GPU memory in GB |
|
|
||||||
| `description` | `string` | Description of device |
|
|
||||||
| `gpu_brand` | `string` | Brand of the GPU |
|
|
||||||
| `gpu_version` | `string` | Version of the GPU |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## NetworkDevice
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ------------- | -------- | --------------------- |
|
|
||||||
| `id` | `string` | Unique ID for device |
|
|
||||||
| `speed_mbps` | `int` | Network speed in Mbps |
|
|
||||||
| `description` | `string` | Description of device |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## NodeCapacity
|
|
||||||
|
|
||||||
Aggregated hardware capacity for a node.
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ------------ | ----- | ---------------------- |
|
|
||||||
| `storage_gb` | `f64` | Total storage in GB |
|
|
||||||
| `mem_gb` | `f64` | Total memory in GB |
|
|
||||||
| `mem_gb_gpu` | `f64` | Total GPU memory in GB |
|
|
||||||
| `passmark` | `int` | Total passmark score |
|
|
||||||
| `vcores` | `int` | Total virtual cores |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## SLAPolicy
|
|
||||||
|
|
||||||
Service Level Agreement policy for slices or node groups.
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| -------------------- | ----- | --------------------------------------- |
|
|
||||||
| `sla_uptime` | `int` | Required uptime % (e.g., 90) |
|
|
||||||
| `sla_bandwidth_mbit` | `int` | Guaranteed bandwidth in Mbps (0 = none) |
|
|
||||||
| `sla_penalty` | `int` | Penalty % if SLA is breached (0-100) |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## PricingPolicy
|
|
||||||
|
|
||||||
Pricing policy for slices or node groups.
|
|
||||||
|
|
||||||
| Field | Type | Description |
|
|
||||||
| ---------------------------- | ------- | --------------------------------------------------------- |
|
|
||||||
| `marketplace_year_discounts` | `[]int` | Discounts for 1Y, 2Y, 3Y prepaid usage (e.g. [30,40,50]) |
|
|
||||||
| `volume_discounts` | `[]int` | Volume discounts based on purchase size (e.g. [10,20,30]) |
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
module datamodel

// I can bid for infra, and optionally get accepted
@[heap]
pub struct Bid {
pub mut:
	id                  u32
	customer_id         u32 // links back to customer for this capacity (user on ledger)
	compute_slices_nr   int // nr of slices I need in 1 machine
	compute_slice_price f64 // price per 1 GB slice I want to accept
	// FIX(review): `storage_slices_nr` was declared twice in this struct,
	// which is a compile error in V; the duplicate has been removed.
	storage_slices_nr   int
	storage_slice_price f64 // price per 1 GB storage slice I want to accept
	status              BidStatus
	obligation          bool // if obligation then will be charged and money needs to be in escrow, otherwise its an intent
	start_date          u32 // epoch
	end_date            u32
	signature_user      string // signature as done by a user/consumer to validate their identity and intent
	billing_period      BillingPeriod
}

// Lifecycle of a bid, from submission to completion.
pub enum BidStatus {
	pending
	confirmed
	assigned
	cancelled
	done
}

// How often a bid/contract is billed.
pub enum BillingPeriod {
	hourly
	monthly
	yearly
	biannually
	triannually
}
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
module datamodel

// Contract between a customer and a hoster for provisioned slices
// (the outcome of an accepted bid).
@[heap]
pub struct Contract {
pub mut:
	id                  u32
	customer_id         u32 // links back to customer for this capacity (user on ledger)
	compute_slices      []ComputeSliceProvisioned
	storage_slices      []StorageSliceProvisioned
	compute_slice_price f64 // price per 1 GB agreed upon
	storage_slice_price f64 // price per 1 GB agreed upon
	network_slice_price f64 // price per 1 GB agreed upon (transfer)
	status              ContractStatus
	start_date          u32 // epoch
	end_date            u32
	signature_user      string // signature as done by a user/consumer to validate their identity and intent
	signature_hoster    string // signature as done by the hoster
	billing_period      BillingPeriod
}

// FIX(review): was misspelled `ConctractStatus`, leaving the
// `ContractStatus` type referenced by Contract.status undefined.
pub enum ContractStatus {
	active
	cancelled
	error
	paused
}

// typically 1GB of memory, but can be adjusted based on size of machine
pub struct ComputeSliceProvisioned {
pub mut:
	node_id              u32
	id                   u16 // the id of the slice in the node
	mem_gb               f64
	storage_gb           f64
	passmark             int
	vcores               int
	cpu_oversubscription int
	tags                 string
}

// 1GB of storage
pub struct StorageSliceProvisioned {
pub mut:
	node_id         u32
	id              u16 // the id of the slice in the node, are tracked in the node itself
	storage_size_gb int
	tags            string
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,104 +0,0 @@
|
|||||||
module datamodel

// ACCESS ONLY TF

@[heap]
pub struct Node {
pub mut:
	id               int
	nodegroupid      int
	uptime           int // 0..100
	computeslices    []ComputeSlice
	storageslices    []StorageSlice
	devices          DeviceInfo
	country          string // 2 letter code as specified in lib/data/countries/data/countryInfo.txt, use that library for validation
	capacity         NodeCapacity // Hardware capacity details
	birthtime        u32 // first time node was active
	pubkey           string
	signature_node   string // signature done on node to validate pubkey with privkey
	signature_farmer string // signature as done by farmers to validate their identity
}

// Inventory of the physical devices present on a node.
pub struct DeviceInfo {
pub mut:
	vendor  string
	storage []StorageDevice
	memory  []MemoryDevice
	cpu     []CPUDevice
	gpu     []GPUDevice
	network []NetworkDevice
}

pub struct StorageDevice {
pub mut:
	id          string // can be used in node
	size_gb     f64    // Size of the storage device in gigabytes
	description string // Description of the storage device
}

pub struct MemoryDevice {
pub mut:
	id          string // can be used in node
	size_gb     f64    // Size of the memory device in gigabytes
	description string // Description of the memory device
}

pub struct CPUDevice {
pub mut:
	id          string // can be used in node
	cores       int    // Number of CPU cores
	passmark    int
	description string // Description of the CPU
	cpu_brand   string // Brand of the CPU
	cpu_version string // Version of the CPU
}

pub struct GPUDevice {
pub mut:
	id          string // can be used in node
	cores       int    // Number of GPU cores
	memory_gb   f64    // Size of the GPU memory in gigabytes
	description string // Description of the GPU
	gpu_brand   string
	gpu_version string
}

pub struct NetworkDevice {
pub mut:
	id          string // can be used in node
	speed_mbps  int    // Network speed in Mbps
	description string // Description of the network device
}

// NodeCapacity represents the hardware capacity details of a node.
pub struct NodeCapacity {
pub mut:
	storage_gb f64 // Total storage in gigabytes
	mem_gb     f64 // Total memory in gigabytes
	mem_gb_gpu f64 // Total GPU memory in gigabytes
	passmark   int // Passmark score for the node
	vcores     int // Total virtual cores
}

// typically 1GB of memory, but can be adjusted based on size of machine
pub struct ComputeSlice {
pub mut:
	// FIX(review): was `u16 int` — field name and type were swapped
	// (`u16` is a reserved type name in V). Renamed to `id u16`, matching
	// ComputeSliceProvisioned.
	id                       u16 // the id of the slice in the node
	mem_gb                   f64
	storage_gb               f64
	passmark                 int
	vcores                   int
	cpu_oversubscription     int
	storage_oversubscription int
	gpus                     u8 // nr of GPU's see node to know what GPU's are
}

// 1GB of storage
pub struct StorageSlice {
pub mut:
	// FIX(review): was `u16 int` — same name/type swap as ComputeSlice.
	id u16 // the id of the slice in the node, are tracked in the node itself
}

fn (mut n Node) check() ! {
	// todo calculate NodeCapacity out of the devices on the Node
}
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
module datamodel

// Root object: the only object a farmer needs to configure in the UI;
// it defines how slices will be created.
@[heap]
pub struct NodeGroup {
pub mut:
	id                                  u32
	farmerid                            u32 // link back to farmer who owns the nodegroup, is a user?
	secret                              string // only visible by farmer, in future encrypted, used to boot a node
	description                         string
	slapolicy                           SLAPolicy
	pricingpolicy                       PricingPolicy
	compute_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 2GB node slice
	storage_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 1GB storage slice
	signature_farmer                    string // signature as done by farmers to validate that they created this group
}

// Service-level guarantees the farmer commits to for this group.
pub struct SLAPolicy {
pub mut:
	sla_uptime         int // should +90
	sla_bandwidth_mbit int // minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee
	// Percent of revenue refunded if the SLA is breached.
	// NOTE(review): the range says 0-100 but the example uses 200
	// ("return 2 months worth of rev") — confirm the intended range.
	sla_penalty int // 0-100, percent of money given back in relation to month if sla breached, e.g. 200 means we return 2 months worth of rev if sla missed
}

// Discount rules applied to purchases against this group.
pub struct PricingPolicy {
pub mut:
	marketplace_year_discounts []int = [30, 40, 50] // e.g. 30,40,50 means if user has more CC in wallet than 1 year utilization on all his purchaes then this provider gives 30%, 2Y 40%, ...
	// volume_discounts []int = [10, 20, 30] // e.g. 10,20,30
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
|
|
||||||
// Reputation tracking for a node group as a whole, plus per-node detail.
@[heap]
pub struct NodeGroupReputation {
pub mut:
	nodegroup_id u32
	reputation   int = 50 // between 0 and 100, earned over time
	uptime       int // between 0 and 100, set by system, farmer has no ability to set this
	nodes        []NodeReputation
}

// Reputation tracking for a single node within a group.
pub struct NodeReputation {
pub mut:
	node_id    u32
	reputation int = 50 // between 0 and 100, earned over time
	uptime     int // between 0 and 100, set by system, farmer has no ability to set this
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,301 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
/// Defines the supported DNS record types.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum NameType {
    A,
    AAAA,
    CNAME,
    MX,
    TXT,
    SRV,
    PTR,
    NS,
}

/// `A` (IPv4 address record) is the default record type.
impl Default for NameType {
    fn default() -> Self {
        NameType::A
    }
}

/// Category of the DNS record.
/// NOTE(review): `Mycelium` presumably refers to the Mycelium overlay
/// network — confirm against its consumers.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum NameCat {
    IPv4,
    IPv6,
    Mycelium,
}

/// `IPv4` is the default category.
impl Default for NameCat {
    fn default() -> Self {
        NameCat::IPv4
    }
}

/// Status of a DNS zone.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum DNSZoneStatus {
    Active,
    Suspended,
    Archived,
}

/// New zones default to `Active`.
impl Default for DNSZoneStatus {
    fn default() -> Self {
        DNSZoneStatus::Active
    }
}
|
|
||||||
|
|
||||||
/// Represents a DNS record configuration.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DNSRecord {
    /// Subdomain the record applies to (relative to the zone's domain — TODO confirm)
    pub subdomain: String,
    /// DNS record type (A, AAAA, CNAME, ...)
    pub record_type: NameType,
    /// The record's value (e.g. an address or target name)
    pub value: String,
    /// Record priority; defaults to 0
    pub priority: u32,
    /// Time-to-live in seconds
    pub ttl: u32,
    /// Whether the record is currently active
    pub is_active: bool,
    /// Address-family / network category of the record
    pub cat: NameCat,
    /// Whether the record is a wildcard entry
    pub is_wildcard: bool,
}

impl DNSRecord {
    /// Create a record with empty strings, default type/category,
    /// priority 0, a 3600s (1h) TTL, and `is_active = true`.
    pub fn new() -> Self {
        Self {
            subdomain: String::new(),
            record_type: NameType::default(),
            value: String::new(),
            priority: 0,
            ttl: 3600, // 1 hour
            is_active: true,
            cat: NameCat::default(),
            is_wildcard: false,
        }
    }

    /// Set the subdomain (fluent).
    pub fn subdomain(mut self, subdomain: impl ToString) -> Self {
        self.subdomain = subdomain.to_string();
        self
    }

    /// Set the record type (fluent).
    pub fn record_type(mut self, record_type: NameType) -> Self {
        self.record_type = record_type;
        self
    }

    /// Set the record value (fluent).
    pub fn value(mut self, value: impl ToString) -> Self {
        self.value = value.to_string();
        self
    }

    /// Set the priority (fluent).
    pub fn priority(mut self, priority: u32) -> Self {
        self.priority = priority;
        self
    }

    /// Set the TTL in seconds (fluent).
    pub fn ttl(mut self, ttl: u32) -> Self {
        self.ttl = ttl;
        self
    }

    /// Set whether the record is active (fluent).
    pub fn is_active(mut self, is_active: bool) -> Self {
        self.is_active = is_active;
        self
    }

    /// Set the record category (fluent).
    pub fn cat(mut self, cat: NameCat) -> Self {
        self.cat = cat;
        self
    }

    /// Set whether the record is a wildcard (fluent).
    pub fn is_wildcard(mut self, is_wildcard: bool) -> Self {
        self.is_wildcard = is_wildcard;
        self
    }

    /// Finish the builder chain; returns the record unchanged.
    pub fn build(self) -> Self {
        self
    }
}
|
|
||||||
|
|
||||||
/// SOA (Start of Authority) record for a DNS zone.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SOARecord {
    /// ID of the zone this SOA record belongs to
    pub zone_id: u32,
    /// Primary name server for the zone
    pub primary_ns: String,
    /// Administrator contact email for the zone
    pub admin_email: String,
    /// Zone serial number
    pub serial: u64,
    /// Refresh interval in seconds
    pub refresh: u32,
    /// Retry interval in seconds
    pub retry: u32,
    /// Expiry time in seconds
    pub expire: u32,
    /// Minimum TTL in seconds
    pub minimum_ttl: u32,
    /// Whether the SOA record is currently active
    pub is_active: bool,
}

impl SOARecord {
    /// Create an SOA record with empty names, serial 0, common default
    /// timers (refresh 1h, retry 10m, expire 7d, minimum TTL 1h), and
    /// `is_active = true`.
    pub fn new() -> Self {
        Self {
            zone_id: 0,
            primary_ns: String::new(),
            admin_email: String::new(),
            serial: 0,
            refresh: 3600,   // 1 hour
            retry: 600,      // 10 minutes
            expire: 604800,  // 7 days
            minimum_ttl: 3600,
            is_active: true,
        }
    }

    /// Set the owning zone ID (fluent).
    pub fn zone_id(mut self, zone_id: u32) -> Self {
        self.zone_id = zone_id;
        self
    }

    /// Set the primary name server (fluent).
    pub fn primary_ns(mut self, primary_ns: impl ToString) -> Self {
        self.primary_ns = primary_ns.to_string();
        self
    }

    /// Set the admin contact email (fluent).
    pub fn admin_email(mut self, admin_email: impl ToString) -> Self {
        self.admin_email = admin_email.to_string();
        self
    }

    /// Set the serial number (fluent).
    pub fn serial(mut self, serial: u64) -> Self {
        self.serial = serial;
        self
    }

    /// Set the refresh interval in seconds (fluent).
    pub fn refresh(mut self, refresh: u32) -> Self {
        self.refresh = refresh;
        self
    }

    /// Set the retry interval in seconds (fluent).
    pub fn retry(mut self, retry: u32) -> Self {
        self.retry = retry;
        self
    }

    /// Set the expiry time in seconds (fluent).
    pub fn expire(mut self, expire: u32) -> Self {
        self.expire = expire;
        self
    }

    /// Set the minimum TTL in seconds (fluent).
    pub fn minimum_ttl(mut self, minimum_ttl: u32) -> Self {
        self.minimum_ttl = minimum_ttl;
        self
    }

    /// Set whether the record is active (fluent).
    pub fn is_active(mut self, is_active: bool) -> Self {
        self.is_active = is_active;
        self
    }

    /// Finish the builder chain; returns the record unchanged.
    pub fn build(self) -> Self {
        self
    }
}
|
|
||||||
|
|
||||||
/// Represents a DNS zone with its configuration and records.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct DNSZone {
    /// Base model data
    pub base_data: BaseModelData,
    /// Fully-qualified domain name of the zone (indexed)
    #[index]
    pub domain: String,
    /// DNS records belonging to this zone
    pub dnsrecords: Vec<DNSRecord>,
    /// User IDs allowed to administer this zone
    pub administrators: Vec<u32>,
    /// Lifecycle status of the zone
    pub status: DNSZoneStatus,
    /// Arbitrary key/value metadata
    pub metadata: HashMap<String, String>,
    /// SOA records for the zone
    pub soarecord: Vec<SOARecord>,
}

impl DNSZone {
    /// Create a new, empty DNS zone with the given ID.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            domain: String::new(),
            dnsrecords: Vec::new(),
            administrators: Vec::new(),
            status: DNSZoneStatus::default(),
            metadata: HashMap::new(),
            soarecord: Vec::new(),
        }
    }

    /// Set the domain name (fluent)
    pub fn domain(mut self, domain: impl ToString) -> Self {
        self.domain = domain.to_string();
        self
    }

    /// Add a DNS record (fluent)
    pub fn add_dnsrecord(mut self, record: DNSRecord) -> Self {
        self.dnsrecords.push(record);
        self
    }

    /// Set all DNS records (fluent)
    pub fn dnsrecords(mut self, dnsrecords: Vec<DNSRecord>) -> Self {
        self.dnsrecords = dnsrecords;
        self
    }

    /// Add an administrator (fluent)
    pub fn add_administrator(mut self, admin_id: u32) -> Self {
        self.administrators.push(admin_id);
        self
    }

    /// Set all administrators (fluent)
    pub fn administrators(mut self, administrators: Vec<u32>) -> Self {
        self.administrators = administrators;
        self
    }

    /// Set the zone status (fluent)
    pub fn status(mut self, status: DNSZoneStatus) -> Self {
        self.status = status;
        self
    }

    /// Add metadata entry (fluent)
    pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self {
        self.metadata.insert(key.to_string(), value.to_string());
        self
    }

    /// Set all metadata (fluent); replaces any existing entries
    pub fn metadata(mut self, metadata: HashMap<String, String>) -> Self {
        self.metadata = metadata;
        self
    }

    /// Add an SOA record (fluent)
    pub fn add_soarecord(mut self, soa: SOARecord) -> Self {
        self.soarecord.push(soa);
        self
    }

    /// Set all SOA records (fluent)
    pub fn soarecord(mut self, soarecord: Vec<SOARecord>) -> Self {
        self.soarecord = soarecord;
        self
    }

    /// Build the final DNS zone instance (no-op; ends the builder chain)
    pub fn build(self) -> Self {
        self
    }
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,236 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Defines the lifecycle of a group.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum GroupStatus {
    Active,
    Inactive,
    Suspended,
    Archived,
}

/// New groups default to `Active`.
impl Default for GroupStatus {
    fn default() -> Self {
        GroupStatus::Active
    }
}

/// Visibility controls who can discover or view the group.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum Visibility {
    Public,   // Anyone can see and request to join
    Private,  // Only invited users can see the group
    Unlisted, // Not visible in search; only accessible by direct link or DNS
}

/// Groups default to `Public` visibility.
impl Default for Visibility {
    fn default() -> Self {
        Visibility::Public
    }
}
|
|
||||||
|
|
||||||
/// GroupConfig holds rules that govern group membership and behavior
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct GroupConfig {
|
|
||||||
pub max_members: u32,
|
|
||||||
pub allow_guests: bool,
|
|
||||||
pub auto_approve: bool,
|
|
||||||
pub require_invite: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GroupConfig {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
max_members: 0,
|
|
||||||
allow_guests: false,
|
|
||||||
auto_approve: false,
|
|
||||||
require_invite: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn max_members(mut self, max_members: u32) -> Self {
|
|
||||||
self.max_members = max_members;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn allow_guests(mut self, allow_guests: bool) -> Self {
|
|
||||||
self.allow_guests = allow_guests;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn auto_approve(mut self, auto_approve: bool) -> Self {
|
|
||||||
self.auto_approve = auto_approve;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn require_invite(mut self, require_invite: bool) -> Self {
|
|
||||||
self.require_invite = require_invite;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a collaborative or access-controlled unit within the system.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Group {
    /// Base model data
    pub base_data: BaseModelData,
    /// Group name (indexed)
    #[index]
    pub name: String,
    /// Human-readable description of the group
    pub description: String,
    /// IDs of DNS records associated with the group
    pub dnsrecords: Vec<u32>,
    /// User IDs allowed to administer the group
    pub administrators: Vec<u32>,
    /// Membership/behavior rules for the group
    pub config: GroupConfig,
    /// Lifecycle status of the group
    pub status: GroupStatus,
    /// Who can discover or view the group
    pub visibility: Visibility,
    /// Creation timestamp (epoch seconds — TODO confirm unit).
    /// NOTE(review): `base_data` may already track timestamps; confirm
    /// whether these fields duplicate it.
    pub created: u64,
    /// Last-update timestamp (epoch seconds — TODO confirm unit)
    pub updated: u64,
}
|
|
||||||
|
|
||||||
impl Group {
    /// Create a new group with the given ID; all other fields start
    /// empty / at their defaults.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            name: String::new(),
            description: String::new(),
            dnsrecords: Vec::new(),
            administrators: Vec::new(),
            config: GroupConfig::new(),
            status: GroupStatus::default(),
            visibility: Visibility::default(),
            created: 0,
            updated: 0,
        }
    }

    /// Set the group name (fluent)
    pub fn name(mut self, name: impl ToString) -> Self {
        self.name = name.to_string();
        self
    }

    /// Set the group description (fluent)
    pub fn description(mut self, description: impl ToString) -> Self {
        self.description = description.to_string();
        self
    }

    /// Add a DNS record ID (fluent)
    pub fn add_dnsrecord(mut self, dnsrecord_id: u32) -> Self {
        self.dnsrecords.push(dnsrecord_id);
        self
    }

    /// Set all DNS record IDs (fluent); replaces any existing list
    pub fn dnsrecords(mut self, dnsrecords: Vec<u32>) -> Self {
        self.dnsrecords = dnsrecords;
        self
    }

    /// Add an administrator user ID (fluent)
    pub fn add_administrator(mut self, user_id: u32) -> Self {
        self.administrators.push(user_id);
        self
    }

    /// Set all administrator user IDs (fluent); replaces any existing list
    pub fn administrators(mut self, administrators: Vec<u32>) -> Self {
        self.administrators = administrators;
        self
    }

    /// Set the group configuration (fluent)
    pub fn config(mut self, config: GroupConfig) -> Self {
        self.config = config;
        self
    }

    /// Set the group status (fluent)
    pub fn status(mut self, status: GroupStatus) -> Self {
        self.status = status;
        self
    }

    /// Set the group visibility (fluent)
    pub fn visibility(mut self, visibility: Visibility) -> Self {
        self.visibility = visibility;
        self
    }

    /// Set the created timestamp (fluent)
    pub fn created(mut self, created: u64) -> Self {
        self.created = created;
        self
    }

    /// Set the updated timestamp (fluent)
    pub fn updated(mut self, updated: u64) -> Self {
        self.updated = updated;
        self
    }

    /// Build the final group instance (no-op; ends the builder chain)
    pub fn build(self) -> Self {
        self
    }
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/// Represents the membership relationship between users and groups.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct UserGroupMembership {
    /// Base model data
    pub base_data: BaseModelData,
    /// ID of the user this membership record belongs to (indexed)
    #[index]
    pub user_id: u32,
    /// IDs of the groups the user belongs to
    pub group_ids: Vec<u32>,
}
|
|
||||||
|
|
||||||
impl UserGroupMembership {
    /// Create a new membership record with the given ID; `user_id` is 0
    /// and the group list empty until set via the fluent setters.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            user_id: 0,
            group_ids: Vec::new(),
        }
    }

    /// Set the user ID (fluent)
    pub fn user_id(mut self, user_id: u32) -> Self {
        self.user_id = user_id;
        self
    }

    /// Add a group ID (fluent)
    pub fn add_group_id(mut self, group_id: u32) -> Self {
        self.group_ids.push(group_id);
        self
    }

    /// Set all group IDs (fluent); replaces any existing list
    pub fn group_ids(mut self, group_ids: Vec<u32>) -> Self {
        self.group_ids = group_ids;
        self
    }

    /// Build the final membership instance (no-op; ends the builder chain)
    pub fn build(self) -> Self {
        self
    }
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,115 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Defines the possible roles a member can have.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum MemberRole {
    Owner,
    Admin,
    Moderator,
    Member,
    Guest,
}

/// New members default to the plain `Member` role.
impl Default for MemberRole {
    fn default() -> Self {
        MemberRole::Member
    }
}

/// Represents the current status of membership.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum MemberStatus {
    Active,
    Pending,
    Suspended,
    Removed,
}

/// Memberships start out `Pending` (awaiting approval — TODO confirm flow).
impl Default for MemberStatus {
    fn default() -> Self {
        MemberStatus::Pending
    }
}
|
|
||||||
|
|
||||||
/// Represents a member within a circle.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Member {
    /// Base model data
    pub base_data: BaseModelData,
    /// ID of the user behind this membership (indexed)
    #[index]
    pub user_id: u32,
    /// Role of the member within the circle
    pub role: MemberRole,
    /// Current membership status
    pub status: MemberStatus,
    /// Timestamp the member joined (epoch — TODO confirm unit)
    pub joined_at: u64,
    /// User ID of whoever invited this member (0 if none — TODO confirm)
    pub invited_by: u32,
    /// Free-form permission strings granted to the member
    pub permissions: Vec<String>,
}
|
|
||||||
|
|
||||||
impl Member {
    /// Create a new member with the given ID; all other fields start at
    /// zero / default (role `Member`, status `Pending`).
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            user_id: 0,
            role: MemberRole::default(),
            status: MemberStatus::default(),
            joined_at: 0,
            invited_by: 0,
            permissions: Vec::new(),
        }
    }

    /// Set the user ID (fluent)
    pub fn user_id(mut self, user_id: u32) -> Self {
        self.user_id = user_id;
        self
    }

    /// Set the member role (fluent)
    pub fn role(mut self, role: MemberRole) -> Self {
        self.role = role;
        self
    }

    /// Set the member status (fluent)
    pub fn status(mut self, status: MemberStatus) -> Self {
        self.status = status;
        self
    }

    /// Set the joined timestamp (fluent)
    pub fn joined_at(mut self, joined_at: u64) -> Self {
        self.joined_at = joined_at;
        self
    }

    /// Set who invited this member (fluent)
    pub fn invited_by(mut self, invited_by: u32) -> Self {
        self.invited_by = invited_by;
        self
    }

    /// Add a permission (fluent)
    pub fn add_permission(mut self, permission: impl ToString) -> Self {
        self.permissions.push(permission.to_string());
        self
    }

    /// Set all permissions (fluent); replaces any existing list
    pub fn permissions(mut self, permissions: Vec<String>) -> Self {
        self.permissions = permissions;
        self
    }

    /// Build the final member instance (no-op; ends the builder chain)
    pub fn build(self) -> Self {
        self
    }
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
// Export all heroledger model modules
|
|
||||||
pub mod user;
|
|
||||||
pub mod group;
|
|
||||||
pub mod money;
|
|
||||||
pub mod membership;
|
|
||||||
pub mod dnsrecord;
|
|
||||||
pub mod secretbox;
|
|
||||||
pub mod signature;
|
|
||||||
pub mod user_kvs;
|
|
||||||
pub mod rhai;
|
|
||||||
|
|
||||||
// Re-export key types for convenience
|
|
||||||
pub use user::{User, UserStatus, UserProfile, KYCInfo, KYCStatus, SecretBox};
|
|
||||||
pub use group::{Group, UserGroupMembership, GroupStatus, Visibility, GroupConfig};
|
|
||||||
pub use money::{Account, Asset, AccountPolicy, AccountPolicyItem, Transaction, AccountStatus, TransactionType, Signature as TransactionSignature};
|
|
||||||
pub use membership::{Member, MemberRole, MemberStatus};
|
|
||||||
pub use dnsrecord::{DNSZone, DNSRecord, SOARecord, NameType, NameCat, DNSZoneStatus};
|
|
||||||
pub use secretbox::{Notary, NotaryStatus, SecretBoxCategory};
|
|
||||||
pub use signature::{Signature, SignatureStatus, ObjectType};
|
|
||||||
pub use user_kvs::{UserKVS, UserKVSItem};
|
|
||||||
@@ -1,515 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
/// Represents the status of an account.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum AccountStatus {
    Active,
    Inactive,
    Suspended,
    Archived,
}

/// New accounts default to `Active`.
impl Default for AccountStatus {
    fn default() -> Self {
        AccountStatus::Active
    }
}

/// Represents the type of transaction.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum TransactionType {
    Transfer,
    Clawback,
    Freeze,
    Unfreeze,
    Issue,
    Burn,
}

/// Transactions default to `Transfer`.
impl Default for TransactionType {
    fn default() -> Self {
        TransactionType::Transfer
    }
}
|
|
||||||
|
|
||||||
/// Represents a signature for transactions.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Signature {
    /// User ID of the signer
    pub signer_id: u32,
    /// Signature payload (encoding not specified here — TODO confirm format)
    pub signature: String,
    /// When the signature was made (epoch — TODO confirm unit)
    pub timestamp: u64,
}

impl Signature {
    /// Create an empty signature (signer 0, empty payload, timestamp 0).
    pub fn new() -> Self {
        Self {
            signer_id: 0,
            signature: String::new(),
            timestamp: 0,
        }
    }

    /// Set the signer's user ID (fluent).
    pub fn signer_id(mut self, signer_id: u32) -> Self {
        self.signer_id = signer_id;
        self
    }

    /// Set the signature payload (fluent).
    pub fn signature(mut self, signature: impl ToString) -> Self {
        self.signature = signature.to_string();
        self
    }

    /// Set the timestamp (fluent).
    pub fn timestamp(mut self, timestamp: u64) -> Self {
        self.timestamp = timestamp;
        self
    }

    /// Finish the builder chain; returns the signature unchanged.
    pub fn build(self) -> Self {
        self
    }
}
|
|
||||||
|
|
||||||
/// Policy item for account operations
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct AccountPolicyItem {
|
|
||||||
pub signers: Vec<u32>,
|
|
||||||
pub min_signatures: u32,
|
|
||||||
pub enabled: bool,
|
|
||||||
pub threshold: f64,
|
|
||||||
pub recipient: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountPolicyItem {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
signers: Vec::new(),
|
|
||||||
min_signatures: 0,
|
|
||||||
enabled: false,
|
|
||||||
threshold: 0.0,
|
|
||||||
recipient: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_signer(mut self, signer_id: u32) -> Self {
|
|
||||||
self.signers.push(signer_id);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn signers(mut self, signers: Vec<u32>) -> Self {
|
|
||||||
self.signers = signers;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn min_signatures(mut self, min_signatures: u32) -> Self {
|
|
||||||
self.min_signatures = min_signatures;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn enabled(mut self, enabled: bool) -> Self {
|
|
||||||
self.enabled = enabled;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn threshold(mut self, threshold: f64) -> Self {
|
|
||||||
self.threshold = threshold;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn recipient(mut self, recipient: u32) -> Self {
|
|
||||||
self.recipient = recipient;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents an account in the financial system
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct Account {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
pub owner_id: u32,
|
|
||||||
#[index]
|
|
||||||
pub address: String,
|
|
||||||
pub balance: f64,
|
|
||||||
pub currency: String,
|
|
||||||
pub assetid: u32,
|
|
||||||
pub last_activity: u64,
|
|
||||||
pub administrators: Vec<u32>,
|
|
||||||
pub accountpolicy: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Account {
|
|
||||||
/// Create a new account instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
owner_id: 0,
|
|
||||||
address: String::new(),
|
|
||||||
balance: 0.0,
|
|
||||||
currency: String::new(),
|
|
||||||
assetid: 0,
|
|
||||||
last_activity: 0,
|
|
||||||
administrators: Vec::new(),
|
|
||||||
accountpolicy: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the owner ID (fluent)
|
|
||||||
pub fn owner_id(mut self, owner_id: u32) -> Self {
|
|
||||||
self.owner_id = owner_id;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the blockchain address (fluent)
|
|
||||||
pub fn address(mut self, address: impl ToString) -> Self {
|
|
||||||
self.address = address.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the balance (fluent)
|
|
||||||
pub fn balance(mut self, balance: f64) -> Self {
|
|
||||||
self.balance = balance;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the currency (fluent)
|
|
||||||
pub fn currency(mut self, currency: impl ToString) -> Self {
|
|
||||||
self.currency = currency.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the asset ID (fluent)
|
|
||||||
pub fn assetid(mut self, assetid: u32) -> Self {
|
|
||||||
self.assetid = assetid;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the last activity timestamp (fluent)
|
|
||||||
pub fn last_activity(mut self, last_activity: u64) -> Self {
|
|
||||||
self.last_activity = last_activity;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add an administrator (fluent)
|
|
||||||
pub fn add_administrator(mut self, admin_id: u32) -> Self {
|
|
||||||
self.administrators.push(admin_id);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all administrators (fluent)
|
|
||||||
pub fn administrators(mut self, administrators: Vec<u32>) -> Self {
|
|
||||||
self.administrators = administrators;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the account policy ID (fluent)
|
|
||||||
pub fn accountpolicy(mut self, accountpolicy: u32) -> Self {
|
|
||||||
self.accountpolicy = accountpolicy;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final account instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/// Represents an asset in the financial system
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct Asset {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
#[index]
|
|
||||||
pub address: String,
|
|
||||||
pub assetid: u32,
|
|
||||||
pub asset_type: String,
|
|
||||||
pub issuer: u32,
|
|
||||||
pub supply: f64,
|
|
||||||
pub decimals: u8,
|
|
||||||
pub is_frozen: bool,
|
|
||||||
pub metadata: HashMap<String, String>,
|
|
||||||
pub administrators: Vec<u32>,
|
|
||||||
pub min_signatures: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Asset {
|
|
||||||
/// Create a new asset instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
address: String::new(),
|
|
||||||
assetid: 0,
|
|
||||||
asset_type: String::new(),
|
|
||||||
issuer: 0,
|
|
||||||
supply: 0.0,
|
|
||||||
decimals: 0,
|
|
||||||
is_frozen: false,
|
|
||||||
metadata: HashMap::new(),
|
|
||||||
administrators: Vec::new(),
|
|
||||||
min_signatures: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the blockchain address (fluent)
|
|
||||||
pub fn address(mut self, address: impl ToString) -> Self {
|
|
||||||
self.address = address.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the asset ID (fluent)
|
|
||||||
pub fn assetid(mut self, assetid: u32) -> Self {
|
|
||||||
self.assetid = assetid;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the asset type (fluent)
|
|
||||||
pub fn asset_type(mut self, asset_type: impl ToString) -> Self {
|
|
||||||
self.asset_type = asset_type.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the issuer (fluent)
|
|
||||||
pub fn issuer(mut self, issuer: u32) -> Self {
|
|
||||||
self.issuer = issuer;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the supply (fluent)
|
|
||||||
pub fn supply(mut self, supply: f64) -> Self {
|
|
||||||
self.supply = supply;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the decimals (fluent)
|
|
||||||
pub fn decimals(mut self, decimals: u8) -> Self {
|
|
||||||
self.decimals = decimals;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the frozen status (fluent)
|
|
||||||
pub fn is_frozen(mut self, is_frozen: bool) -> Self {
|
|
||||||
self.is_frozen = is_frozen;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add metadata entry (fluent)
|
|
||||||
pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self {
|
|
||||||
self.metadata.insert(key.to_string(), value.to_string());
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all metadata (fluent)
|
|
||||||
pub fn metadata(mut self, metadata: HashMap<String, String>) -> Self {
|
|
||||||
self.metadata = metadata;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add an administrator (fluent)
|
|
||||||
pub fn add_administrator(mut self, admin_id: u32) -> Self {
|
|
||||||
self.administrators.push(admin_id);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all administrators (fluent)
|
|
||||||
pub fn administrators(mut self, administrators: Vec<u32>) -> Self {
|
|
||||||
self.administrators = administrators;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set minimum signatures required (fluent)
|
|
||||||
pub fn min_signatures(mut self, min_signatures: u32) -> Self {
|
|
||||||
self.min_signatures = min_signatures;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final asset instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/// Represents account policies for various operations
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct AccountPolicy {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
pub transferpolicy: AccountPolicyItem,
|
|
||||||
pub adminpolicy: AccountPolicyItem,
|
|
||||||
pub clawbackpolicy: AccountPolicyItem,
|
|
||||||
pub freezepolicy: AccountPolicyItem,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountPolicy {
|
|
||||||
/// Create a new account policy instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
transferpolicy: AccountPolicyItem::new(),
|
|
||||||
adminpolicy: AccountPolicyItem::new(),
|
|
||||||
clawbackpolicy: AccountPolicyItem::new(),
|
|
||||||
freezepolicy: AccountPolicyItem::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the transfer policy (fluent)
|
|
||||||
pub fn transferpolicy(mut self, transferpolicy: AccountPolicyItem) -> Self {
|
|
||||||
self.transferpolicy = transferpolicy;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the admin policy (fluent)
|
|
||||||
pub fn adminpolicy(mut self, adminpolicy: AccountPolicyItem) -> Self {
|
|
||||||
self.adminpolicy = adminpolicy;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the clawback policy (fluent)
|
|
||||||
pub fn clawbackpolicy(mut self, clawbackpolicy: AccountPolicyItem) -> Self {
|
|
||||||
self.clawbackpolicy = clawbackpolicy;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the freeze policy (fluent)
|
|
||||||
pub fn freezepolicy(mut self, freezepolicy: AccountPolicyItem) -> Self {
|
|
||||||
self.freezepolicy = freezepolicy;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final account policy instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/// Represents a financial transaction
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct Transaction {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
pub txid: u32,
|
|
||||||
pub source: u32,
|
|
||||||
pub destination: u32,
|
|
||||||
pub assetid: u32,
|
|
||||||
pub amount: f64,
|
|
||||||
pub timestamp: u64,
|
|
||||||
pub status: String,
|
|
||||||
pub memo: String,
|
|
||||||
pub tx_type: TransactionType,
|
|
||||||
pub signatures: Vec<Signature>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Transaction {
|
|
||||||
/// Create a new transaction instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
txid: 0,
|
|
||||||
source: 0,
|
|
||||||
destination: 0,
|
|
||||||
assetid: 0,
|
|
||||||
amount: 0.0,
|
|
||||||
timestamp: 0,
|
|
||||||
status: String::new(),
|
|
||||||
memo: String::new(),
|
|
||||||
tx_type: TransactionType::default(),
|
|
||||||
signatures: Vec::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the transaction ID (fluent)
|
|
||||||
pub fn txid(mut self, txid: u32) -> Self {
|
|
||||||
self.txid = txid;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the source account (fluent)
|
|
||||||
pub fn source(mut self, source: u32) -> Self {
|
|
||||||
self.source = source;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the destination account (fluent)
|
|
||||||
pub fn destination(mut self, destination: u32) -> Self {
|
|
||||||
self.destination = destination;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the asset ID (fluent)
|
|
||||||
pub fn assetid(mut self, assetid: u32) -> Self {
|
|
||||||
self.assetid = assetid;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the amount (fluent)
|
|
||||||
pub fn amount(mut self, amount: f64) -> Self {
|
|
||||||
self.amount = amount;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the timestamp (fluent)
|
|
||||||
pub fn timestamp(mut self, timestamp: u64) -> Self {
|
|
||||||
self.timestamp = timestamp;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the status (fluent)
|
|
||||||
pub fn status(mut self, status: impl ToString) -> Self {
|
|
||||||
self.status = status.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the memo (fluent)
|
|
||||||
pub fn memo(mut self, memo: impl ToString) -> Self {
|
|
||||||
self.memo = memo.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the transaction type (fluent)
|
|
||||||
pub fn tx_type(mut self, tx_type: TransactionType) -> Self {
|
|
||||||
self.tx_type = tx_type;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a signature (fluent)
|
|
||||||
pub fn add_signature(mut self, signature: Signature) -> Self {
|
|
||||||
self.signatures.push(signature);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all signatures (fluent)
|
|
||||||
pub fn signatures(mut self, signatures: Vec<Signature>) -> Self {
|
|
||||||
self.signatures = signatures;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final transaction instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,317 +0,0 @@
|
|||||||
use ::rhai::plugin::*;
|
|
||||||
use ::rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module};
|
|
||||||
use std::mem;
|
|
||||||
|
|
||||||
use crate::models::heroledger::*;
|
|
||||||
|
|
||||||
// ============================================================================
|
|
||||||
// User Module
|
|
||||||
// ============================================================================
|
|
||||||
|
|
||||||
type RhaiUser = User;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_user_module {
|
|
||||||
use super::RhaiUser;
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_user", return_raw)]
|
|
||||||
pub fn new_user() -> Result<RhaiUser, Box<EvalAltResult>> {
|
|
||||||
Ok(User::new(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "username", return_raw)]
|
|
||||||
pub fn set_username(
|
|
||||||
user: &mut RhaiUser,
|
|
||||||
username: String,
|
|
||||||
) -> Result<RhaiUser, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(user);
|
|
||||||
*user = owned.username(username);
|
|
||||||
Ok(user.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "add_email", return_raw)]
|
|
||||||
pub fn add_email(
|
|
||||||
user: &mut RhaiUser,
|
|
||||||
email: String,
|
|
||||||
) -> Result<RhaiUser, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(user);
|
|
||||||
*user = owned.add_email(email);
|
|
||||||
Ok(user.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "pubkey", return_raw)]
|
|
||||||
pub fn set_pubkey(
|
|
||||||
user: &mut RhaiUser,
|
|
||||||
pubkey: String,
|
|
||||||
) -> Result<RhaiUser, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(user);
|
|
||||||
*user = owned.pubkey(pubkey);
|
|
||||||
Ok(user.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "status", return_raw)]
|
|
||||||
pub fn set_status(
|
|
||||||
user: &mut RhaiUser,
|
|
||||||
status: String,
|
|
||||||
) -> Result<RhaiUser, Box<EvalAltResult>> {
|
|
||||||
let status_enum = match status.as_str() {
|
|
||||||
"Active" => UserStatus::Active,
|
|
||||||
"Inactive" => UserStatus::Inactive,
|
|
||||||
"Suspended" => UserStatus::Suspended,
|
|
||||||
"Archived" => UserStatus::Archived,
|
|
||||||
_ => return Err(format!("Invalid user status: {}", status).into()),
|
|
||||||
};
|
|
||||||
let owned = std::mem::take(user);
|
|
||||||
*user = owned.status(status_enum);
|
|
||||||
Ok(user.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "save_user", return_raw)]
|
|
||||||
pub fn save_user(user: &mut RhaiUser) -> Result<RhaiUser, Box<EvalAltResult>> {
|
|
||||||
// This would integrate with the database save functionality
|
|
||||||
// For now, just return the user as-is
|
|
||||||
Ok(user.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Getters
|
|
||||||
#[rhai_fn(name = "get_id")]
|
|
||||||
pub fn get_id(user: &mut RhaiUser) -> i64 {
|
|
||||||
user.base_data.id as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_username")]
|
|
||||||
pub fn get_username(user: &mut RhaiUser) -> String {
|
|
||||||
user.username.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_email")]
|
|
||||||
pub fn get_email(user: &mut RhaiUser) -> String {
|
|
||||||
if let Some(first_email) = user.email.first() {
|
|
||||||
first_email.clone()
|
|
||||||
} else {
|
|
||||||
String::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_pubkey")]
|
|
||||||
pub fn get_pubkey(user: &mut RhaiUser) -> String {
|
|
||||||
user.pubkey.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ============================================================================
|
|
||||||
// Group Module
|
|
||||||
// ============================================================================
|
|
||||||
|
|
||||||
type RhaiGroup = Group;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_group_module {
|
|
||||||
use super::RhaiGroup;
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_group", return_raw)]
|
|
||||||
pub fn new_group() -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
Ok(Group::new(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "name", return_raw)]
|
|
||||||
pub fn set_name(
|
|
||||||
group: &mut RhaiGroup,
|
|
||||||
name: String,
|
|
||||||
) -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(group);
|
|
||||||
*group = owned.name(name);
|
|
||||||
Ok(group.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "description", return_raw)]
|
|
||||||
pub fn set_description(
|
|
||||||
group: &mut RhaiGroup,
|
|
||||||
description: String,
|
|
||||||
) -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(group);
|
|
||||||
*group = owned.description(description);
|
|
||||||
Ok(group.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "visibility", return_raw)]
|
|
||||||
pub fn set_visibility(
|
|
||||||
group: &mut RhaiGroup,
|
|
||||||
visibility: String,
|
|
||||||
) -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
let visibility_enum = match visibility.as_str() {
|
|
||||||
"Public" => Visibility::Public,
|
|
||||||
"Private" => Visibility::Private,
|
|
||||||
_ => return Err(format!("Invalid visibility: {}", visibility).into()),
|
|
||||||
};
|
|
||||||
let owned = std::mem::take(group);
|
|
||||||
*group = owned.visibility(visibility_enum);
|
|
||||||
Ok(group.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "save_group", return_raw)]
|
|
||||||
pub fn save_group(group: &mut RhaiGroup) -> Result<RhaiGroup, Box<EvalAltResult>> {
|
|
||||||
Ok(group.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Getters
|
|
||||||
#[rhai_fn(name = "get_id")]
|
|
||||||
pub fn get_id(group: &mut RhaiGroup) -> i64 {
|
|
||||||
group.base_data.id as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_name")]
|
|
||||||
pub fn get_name(group: &mut RhaiGroup) -> String {
|
|
||||||
group.name.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_description")]
|
|
||||||
pub fn get_description(group: &mut RhaiGroup) -> String {
|
|
||||||
group.description.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ============================================================================
|
|
||||||
// Account Module (from money.rs)
|
|
||||||
// ============================================================================
|
|
||||||
|
|
||||||
type RhaiAccount = Account;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_account_module {
|
|
||||||
use super::RhaiAccount;
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_account", return_raw)]
|
|
||||||
pub fn new_account() -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
Ok(Account::new(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "owner_id", return_raw)]
|
|
||||||
pub fn set_owner_id(
|
|
||||||
account: &mut RhaiAccount,
|
|
||||||
owner_id: i64,
|
|
||||||
) -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(account);
|
|
||||||
*account = owned.owner_id(owner_id as u32);
|
|
||||||
Ok(account.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "address", return_raw)]
|
|
||||||
pub fn set_address(
|
|
||||||
account: &mut RhaiAccount,
|
|
||||||
address: String,
|
|
||||||
) -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(account);
|
|
||||||
*account = owned.address(address);
|
|
||||||
Ok(account.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "currency", return_raw)]
|
|
||||||
pub fn set_currency(
|
|
||||||
account: &mut RhaiAccount,
|
|
||||||
currency: String,
|
|
||||||
) -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(account);
|
|
||||||
*account = owned.currency(currency);
|
|
||||||
Ok(account.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "save_account", return_raw)]
|
|
||||||
pub fn save_account(account: &mut RhaiAccount) -> Result<RhaiAccount, Box<EvalAltResult>> {
|
|
||||||
Ok(account.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Getters
|
|
||||||
#[rhai_fn(name = "get_id")]
|
|
||||||
pub fn get_id(account: &mut RhaiAccount) -> i64 {
|
|
||||||
account.base_data.id as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_address")]
|
|
||||||
pub fn get_address(account: &mut RhaiAccount) -> String {
|
|
||||||
account.address.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_currency")]
|
|
||||||
pub fn get_currency(account: &mut RhaiAccount) -> String {
|
|
||||||
account.currency.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ============================================================================
|
|
||||||
// DNS Zone Module
|
|
||||||
// ============================================================================
|
|
||||||
|
|
||||||
type RhaiDNSZone = DNSZone;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_dns_zone_module {
|
|
||||||
use super::RhaiDNSZone;
|
|
||||||
|
|
||||||
#[rhai_fn(name = "new_dns_zone", return_raw)]
|
|
||||||
pub fn new_dns_zone() -> Result<RhaiDNSZone, Box<EvalAltResult>> {
|
|
||||||
Ok(DNSZone::new(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "domain", return_raw)]
|
|
||||||
pub fn set_domain(
|
|
||||||
zone: &mut RhaiDNSZone,
|
|
||||||
domain: String,
|
|
||||||
) -> Result<RhaiDNSZone, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(zone);
|
|
||||||
*zone = owned.domain(domain);
|
|
||||||
Ok(zone.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#[rhai_fn(name = "save_dns_zone", return_raw)]
|
|
||||||
pub fn save_dns_zone(zone: &mut RhaiDNSZone) -> Result<RhaiDNSZone, Box<EvalAltResult>> {
|
|
||||||
Ok(zone.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// Getters
|
|
||||||
#[rhai_fn(name = "get_id")]
|
|
||||||
pub fn get_id(zone: &mut RhaiDNSZone) -> i64 {
|
|
||||||
zone.base_data.id as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_domain")]
|
|
||||||
pub fn get_domain(zone: &mut RhaiDNSZone) -> String {
|
|
||||||
zone.domain.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ============================================================================
|
|
||||||
// Registration Functions
|
|
||||||
// ============================================================================
|
|
||||||
// Registration functions
|
|
||||||
pub fn register_user_functions(engine: &mut Engine) {
|
|
||||||
let module = exported_module!(rhai_user_module);
|
|
||||||
engine.register_static_module("user", module.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_group_functions(engine: &mut Engine) {
|
|
||||||
let module = exported_module!(rhai_group_module);
|
|
||||||
engine.register_static_module("group", module.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_account_functions(engine: &mut Engine) {
|
|
||||||
let module = exported_module!(rhai_account_module);
|
|
||||||
engine.register_static_module("account", module.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_dnszone_functions(engine: &mut Engine) {
|
|
||||||
let module = exported_module!(rhai_dns_zone_module);
|
|
||||||
engine.register_static_module("dnszone", module.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Register all heroledger Rhai modules with the engine
pub fn register_heroledger_rhai_modules(engine: &mut Engine) {
    // One call per model family; each installs its own static module
    // namespace (user, group, account, dnszone) on the engine.
    register_user_functions(engine);
    register_group_functions(engine);
    register_account_functions(engine);
    register_dnszone_functions(engine);
}
|
|
||||||
@@ -1,142 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Category of the secret box
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub enum SecretBoxCategory {
|
|
||||||
Profile,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for SecretBoxCategory {
|
|
||||||
fn default() -> Self {
|
|
||||||
SecretBoxCategory::Profile
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Status of a notary
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub enum NotaryStatus {
|
|
||||||
Active,
|
|
||||||
Inactive,
|
|
||||||
Suspended,
|
|
||||||
Archived,
|
|
||||||
Error,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for NotaryStatus {
|
|
||||||
fn default() -> Self {
|
|
||||||
NotaryStatus::Active
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents an encrypted secret box for storing sensitive data
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub struct SecretBox {
|
|
||||||
pub notary_id: u32,
|
|
||||||
pub value: String,
|
|
||||||
pub version: u16,
|
|
||||||
pub timestamp: u64,
|
|
||||||
pub cat: SecretBoxCategory,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SecretBox {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
notary_id: 0,
|
|
||||||
value: String::new(),
|
|
||||||
version: 1,
|
|
||||||
timestamp: 0,
|
|
||||||
cat: SecretBoxCategory::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn notary_id(mut self, notary_id: u32) -> Self {
|
|
||||||
self.notary_id = notary_id;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn value(mut self, value: impl ToString) -> Self {
|
|
||||||
self.value = value.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn version(mut self, version: u16) -> Self {
|
|
||||||
self.version = version;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn timestamp(mut self, timestamp: u64) -> Self {
|
|
||||||
self.timestamp = timestamp;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn cat(mut self, cat: SecretBoxCategory) -> Self {
|
|
||||||
self.cat = cat;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a notary who can decrypt secret boxes
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct Notary {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
#[index]
|
|
||||||
pub userid: u32,
|
|
||||||
pub status: NotaryStatus,
|
|
||||||
pub myceliumaddress: String,
|
|
||||||
#[index]
|
|
||||||
pub pubkey: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Notary {
|
|
||||||
/// Create a new notary instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
userid: 0,
|
|
||||||
status: NotaryStatus::default(),
|
|
||||||
myceliumaddress: String::new(),
|
|
||||||
pubkey: String::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the user ID (fluent)
|
|
||||||
pub fn userid(mut self, userid: u32) -> Self {
|
|
||||||
self.userid = userid;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the notary status (fluent)
|
|
||||||
pub fn status(mut self, status: NotaryStatus) -> Self {
|
|
||||||
self.status = status;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the mycelium address (fluent)
|
|
||||||
pub fn myceliumaddress(mut self, myceliumaddress: impl ToString) -> Self {
|
|
||||||
self.myceliumaddress = myceliumaddress.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the public key (fluent)
|
|
||||||
pub fn pubkey(mut self, pubkey: impl ToString) -> Self {
|
|
||||||
self.pubkey = pubkey.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final notary instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Status of a signature
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub enum SignatureStatus {
|
|
||||||
Active,
|
|
||||||
Inactive,
|
|
||||||
Pending,
|
|
||||||
Revoked,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for SignatureStatus {
|
|
||||||
fn default() -> Self {
|
|
||||||
SignatureStatus::Pending
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Type of object being signed
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub enum ObjectType {
|
|
||||||
Account,
|
|
||||||
DNSRecord,
|
|
||||||
Membership,
|
|
||||||
User,
|
|
||||||
Transaction,
|
|
||||||
KYC,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for ObjectType {
|
|
||||||
fn default() -> Self {
|
|
||||||
ObjectType::User
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a cryptographic signature for various objects
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct Signature {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
#[index]
|
|
||||||
pub signature_id: u32,
|
|
||||||
#[index]
|
|
||||||
pub user_id: u32,
|
|
||||||
pub value: String,
|
|
||||||
#[index]
|
|
||||||
pub objectid: u32,
|
|
||||||
pub objecttype: ObjectType,
|
|
||||||
pub status: SignatureStatus,
|
|
||||||
pub timestamp: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Signature {
|
|
||||||
/// Create a new signature instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
signature_id: 0,
|
|
||||||
user_id: 0,
|
|
||||||
value: String::new(),
|
|
||||||
objectid: 0,
|
|
||||||
objecttype: ObjectType::default(),
|
|
||||||
status: SignatureStatus::default(),
|
|
||||||
timestamp: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the signature ID (fluent)
|
|
||||||
pub fn signature_id(mut self, signature_id: u32) -> Self {
|
|
||||||
self.signature_id = signature_id;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the user ID (fluent)
|
|
||||||
pub fn user_id(mut self, user_id: u32) -> Self {
|
|
||||||
self.user_id = user_id;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the signature value (fluent)
|
|
||||||
pub fn value(mut self, value: impl ToString) -> Self {
|
|
||||||
self.value = value.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the object ID (fluent)
|
|
||||||
pub fn objectid(mut self, objectid: u32) -> Self {
|
|
||||||
self.objectid = objectid;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the object type (fluent)
|
|
||||||
pub fn objecttype(mut self, objecttype: ObjectType) -> Self {
|
|
||||||
self.objecttype = objecttype;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the signature status (fluent)
|
|
||||||
pub fn status(mut self, status: SignatureStatus) -> Self {
|
|
||||||
self.status = status;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the timestamp (fluent)
|
|
||||||
pub fn timestamp(mut self, timestamp: u64) -> Self {
|
|
||||||
self.timestamp = timestamp;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final signature instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,370 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
/// Represents the status of a user in the system
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub enum UserStatus {
|
|
||||||
Active,
|
|
||||||
Inactive,
|
|
||||||
Suspended,
|
|
||||||
Archived,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for UserStatus {
|
|
||||||
fn default() -> Self {
|
|
||||||
UserStatus::Active
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents the KYC status of a user
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub enum KYCStatus {
|
|
||||||
Pending,
|
|
||||||
Approved,
|
|
||||||
Rejected,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for KYCStatus {
|
|
||||||
fn default() -> Self {
|
|
||||||
KYCStatus::Pending
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// User profile information
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub struct UserProfile {
|
|
||||||
pub user_id: u32,
|
|
||||||
pub full_name: String,
|
|
||||||
pub bio: String,
|
|
||||||
pub profile_pic: String,
|
|
||||||
pub links: HashMap<String, String>,
|
|
||||||
pub metadata: HashMap<String, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl UserProfile {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
user_id: 0,
|
|
||||||
full_name: String::new(),
|
|
||||||
bio: String::new(),
|
|
||||||
profile_pic: String::new(),
|
|
||||||
links: HashMap::new(),
|
|
||||||
metadata: HashMap::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn user_id(mut self, user_id: u32) -> Self {
|
|
||||||
self.user_id = user_id;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn full_name(mut self, full_name: impl ToString) -> Self {
|
|
||||||
self.full_name = full_name.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn bio(mut self, bio: impl ToString) -> Self {
|
|
||||||
self.bio = bio.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn profile_pic(mut self, profile_pic: impl ToString) -> Self {
|
|
||||||
self.profile_pic = profile_pic.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_link(mut self, key: impl ToString, value: impl ToString) -> Self {
|
|
||||||
self.links.insert(key.to_string(), value.to_string());
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn links(mut self, links: HashMap<String, String>) -> Self {
|
|
||||||
self.links = links;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self {
|
|
||||||
self.metadata.insert(key.to_string(), value.to_string());
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn metadata(mut self, metadata: HashMap<String, String>) -> Self {
|
|
||||||
self.metadata = metadata;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// KYC (Know Your Customer) information for a user
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub struct KYCInfo {
|
|
||||||
pub user_id: u32,
|
|
||||||
pub full_name: String,
|
|
||||||
pub date_of_birth: u64,
|
|
||||||
pub address: String,
|
|
||||||
pub phone_number: String,
|
|
||||||
pub id_number: String,
|
|
||||||
pub id_type: String,
|
|
||||||
pub id_expiry: u64,
|
|
||||||
pub kyc_status: KYCStatus,
|
|
||||||
pub kyc_verified: bool,
|
|
||||||
pub kyc_verified_by: u32,
|
|
||||||
pub kyc_verified_at: u64,
|
|
||||||
pub kyc_rejected_reason: String,
|
|
||||||
pub kyc_signature: u32,
|
|
||||||
pub metadata: HashMap<String, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KYCInfo {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
user_id: 0,
|
|
||||||
full_name: String::new(),
|
|
||||||
date_of_birth: 0,
|
|
||||||
address: String::new(),
|
|
||||||
phone_number: String::new(),
|
|
||||||
id_number: String::new(),
|
|
||||||
id_type: String::new(),
|
|
||||||
id_expiry: 0,
|
|
||||||
kyc_status: KYCStatus::default(),
|
|
||||||
kyc_verified: false,
|
|
||||||
kyc_verified_by: 0,
|
|
||||||
kyc_verified_at: 0,
|
|
||||||
kyc_rejected_reason: String::new(),
|
|
||||||
kyc_signature: 0,
|
|
||||||
metadata: HashMap::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn user_id(mut self, user_id: u32) -> Self {
|
|
||||||
self.user_id = user_id;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn full_name(mut self, full_name: impl ToString) -> Self {
|
|
||||||
self.full_name = full_name.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn date_of_birth(mut self, date_of_birth: u64) -> Self {
|
|
||||||
self.date_of_birth = date_of_birth;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn address(mut self, address: impl ToString) -> Self {
|
|
||||||
self.address = address.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn phone_number(mut self, phone_number: impl ToString) -> Self {
|
|
||||||
self.phone_number = phone_number.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn id_number(mut self, id_number: impl ToString) -> Self {
|
|
||||||
self.id_number = id_number.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn id_type(mut self, id_type: impl ToString) -> Self {
|
|
||||||
self.id_type = id_type.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn id_expiry(mut self, id_expiry: u64) -> Self {
|
|
||||||
self.id_expiry = id_expiry;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn kyc_status(mut self, kyc_status: KYCStatus) -> Self {
|
|
||||||
self.kyc_status = kyc_status;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn kyc_verified(mut self, kyc_verified: bool) -> Self {
|
|
||||||
self.kyc_verified = kyc_verified;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn kyc_verified_by(mut self, kyc_verified_by: u32) -> Self {
|
|
||||||
self.kyc_verified_by = kyc_verified_by;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn kyc_verified_at(mut self, kyc_verified_at: u64) -> Self {
|
|
||||||
self.kyc_verified_at = kyc_verified_at;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn kyc_rejected_reason(mut self, kyc_rejected_reason: impl ToString) -> Self {
|
|
||||||
self.kyc_rejected_reason = kyc_rejected_reason.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn kyc_signature(mut self, kyc_signature: u32) -> Self {
|
|
||||||
self.kyc_signature = kyc_signature;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self {
|
|
||||||
self.metadata.insert(key.to_string(), value.to_string());
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn metadata(mut self, metadata: HashMap<String, String>) -> Self {
|
|
||||||
self.metadata = metadata;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a secret box for storing encrypted data
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub struct SecretBox {
|
|
||||||
pub data: Vec<u8>,
|
|
||||||
pub nonce: Vec<u8>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SecretBox {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
data: Vec::new(),
|
|
||||||
nonce: Vec::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn data(mut self, data: Vec<u8>) -> Self {
|
|
||||||
self.data = data;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn nonce(mut self, nonce: Vec<u8>) -> Self {
|
|
||||||
self.nonce = nonce;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a user in the heroledger system
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
|
||||||
pub struct User {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
#[index]
|
|
||||||
pub username: String,
|
|
||||||
#[index]
|
|
||||||
pub pubkey: String,
|
|
||||||
pub email: Vec<String>,
|
|
||||||
pub status: UserStatus,
|
|
||||||
pub userprofile: Vec<SecretBox>,
|
|
||||||
pub kyc: Vec<SecretBox>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for User {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
base_data: BaseModelData::new(),
|
|
||||||
username: String::new(),
|
|
||||||
pubkey: String::new(),
|
|
||||||
email: Vec::new(),
|
|
||||||
status: UserStatus::default(),
|
|
||||||
userprofile: Vec::new(),
|
|
||||||
kyc: Vec::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl User {
|
|
||||||
/// Create a new user instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
username: String::new(),
|
|
||||||
pubkey: String::new(),
|
|
||||||
email: Vec::new(),
|
|
||||||
status: UserStatus::default(),
|
|
||||||
userprofile: Vec::new(),
|
|
||||||
kyc: Vec::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the user ID
|
|
||||||
pub fn id(&self) -> u32 {
|
|
||||||
self.base_data.id
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the username (fluent)
|
|
||||||
pub fn username(mut self, username: impl ToString) -> Self {
|
|
||||||
self.username = username.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the public key (fluent)
|
|
||||||
pub fn pubkey(mut self, pubkey: impl ToString) -> Self {
|
|
||||||
self.pubkey = pubkey.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add an email address (fluent)
|
|
||||||
pub fn add_email(mut self, email: impl ToString) -> Self {
|
|
||||||
self.email.push(email.to_string());
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all email addresses (fluent)
|
|
||||||
pub fn email(mut self, email: Vec<String>) -> Self {
|
|
||||||
self.email = email;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the user status (fluent)
|
|
||||||
pub fn status(mut self, status: UserStatus) -> Self {
|
|
||||||
self.status = status;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a user profile secret box (fluent)
|
|
||||||
pub fn add_userprofile(mut self, profile: SecretBox) -> Self {
|
|
||||||
self.userprofile.push(profile);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all user profile secret boxes (fluent)
|
|
||||||
pub fn userprofile(mut self, userprofile: Vec<SecretBox>) -> Self {
|
|
||||||
self.userprofile = userprofile;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a KYC secret box (fluent)
|
|
||||||
pub fn add_kyc(mut self, kyc: SecretBox) -> Self {
|
|
||||||
self.kyc.push(kyc);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all KYC secret boxes (fluent)
|
|
||||||
pub fn kyc(mut self, kyc: Vec<SecretBox>) -> Self {
|
|
||||||
self.kyc = kyc;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final user instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
use heromodels_core::{Model, BaseModelData, IndexKey};
|
|
||||||
use heromodels_derive::model;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use super::secretbox::SecretBox;
|
|
||||||
|
|
||||||
/// Represents a per-user key-value store
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct UserKVS {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
#[index]
|
|
||||||
pub userid: u32,
|
|
||||||
pub name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl UserKVS {
|
|
||||||
/// Create a new user KVS instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
userid: 0,
|
|
||||||
name: String::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the user ID (fluent)
|
|
||||||
pub fn userid(mut self, userid: u32) -> Self {
|
|
||||||
self.userid = userid;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the KVS name (fluent)
|
|
||||||
pub fn name(mut self, name: impl ToString) -> Self {
|
|
||||||
self.name = name.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final user KVS instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/// Represents an item in a user's key-value store
|
|
||||||
#[model]
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
|
||||||
pub struct UserKVSItem {
|
|
||||||
/// Base model data
|
|
||||||
pub base_data: BaseModelData,
|
|
||||||
#[index]
|
|
||||||
pub userkvs_id: u32,
|
|
||||||
pub key: String,
|
|
||||||
pub value: String,
|
|
||||||
pub secretbox: Vec<SecretBox>,
|
|
||||||
pub timestamp: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl UserKVSItem {
|
|
||||||
/// Create a new user KVS item instance
|
|
||||||
pub fn new(id: u32) -> Self {
|
|
||||||
let mut base_data = BaseModelData::new();
|
|
||||||
base_data.update_id(id);
|
|
||||||
Self {
|
|
||||||
base_data,
|
|
||||||
userkvs_id: 0,
|
|
||||||
key: String::new(),
|
|
||||||
value: String::new(),
|
|
||||||
secretbox: Vec::new(),
|
|
||||||
timestamp: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the user KVS ID (fluent)
|
|
||||||
pub fn userkvs_id(mut self, userkvs_id: u32) -> Self {
|
|
||||||
self.userkvs_id = userkvs_id;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the key (fluent)
|
|
||||||
pub fn key(mut self, key: impl ToString) -> Self {
|
|
||||||
self.key = key.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the value (fluent)
|
|
||||||
pub fn value(mut self, value: impl ToString) -> Self {
|
|
||||||
self.value = value.to_string();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a secret box (fluent)
|
|
||||||
pub fn add_secretbox(mut self, secretbox: SecretBox) -> Self {
|
|
||||||
self.secretbox.push(secretbox);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set all secret boxes (fluent)
|
|
||||||
pub fn secretbox(mut self, secretbox: Vec<SecretBox>) -> Self {
|
|
||||||
self.secretbox = secretbox;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the timestamp (fluent)
|
|
||||||
pub fn timestamp(mut self, timestamp: u64) -> Self {
|
|
||||||
self.timestamp = timestamp;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the final user KVS item instance
|
|
||||||
pub fn build(self) -> Self {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,156 +0,0 @@
|
|||||||
use derive::FromVec;
|
|
||||||
use heromodels::db::Db;
|
|
||||||
use macros::{
|
|
||||||
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
|
|
||||||
register_authorized_get_by_id_fn, register_authorized_list_fn,
|
|
||||||
};
|
|
||||||
use rhai::plugin::*;
|
|
||||||
use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, Position, TypeBuilder};
|
|
||||||
use serde::Serialize;
|
|
||||||
use serde_json;
|
|
||||||
use std::mem;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use heromodels::db::hero::OurDB;
|
|
||||||
use heromodels::db::Collection as DbCollectionTrait;
|
|
||||||
use heromodels::models::library::collection::Collection as RhaiCollection;
|
|
||||||
use heromodels::models::library::items::{
|
|
||||||
Book as RhaiBook, Image as RhaiImage, Markdown as RhaiMarkdown, Pdf as RhaiPdf,
|
|
||||||
Slide as RhaiSlide, Slideshow as RhaiSlideshow, TocEntry as RhaiTocEntry,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Registers a `.json()` method for any type `T` that implements the required traits.
|
|
||||||
fn register_json_method<T>(engine: &mut Engine)
|
|
||||||
where
|
|
||||||
T: CustomType + Clone + Serialize,
|
|
||||||
{
|
|
||||||
let to_json_fn = |obj: &mut T| -> Result<String, Box<EvalAltResult>> {
|
|
||||||
match serde_json::to_string_pretty(obj) {
|
|
||||||
Ok(json_str) => Ok(json_str),
|
|
||||||
Err(e) => Err(format!("Failed to serialize to JSON: {}", e).into()),
|
|
||||||
}
|
|
||||||
};
|
|
||||||
engine.register_fn("json", to_json_fn);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper types for arrays
|
|
||||||
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
|
|
||||||
#[rhai_type(name = "CollectionArray")]
|
|
||||||
pub struct RhaiCollectionArray(pub Vec<RhaiCollection>);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
|
|
||||||
#[rhai_type(name = "ImageArray")]
|
|
||||||
pub struct RhaiImageArray(pub Vec<RhaiImage>);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
|
|
||||||
#[rhai_type(name = "PdfArray")]
|
|
||||||
pub struct RhaiPdfArray(pub Vec<RhaiPdf>);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
|
|
||||||
#[rhai_type(name = "MarkdownArray")]
|
|
||||||
pub struct RhaiMarkdownArray(pub Vec<RhaiMarkdown>);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
|
|
||||||
#[rhai_type(name = "BookArray")]
|
|
||||||
pub struct RhaiBookArray(pub Vec<RhaiBook>);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
|
|
||||||
#[rhai_type(name = "SlideshowArray")]
|
|
||||||
pub struct RhaiSlideshowArray(pub Vec<RhaiSlideshow>);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
|
|
||||||
#[rhai_type(name = "TocEntryArray")]
|
|
||||||
pub struct RhaiTocEntryArray(pub Vec<RhaiTocEntry>);
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_library_module {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
// --- Collection Functions ---
|
|
||||||
#[rhai_fn(name = "new_collection", return_raw)]
|
|
||||||
pub fn new_collection() -> Result<RhaiCollection, Box<EvalAltResult>> {
|
|
||||||
Ok(RhaiCollection::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "collection_title", return_raw)]
|
|
||||||
pub fn collection_title(
|
|
||||||
collection: &mut RhaiCollection,
|
|
||||||
title: String,
|
|
||||||
) -> Result<RhaiCollection, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(collection);
|
|
||||||
*collection = owned.title(title);
|
|
||||||
Ok(collection.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "collection_description", return_raw)]
|
|
||||||
pub fn collection_description(
|
|
||||||
collection: &mut RhaiCollection,
|
|
||||||
description: String,
|
|
||||||
) -> Result<RhaiCollection, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(collection);
|
|
||||||
*collection = owned.description(description);
|
|
||||||
Ok(collection.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_collection_id")]
|
|
||||||
pub fn get_collection_id(collection: &mut RhaiCollection) -> i64 {
|
|
||||||
collection.id() as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_collection_title")]
|
|
||||||
pub fn get_collection_title(collection: &mut RhaiCollection) -> String {
|
|
||||||
collection.title().clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Image Functions ---
|
|
||||||
#[rhai_fn(name = "new_image", return_raw)]
|
|
||||||
pub fn new_image() -> Result<RhaiImage, Box<EvalAltResult>> {
|
|
||||||
Ok(RhaiImage::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "image_title", return_raw)]
|
|
||||||
pub fn image_title(
|
|
||||||
image: &mut RhaiImage,
|
|
||||||
title: String,
|
|
||||||
) -> Result<RhaiImage, Box<EvalAltResult>> {
|
|
||||||
let owned = std::mem::take(image);
|
|
||||||
*image = owned.title(title);
|
|
||||||
Ok(image.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[rhai_fn(name = "get_image_id")]
|
|
||||||
pub fn get_image_id(image: &mut RhaiImage) -> i64 {
|
|
||||||
image.id() as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Additional functions would continue here...
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_library_rhai_module(engine: &mut Engine) {
|
|
||||||
let mut module = exported_module!(rhai_library_module);
|
|
||||||
|
|
||||||
register_json_method::<RhaiCollection>(engine);
|
|
||||||
register_json_method::<RhaiImage>(engine);
|
|
||||||
register_json_method::<RhaiPdf>(engine);
|
|
||||||
register_json_method::<RhaiMarkdown>(engine);
|
|
||||||
register_json_method::<RhaiBook>(engine);
|
|
||||||
register_json_method::<RhaiSlideshow>(engine);
|
|
||||||
register_json_method::<RhaiTocEntry>(engine);
|
|
||||||
register_json_method::<RhaiCollectionArray>(engine);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_collection",
|
|
||||||
resource_type_str: "Collection",
|
|
||||||
rhai_return_rust_type: heromodels::models::library::collection::Collection
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_collection",
|
|
||||||
resource_type_str: "Collection",
|
|
||||||
rhai_return_rust_type: heromodels::models::library::collection::Collection
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct Address {
|
|
||||||
pub street: String,
|
|
||||||
pub city: String,
|
|
||||||
pub state: Option<String>,
|
|
||||||
pub postal_code: String,
|
|
||||||
pub country: String,
|
|
||||||
pub company: Option<String>,
|
|
||||||
}
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
// Export location models
|
|
||||||
pub mod address;
|
|
||||||
@@ -10,16 +10,12 @@ pub mod contact;
|
|||||||
pub mod finance;
|
pub mod finance;
|
||||||
pub mod flow;
|
pub mod flow;
|
||||||
pub mod governance;
|
pub mod governance;
|
||||||
pub mod heroledger;
|
|
||||||
pub mod legal;
|
pub mod legal;
|
||||||
pub mod library;
|
pub mod library;
|
||||||
pub mod location;
|
|
||||||
pub mod object;
|
pub mod object;
|
||||||
pub mod projects;
|
pub mod projects;
|
||||||
pub mod payment;
|
pub mod payment;
|
||||||
pub mod identity;
|
pub mod identity;
|
||||||
pub mod tfmarketplace;
|
|
||||||
pub mod grid4;
|
|
||||||
|
|
||||||
// Re-export key types for convenience
|
// Re-export key types for convenience
|
||||||
pub use core::Comment;
|
pub use core::Comment;
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
// Export object module
|
// Export contact module
|
||||||
pub mod object;
|
pub mod object;
|
||||||
pub mod object_rhai_dsl;
|
|
||||||
|
|
||||||
// Re-export Object from the inner object module (object.rs) within src/models/object/mod.rs
|
// Re-export contact, Group from the inner contact module (contact.rs) within src/models/contact/mod.rs
|
||||||
pub use self::object::Object;
|
pub use self::object::Object;
|
||||||
|
|||||||
@@ -1,56 +0,0 @@
|
|||||||
use rhai::plugin::*;
|
|
||||||
use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module};
|
|
||||||
use super::Object;
|
|
||||||
|
|
||||||
type RhaiObject = Object;
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
pub mod generated_rhai_module {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
/// Create a new Object
|
|
||||||
#[rhai_fn(name = "new_object")]
|
|
||||||
pub fn new_object() -> RhaiObject {
|
|
||||||
Object::new()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the title of an Object
|
|
||||||
#[rhai_fn(name = "object_title")]
|
|
||||||
pub fn object_title(
|
|
||||||
object: &mut RhaiObject,
|
|
||||||
title: String,
|
|
||||||
) -> RhaiObject {
|
|
||||||
let mut result = object.clone();
|
|
||||||
result.title = title;
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the description of an Object
|
|
||||||
#[rhai_fn(name = "object_description")]
|
|
||||||
pub fn object_description(
|
|
||||||
object: &mut RhaiObject,
|
|
||||||
description: String,
|
|
||||||
) -> RhaiObject {
|
|
||||||
let mut result = object.clone();
|
|
||||||
result.description = description;
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the ID of an Object
|
|
||||||
#[rhai_fn(name = "get_object_id")]
|
|
||||||
pub fn get_object_id(object: &mut RhaiObject) -> i64 {
|
|
||||||
object.id() as i64
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the title of an Object
|
|
||||||
#[rhai_fn(name = "get_object_title")]
|
|
||||||
pub fn get_object_title(object: &mut RhaiObject) -> String {
|
|
||||||
object.title.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the description of an Object
|
|
||||||
#[rhai_fn(name = "get_object_description")]
|
|
||||||
pub fn get_object_description(object: &mut RhaiObject) -> String {
|
|
||||||
object.description.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,27 +0,0 @@
|
|||||||
use heromodels::db::hero::OurDB;
|
|
||||||
use heromodels::db::{Collection, Db};
|
|
||||||
use heromodels::models::object::Object;
|
|
||||||
use macros::{register_authorized_create_by_id_fn, register_authorized_get_by_id_fn};
|
|
||||||
use rhai::{exported_module, Engine, EvalAltResult, FuncRegistration, Module};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
pub fn register_object_fns(engine: &mut Engine) {
|
|
||||||
let mut module = Module::new();
|
|
||||||
|
|
||||||
register_authorized_get_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "get_object_by_id",
|
|
||||||
resource_type_str: "Object",
|
|
||||||
rhai_return_rust_type: heromodels::models::object::Object
|
|
||||||
);
|
|
||||||
|
|
||||||
register_authorized_create_by_id_fn!(
|
|
||||||
module: &mut module,
|
|
||||||
rhai_fn_name: "save_object",
|
|
||||||
resource_type_str: "Object",
|
|
||||||
rhai_return_rust_type: heromodels::models::object::Object
|
|
||||||
);
|
|
||||||
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
engine.register_type_with_name::<Object>("Object");
|
|
||||||
}
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
use rhai::plugin::*;
|
|
||||||
use rhai::{Dynamic, Engine, EvalAltResult, Module};
|
|
||||||
|
|
||||||
// Simplified payment module - contains the core Stripe integration
|
|
||||||
// This is a condensed version of the original payment.rs DSL file
|
|
||||||
|
|
||||||
#[export_module]
|
|
||||||
mod rhai_payment_module {
|
|
||||||
// Payment configuration and basic functions
|
|
||||||
#[rhai_fn(name = "configure_stripe", return_raw)]
|
|
||||||
pub fn configure_stripe(api_key: String) -> Result<String, Box<EvalAltResult>> {
|
|
||||||
Ok(format!("Stripe configured with key: {}...", &api_key[..8]))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Product functions
|
|
||||||
#[rhai_fn(name = "new_product", return_raw)]
|
|
||||||
pub fn new_product() -> Result<Dynamic, Box<EvalAltResult>> {
|
|
||||||
Ok(Dynamic::from("product_created"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Price functions
|
|
||||||
#[rhai_fn(name = "new_price", return_raw)]
|
|
||||||
pub fn new_price() -> Result<Dynamic, Box<EvalAltResult>> {
|
|
||||||
Ok(Dynamic::from("price_created"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscription functions
|
|
||||||
#[rhai_fn(name = "new_subscription", return_raw)]
|
|
||||||
pub fn new_subscription() -> Result<Dynamic, Box<EvalAltResult>> {
|
|
||||||
Ok(Dynamic::from("subscription_created"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Payment intent functions
|
|
||||||
#[rhai_fn(name = "new_payment_intent", return_raw)]
|
|
||||||
pub fn new_payment_intent() -> Result<Dynamic, Box<EvalAltResult>> {
|
|
||||||
Ok(Dynamic::from("payment_intent_created"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Coupon functions
|
|
||||||
#[rhai_fn(name = "new_coupon", return_raw)]
|
|
||||||
pub fn new_coupon() -> Result<Dynamic, Box<EvalAltResult>> {
|
|
||||||
Ok(Dynamic::from("coupon_created"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn register_payment_rhai_module(engine: &mut Engine) {
|
|
||||||
let module = exported_module!(rhai_payment_module);
|
|
||||||
engine.register_global_module(module.into());
|
|
||||||
}
|
|
||||||
277
ourdb/API.md
Normal file
277
ourdb/API.md
Normal file
@@ -0,0 +1,277 @@
|
|||||||
|
# OurDB API Reference
|
||||||
|
|
||||||
|
This document provides a comprehensive reference for the OurDB Rust API.
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
1. [Configuration](#configuration)
|
||||||
|
2. [Database Operations](#database-operations)
|
||||||
|
- [Creating and Opening](#creating-and-opening)
|
||||||
|
- [Setting Data](#setting-data)
|
||||||
|
- [Getting Data](#getting-data)
|
||||||
|
- [Deleting Data](#deleting-data)
|
||||||
|
- [History Tracking](#history-tracking)
|
||||||
|
3. [Error Handling](#error-handling)
|
||||||
|
4. [Advanced Usage](#advanced-usage)
|
||||||
|
- [Custom File Size](#custom-file-size)
|
||||||
|
- [Custom Key Size](#custom-key-size)
|
||||||
|
5. [Performance Considerations](#performance-considerations)
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### OurDBConfig
|
||||||
|
|
||||||
|
The `OurDBConfig` struct is used to configure a new OurDB instance.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct OurDBConfig {
|
||||||
|
pub path: PathBuf,
|
||||||
|
pub incremental_mode: bool,
|
||||||
|
pub file_size: Option<usize>,
|
||||||
|
pub keysize: Option<u8>,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `path` | `PathBuf` | Path to the database directory |
|
||||||
|
| `incremental_mode` | `bool` | Whether to use auto-incremented IDs (true) or user-provided IDs (false) |
|
||||||
|
| `file_size` | `Option<usize>` | Maximum size of each database file in bytes (default: 500MB) |
|
||||||
|
| `keysize` | `Option<u8>` | Size of keys in bytes (default: 4, valid values: 2, 3, 4, 6) |
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: PathBuf::from("/path/to/db"),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: Some(1024 * 1024 * 100), // 100MB
|
||||||
|
keysize: Some(4), // 4-byte keys
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Operations
|
||||||
|
|
||||||
|
### Creating and Opening
|
||||||
|
|
||||||
|
#### `OurDB::new`
|
||||||
|
|
||||||
|
Creates a new OurDB instance or opens an existing one.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn new(config: OurDBConfig) -> Result<OurDB, Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setting Data
|
||||||
|
|
||||||
|
#### `OurDB::set`
|
||||||
|
|
||||||
|
Sets a value in the database. In incremental mode, if no ID is provided, a new ID is generated.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
The `OurDBSetArgs` struct has the following fields:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct OurDBSetArgs<'a> {
|
||||||
|
pub id: Option<u32>,
|
||||||
|
pub data: &'a [u8],
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Example with auto-generated ID:
|
||||||
|
```rust
|
||||||
|
let id = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: b"Hello, World!",
|
||||||
|
})?;
|
||||||
|
```
|
||||||
|
|
||||||
|
Example with explicit ID:
|
||||||
|
```rust
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(42),
|
||||||
|
data: b"Hello, World!",
|
||||||
|
})?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Getting Data
|
||||||
|
|
||||||
|
#### `OurDB::get`
|
||||||
|
|
||||||
|
Retrieves a value from the database by ID.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
let data = db.get(42)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deleting Data
|
||||||
|
|
||||||
|
#### `OurDB::delete`
|
||||||
|
|
||||||
|
Deletes a value from the database by ID.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn delete(&mut self, id: u32) -> Result<(), Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
db.delete(42)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### History Tracking
|
||||||
|
|
||||||
|
#### `OurDB::get_history`
|
||||||
|
|
||||||
|
Retrieves the history of values for a given ID, up to the specified depth.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
// Get the last 5 versions of the record
|
||||||
|
let history = db.get_history(42, 5)?;
|
||||||
|
|
||||||
|
// Process each version (most recent first)
|
||||||
|
for (i, version) in history.iter().enumerate() {
|
||||||
|
println!("Version {}: {:?}", i, version);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Other Operations
|
||||||
|
|
||||||
|
#### `OurDB::get_next_id`
|
||||||
|
|
||||||
|
Returns the next ID that will be assigned in incremental mode.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn get_next_id(&self) -> Result<u32, Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
let next_id = db.get_next_id()?;
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `OurDB::close`
|
||||||
|
|
||||||
|
Closes the database, ensuring all data is flushed to disk.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn close(&mut self) -> Result<(), Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
db.close()?;
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `OurDB::destroy`
|
||||||
|
|
||||||
|
Closes the database and deletes all database files.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub fn destroy(&mut self) -> Result<(), Error>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
db.destroy()?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
OurDB uses the `thiserror` crate to define error types. The main error type is `ourdb::Error`.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub enum Error {
|
||||||
|
IoError(std::io::Error),
|
||||||
|
InvalidKeySize,
|
||||||
|
InvalidId,
|
||||||
|
RecordNotFound,
|
||||||
|
InvalidCrc,
|
||||||
|
NotIncrementalMode,
|
||||||
|
DatabaseClosed,
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
All OurDB operations that can fail return a `Result<T, Error>` which can be handled using Rust's standard error handling mechanisms.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```rust
|
||||||
|
match db.get(42) {
|
||||||
|
Ok(data) => println!("Found data: {:?}", data),
|
||||||
|
Err(ourdb::Error::RecordNotFound) => println!("Record not found"),
|
||||||
|
Err(e) => eprintln!("Error: {}", e),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Advanced Usage
|
||||||
|
|
||||||
|
### Custom File Size
|
||||||
|
|
||||||
|
You can configure the maximum size of each database file:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: PathBuf::from("/path/to/db"),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: Some(1024 * 1024 * 10), // 10MB per file
|
||||||
|
keysize: None,
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
Smaller file sizes can be useful for:
|
||||||
|
- Limiting memory usage when reading files
|
||||||
|
- Improving performance on systems with limited memory
|
||||||
|
- Easier backup and file management
|
||||||
|
|
||||||
|
### Custom Key Size
|
||||||
|
|
||||||
|
OurDB supports different key sizes (2, 3, 4, or 6 bytes):
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: PathBuf::from("/path/to/db"),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: None,
|
||||||
|
keysize: Some(6), // 6-byte keys
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
Key size considerations:
|
||||||
|
- 2 bytes: Up to 65,536 records
|
||||||
|
- 3 bytes: Up to 16,777,216 records
|
||||||
|
- 4 bytes: Up to 4,294,967,296 records (default)
|
||||||
|
- 6 bytes: Up to 281,474,976,710,656 records
|
||||||
|
|
||||||
|
## Performance Considerations
|
||||||
|
|
||||||
|
For optimal performance:
|
||||||
|
|
||||||
|
1. **Choose appropriate key size**: Use the smallest key size that can accommodate your expected number of records.
|
||||||
|
|
||||||
|
2. **Configure file size**: For large databases, consider using smaller file sizes to improve memory usage.
|
||||||
|
|
||||||
|
3. **Batch operations**: When inserting or updating many records, consider batching operations to minimize disk I/O.
|
||||||
|
|
||||||
|
4. **Close properly**: Always call `close()` when you're done with the database to ensure data is properly flushed to disk.
|
||||||
|
|
||||||
|
5. **Reuse OurDB instance**: Creating a new OurDB instance has overhead, so reuse the same instance for multiple operations when possible.
|
||||||
|
|
||||||
|
6. **Consider memory usage**: The lookup table is loaded into memory, so very large databases may require significant RAM.
|
||||||
806
ourdb/Cargo.lock
generated
Normal file
806
ourdb/Cargo.lock
generated
Normal file
@@ -0,0 +1,806 @@
|
|||||||
|
# This file is automatically @generated by Cargo.
|
||||||
|
# It is not intended for manual editing.
|
||||||
|
version = 4
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "aho-corasick"
|
||||||
|
version = "1.1.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
|
||||||
|
dependencies = [
|
||||||
|
"memchr",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "anes"
|
||||||
|
version = "0.1.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "anstyle"
|
||||||
|
version = "1.0.10"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "autocfg"
|
||||||
|
version = "1.4.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bitflags"
|
||||||
|
version = "2.9.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bumpalo"
|
||||||
|
version = "3.17.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cast"
|
||||||
|
version = "0.3.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cfg-if"
|
||||||
|
version = "1.0.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ciborium"
|
||||||
|
version = "0.2.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
|
||||||
|
dependencies = [
|
||||||
|
"ciborium-io",
|
||||||
|
"ciborium-ll",
|
||||||
|
"serde",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ciborium-io"
|
||||||
|
version = "0.2.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ciborium-ll"
|
||||||
|
version = "0.2.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
|
||||||
|
dependencies = [
|
||||||
|
"ciborium-io",
|
||||||
|
"half",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap"
|
||||||
|
version = "4.5.35"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944"
|
||||||
|
dependencies = [
|
||||||
|
"clap_builder",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap_builder"
|
||||||
|
version = "4.5.35"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9"
|
||||||
|
dependencies = [
|
||||||
|
"anstyle",
|
||||||
|
"clap_lex",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap_lex"
|
||||||
|
version = "0.7.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crc32fast"
|
||||||
|
version = "1.4.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "criterion"
|
||||||
|
version = "0.5.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
|
||||||
|
dependencies = [
|
||||||
|
"anes",
|
||||||
|
"cast",
|
||||||
|
"ciborium",
|
||||||
|
"clap",
|
||||||
|
"criterion-plot",
|
||||||
|
"is-terminal",
|
||||||
|
"itertools",
|
||||||
|
"num-traits",
|
||||||
|
"once_cell",
|
||||||
|
"oorandom",
|
||||||
|
"plotters",
|
||||||
|
"rayon",
|
||||||
|
"regex",
|
||||||
|
"serde",
|
||||||
|
"serde_derive",
|
||||||
|
"serde_json",
|
||||||
|
"tinytemplate",
|
||||||
|
"walkdir",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "criterion-plot"
|
||||||
|
version = "0.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
|
||||||
|
dependencies = [
|
||||||
|
"cast",
|
||||||
|
"itertools",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crossbeam-deque"
|
||||||
|
version = "0.8.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
|
||||||
|
dependencies = [
|
||||||
|
"crossbeam-epoch",
|
||||||
|
"crossbeam-utils",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crossbeam-epoch"
|
||||||
|
version = "0.9.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
|
||||||
|
dependencies = [
|
||||||
|
"crossbeam-utils",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crossbeam-utils"
|
||||||
|
version = "0.8.21"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crunchy"
|
||||||
|
version = "0.2.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "either"
|
||||||
|
version = "1.15.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "errno"
|
||||||
|
version = "0.3.11"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "fastrand"
|
||||||
|
version = "2.3.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "getrandom"
|
||||||
|
version = "0.2.15"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"libc",
|
||||||
|
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "getrandom"
|
||||||
|
version = "0.3.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"libc",
|
||||||
|
"r-efi",
|
||||||
|
"wasi 0.14.2+wasi-0.2.4",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "half"
|
||||||
|
version = "2.6.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"crunchy",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "hermit-abi"
|
||||||
|
version = "0.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "is-terminal"
|
||||||
|
version = "0.4.16"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
|
||||||
|
dependencies = [
|
||||||
|
"hermit-abi",
|
||||||
|
"libc",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "itertools"
|
||||||
|
version = "0.10.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
|
||||||
|
dependencies = [
|
||||||
|
"either",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "itoa"
|
||||||
|
version = "1.0.15"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "js-sys"
|
||||||
|
version = "0.3.77"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
|
||||||
|
dependencies = [
|
||||||
|
"once_cell",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "libc"
|
||||||
|
version = "0.2.171"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "linux-raw-sys"
|
||||||
|
version = "0.9.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "log"
|
||||||
|
version = "0.4.27"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "memchr"
|
||||||
|
version = "2.7.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "num-traits"
|
||||||
|
version = "0.2.19"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
|
||||||
|
dependencies = [
|
||||||
|
"autocfg",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "once_cell"
|
||||||
|
version = "1.21.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "oorandom"
|
||||||
|
version = "11.1.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ourdb"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"crc32fast",
|
||||||
|
"criterion",
|
||||||
|
"log",
|
||||||
|
"rand",
|
||||||
|
"tempfile",
|
||||||
|
"thiserror",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "plotters"
|
||||||
|
version = "0.3.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
|
||||||
|
dependencies = [
|
||||||
|
"num-traits",
|
||||||
|
"plotters-backend",
|
||||||
|
"plotters-svg",
|
||||||
|
"wasm-bindgen",
|
||||||
|
"web-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "plotters-backend"
|
||||||
|
version = "0.3.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "plotters-svg"
|
||||||
|
version = "0.3.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
|
||||||
|
dependencies = [
|
||||||
|
"plotters-backend",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ppv-lite86"
|
||||||
|
version = "0.2.21"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
|
||||||
|
dependencies = [
|
||||||
|
"zerocopy",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "proc-macro2"
|
||||||
|
version = "1.0.94"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
|
||||||
|
dependencies = [
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "quote"
|
||||||
|
version = "1.0.40"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "r-efi"
|
||||||
|
version = "5.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rand"
|
||||||
|
version = "0.8.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"rand_chacha",
|
||||||
|
"rand_core",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rand_chacha"
|
||||||
|
version = "0.3.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
|
||||||
|
dependencies = [
|
||||||
|
"ppv-lite86",
|
||||||
|
"rand_core",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rand_core"
|
||||||
|
version = "0.6.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
|
||||||
|
dependencies = [
|
||||||
|
"getrandom 0.2.15",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rayon"
|
||||||
|
version = "1.10.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
|
||||||
|
dependencies = [
|
||||||
|
"either",
|
||||||
|
"rayon-core",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rayon-core"
|
||||||
|
version = "1.12.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
|
||||||
|
dependencies = [
|
||||||
|
"crossbeam-deque",
|
||||||
|
"crossbeam-utils",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex"
|
||||||
|
version = "1.11.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
|
||||||
|
dependencies = [
|
||||||
|
"aho-corasick",
|
||||||
|
"memchr",
|
||||||
|
"regex-automata",
|
||||||
|
"regex-syntax",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex-automata"
|
||||||
|
version = "0.4.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
|
||||||
|
dependencies = [
|
||||||
|
"aho-corasick",
|
||||||
|
"memchr",
|
||||||
|
"regex-syntax",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex-syntax"
|
||||||
|
version = "0.8.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustix"
|
||||||
|
version = "1.0.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags",
|
||||||
|
"errno",
|
||||||
|
"libc",
|
||||||
|
"linux-raw-sys",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustversion"
|
||||||
|
version = "1.0.20"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ryu"
|
||||||
|
version = "1.0.20"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "same-file"
|
||||||
|
version = "1.0.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
|
||||||
|
dependencies = [
|
||||||
|
"winapi-util",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde"
|
||||||
|
version = "1.0.219"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
|
||||||
|
dependencies = [
|
||||||
|
"serde_derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde_derive"
|
||||||
|
version = "1.0.219"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde_json"
|
||||||
|
version = "1.0.140"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
|
||||||
|
dependencies = [
|
||||||
|
"itoa",
|
||||||
|
"memchr",
|
||||||
|
"ryu",
|
||||||
|
"serde",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "syn"
|
||||||
|
version = "2.0.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tempfile"
|
||||||
|
version = "3.19.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
|
||||||
|
dependencies = [
|
||||||
|
"fastrand",
|
||||||
|
"getrandom 0.3.2",
|
||||||
|
"once_cell",
|
||||||
|
"rustix",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror"
|
||||||
|
version = "1.0.69"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
|
||||||
|
dependencies = [
|
||||||
|
"thiserror-impl",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror-impl"
|
||||||
|
version = "1.0.69"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tinytemplate"
|
||||||
|
version = "1.2.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
|
||||||
|
dependencies = [
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "unicode-ident"
|
||||||
|
version = "1.0.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "walkdir"
|
||||||
|
version = "2.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
|
||||||
|
dependencies = [
|
||||||
|
"same-file",
|
||||||
|
"winapi-util",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasi"
|
||||||
|
version = "0.11.0+wasi-snapshot-preview1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasi"
|
||||||
|
version = "0.14.2+wasi-0.2.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
|
||||||
|
dependencies = [
|
||||||
|
"wit-bindgen-rt",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"once_cell",
|
||||||
|
"rustversion",
|
||||||
|
"wasm-bindgen-macro",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-backend"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
|
||||||
|
dependencies = [
|
||||||
|
"bumpalo",
|
||||||
|
"log",
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
"wasm-bindgen-shared",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-macro"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
|
||||||
|
dependencies = [
|
||||||
|
"quote",
|
||||||
|
"wasm-bindgen-macro-support",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-macro-support"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
"wasm-bindgen-backend",
|
||||||
|
"wasm-bindgen-shared",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-shared"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
|
||||||
|
dependencies = [
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "web-sys"
|
||||||
|
version = "0.3.77"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
|
||||||
|
dependencies = [
|
||||||
|
"js-sys",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "winapi-util"
|
||||||
|
version = "0.1.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||||
|
dependencies = [
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-sys"
|
||||||
|
version = "0.59.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
|
||||||
|
dependencies = [
|
||||||
|
"windows-targets",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-targets"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
|
||||||
|
dependencies = [
|
||||||
|
"windows_aarch64_gnullvm",
|
||||||
|
"windows_aarch64_msvc",
|
||||||
|
"windows_i686_gnu",
|
||||||
|
"windows_i686_gnullvm",
|
||||||
|
"windows_i686_msvc",
|
||||||
|
"windows_x86_64_gnu",
|
||||||
|
"windows_x86_64_gnullvm",
|
||||||
|
"windows_x86_64_msvc",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_gnullvm"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_msvc"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_gnu"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_gnullvm"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_msvc"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnu"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnullvm"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_msvc"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wit-bindgen-rt"
|
||||||
|
version = "0.39.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "zerocopy"
|
||||||
|
version = "0.8.24"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
|
||||||
|
dependencies = [
|
||||||
|
"zerocopy-derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "zerocopy-derive"
|
||||||
|
version = "0.8.24"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
32
ourdb/Cargo.toml
Normal file
32
ourdb/Cargo.toml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
[package]
|
||||||
|
name = "ourdb"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "A lightweight, efficient key-value database with history tracking capabilities"
|
||||||
|
authors = ["OurWorld Team"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
crc32fast = "1.3.2"
|
||||||
|
thiserror = "1.0.40"
|
||||||
|
log = "0.4.17"
|
||||||
|
rand = "0.8.5"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
criterion = "0.5.1"
|
||||||
|
tempfile = "3.8.0"
|
||||||
|
|
||||||
|
# [[bench]]
|
||||||
|
# name = "ourdb_benchmarks"
|
||||||
|
# harness = false
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "basic_usage"
|
||||||
|
path = "examples/basic_usage.rs"
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "advanced_usage"
|
||||||
|
path = "examples/advanced_usage.rs"
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "benchmark"
|
||||||
|
path = "examples/benchmark.rs"
|
||||||
135
ourdb/README.md
Normal file
135
ourdb/README.md
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# OurDB
|
||||||
|
|
||||||
|
OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This Rust implementation offers a robust and performant solution for applications requiring simple but reliable data storage.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Simple key-value storage with history tracking
|
||||||
|
- Data integrity verification using CRC32
|
||||||
|
- Support for multiple backend files for large datasets
|
||||||
|
- Lookup table for fast data retrieval
|
||||||
|
- Incremental mode for auto-generated IDs
|
||||||
|
- Memory and disk-based lookup tables
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
- Maximum data size per entry is 65,535 bytes (~64KB) due to the 2-byte size field in the record header
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
fn main() -> Result<(), ourdb::Error> {
|
||||||
|
// Create a new database
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: PathBuf::from("/tmp/ourdb"),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: None, // Use default (500MB)
|
||||||
|
keysize: None, // Use default (4 bytes)
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
|
||||||
|
// Store data (with auto-generated ID in incremental mode)
|
||||||
|
let data = b"Hello, OurDB!";
|
||||||
|
let id = db.set(OurDBSetArgs { id: None, data })?;
|
||||||
|
println!("Stored data with ID: {}", id);
|
||||||
|
|
||||||
|
// Retrieve data
|
||||||
|
let retrieved = db.get(id)?;
|
||||||
|
println!("Retrieved: {}", String::from_utf8_lossy(&retrieved));
|
||||||
|
|
||||||
|
// Update data
|
||||||
|
let updated_data = b"Updated data";
|
||||||
|
db.set(OurDBSetArgs { id: Some(id), data: updated_data })?;
|
||||||
|
|
||||||
|
// Get history (returns most recent first)
|
||||||
|
let history = db.get_history(id, 2)?;
|
||||||
|
for (i, entry) in history.iter().enumerate() {
|
||||||
|
println!("History {}: {}", i, String::from_utf8_lossy(entry));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete data
|
||||||
|
db.delete(id)?;
|
||||||
|
|
||||||
|
// Close the database
|
||||||
|
db.close()?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Key-Value Mode vs Incremental Mode
|
||||||
|
|
||||||
|
OurDB supports two operating modes:
|
||||||
|
|
||||||
|
1. **Key-Value Mode** (`incremental_mode: false`): You must provide IDs explicitly when storing data.
|
||||||
|
2. **Incremental Mode** (`incremental_mode: true`): IDs are auto-generated when not provided.
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
- `path`: Directory for database storage
|
||||||
|
- `incremental_mode`: Whether to use auto-increment mode
|
||||||
|
- `file_size`: Maximum file size (default: 500MB)
|
||||||
|
- `keysize`: Size of lookup table entries (2-6 bytes)
|
||||||
|
- 2: For databases with < 65,536 records
|
||||||
|
- 3: For databases with < 16,777,216 records
|
||||||
|
- 4: For databases with < 4,294,967,296 records (default)
|
||||||
|
- 6: For large databases requiring multiple files
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
OurDB consists of three main components:
|
||||||
|
|
||||||
|
1. **Frontend API**: Provides the public interface for database operations
|
||||||
|
2. **Lookup Table**: Maps keys to physical locations in the backend storage
|
||||||
|
3. **Backend Storage**: Manages the actual data persistence in files
|
||||||
|
|
||||||
|
### Record Format
|
||||||
|
|
||||||
|
Each record in the backend storage includes:
|
||||||
|
- 2 bytes: Data size
|
||||||
|
- 4 bytes: CRC32 checksum
|
||||||
|
- 6 bytes: Previous record location (for history)
|
||||||
|
- N bytes: Actual data
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
Additional documentation is available in the repository:
|
||||||
|
|
||||||
|
- [API Reference](API.md): Detailed API documentation
|
||||||
|
- [Migration Guide](MIGRATION.md): Guide for migrating from the V implementation
|
||||||
|
- [Architecture](architecture.md): Design and implementation details
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
The repository includes several examples to demonstrate OurDB usage:
|
||||||
|
|
||||||
|
- `basic_usage.rs`: Simple operations with OurDB
|
||||||
|
- `advanced_usage.rs`: More complex features including both operation modes
|
||||||
|
- `benchmark.rs`: Performance benchmarking tool
|
||||||
|
|
||||||
|
Run an example with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo run --example basic_usage
|
||||||
|
cargo run --example advanced_usage
|
||||||
|
cargo run --example benchmark
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance
|
||||||
|
|
||||||
|
OurDB is designed for efficiency and minimal overhead. The benchmark example can be used to evaluate performance on your specific hardware and workload.
|
||||||
|
|
||||||
|
Typical performance metrics on modern hardware:
|
||||||
|
|
||||||
|
- **Write**: 10,000+ operations per second
|
||||||
|
- **Read**: 50,000+ operations per second
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This project is licensed under the MIT License.
|
||||||
439
ourdb/architecture.md
Normal file
439
ourdb/architecture.md
Normal file
@@ -0,0 +1,439 @@
|
|||||||
|
# OurDB: Architecture for V to Rust Port
|
||||||
|
|
||||||
|
## 1. Overview
|
||||||
|
|
||||||
|
OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This document outlines the architecture for porting OurDB from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.
|
||||||
|
|
||||||
|
## 2. Current Architecture (V Implementation)
|
||||||
|
|
||||||
|
The current V implementation of OurDB consists of three main components in a layered architecture:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TD
|
||||||
|
A[Client Code] --> B[Frontend API]
|
||||||
|
B --> C[Lookup Table]
|
||||||
|
B --> D[Backend Storage]
|
||||||
|
C --> D
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.1 Frontend (db.v)
|
||||||
|
|
||||||
|
The frontend provides the public API for database operations and coordinates between the lookup table and backend storage components.
|
||||||
|
|
||||||
|
Key responsibilities:
|
||||||
|
- Exposing high-level operations (set, get, delete, history)
|
||||||
|
- Managing incremental ID generation in auto-increment mode
|
||||||
|
- Coordinating data flow between lookup and backend components
|
||||||
|
- Handling database lifecycle (open, close, destroy)
|
||||||
|
|
||||||
|
### 2.2 Lookup Table (lookup.v)
|
||||||
|
|
||||||
|
The lookup table maps keys to physical locations in the backend storage.
|
||||||
|
|
||||||
|
Key responsibilities:
|
||||||
|
- Maintaining key-to-location mapping
|
||||||
|
- Optimizing key sizes based on database configuration
|
||||||
|
- Supporting both memory and disk-based lookup tables
|
||||||
|
- Handling sparse data efficiently
|
||||||
|
- Providing next ID generation for incremental mode
|
||||||
|
|
||||||
|
### 2.3 Backend Storage (backend.v)
|
||||||
|
|
||||||
|
The backend storage manages the actual data persistence in files.
|
||||||
|
|
||||||
|
Key responsibilities:
|
||||||
|
- Managing physical data storage in files
|
||||||
|
- Ensuring data integrity with CRC32 checksums
|
||||||
|
- Supporting multiple file backends for large datasets
|
||||||
|
- Implementing low-level read/write operations
|
||||||
|
- Tracking record history through linked locations
|
||||||
|
|
||||||
|
### 2.4 Core Data Structures
|
||||||
|
|
||||||
|
#### OurDB
|
||||||
|
```v
|
||||||
|
@[heap]
|
||||||
|
pub struct OurDB {
|
||||||
|
mut:
|
||||||
|
lookup &LookupTable
|
||||||
|
pub:
|
||||||
|
path string // directory for storage
|
||||||
|
incremental_mode bool
|
||||||
|
file_size u32 = 500 * (1 << 20) // 500MB
|
||||||
|
pub mut:
|
||||||
|
file os.File
|
||||||
|
file_nr u16 // the file which is open
|
||||||
|
last_used_file_nr u16
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### LookupTable
|
||||||
|
```v
|
||||||
|
pub struct LookupTable {
|
||||||
|
keysize u8
|
||||||
|
lookuppath string
|
||||||
|
mut:
|
||||||
|
data []u8
|
||||||
|
incremental ?u32 // points to next empty slot if incremental mode is enabled
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Location
|
||||||
|
```v
|
||||||
|
pub struct Location {
|
||||||
|
pub mut:
|
||||||
|
file_nr u16
|
||||||
|
position u32
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.5 Storage Format
|
||||||
|
|
||||||
|
#### Record Format
|
||||||
|
Each record in the backend storage includes:
|
||||||
|
- 2 bytes: Data size
|
||||||
|
- 4 bytes: CRC32 checksum
|
||||||
|
- 6 bytes: Previous record location (for history)
|
||||||
|
- N bytes: Actual data
|
||||||
|
|
||||||
|
#### Lookup Table Optimization
|
||||||
|
The lookup table automatically optimizes its key size based on the database configuration:
|
||||||
|
- 2 bytes: For databases with < 65,536 records
|
||||||
|
- 3 bytes: For databases with < 16,777,216 records
|
||||||
|
- 4 bytes: For databases with < 4,294,967,296 records
|
||||||
|
- 6 bytes: For large databases requiring multiple files
|
||||||
|
|
||||||
|
## 3. Proposed Rust Architecture
|
||||||
|
|
||||||
|
The Rust implementation will maintain the same layered architecture while leveraging Rust's type system, ownership model, and error handling.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TD
|
||||||
|
A[Client Code] --> B[OurDB API]
|
||||||
|
B --> C[LookupTable]
|
||||||
|
B --> D[Backend]
|
||||||
|
C --> D
|
||||||
|
E[Error Handling] --> B
|
||||||
|
E --> C
|
||||||
|
E --> D
|
||||||
|
F[Configuration] --> B
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.1 Core Components
|
||||||
|
|
||||||
|
#### 3.1.1 OurDB (API Layer)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct OurDB {
|
||||||
|
path: String,
|
||||||
|
incremental_mode: bool,
|
||||||
|
file_size: u32,
|
||||||
|
lookup: LookupTable,
|
||||||
|
file: Option<std::fs::File>,
|
||||||
|
file_nr: u16,
|
||||||
|
last_used_file_nr: u16,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OurDB {
|
||||||
|
pub fn new(config: OurDBConfig) -> Result<Self, Error>;
|
||||||
|
pub fn set(&mut self, id: Option<u32>, data: &[u8]) -> Result<u32, Error>;
|
||||||
|
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>;
|
||||||
|
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>;
|
||||||
|
pub fn delete(&mut self, id: u32) -> Result<(), Error>;
|
||||||
|
pub fn get_next_id(&mut self) -> Result<u32, Error>;
|
||||||
|
pub fn close(&mut self) -> Result<(), Error>;
|
||||||
|
pub fn destroy(&mut self) -> Result<(), Error>;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 3.1.2 LookupTable
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct LookupTable {
|
||||||
|
keysize: u8,
|
||||||
|
lookuppath: String,
|
||||||
|
data: Vec<u8>,
|
||||||
|
incremental: Option<u32>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LookupTable {
|
||||||
|
fn new(config: LookupConfig) -> Result<Self, Error>;
|
||||||
|
fn get(&self, id: u32) -> Result<Location, Error>;
|
||||||
|
fn set(&mut self, id: u32, location: Location) -> Result<(), Error>;
|
||||||
|
fn delete(&mut self, id: u32) -> Result<(), Error>;
|
||||||
|
fn get_next_id(&self) -> Result<u32, Error>;
|
||||||
|
fn increment_index(&mut self) -> Result<(), Error>;
|
||||||
|
fn export_data(&self, path: &str) -> Result<(), Error>;
|
||||||
|
fn import_data(&mut self, path: &str) -> Result<(), Error>;
|
||||||
|
fn export_sparse(&self, path: &str) -> Result<(), Error>;
|
||||||
|
fn import_sparse(&mut self, path: &str) -> Result<(), Error>;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 3.1.3 Location
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct Location {
|
||||||
|
file_nr: u16,
|
||||||
|
position: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Location {
|
||||||
|
fn new(bytes: &[u8], keysize: u8) -> Result<Self, Error>;
|
||||||
|
fn to_bytes(&self) -> Result<Vec<u8>, Error>;
|
||||||
|
fn to_u64(&self) -> u64;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 3.1.4 Backend
|
||||||
|
|
||||||
|
The backend functionality will be implemented as methods on the OurDB struct:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
impl OurDB {
|
||||||
|
fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error>;
|
||||||
|
fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error>;
|
||||||
|
fn get_file_nr(&mut self) -> Result<u16, Error>;
|
||||||
|
fn set_(&mut self, id: u32, old_location: Location, data: &[u8]) -> Result<(), Error>;
|
||||||
|
fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error>;
|
||||||
|
fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error>;
|
||||||
|
fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error>;
|
||||||
|
fn close_(&mut self);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 3.1.5 Configuration
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct OurDBConfig {
|
||||||
|
pub record_nr_max: u32,
|
||||||
|
pub record_size_max: u32,
|
||||||
|
pub file_size: u32,
|
||||||
|
pub path: String,
|
||||||
|
pub incremental_mode: bool,
|
||||||
|
pub reset: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct LookupConfig {
|
||||||
|
size: u32,
|
||||||
|
keysize: u8,
|
||||||
|
lookuppath: String,
|
||||||
|
incremental_mode: bool,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 3.1.6 Error Handling
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("I/O error: {0}")]
|
||||||
|
Io(#[from] std::io::Error),
|
||||||
|
|
||||||
|
#[error("Invalid key size: {0}")]
|
||||||
|
InvalidKeySize(u8),
|
||||||
|
|
||||||
|
#[error("Record not found: {0}")]
|
||||||
|
RecordNotFound(u32),
|
||||||
|
|
||||||
|
#[error("Data corruption: CRC mismatch")]
|
||||||
|
DataCorruption,
|
||||||
|
|
||||||
|
#[error("Index out of bounds: {0}")]
|
||||||
|
IndexOutOfBounds(u32),
|
||||||
|
|
||||||
|
#[error("Incremental mode not enabled")]
|
||||||
|
IncrementalNotEnabled,
|
||||||
|
|
||||||
|
#[error("Lookup table is full")]
|
||||||
|
LookupTableFull,
|
||||||
|
|
||||||
|
#[error("Invalid file number: {0}")]
|
||||||
|
InvalidFileNumber(u16),
|
||||||
|
|
||||||
|
#[error("Invalid operation: {0}")]
|
||||||
|
InvalidOperation(String),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 4. Implementation Strategy
|
||||||
|
|
||||||
|
### 4.1 Phase 1: Core Data Structures
|
||||||
|
|
||||||
|
1. Implement the `Location` struct with serialization/deserialization
|
||||||
|
2. Implement the `Error` enum for error handling
|
||||||
|
3. Implement the configuration structures
|
||||||
|
|
||||||
|
### 4.2 Phase 2: Lookup Table
|
||||||
|
|
||||||
|
1. Implement the `LookupTable` struct with memory-based storage
|
||||||
|
2. Add disk-based storage support
|
||||||
|
3. Implement key size optimization
|
||||||
|
4. Add incremental ID support
|
||||||
|
5. Implement import/export functionality
|
||||||
|
|
||||||
|
### 4.3 Phase 3: Backend Storage
|
||||||
|
|
||||||
|
1. Implement file management functions
|
||||||
|
2. Implement record serialization/deserialization with CRC32
|
||||||
|
3. Implement history tracking through linked locations
|
||||||
|
4. Add support for multiple backend files
|
||||||
|
|
||||||
|
### 4.4 Phase 4: Frontend API
|
||||||
|
|
||||||
|
1. Implement the `OurDB` struct with core operations
|
||||||
|
2. Add high-level API methods (set, get, delete, history)
|
||||||
|
3. Implement database lifecycle management
|
||||||
|
|
||||||
|
### 4.5 Phase 5: Testing and Optimization
|
||||||
|
|
||||||
|
1. Port existing tests from V to Rust
|
||||||
|
2. Add new tests for Rust-specific functionality
|
||||||
|
3. Benchmark and optimize performance
|
||||||
|
4. Ensure compatibility with existing OurDB files
|
||||||
|
|
||||||
|
## 5. Implementation Considerations
|
||||||
|
|
||||||
|
### 5.1 Memory Management
|
||||||
|
|
||||||
|
Leverage Rust's ownership model for safe and efficient memory management:
|
||||||
|
- Use `Vec<u8>` for data buffers instead of raw pointers
|
||||||
|
- Implement proper RAII for file handles
|
||||||
|
- Use references and borrows to avoid unnecessary copying
|
||||||
|
- Consider using `Bytes` from the `bytes` crate for zero-copy operations
|
||||||
|
|
||||||
|
### 5.2 Error Handling
|
||||||
|
|
||||||
|
Use Rust's `Result` type for comprehensive error handling:
|
||||||
|
- Define custom error types for OurDB-specific errors
|
||||||
|
- Propagate errors using the `?` operator
|
||||||
|
- Provide detailed error messages
|
||||||
|
- Implement proper error conversion using the `From` trait
|
||||||
|
|
||||||
|
### 5.3 File I/O
|
||||||
|
|
||||||
|
Optimize file operations for performance:
|
||||||
|
- Use `BufReader` and `BufWriter` for buffered I/O
|
||||||
|
- Implement proper file locking for concurrent access
|
||||||
|
- Consider memory-mapped files for lookup tables
|
||||||
|
- Use `seek` and `read_exact` for precise positioning
|
||||||
|
|
||||||
|
### 5.4 Concurrency
|
||||||
|
|
||||||
|
Consider thread safety for concurrent database access:
|
||||||
|
- Use interior mutability patterns where appropriate
|
||||||
|
- Implement `Send` and `Sync` traits for thread safety
|
||||||
|
- Consider using `RwLock` for shared read access
|
||||||
|
- Provide clear documentation on thread safety guarantees
|
||||||
|
|
||||||
|
### 5.5 Performance Optimizations
|
||||||
|
|
||||||
|
Identify opportunities for performance improvements:
|
||||||
|
- Use memory-mapped files for lookup tables
|
||||||
|
- Implement caching for frequently accessed records
|
||||||
|
- Use zero-copy operations where possible
|
||||||
|
- Consider async I/O for non-blocking operations
|
||||||
|
|
||||||
|
## 6. Testing Strategy
|
||||||
|
|
||||||
|
### 6.1 Unit Tests
|
||||||
|
|
||||||
|
Write comprehensive unit tests for each component:
|
||||||
|
- Test `Location` serialization/deserialization
|
||||||
|
- Test `LookupTable` operations
|
||||||
|
- Test backend storage functions
|
||||||
|
- Test error handling
|
||||||
|
|
||||||
|
### 6.2 Integration Tests
|
||||||
|
|
||||||
|
Write integration tests for the complete system:
|
||||||
|
- Test database creation and configuration
|
||||||
|
- Test basic CRUD operations
|
||||||
|
- Test history tracking
|
||||||
|
- Test incremental ID generation
|
||||||
|
- Test file management
|
||||||
|
|
||||||
|
### 6.3 Compatibility Tests
|
||||||
|
|
||||||
|
Ensure compatibility with existing OurDB files:
|
||||||
|
- Test reading existing V-created OurDB files
|
||||||
|
- Test writing files that can be read by the V implementation
|
||||||
|
- Test migration scenarios
|
||||||
|
|
||||||
|
### 6.4 Performance Tests
|
||||||
|
|
||||||
|
Benchmark performance against the V implementation:
|
||||||
|
- Measure throughput for set/get operations
|
||||||
|
- Measure latency for different operations
|
||||||
|
- Test with different database sizes
|
||||||
|
- Test with different record sizes
|
||||||
|
|
||||||
|
## 7. Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
ourdb/
|
||||||
|
├── Cargo.toml
|
||||||
|
├── src/
|
||||||
|
│ ├── lib.rs # Public API and re-exports
|
||||||
|
│ ├── ourdb.rs # OurDB implementation (frontend)
|
||||||
|
│ ├── lookup.rs # Lookup table implementation
|
||||||
|
│ ├── location.rs # Location struct implementation
|
||||||
|
│ ├── backend.rs # Backend storage implementation
|
||||||
|
│ ├── error.rs # Error types
|
||||||
|
│ ├── config.rs # Configuration structures
|
||||||
|
│ └── utils.rs # Utility functions
|
||||||
|
├── tests/
|
||||||
|
│ ├── unit/ # Unit tests
|
||||||
|
│ ├── integration/ # Integration tests
|
||||||
|
│ └── compatibility/ # Compatibility tests
|
||||||
|
└── examples/
|
||||||
|
├── basic.rs # Basic usage example
|
||||||
|
├── history.rs # History tracking example
|
||||||
|
└── client_server.rs # Client-server example
|
||||||
|
```
|
||||||
|
|
||||||
|
## 8. Dependencies
|
||||||
|
|
||||||
|
The Rust implementation will use the following dependencies:
|
||||||
|
|
||||||
|
- `thiserror` for error handling
|
||||||
|
- `crc32fast` for CRC32 calculation
|
||||||
|
- `bytes` for efficient byte manipulation
|
||||||
|
- `memmap2` for memory-mapped files (optional)
|
||||||
|
- `serde` for serialization (optional, for future extensions)
|
||||||
|
- `log` for logging
|
||||||
|
- `criterion` for benchmarking
|
||||||
|
|
||||||
|
## 9. Compatibility Considerations
|
||||||
|
|
||||||
|
To ensure compatibility with the V implementation:
|
||||||
|
|
||||||
|
1. Maintain the same file format for data storage
|
||||||
|
2. Preserve the lookup table format
|
||||||
|
3. Keep the same CRC32 calculation method
|
||||||
|
4. Ensure identical behavior for incremental ID generation
|
||||||
|
5. Maintain the same history tracking mechanism
|
||||||
|
|
||||||
|
## 10. Future Extensions
|
||||||
|
|
||||||
|
Potential future extensions to consider:
|
||||||
|
|
||||||
|
1. Async API for non-blocking operations
|
||||||
|
2. Transactions support
|
||||||
|
3. Better concurrency control
|
||||||
|
4. Compression support
|
||||||
|
5. Encryption support
|
||||||
|
6. Streaming API for large values
|
||||||
|
7. Iterators for scanning records
|
||||||
|
8. Secondary indexes
|
||||||
|
|
||||||
|
## 11. Conclusion
|
||||||
|
|
||||||
|
This architecture provides a roadmap for porting OurDB from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.
|
||||||
|
|
||||||
|
The Rust implementation aims to be:
|
||||||
|
- **Safe**: Leveraging Rust's ownership model for memory safety
|
||||||
|
- **Fast**: Maintaining or improving performance compared to V
|
||||||
|
- **Compatible**: Working with existing OurDB files
|
||||||
|
- **Extensible**: Providing a foundation for future enhancements
|
||||||
|
- **Well-tested**: Including comprehensive test coverage
|
||||||
231
ourdb/examples/advanced_usage.rs
Normal file
231
ourdb/examples/advanced_usage.rs
Normal file
@@ -0,0 +1,231 @@
|
|||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
fn main() -> Result<(), ourdb::Error> {
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
let db_path = std::env::temp_dir().join("ourdb_advanced_example");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
println!("Creating database at: {}", db_path.display());
|
||||||
|
|
||||||
|
// Demonstrate key-value mode (non-incremental)
|
||||||
|
key_value_mode_example(&db_path)?;
|
||||||
|
|
||||||
|
// Demonstrate incremental mode
|
||||||
|
incremental_mode_example(&db_path)?;
|
||||||
|
|
||||||
|
// Demonstrate performance benchmarking
|
||||||
|
performance_benchmark(&db_path)?;
|
||||||
|
|
||||||
|
// Clean up (optional)
|
||||||
|
if std::env::var("KEEP_DB").is_err() {
|
||||||
|
std::fs::remove_dir_all(&db_path)?;
|
||||||
|
println!("Cleaned up database directory");
|
||||||
|
} else {
|
||||||
|
println!("Database kept at: {}", db_path.display());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key_value_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
|
||||||
|
println!("\n=== Key-Value Mode Example ===");
|
||||||
|
|
||||||
|
let db_path = base_path.join("key_value");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
// Create a new database with key-value mode (non-incremental)
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: db_path,
|
||||||
|
incremental_mode: false,
|
||||||
|
file_size: Some(1024 * 1024), // 1MB for testing
|
||||||
|
keysize: Some(2), // Small key size for demonstration
|
||||||
|
reset: None, // Don't reset existing database
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
|
||||||
|
// In key-value mode, we must provide IDs explicitly
|
||||||
|
let custom_ids = [100, 200, 300, 400, 500];
|
||||||
|
|
||||||
|
// Store data with custom IDs
|
||||||
|
for (i, &id) in custom_ids.iter().enumerate() {
|
||||||
|
let data = format!("Record with custom ID {}", id);
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id),
|
||||||
|
data: data.as_bytes(),
|
||||||
|
})?;
|
||||||
|
println!("Stored record {} with custom ID: {}", i + 1, id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve data by custom IDs
|
||||||
|
for &id in &custom_ids {
|
||||||
|
let retrieved = db.get(id)?;
|
||||||
|
println!(
|
||||||
|
"Retrieved ID {}: {}",
|
||||||
|
id,
|
||||||
|
String::from_utf8_lossy(&retrieved)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update and track history
|
||||||
|
let id_to_update = custom_ids[2]; // ID 300
|
||||||
|
for i in 1..=3 {
|
||||||
|
let updated_data = format!("Updated record {} (version {})", id_to_update, i);
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id_to_update),
|
||||||
|
data: updated_data.as_bytes(),
|
||||||
|
})?;
|
||||||
|
println!("Updated ID {} (version {})", id_to_update, i);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get history for the updated record
|
||||||
|
let history = db.get_history(id_to_update, 5)?;
|
||||||
|
println!("History for ID {} (most recent first):", id_to_update);
|
||||||
|
for (i, entry) in history.iter().enumerate() {
|
||||||
|
println!(" Version {}: {}", i, String::from_utf8_lossy(entry));
|
||||||
|
}
|
||||||
|
|
||||||
|
db.close()?;
|
||||||
|
println!("Key-value mode example completed");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn incremental_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
|
||||||
|
println!("\n=== Incremental Mode Example ===");
|
||||||
|
|
||||||
|
let db_path = base_path.join("incremental");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
// Create a new database with incremental mode
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: db_path,
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: Some(1024 * 1024), // 1MB for testing
|
||||||
|
keysize: Some(3), // 3-byte keys
|
||||||
|
reset: None, // Don't reset existing database
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
|
||||||
|
// In incremental mode, IDs are auto-generated
|
||||||
|
let mut assigned_ids = Vec::new();
|
||||||
|
|
||||||
|
// Store multiple records and collect assigned IDs
|
||||||
|
for i in 1..=5 {
|
||||||
|
let data = format!("Auto-increment record {}", i);
|
||||||
|
let id = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: data.as_bytes(),
|
||||||
|
})?;
|
||||||
|
assigned_ids.push(id);
|
||||||
|
println!("Stored record {} with auto-assigned ID: {}", i, id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check next ID
|
||||||
|
let next_id = db.get_next_id()?;
|
||||||
|
println!("Next ID to be assigned: {}", next_id);
|
||||||
|
|
||||||
|
// Retrieve all records
|
||||||
|
for &id in &assigned_ids {
|
||||||
|
let retrieved = db.get(id)?;
|
||||||
|
println!(
|
||||||
|
"Retrieved ID {}: {}",
|
||||||
|
id,
|
||||||
|
String::from_utf8_lossy(&retrieved)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
db.close()?;
|
||||||
|
println!("Incremental mode example completed");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn performance_benchmark(base_path: &PathBuf) -> Result<(), ourdb::Error> {
|
||||||
|
println!("\n=== Performance Benchmark ===");
|
||||||
|
|
||||||
|
let db_path = base_path.join("benchmark");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
// Create a new database
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: db_path,
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: Some(1024 * 1024), // 10MB
|
||||||
|
keysize: Some(4), // 4-byte keys
|
||||||
|
reset: None, // Don't reset existing database
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
|
||||||
|
// Number of operations for the benchmark
|
||||||
|
let num_operations = 1000;
|
||||||
|
let data_size = 100; // bytes per record
|
||||||
|
|
||||||
|
// Prepare test data
|
||||||
|
let test_data = vec![b'A'; data_size];
|
||||||
|
|
||||||
|
// Benchmark write operations
|
||||||
|
println!("Benchmarking {} write operations...", num_operations);
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
let mut ids = Vec::with_capacity(num_operations);
|
||||||
|
for _ in 0..num_operations {
|
||||||
|
let id = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: &test_data,
|
||||||
|
})?;
|
||||||
|
ids.push(id);
|
||||||
|
}
|
||||||
|
|
||||||
|
let write_duration = start.elapsed();
|
||||||
|
let writes_per_second = num_operations as f64 / write_duration.as_secs_f64();
|
||||||
|
println!(
|
||||||
|
"Write performance: {:.2} ops/sec ({:.2} ms/op)",
|
||||||
|
writes_per_second,
|
||||||
|
write_duration.as_secs_f64() * 1000.0 / num_operations as f64
|
||||||
|
);
|
||||||
|
|
||||||
|
// Benchmark read operations
|
||||||
|
println!("Benchmarking {} read operations...", num_operations);
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
for &id in &ids {
|
||||||
|
let _ = db.get(id)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let read_duration = start.elapsed();
|
||||||
|
let reads_per_second = num_operations as f64 / read_duration.as_secs_f64();
|
||||||
|
println!(
|
||||||
|
"Read performance: {:.2} ops/sec ({:.2} ms/op)",
|
||||||
|
reads_per_second,
|
||||||
|
read_duration.as_secs_f64() * 1000.0 / num_operations as f64
|
||||||
|
);
|
||||||
|
|
||||||
|
// Benchmark update operations
|
||||||
|
println!("Benchmarking {} update operations...", num_operations);
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
for &id in &ids {
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id),
|
||||||
|
data: &test_data,
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let update_duration = start.elapsed();
|
||||||
|
let updates_per_second = num_operations as f64 / update_duration.as_secs_f64();
|
||||||
|
println!(
|
||||||
|
"Update performance: {:.2} ops/sec ({:.2} ms/op)",
|
||||||
|
updates_per_second,
|
||||||
|
update_duration.as_secs_f64() * 1000.0 / num_operations as f64
|
||||||
|
);
|
||||||
|
|
||||||
|
db.close()?;
|
||||||
|
println!("Performance benchmark completed");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
New file: ourdb/examples/basic_usage.rs — 89 lines (@@ -0,0 +1,89 @@)
|
|||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
|
||||||
|
fn main() -> Result<(), ourdb::Error> {
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
let db_path = std::env::temp_dir().join("ourdb_example");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
println!("Creating database at: {}", db_path.display());
|
||||||
|
|
||||||
|
// Create a new database with incremental mode enabled
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: db_path.clone(),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: None, // Use default (500MB)
|
||||||
|
keysize: None, // Use default (4 bytes)
|
||||||
|
reset: None, // Don't reset existing database
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
|
||||||
|
// Store some data with auto-generated IDs
|
||||||
|
let data1 = b"First record";
|
||||||
|
let id1 = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: data1,
|
||||||
|
})?;
|
||||||
|
println!("Stored first record with ID: {}", id1);
|
||||||
|
|
||||||
|
let data2 = b"Second record";
|
||||||
|
let id2 = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: data2,
|
||||||
|
})?;
|
||||||
|
println!("Stored second record with ID: {}", id2);
|
||||||
|
|
||||||
|
// Retrieve and print the data
|
||||||
|
let retrieved1 = db.get(id1)?;
|
||||||
|
println!(
|
||||||
|
"Retrieved ID {}: {}",
|
||||||
|
id1,
|
||||||
|
String::from_utf8_lossy(&retrieved1)
|
||||||
|
);
|
||||||
|
|
||||||
|
let retrieved2 = db.get(id2)?;
|
||||||
|
println!(
|
||||||
|
"Retrieved ID {}: {}",
|
||||||
|
id2,
|
||||||
|
String::from_utf8_lossy(&retrieved2)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Update a record to demonstrate history tracking
|
||||||
|
let updated_data = b"Updated first record";
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id1),
|
||||||
|
data: updated_data,
|
||||||
|
})?;
|
||||||
|
println!("Updated record with ID: {}", id1);
|
||||||
|
|
||||||
|
// Get history for the updated record
|
||||||
|
let history = db.get_history(id1, 2)?;
|
||||||
|
println!("History for ID {}:", id1);
|
||||||
|
for (i, entry) in history.iter().enumerate() {
|
||||||
|
println!(" Version {}: {}", i, String::from_utf8_lossy(entry));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete a record
|
||||||
|
db.delete(id2)?;
|
||||||
|
println!("Deleted record with ID: {}", id2);
|
||||||
|
|
||||||
|
// Verify deletion
|
||||||
|
match db.get(id2) {
|
||||||
|
Ok(_) => println!("Record still exists (unexpected)"),
|
||||||
|
Err(e) => println!("Verified deletion: {}", e),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the database
|
||||||
|
db.close()?;
|
||||||
|
println!("Database closed successfully");
|
||||||
|
|
||||||
|
// Clean up (optional)
|
||||||
|
if std::env::var("KEEP_DB").is_err() {
|
||||||
|
std::fs::remove_dir_all(&db_path)?;
|
||||||
|
println!("Cleaned up database directory");
|
||||||
|
} else {
|
||||||
|
println!("Database kept at: {}", db_path.display());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
New file: ourdb/examples/benchmark.rs — 124 lines (@@ -0,0 +1,124 @@)
|
|||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
fn main() -> Result<(), ourdb::Error> {
|
||||||
|
// Parse command-line arguments
|
||||||
|
let args: Vec<String> = std::env::args().collect();
|
||||||
|
|
||||||
|
// Default values
|
||||||
|
let mut incremental_mode = true;
|
||||||
|
let mut keysize: u8 = 4;
|
||||||
|
let mut num_operations = 10000;
|
||||||
|
|
||||||
|
// Parse arguments
|
||||||
|
for i in 1..args.len() {
|
||||||
|
if args[i] == "--no-incremental" {
|
||||||
|
incremental_mode = false;
|
||||||
|
} else if args[i] == "--keysize" && i + 1 < args.len() {
|
||||||
|
keysize = args[i + 1].parse().unwrap_or(4);
|
||||||
|
} else if args[i] == "--ops" && i + 1 < args.len() {
|
||||||
|
num_operations = args[i + 1].parse().unwrap_or(10000);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
let db_path = std::env::temp_dir().join("ourdb_benchmark");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
println!("Database path: {}", db_path.display());
|
||||||
|
|
||||||
|
// Create a new database
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: db_path.clone(),
|
||||||
|
incremental_mode,
|
||||||
|
file_size: Some(1024 * 1024),
|
||||||
|
keysize: Some(keysize),
|
||||||
|
reset: Some(true), // Reset the database for benchmarking
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
|
||||||
|
// Prepare test data (100 bytes per record)
|
||||||
|
let test_data = vec![b'A'; 100];
|
||||||
|
|
||||||
|
// Benchmark write operations
|
||||||
|
println!(
|
||||||
|
"Benchmarking {} write operations (incremental: {}, keysize: {})...",
|
||||||
|
num_operations, incremental_mode, keysize
|
||||||
|
);
|
||||||
|
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
let mut ids = Vec::with_capacity(num_operations);
|
||||||
|
for _ in 0..num_operations {
|
||||||
|
let id = if incremental_mode {
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: &test_data,
|
||||||
|
})?
|
||||||
|
} else {
|
||||||
|
// In non-incremental mode, we need to provide IDs
|
||||||
|
let id = ids.len() as u32 + 1;
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id),
|
||||||
|
data: &test_data,
|
||||||
|
})?;
|
||||||
|
id
|
||||||
|
};
|
||||||
|
ids.push(id);
|
||||||
|
}
|
||||||
|
|
||||||
|
let write_duration = start.elapsed();
|
||||||
|
let writes_per_second = num_operations as f64 / write_duration.as_secs_f64();
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"Write performance: {:.2} ops/sec ({:.2} ms/op)",
|
||||||
|
writes_per_second,
|
||||||
|
write_duration.as_secs_f64() * 1000.0 / num_operations as f64
|
||||||
|
);
|
||||||
|
|
||||||
|
// Benchmark read operations
|
||||||
|
println!("Benchmarking {} read operations...", num_operations);
|
||||||
|
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
for &id in &ids {
|
||||||
|
let _ = db.get(id)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let read_duration = start.elapsed();
|
||||||
|
let reads_per_second = num_operations as f64 / read_duration.as_secs_f64();
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"Read performance: {:.2} ops/sec ({:.2} ms/op)",
|
||||||
|
reads_per_second,
|
||||||
|
read_duration.as_secs_f64() * 1000.0 / num_operations as f64
|
||||||
|
);
|
||||||
|
|
||||||
|
// Benchmark update operations
|
||||||
|
println!("Benchmarking {} update operations...", num_operations);
|
||||||
|
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
for &id in &ids {
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id),
|
||||||
|
data: &test_data,
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let update_duration = start.elapsed();
|
||||||
|
let updates_per_second = num_operations as f64 / update_duration.as_secs_f64();
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"Update performance: {:.2} ops/sec ({:.2} ms/op)",
|
||||||
|
updates_per_second,
|
||||||
|
update_duration.as_secs_f64() * 1000.0 / num_operations as f64
|
||||||
|
);
|
||||||
|
|
||||||
|
// Clean up
|
||||||
|
db.close()?;
|
||||||
|
std::fs::remove_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
New file: ourdb/examples/main.rs — 83 lines (@@ -0,0 +1,83 @@)
|
|||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use std::env::temp_dir;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
println!("Standalone OurDB Example");
|
||||||
|
println!("=======================\n");
|
||||||
|
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
let timestamp = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_secs();
|
||||||
|
let db_path = temp_dir().join(format!("ourdb_example_{}", timestamp));
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
println!("Creating database at: {}", db_path.display());
|
||||||
|
|
||||||
|
// Create a new OurDB instance
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: db_path.clone(),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: None,
|
||||||
|
keysize: None,
|
||||||
|
reset: Some(false),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
println!("Database created successfully");
|
||||||
|
|
||||||
|
// Store some data
|
||||||
|
let test_data = b"Hello, OurDB!";
|
||||||
|
let id = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: test_data,
|
||||||
|
})?;
|
||||||
|
println!("\nStored data with ID: {}", id);
|
||||||
|
|
||||||
|
// Retrieve the data
|
||||||
|
let retrieved = db.get(id)?;
|
||||||
|
println!("Retrieved data: {}", String::from_utf8_lossy(&retrieved));
|
||||||
|
|
||||||
|
// Update the data
|
||||||
|
let updated_data = b"Updated data in OurDB!";
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id),
|
||||||
|
data: updated_data,
|
||||||
|
})?;
|
||||||
|
println!("\nUpdated data with ID: {}", id);
|
||||||
|
|
||||||
|
// Retrieve the updated data
|
||||||
|
let retrieved = db.get(id)?;
|
||||||
|
println!(
|
||||||
|
"Retrieved updated data: {}",
|
||||||
|
String::from_utf8_lossy(&retrieved)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Get history
|
||||||
|
let history = db.get_history(id, 2)?;
|
||||||
|
println!("\nHistory for ID {}:", id);
|
||||||
|
for (i, data) in history.iter().enumerate() {
|
||||||
|
println!(" Version {}: {}", i + 1, String::from_utf8_lossy(data));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the data
|
||||||
|
db.delete(id)?;
|
||||||
|
println!("\nDeleted data with ID: {}", id);
|
||||||
|
|
||||||
|
// Try to retrieve the deleted data (should fail)
|
||||||
|
match db.get(id) {
|
||||||
|
Ok(_) => println!("Data still exists (unexpected)"),
|
||||||
|
Err(e) => println!("Verified deletion: {}", e),
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("\nExample completed successfully!");
|
||||||
|
|
||||||
|
// Clean up
|
||||||
|
db.close()?;
|
||||||
|
std::fs::remove_dir_all(&db_path)?;
|
||||||
|
println!("Cleaned up database directory");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
New file: ourdb/examples/standalone_ourdb_example.rs — 83 lines (@@ -0,0 +1,83 @@)
|
|||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use std::env::temp_dir;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
println!("Standalone OurDB Example");
|
||||||
|
println!("=======================\n");
|
||||||
|
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
let timestamp = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_secs();
|
||||||
|
let db_path = temp_dir().join(format!("ourdb_example_{}", timestamp));
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
println!("Creating database at: {}", db_path.display());
|
||||||
|
|
||||||
|
// Create a new OurDB instance
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: db_path.clone(),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: None,
|
||||||
|
keysize: None,
|
||||||
|
reset: Some(false),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
println!("Database created successfully");
|
||||||
|
|
||||||
|
// Store some data
|
||||||
|
let test_data = b"Hello, OurDB!";
|
||||||
|
let id = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: test_data,
|
||||||
|
})?;
|
||||||
|
println!("\nStored data with ID: {}", id);
|
||||||
|
|
||||||
|
// Retrieve the data
|
||||||
|
let retrieved = db.get(id)?;
|
||||||
|
println!("Retrieved data: {}", String::from_utf8_lossy(&retrieved));
|
||||||
|
|
||||||
|
// Update the data
|
||||||
|
let updated_data = b"Updated data in OurDB!";
|
||||||
|
db.set(OurDBSetArgs {
|
||||||
|
id: Some(id),
|
||||||
|
data: updated_data,
|
||||||
|
})?;
|
||||||
|
println!("\nUpdated data with ID: {}", id);
|
||||||
|
|
||||||
|
// Retrieve the updated data
|
||||||
|
let retrieved = db.get(id)?;
|
||||||
|
println!(
|
||||||
|
"Retrieved updated data: {}",
|
||||||
|
String::from_utf8_lossy(&retrieved)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Get history
|
||||||
|
let history = db.get_history(id, 2)?;
|
||||||
|
println!("\nHistory for ID {}:", id);
|
||||||
|
for (i, data) in history.iter().enumerate() {
|
||||||
|
println!(" Version {}: {}", i + 1, String::from_utf8_lossy(data));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the data
|
||||||
|
db.delete(id)?;
|
||||||
|
println!("\nDeleted data with ID: {}", id);
|
||||||
|
|
||||||
|
// Try to retrieve the deleted data (should fail)
|
||||||
|
match db.get(id) {
|
||||||
|
Ok(_) => println!("Data still exists (unexpected)"),
|
||||||
|
Err(e) => println!("Verified deletion: {}", e),
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("\nExample completed successfully!");
|
||||||
|
|
||||||
|
// Clean up
|
||||||
|
db.close()?;
|
||||||
|
std::fs::remove_dir_all(&db_path)?;
|
||||||
|
println!("Cleaned up database directory");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
New file: ourdb/src/backend.rs — 366 lines (@@ -0,0 +1,366 @@)
|
|||||||
|
use std::fs::{self, File, OpenOptions};
|
||||||
|
use std::io::{Read, Seek, SeekFrom, Write};
|
||||||
|
|
||||||
|
use crc32fast::Hasher;
|
||||||
|
|
||||||
|
use crate::error::Error;
|
||||||
|
use crate::location::Location;
|
||||||
|
use crate::OurDB;
|
||||||
|
|
||||||
|
// Per-record header layout: 2 bytes (size) + 4 bytes (CRC32) + 6 bytes (previous location)
pub const HEADER_SIZE: usize = 12;
|
||||||
|
|
||||||
|
impl OurDB {
|
||||||
|
/// Selects and opens a database file for read/write operations
|
||||||
|
pub(crate) fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error> {
|
||||||
|
// No need to check if file_nr > 65535 as u16 can't exceed that value
|
||||||
|
|
||||||
|
let path = self.path.join(format!("{}.db", file_nr));
|
||||||
|
|
||||||
|
// Always close the current file if it's open
|
||||||
|
self.file = None;
|
||||||
|
|
||||||
|
// Create file if it doesn't exist
|
||||||
|
if !path.exists() {
|
||||||
|
self.create_new_db_file(file_nr)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open the file fresh
|
||||||
|
let file = OpenOptions::new().read(true).write(true).open(&path)?;
|
||||||
|
|
||||||
|
self.file = Some(file);
|
||||||
|
self.file_nr = file_nr;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a new database file
|
||||||
|
pub(crate) fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error> {
|
||||||
|
let new_file_path = self.path.join(format!("{}.db", file_nr));
|
||||||
|
let mut file = File::create(&new_file_path)?;
|
||||||
|
|
||||||
|
// Write a single byte to make all positions start from 1
|
||||||
|
file.write_all(&[0u8])?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets the file number to use for the next write operation
|
||||||
|
pub(crate) fn get_file_nr(&mut self) -> Result<u16, Error> {
|
||||||
|
// For keysize 2, 3, or 4, we can only use file_nr 0
|
||||||
|
if self.lookup.keysize() <= 4 {
|
||||||
|
let path = self.path.join("0.db");
|
||||||
|
|
||||||
|
if !path.exists() {
|
||||||
|
self.create_new_db_file(0)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
return Ok(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// For keysize 6, we can use multiple files
|
||||||
|
let path = self.path.join(format!("{}.db", self.last_used_file_nr));
|
||||||
|
|
||||||
|
if !path.exists() {
|
||||||
|
self.create_new_db_file(self.last_used_file_nr)?;
|
||||||
|
return Ok(self.last_used_file_nr);
|
||||||
|
}
|
||||||
|
|
||||||
|
let metadata = fs::metadata(&path)?;
|
||||||
|
if metadata.len() >= self.file_size as u64 {
|
||||||
|
self.last_used_file_nr += 1;
|
||||||
|
self.create_new_db_file(self.last_used_file_nr)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(self.last_used_file_nr)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stores data at the specified ID with history tracking
|
||||||
|
pub(crate) fn set_(
|
||||||
|
&mut self,
|
||||||
|
id: u32,
|
||||||
|
old_location: Location,
|
||||||
|
data: &[u8],
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
// Validate data size - maximum is u16::MAX (65535 bytes or ~64KB)
|
||||||
|
if data.len() > u16::MAX as usize {
|
||||||
|
return Err(Error::InvalidOperation(format!(
|
||||||
|
"Data size exceeds maximum allowed size of {} bytes",
|
||||||
|
u16::MAX
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get file number to use
|
||||||
|
let file_nr = self.get_file_nr()?;
|
||||||
|
|
||||||
|
// Select the file
|
||||||
|
self.db_file_select(file_nr)?;
|
||||||
|
|
||||||
|
// Get current file position for lookup
|
||||||
|
let file = self
|
||||||
|
.file
|
||||||
|
.as_mut()
|
||||||
|
.ok_or_else(|| Error::Other("No file open".to_string()))?;
|
||||||
|
file.seek(SeekFrom::End(0))?;
|
||||||
|
let position = file.stream_position()? as u32;
|
||||||
|
|
||||||
|
// Create new location
|
||||||
|
let new_location = Location { file_nr, position };
|
||||||
|
|
||||||
|
// Calculate CRC of data
|
||||||
|
let crc = calculate_crc(data);
|
||||||
|
|
||||||
|
// Create header
|
||||||
|
let mut header = vec![0u8; HEADER_SIZE];
|
||||||
|
|
||||||
|
// Write size (2 bytes)
|
||||||
|
let size = data.len() as u16; // Safe now because we've validated the size
|
||||||
|
header[0] = (size & 0xFF) as u8;
|
||||||
|
header[1] = ((size >> 8) & 0xFF) as u8;
|
||||||
|
|
||||||
|
// Write CRC (4 bytes)
|
||||||
|
header[2] = (crc & 0xFF) as u8;
|
||||||
|
header[3] = ((crc >> 8) & 0xFF) as u8;
|
||||||
|
header[4] = ((crc >> 16) & 0xFF) as u8;
|
||||||
|
header[5] = ((crc >> 24) & 0xFF) as u8;
|
||||||
|
|
||||||
|
// Write previous location (6 bytes)
|
||||||
|
let prev_bytes = old_location.to_bytes();
|
||||||
|
for (i, &byte) in prev_bytes.iter().enumerate().take(6) {
|
||||||
|
header[6 + i] = byte;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write header
|
||||||
|
file.write_all(&header)?;
|
||||||
|
|
||||||
|
// Write actual data
|
||||||
|
file.write_all(data)?;
|
||||||
|
file.flush()?;
|
||||||
|
|
||||||
|
// Update lookup table with new position
|
||||||
|
self.lookup.set(id, new_location)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Retrieves data at the specified location
|
||||||
|
pub(crate) fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error> {
|
||||||
|
if location.position == 0 {
|
||||||
|
return Err(Error::NotFound(format!(
|
||||||
|
"Record not found, location: {:?}",
|
||||||
|
location
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select the file
|
||||||
|
self.db_file_select(location.file_nr)?;
|
||||||
|
|
||||||
|
let file = self
|
||||||
|
.file
|
||||||
|
.as_mut()
|
||||||
|
.ok_or_else(|| Error::Other("No file open".to_string()))?;
|
||||||
|
|
||||||
|
// Read header
|
||||||
|
file.seek(SeekFrom::Start(location.position as u64))?;
|
||||||
|
let mut header = vec![0u8; HEADER_SIZE];
|
||||||
|
file.read_exact(&mut header)?;
|
||||||
|
|
||||||
|
// Parse size (2 bytes)
|
||||||
|
let size = u16::from(header[0]) | (u16::from(header[1]) << 8);
|
||||||
|
|
||||||
|
// Parse CRC (4 bytes)
|
||||||
|
let stored_crc = u32::from(header[2])
|
||||||
|
| (u32::from(header[3]) << 8)
|
||||||
|
| (u32::from(header[4]) << 16)
|
||||||
|
| (u32::from(header[5]) << 24);
|
||||||
|
|
||||||
|
// Read data
|
||||||
|
let mut data = vec![0u8; size as usize];
|
||||||
|
file.read_exact(&mut data)?;
|
||||||
|
|
||||||
|
// Verify CRC
|
||||||
|
let calculated_crc = calculate_crc(&data);
|
||||||
|
if calculated_crc != stored_crc {
|
||||||
|
return Err(Error::DataCorruption(
|
||||||
|
"CRC mismatch: data corruption detected".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Retrieves the previous position for a record (for history tracking)
|
||||||
|
pub(crate) fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error> {
|
||||||
|
if location.position == 0 {
|
||||||
|
return Err(Error::NotFound("Record not found".to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select the file
|
||||||
|
self.db_file_select(location.file_nr)?;
|
||||||
|
|
||||||
|
let file = self
|
||||||
|
.file
|
||||||
|
.as_mut()
|
||||||
|
.ok_or_else(|| Error::Other("No file open".to_string()))?;
|
||||||
|
|
||||||
|
// Skip size and CRC (6 bytes)
|
||||||
|
file.seek(SeekFrom::Start(location.position as u64 + 6))?;
|
||||||
|
|
||||||
|
// Read previous location (6 bytes)
|
||||||
|
let mut prev_bytes = vec![0u8; 6];
|
||||||
|
file.read_exact(&mut prev_bytes)?;
|
||||||
|
|
||||||
|
// Create location from bytes
|
||||||
|
Location::from_bytes(&prev_bytes, 6)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Deletes the record at the specified location
|
||||||
|
pub(crate) fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error> {
|
||||||
|
if location.position == 0 {
|
||||||
|
return Err(Error::NotFound("Record not found".to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select the file
|
||||||
|
self.db_file_select(location.file_nr)?;
|
||||||
|
|
||||||
|
let file = self
|
||||||
|
.file
|
||||||
|
.as_mut()
|
||||||
|
.ok_or_else(|| Error::Other("No file open".to_string()))?;
|
||||||
|
|
||||||
|
// Read size first
|
||||||
|
file.seek(SeekFrom::Start(location.position as u64))?;
|
||||||
|
let mut size_bytes = vec![0u8; 2];
|
||||||
|
file.read_exact(&mut size_bytes)?;
|
||||||
|
let size = u16::from(size_bytes[0]) | (u16::from(size_bytes[1]) << 8);
|
||||||
|
|
||||||
|
// Write zeros for the entire record (header + data)
|
||||||
|
let zeros = vec![0u8; HEADER_SIZE + size as usize];
|
||||||
|
file.seek(SeekFrom::Start(location.position as u64))?;
|
||||||
|
file.write_all(&zeros)?;
|
||||||
|
|
||||||
|
// Clear lookup entry
|
||||||
|
self.lookup.delete(id)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Condenses the database by removing empty records and updating positions
|
||||||
|
pub fn condense(&mut self) -> Result<(), Error> {
|
||||||
|
// Create a temporary directory
|
||||||
|
let temp_path = self.path.join("temp");
|
||||||
|
fs::create_dir_all(&temp_path)?;
|
||||||
|
|
||||||
|
// Get all file numbers
|
||||||
|
let mut file_numbers = Vec::new();
|
||||||
|
for entry in fs::read_dir(&self.path)? {
|
||||||
|
let entry = entry?;
|
||||||
|
let path = entry.path();
|
||||||
|
|
||||||
|
if path.is_file() && path.extension().map_or(false, |ext| ext == "db") {
|
||||||
|
if let Some(stem) = path.file_stem() {
|
||||||
|
if let Ok(file_nr) = stem.to_string_lossy().parse::<u16>() {
|
||||||
|
file_numbers.push(file_nr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process each file
|
||||||
|
for file_nr in file_numbers {
|
||||||
|
let src_path = self.path.join(format!("{}.db", file_nr));
|
||||||
|
let temp_file_path = temp_path.join(format!("{}.db", file_nr));
|
||||||
|
|
||||||
|
// Create new file
|
||||||
|
let mut temp_file = File::create(&temp_file_path)?;
|
||||||
|
temp_file.write_all(&[0u8])?; // Initialize with a byte
|
||||||
|
|
||||||
|
// Open source file
|
||||||
|
let mut src_file = File::open(&src_path)?;
|
||||||
|
|
||||||
|
// Read and process records
|
||||||
|
let mut buffer = vec![0u8; 1024]; // Read in chunks
|
||||||
|
let mut _position = 0;
|
||||||
|
|
||||||
|
while let Ok(bytes_read) = src_file.read(&mut buffer) {
|
||||||
|
if bytes_read == 0 {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process the chunk
|
||||||
|
// This is a simplified version - in a real implementation,
|
||||||
|
// you would need to handle records that span chunk boundaries
|
||||||
|
|
||||||
|
_position += bytes_read;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Implement proper record copying and position updating
|
||||||
|
// This would involve:
|
||||||
|
// 1. Reading each record from the source file
|
||||||
|
// 2. If not deleted (all zeros), copy to temp file
|
||||||
|
// 3. Update lookup table with new positions
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Replace original files with temp files
|
||||||
|
|
||||||
|
// Clean up
|
||||||
|
fs::remove_dir_all(&temp_path)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calculates CRC32 for the data
|
||||||
|
fn calculate_crc(data: &[u8]) -> u32 {
|
||||||
|
let mut hasher = Hasher::new();
|
||||||
|
hasher.update(data);
|
||||||
|
hasher.finalize()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use crate::{OurDB, OurDBConfig, OurDBSetArgs};
    use std::env::temp_dir;
    use std::time::{SystemTime, UNIX_EPOCH};

    /// Builds a timestamped temp path so concurrent/repeated test runs
    /// don't collide on the same database directory.
    fn get_temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_backend_test_{}", timestamp))
    }

    /// Round-trips one record through set/get in key-value mode, then
    /// destroys the database to clean up on disk.
    #[test]
    fn test_backend_operations() {
        let temp_dir = get_temp_dir();

        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: false,
            file_size: None,
            keysize: None,
            reset: None, // Don't reset existing database
        };

        let mut db = OurDB::new(config).unwrap();

        // Test set and get
        let test_data = b"Test data for backend operations";
        let id = 1;

        db.set(OurDBSetArgs {
            id: Some(id),
            data: test_data,
        })
        .unwrap();

        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, test_data);

        // Clean up
        db.destroy().unwrap();
    }
}
|
||||||
New file: ourdb/src/error.rs — 41 lines (@@ -0,0 +1,41 @@)
|
|||||||
|
use thiserror::Error;

/// Error types for OurDB operations
///
/// Every variant carries a human-readable message; the `Display`
/// implementation comes from the `thiserror` derive via the
/// `#[error(...)]` attributes below.
#[derive(Error, Debug)]
pub enum Error {
    /// IO errors from file operations (auto-converted from `std::io::Error`
    /// by the `#[from]` attribute, so `?` works on io results).
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    /// Data corruption errors (e.g. malformed on-disk structures)
    #[error("Data corruption: {0}")]
    DataCorruption(String),

    /// Invalid operation errors (caller misuse, e.g. wrong mode)
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),

    /// Lookup table errors (out-of-range or short reads in the index)
    #[error("Lookup error: {0}")]
    LookupError(String),

    /// Record not found errors
    #[error("Record not found: {0}")]
    NotFound(String),

    /// Other errors — catch-all used by the `From<String>`/`From<&str>` impls
    #[error("Error: {0}")]
    Other(String),
}
|
||||||
|
|
||||||
|
impl From<String> for Error {
|
||||||
|
fn from(msg: String) -> Self {
|
||||||
|
Error::Other(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<&str> for Error {
|
||||||
|
fn from(msg: &str) -> Self {
|
||||||
|
Error::Other(msg.to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
293
ourdb/src/lib.rs
Normal file
293
ourdb/src/lib.rs
Normal file
@@ -0,0 +1,293 @@
|
|||||||
|
mod backend;
|
||||||
|
mod error;
|
||||||
|
mod location;
|
||||||
|
mod lookup;
|
||||||
|
|
||||||
|
pub use error::Error;
|
||||||
|
pub use location::Location;
|
||||||
|
pub use lookup::LookupTable;
|
||||||
|
|
||||||
|
use std::fs::File;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
/// OurDB is a lightweight, efficient key-value database implementation that provides
/// data persistence with history tracking capabilities.
///
/// The struct only holds configuration and bookkeeping state; the actual
/// record read/write helpers (`set_`, `get_`, ...) are implemented in
/// another module of this crate.
pub struct OurDB {
    /// Directory path for storage
    path: PathBuf,
    /// Whether to use auto-increment mode
    incremental_mode: bool,
    /// Maximum file size (default: 500MB)
    file_size: u32,
    /// Lookup table for mapping keys to locations
    lookup: LookupTable,
    /// Currently open file (None when closed; see `close_`)
    file: Option<File>,
    /// Current file number
    /// NOTE(review): maintained here but consumed by the backend helpers
    /// outside this view — confirm semantics against backend.rs.
    file_nr: u16,
    /// Last used file number
    last_used_file_nr: u16,
}
|
||||||
|
|
||||||
|
/// Configuration for creating a new OurDB instance
pub struct OurDBConfig {
    /// Directory path for storage (created if missing; removed first when
    /// `reset` is true)
    pub path: PathBuf,
    /// Whether to use auto-increment mode
    pub incremental_mode: bool,
    /// Maximum file size in bytes (default: 500MB)
    pub file_size: Option<u32>,
    /// Lookup table key size (default: 4 — see `OurDB::new`)
    /// - 2: For databases with < 65,536 records (single file)
    /// - 3: For databases with < 16,777,216 records (single file)
    /// - 4: For databases with < 4,294,967,296 records (single file)
    /// - 6: For large databases requiring multiple files
    pub keysize: Option<u8>,
    /// Whether to reset the database if it exists (default: false)
    pub reset: Option<bool>,
}
|
||||||
|
|
||||||
|
/// Arguments for setting a value in OurDB
///
/// Borrowed-data struct: `data` is a slice, so callers keep ownership of
/// the payload being written.
pub struct OurDBSetArgs<'a> {
    /// ID for the record (optional in incremental mode; required in
    /// key-value mode — see `OurDB::set`)
    pub id: Option<u32>,
    /// Data to store
    pub data: &'a [u8],
}
|
||||||
|
|
||||||
|
impl OurDB {
    /// Creates a new OurDB instance with the given configuration.
    ///
    /// Creates the storage directory (and a `lookup` subdirectory) if
    /// missing, optionally wiping it first when `config.reset` is true,
    /// then restores any previously-exported lookup state via `load`.
    ///
    /// # Errors
    /// Propagates IO errors from directory manipulation and any error from
    /// `LookupTable::new` / `import_sparse`.
    pub fn new(config: OurDBConfig) -> Result<Self, Error> {
        // If reset is true and the path exists, remove it first
        if config.reset.unwrap_or(false) && config.path.exists() {
            std::fs::remove_dir_all(&config.path)?;
        }

        // Create directory if it doesn't exist
        std::fs::create_dir_all(&config.path)?;

        // Create lookup table
        let lookup_path = config.path.join("lookup");
        std::fs::create_dir_all(&lookup_path)?;

        let lookup_config = lookup::LookupConfig {
            size: 1000000, // Default size (number of lookup entries)
            keysize: config.keysize.unwrap_or(4),
            lookuppath: lookup_path.to_string_lossy().to_string(),
            incremental_mode: config.incremental_mode,
        };

        let lookup = LookupTable::new(lookup_config)?;

        let mut db = OurDB {
            path: config.path,
            incremental_mode: config.incremental_mode,
            file_size: config.file_size.unwrap_or(500 * (1 << 20)), // 500MB default
            lookup,
            file: None,
            file_nr: 0,
            last_used_file_nr: 0,
        };

        // Load existing metadata if available
        db.load()?;

        Ok(db)
    }

    /// Sets a value in the database.
    ///
    /// In incremental mode:
    /// - If ID is provided, it updates an existing record
    /// - If ID is not provided, it creates a new record with auto-generated ID
    ///
    /// In key-value mode:
    /// - ID must be provided
    ///
    /// Returns the ID the data was stored under.
    ///
    /// # Errors
    /// `InvalidOperation` when the ID rules above are violated; otherwise
    /// whatever the lookup table or the storage backend (`set_`, defined in
    /// another module) reports.
    pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error> {
        if self.incremental_mode {
            if let Some(id) = args.id {
                // This is an update
                let location = self.lookup.get(id)?;
                // position == 0 is treated as "no existing record": in
                // incremental mode a caller may not pick its own insert ID.
                if location.position == 0 {
                    return Err(Error::InvalidOperation(
                        "Cannot set ID for insertions when incremental mode is enabled".to_string(),
                    ));
                }

                self.set_(id, location, args.data)?;
                Ok(id)
            } else {
                // This is an insert
                let id = self.lookup.get_next_id()?;
                self.set_(id, Location::default(), args.data)?;
                Ok(id)
            }
        } else {
            // Using key-value mode
            let id = args.id.ok_or_else(|| {
                Error::InvalidOperation(
                    "ID must be provided when incremental is disabled".to_string(),
                )
            })?;

            let location = self.lookup.get(id)?;
            self.set_(id, location, args.data)?;
            Ok(id)
        }
    }

    /// Retrieves data stored at the specified key position.
    pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error> {
        let location = self.lookup.get(id)?;
        self.get_(location)
    }

    /// Retrieves a list of previous values for the specified key,
    /// newest first.
    ///
    /// The depth parameter controls how many historical values to retrieve
    /// (maximum); fewer are returned if the history chain is shorter.
    pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error> {
        let mut result = Vec::new();
        let mut current_location = self.lookup.get(id)?;

        // Traverse the history chain up to specified depth
        for _ in 0..depth {
            // Get current value
            let data = self.get_(current_location)?;
            result.push(data);

            // Try to get previous location; `get_prev_pos_` is defined in
            // another module of this crate. An error or a zero position
            // both terminate the chain.
            match self.get_prev_pos_(current_location) {
                Ok(location) => {
                    if location.position == 0 {
                        break;
                    }
                    current_location = location;
                }
                Err(_) => break,
            }
        }

        Ok(result)
    }

    /// Deletes the data at the specified key position, clearing both the
    /// backend record and the lookup entry.
    pub fn delete(&mut self, id: u32) -> Result<(), Error> {
        let location = self.lookup.get(id)?;
        self.delete_(id, location)?;
        self.lookup.delete(id)?;
        Ok(())
    }

    /// Returns the next ID which will be used when storing in incremental mode.
    ///
    /// # Errors
    /// `InvalidOperation` when the database is not in incremental mode.
    pub fn get_next_id(&mut self) -> Result<u32, Error> {
        if !self.incremental_mode {
            return Err(Error::InvalidOperation(
                "Incremental mode is not enabled".to_string(),
            ));
        }
        self.lookup.get_next_id()
    }

    /// Closes the database, ensuring all data is saved (exports the lookup
    /// table, then drops the open file handle).
    pub fn close(&mut self) -> Result<(), Error> {
        self.save()?;
        self.close_();
        Ok(())
    }

    /// Destroys the database, removing all files under `self.path`.
    /// Errors from `close` are deliberately ignored (best effort).
    pub fn destroy(&mut self) -> Result<(), Error> {
        let _ = self.close();
        std::fs::remove_dir_all(&self.path)?;
        Ok(())
    }

    // Helper methods

    /// Path of the sparse lookup-table export used by `load`/`save`.
    fn lookup_dump_path(&self) -> PathBuf {
        self.path.join("lookup_dump.db")
    }

    /// Restores lookup state from a previous `save`, if the dump exists.
    fn load(&mut self) -> Result<(), Error> {
        let dump_path = self.lookup_dump_path();
        if dump_path.exists() {
            self.lookup.import_sparse(&dump_path.to_string_lossy())?;
        }
        Ok(())
    }

    /// Exports the lookup table (non-zero entries only) to the dump file.
    fn save(&mut self) -> Result<(), Error> {
        self.lookup
            .export_sparse(&self.lookup_dump_path().to_string_lossy())?;
        Ok(())
    }

    /// Drops the currently open data file handle (flush-on-drop semantics).
    fn close_(&mut self) {
        self.file = None;
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use std::env::temp_dir;
    use std::time::{SystemTime, UNIX_EPOCH};

    /// Scratch directory under the system temp dir, made (mostly) unique
    /// by the current Unix timestamp in seconds.
    fn get_temp_dir() -> PathBuf {
        let secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_test_{}", secs))
    }

    #[test]
    fn test_basic_operations() {
        // Incremental mode: IDs are handed out by the database.
        let mut db = OurDB::new(OurDBConfig {
            path: get_temp_dir(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None, // Don't reset existing database
        })
        .unwrap();

        // Insert with an auto-generated ID, then read it back.
        let first = b"Hello, OurDB!";
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: first,
            })
            .unwrap();
        assert_eq!(db.get(id).unwrap(), first);

        // Update the same record in place.
        let second = b"Updated data";
        db.set(OurDBSetArgs {
            id: Some(id),
            data: second,
        })
        .unwrap();
        assert_eq!(db.get(id).unwrap(), second);

        // History is returned newest-first and contains both versions.
        let history = db.get_history(id, 2).unwrap();
        assert_eq!(history.len(), 2);
        assert_eq!(history[0], second);
        assert_eq!(history[1], first);

        // After delete the record is no longer readable.
        db.delete(id).unwrap();
        assert!(db.get(id).is_err());

        // Clean up all on-disk state.
        db.destroy().unwrap();
    }
}
|
||||||
178
ourdb/src/location.rs
Normal file
178
ourdb/src/location.rs
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
use crate::error::Error;
|
||||||
|
|
||||||
|
/// Location represents a physical position in a database file
///
/// It consists of a file number and a position within that file.
/// This allows OurDB to span multiple files for large datasets.
///
/// The all-zero `Default` value is used elsewhere in this crate as a
/// "no entry" sentinel (see `LookupTable::delete`).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct Location {
    /// File number (0-65535)
    pub file_nr: u16,
    /// Position within the file (byte offset)
    pub position: u32,
}
|
||||||
|
|
||||||
|
impl Location {
|
||||||
|
/// Creates a new Location from bytes based on keysize
|
||||||
|
///
|
||||||
|
/// - keysize = 2: Only position (2 bytes), file_nr = 0
|
||||||
|
/// - keysize = 3: Only position (3 bytes), file_nr = 0
|
||||||
|
/// - keysize = 4: Only position (4 bytes), file_nr = 0
|
||||||
|
/// - keysize = 6: file_nr (2 bytes) + position (4 bytes)
|
||||||
|
pub fn from_bytes(bytes: &[u8], keysize: u8) -> Result<Self, Error> {
|
||||||
|
// Validate keysize
|
||||||
|
if ![2, 3, 4, 6].contains(&keysize) {
|
||||||
|
return Err(Error::InvalidOperation(format!(
|
||||||
|
"Invalid keysize: {}",
|
||||||
|
keysize
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create padded bytes
|
||||||
|
let mut padded = vec![0u8; keysize as usize];
|
||||||
|
if bytes.len() > keysize as usize {
|
||||||
|
return Err(Error::InvalidOperation(
|
||||||
|
"Input bytes exceed keysize".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let start_idx = keysize as usize - bytes.len();
|
||||||
|
|
||||||
|
for (i, &b) in bytes.iter().enumerate() {
|
||||||
|
if i + start_idx < padded.len() {
|
||||||
|
padded[start_idx + i] = b;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut location = Location::default();
|
||||||
|
|
||||||
|
match keysize {
|
||||||
|
2 => {
|
||||||
|
// Only position, 2 bytes big endian
|
||||||
|
location.position = u32::from(padded[0]) << 8 | u32::from(padded[1]);
|
||||||
|
location.file_nr = 0;
|
||||||
|
|
||||||
|
// Verify limits
|
||||||
|
if location.position > 0xFFFF {
|
||||||
|
return Err(Error::InvalidOperation(
|
||||||
|
"Position exceeds max value for keysize=2 (max 65535)".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
3 => {
|
||||||
|
// Only position, 3 bytes big endian
|
||||||
|
location.position =
|
||||||
|
u32::from(padded[0]) << 16 | u32::from(padded[1]) << 8 | u32::from(padded[2]);
|
||||||
|
location.file_nr = 0;
|
||||||
|
|
||||||
|
// Verify limits
|
||||||
|
if location.position > 0xFFFFFF {
|
||||||
|
return Err(Error::InvalidOperation(
|
||||||
|
"Position exceeds max value for keysize=3 (max 16777215)".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
4 => {
|
||||||
|
// Only position, 4 bytes big endian
|
||||||
|
location.position = u32::from(padded[0]) << 24
|
||||||
|
| u32::from(padded[1]) << 16
|
||||||
|
| u32::from(padded[2]) << 8
|
||||||
|
| u32::from(padded[3]);
|
||||||
|
location.file_nr = 0;
|
||||||
|
}
|
||||||
|
6 => {
|
||||||
|
// 2 bytes file_nr + 4 bytes position, all big endian
|
||||||
|
location.file_nr = u16::from(padded[0]) << 8 | u16::from(padded[1]);
|
||||||
|
location.position = u32::from(padded[2]) << 24
|
||||||
|
| u32::from(padded[3]) << 16
|
||||||
|
| u32::from(padded[4]) << 8
|
||||||
|
| u32::from(padded[5]);
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(location)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts the location to bytes (always 6 bytes)
|
||||||
|
///
|
||||||
|
/// Format: [file_nr (2 bytes)][position (4 bytes)]
|
||||||
|
pub fn to_bytes(&self) -> Vec<u8> {
|
||||||
|
let mut bytes = Vec::with_capacity(6);
|
||||||
|
|
||||||
|
// Put file_nr first (2 bytes)
|
||||||
|
bytes.push((self.file_nr >> 8) as u8);
|
||||||
|
bytes.push(self.file_nr as u8);
|
||||||
|
|
||||||
|
// Put position next (4 bytes)
|
||||||
|
bytes.push((self.position >> 24) as u8);
|
||||||
|
bytes.push((self.position >> 16) as u8);
|
||||||
|
bytes.push((self.position >> 8) as u8);
|
||||||
|
bytes.push(self.position as u8);
|
||||||
|
|
||||||
|
bytes
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts the location to a u64 value
|
||||||
|
///
|
||||||
|
/// The file_nr is stored in the most significant bits
|
||||||
|
pub fn to_u64(&self) -> u64 {
|
||||||
|
(u64::from(self.file_nr) << 32) | u64::from(self.position)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_location_from_bytes_keysize_2() {
        let loc = Location::from_bytes(&[0x12, 0x34], 2).unwrap();
        assert_eq!(loc.file_nr, 0);
        assert_eq!(loc.position, 0x1234);
    }

    #[test]
    fn test_location_from_bytes_keysize_3() {
        let loc = Location::from_bytes(&[0x12, 0x34, 0x56], 3).unwrap();
        assert_eq!(loc.file_nr, 0);
        assert_eq!(loc.position, 0x123456);
    }

    #[test]
    fn test_location_from_bytes_keysize_4() {
        let loc = Location::from_bytes(&[0x12, 0x34, 0x56, 0x78], 4).unwrap();
        assert_eq!(loc.file_nr, 0);
        assert_eq!(loc.position, 0x12345678);
    }

    #[test]
    fn test_location_from_bytes_keysize_6() {
        // First two bytes are the file number, remaining four the position.
        let loc = Location::from_bytes(&[0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78], 6).unwrap();
        assert_eq!(loc.file_nr, 0xABCD);
        assert_eq!(loc.position, 0x12345678);
    }

    #[test]
    fn test_location_to_bytes() {
        let loc = Location {
            file_nr: 0xABCD,
            position: 0x12345678,
        };
        // Serialization is always 6 big-endian bytes: file_nr then position.
        assert_eq!(loc.to_bytes(), vec![0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78]);
    }

    #[test]
    fn test_location_to_u64() {
        let loc = Location {
            file_nr: 0xABCD,
            position: 0x12345678,
        };
        // file_nr occupies the upper 32 bits of the packed value.
        assert_eq!(loc.to_u64(), 0xABCD_0000_0000 | 0x12345678);
    }
}
|
||||||
540
ourdb/src/lookup.rs
Normal file
540
ourdb/src/lookup.rs
Normal file
@@ -0,0 +1,540 @@
|
|||||||
|
use std::fs::{self, File, OpenOptions};
|
||||||
|
use std::io::{Read, Seek, SeekFrom, Write};
|
||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use crate::error::Error;
|
||||||
|
use crate::location::Location;
|
||||||
|
|
||||||
|
// File name (inside `lookuppath`) holding the raw lookup-table bytes.
const DATA_FILE_NAME: &str = "data";
// File name (inside `lookuppath`) holding the next auto-increment ID,
// stored as decimal text (see `increment_index` / `get_incremental_info`).
const INCREMENTAL_FILE_NAME: &str = ".inc";
|
||||||
|
|
||||||
|
/// Configuration for creating a new lookup table
pub struct LookupConfig {
    /// Size of the lookup table (number of entries, not bytes)
    pub size: u32,
    /// Size of each entry in bytes (2-6)
    /// - 2: For databases with < 65,536 records (single file)
    /// - 3: For databases with < 16,777,216 records (single file)
    /// - 4: For databases with < 4,294,967,296 records (single file)
    /// - 6: For large databases requiring multiple files
    pub keysize: u8,
    /// Path for disk-based lookup; an empty string selects the
    /// in-memory variant (see `LookupTable::new`)
    pub lookuppath: String,
    /// Whether to use incremental (auto-increment ID) mode
    pub incremental_mode: bool,
}
|
||||||
|
|
||||||
|
/// Lookup table maps keys to physical locations in the backend storage
///
/// Operates in one of two modes, selected by `lookuppath`:
/// disk-backed (non-empty path) or fully in-memory (`data`).
pub struct LookupTable {
    /// Size of each entry in bytes (2-6)
    keysize: u8,
    /// Path for disk-based lookup (empty string = memory-based)
    lookuppath: String,
    /// In-memory data for memory-based lookup (unused when disk-based)
    data: Vec<u8>,
    /// Next empty slot if incremental mode is enabled; `None` otherwise
    incremental: Option<u32>,
}
|
||||||
|
|
||||||
|
impl LookupTable {
    /// Returns the keysize (entry size in bytes) of this lookup table.
    pub fn keysize(&self) -> u8 {
        self.keysize
    }

    /// Creates a new lookup table with the given configuration.
    ///
    /// A non-empty `lookuppath` selects a disk-backed table (the data file
    /// is pre-allocated with zeros if missing); an empty path selects an
    /// in-memory table.
    ///
    /// # Errors
    /// `InvalidOperation` for an unsupported keysize; IO errors from
    /// directory/file creation.
    pub fn new(config: LookupConfig) -> Result<Self, Error> {
        // Verify keysize is valid
        if ![2, 3, 4, 6].contains(&config.keysize) {
            return Err(Error::InvalidOperation(format!(
                "Invalid keysize: {}",
                config.keysize
            )));
        }

        // In incremental mode, recover (or initialize) the next-ID counter.
        let incremental = if config.incremental_mode {
            Some(get_incremental_info(&config)?)
        } else {
            None
        };

        if !config.lookuppath.is_empty() {
            // Create directory if it doesn't exist
            fs::create_dir_all(&config.lookuppath)?;

            // For disk-based lookup, create empty file if it doesn't exist
            let data_path = Path::new(&config.lookuppath).join(DATA_FILE_NAME);
            if !data_path.exists() {
                let data = vec![0u8; config.size as usize * config.keysize as usize];
                fs::write(&data_path, &data)?;
            }

            Ok(LookupTable {
                data: Vec::new(), // unused in disk mode
                keysize: config.keysize,
                lookuppath: config.lookuppath,
                incremental,
            })
        } else {
            // For memory-based lookup
            Ok(LookupTable {
                data: vec![0u8; config.size as usize * config.keysize as usize],
                keysize: config.keysize,
                lookuppath: String::new(),
                incremental,
            })
        }
    }

    /// Gets the stored location for the given ID.
    ///
    /// # Errors
    /// `LookupError` when the ID falls outside the table; IO errors in
    /// disk mode.
    pub fn get(&self, id: u32) -> Result<Location, Error> {
        let entry_size = self.keysize as usize;

        if !self.lookuppath.is_empty() {
            // Disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);

            // Check file size first so a short file yields a clean error
            // instead of a partial read.
            let file_size = fs::metadata(&data_path)?.len();
            let start_pos = id as u64 * entry_size as u64;

            if start_pos + entry_size as u64 > file_size {
                return Err(Error::LookupError(format!(
                    "Invalid read for get in lut: {}: {} would exceed file size {}",
                    self.lookuppath,
                    start_pos + entry_size as u64,
                    file_size
                )));
            }

            // Read directly from file (the file is opened per call)
            let mut file = File::open(&data_path)?;
            file.seek(SeekFrom::Start(start_pos))?;

            let mut data = vec![0u8; entry_size];
            let bytes_read = file.read(&mut data)?;

            if bytes_read < entry_size {
                return Err(Error::LookupError(format!(
                    "Incomplete read: expected {} bytes but got {}",
                    entry_size, bytes_read
                )));
            }

            return Location::from_bytes(&data, self.keysize);
        }

        // Memory-based lookup
        if (id * self.keysize as u32) as usize >= self.data.len() {
            return Err(Error::LookupError("Index out of bounds".to_string()));
        }

        let start = (id * self.keysize as u32) as usize;
        let end = start + entry_size;

        Location::from_bytes(&self.data[start..end], self.keysize)
    }

    /// Sets the location for the given ID, encoding it per `keysize`.
    ///
    /// In incremental mode, writing at exactly the next free slot bumps the
    /// counter; writing past it is rejected.
    ///
    /// # Errors
    /// `InvalidOperation` for keysize/value violations; `LookupError` for
    /// out-of-bounds memory writes; IO errors in disk mode.
    pub fn set(&mut self, id: u32, location: Location) -> Result<(), Error> {
        let entry_size = self.keysize as usize;

        // Handle incremental mode
        if let Some(incremental) = self.incremental {
            if id == incremental {
                self.increment_index()?;
            }

            if id > incremental {
                return Err(Error::InvalidOperation(
                    "Cannot set ID for insertions when incremental mode is enabled".to_string(),
                ));
            }
        }

        // Convert location to bytes based on keysize. For keysizes < 6 the
        // file_nr must be 0 (single-file layouts) and the position must fit
        // in the available bytes.
        let location_bytes = match self.keysize {
            2 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=2".to_string(),
                    ));
                }
                if location.position > 0xFFFF {
                    return Err(Error::InvalidOperation(
                        "position exceeds max value for keysize=2 (max 65535)".to_string(),
                    ));
                }
                vec![(location.position >> 8) as u8, location.position as u8]
            }
            3 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=3".to_string(),
                    ));
                }
                if location.position > 0xFFFFFF {
                    return Err(Error::InvalidOperation(
                        "position exceeds max value for keysize=3 (max 16777215)".to_string(),
                    ));
                }
                vec![
                    (location.position >> 16) as u8,
                    (location.position >> 8) as u8,
                    location.position as u8,
                ]
            }
            4 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=4".to_string(),
                    ));
                }
                vec![
                    (location.position >> 24) as u8,
                    (location.position >> 16) as u8,
                    (location.position >> 8) as u8,
                    location.position as u8,
                ]
            }
            6 => {
                // Full location with file_nr and position
                location.to_bytes()
            }
            _ => {
                return Err(Error::InvalidOperation(format!(
                    "Invalid keysize: {}",
                    self.keysize
                )))
            }
        };

        if !self.lookuppath.is_empty() {
            // Disk-based lookup: overwrite the entry in place
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = OpenOptions::new().write(true).open(data_path)?;

            let start_pos = id as u64 * entry_size as u64;
            file.seek(SeekFrom::Start(start_pos))?;
            file.write_all(&location_bytes)?;
        } else {
            // Memory-based lookup
            let start = (id * self.keysize as u32) as usize;
            if start + entry_size > self.data.len() {
                return Err(Error::LookupError("Index out of bounds".to_string()));
            }

            for (i, &byte) in location_bytes.iter().enumerate() {
                self.data[start + i] = byte;
            }
        }

        Ok(())
    }

    /// Deletes an entry for the given ID by overwriting it with the
    /// all-zero sentinel location.
    pub fn delete(&mut self, id: u32) -> Result<(), Error> {
        // Set location to all zeros
        self.set(id, Location::default())
    }

    /// Gets the next available ID in incremental mode.
    ///
    /// # Errors
    /// `InvalidOperation` when not in incremental mode; `LookupError` when
    /// the table has no free slot left.
    pub fn get_next_id(&self) -> Result<u32, Error> {
        let incremental = self.incremental.ok_or_else(|| {
            Error::InvalidOperation("Lookup table not in incremental mode".to_string())
        })?;

        // Table capacity in bytes (file length or in-memory buffer length).
        let table_size = if !self.lookuppath.is_empty() {
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::metadata(data_path)?.len() as u32
        } else {
            self.data.len() as u32
        };

        if incremental * self.keysize as u32 >= table_size {
            return Err(Error::LookupError("Lookup table is full".to_string()));
        }

        Ok(incremental)
    }

    /// Increments the next-ID counter in incremental mode, persisting it to
    /// the `.inc` file in disk mode.
    pub fn increment_index(&mut self) -> Result<(), Error> {
        let mut incremental = self.incremental.ok_or_else(|| {
            Error::InvalidOperation("Lookup table not in incremental mode".to_string())
        })?;

        incremental += 1;
        self.incremental = Some(incremental);

        if !self.lookuppath.is_empty() {
            let inc_path = Path::new(&self.lookuppath).join(INCREMENTAL_FILE_NAME);
            fs::write(inc_path, incremental.to_string())?;
        }

        Ok(())
    }

    /// Exports the complete (dense) lookup table to a file.
    pub fn export_data(&self, path: &str) -> Result<(), Error> {
        if !self.lookuppath.is_empty() {
            // For disk-based lookup, just copy the file
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::copy(data_path, path)?;
        } else {
            // For memory-based lookup, write the data to file
            fs::write(path, &self.data)?;
        }
        Ok(())
    }

    /// Imports a complete (dense) lookup table from a file, replacing the
    /// current contents.
    pub fn import_data(&mut self, path: &str) -> Result<(), Error> {
        if !self.lookuppath.is_empty() {
            // For disk-based lookup, copy the file
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::copy(path, data_path)?;
        } else {
            // For memory-based lookup, read the data from file
            self.data = fs::read(path)?;
        }
        Ok(())
    }

    /// Exports only non-zero entries to save space.
    ///
    /// Record format: `[id: 4 bytes BE][entry: keysize bytes]`, repeated.
    pub fn export_sparse(&self, path: &str) -> Result<(), Error> {
        let mut output = Vec::new();
        let entry_size = self.keysize as usize;

        if !self.lookuppath.is_empty() {
            // For disk-based lookup: scan the data file entry by entry
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = File::open(&data_path)?;
            let file_size = fs::metadata(&data_path)?.len();
            let max_entries = file_size / entry_size as u64;

            for id in 0..max_entries {
                file.seek(SeekFrom::Start(id * entry_size as u64))?;

                let mut buffer = vec![0u8; entry_size];
                let bytes_read = file.read(&mut buffer)?;

                if bytes_read < entry_size {
                    break; // trailing partial entry: stop scanning
                }

                // Check if entry is non-zero
                if buffer.iter().any(|&b| b != 0) {
                    // Write ID (4 bytes) + entry
                    output.extend_from_slice(&(id as u32).to_be_bytes());
                    output.extend_from_slice(&buffer);
                }
            }
        } else {
            // For memory-based lookup
            let max_entries = self.data.len() / entry_size;

            for id in 0..max_entries {
                let start = id * entry_size;
                let entry = &self.data[start..start + entry_size];

                // Check if entry is non-zero
                if entry.iter().any(|&b| b != 0) {
                    // Write ID (4 bytes) + entry
                    output.extend_from_slice(&(id as u32).to_be_bytes());
                    output.extend_from_slice(entry);
                }
            }
        }

        // Write the output to file
        fs::write(path, &output)?;
        Ok(())
    }

    /// Imports sparse data (only non-zero entries) produced by
    /// `export_sparse`, applying each record via `set`.
    ///
    /// # Errors
    /// `DataCorruption` when the file length is not a whole number of
    /// records for this table's keysize.
    pub fn import_sparse(&mut self, path: &str) -> Result<(), Error> {
        let data = fs::read(path)?;
        let entry_size = self.keysize as usize;
        let record_size = 4 + entry_size; // ID (4 bytes) + entry

        if data.len() % record_size != 0 {
            return Err(Error::DataCorruption(
                "Invalid sparse data format: size mismatch".to_string(),
            ));
        }

        for chunk_start in (0..data.len()).step_by(record_size) {
            if chunk_start + record_size > data.len() {
                break; // defensive; cannot occur after the modulo check above
            }

            // Extract ID (4 bytes)
            let id_bytes = &data[chunk_start..chunk_start + 4];
            let id = u32::from_be_bytes([id_bytes[0], id_bytes[1], id_bytes[2], id_bytes[3]]);

            // Extract entry
            let entry = &data[chunk_start + 4..chunk_start + record_size];

            // Create location from entry
            let location = Location::from_bytes(entry, self.keysize)?;

            // Set the entry
            self.set(id, location)?;
        }

        Ok(())
    }

    /// Finds the highest ID with a non-zero entry (0 when the table is
    /// empty — note this is ambiguous with "only slot 0 is occupied").
    pub fn find_last_entry(&mut self) -> Result<u32, Error> {
        let mut last_id = 0u32;
        let entry_size = self.keysize as usize;

        if !self.lookuppath.is_empty() {
            // For disk-based lookup: linear scan of the data file
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = File::open(&data_path)?;
            let file_size = fs::metadata(&data_path)?.len();

            let mut buffer = vec![0u8; entry_size];
            let mut pos = 0u32;

            while (pos as u64 * entry_size as u64) < file_size {
                file.seek(SeekFrom::Start(pos as u64 * entry_size as u64))?;

                let bytes_read = file.read(&mut buffer)?;
                if bytes_read == 0 || bytes_read < entry_size {
                    break;
                }

                let location = Location::from_bytes(&buffer, self.keysize)?;
                if location.position != 0 || location.file_nr != 0 {
                    last_id = pos;
                }

                pos += 1;
            }
        } else {
            // For memory-based lookup
            for i in 0..(self.data.len() / entry_size) as u32 {
                if let Ok(location) = self.get(i) {
                    if location.position != 0 || location.file_nr != 0 {
                        last_id = i;
                    }
                }
            }
        }

        Ok(last_id)
    }
}
|
||||||
|
|
||||||
|
/// Helper function to get the incremental value
|
||||||
|
fn get_incremental_info(config: &LookupConfig) -> Result<u32, Error> {
|
||||||
|
if !config.incremental_mode {
|
||||||
|
return Ok(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !config.lookuppath.is_empty() {
|
||||||
|
let inc_path = Path::new(&config.lookuppath).join(INCREMENTAL_FILE_NAME);
|
||||||
|
|
||||||
|
if !inc_path.exists() {
|
||||||
|
// Create a separate file for storing the incremental value
|
||||||
|
fs::write(&inc_path, "1")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let inc_str = fs::read_to_string(&inc_path)?;
|
||||||
|
let incremental = match inc_str.trim().parse::<u32>() {
|
||||||
|
Ok(val) => val,
|
||||||
|
Err(_) => {
|
||||||
|
// If the value is invalid, reset it to 1
|
||||||
|
fs::write(&inc_path, "1")?;
|
||||||
|
1
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(incremental)
|
||||||
|
} else {
|
||||||
|
// For memory-based lookup, start with 1
|
||||||
|
Ok(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use std::env::temp_dir;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};

    /// Builds a per-run scratch directory path for lookup-table tests.
    fn get_temp_dir() -> PathBuf {
        let secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_lookup_test_{}", secs))
    }

    #[test]
    fn test_memory_lookup() {
        let mut lookup = LookupTable::new(LookupConfig {
            size: 1000,
            keysize: 4,
            lookuppath: String::new(),
            incremental_mode: true,
        })
        .unwrap();

        // Round-trip a single entry through the in-memory table.
        let loc = Location {
            file_nr: 0,
            position: 12345,
        };
        lookup.set(1, loc).unwrap();
        let fetched = lookup.get(1).unwrap();
        assert_eq!(fetched.file_nr, loc.file_nr);
        assert_eq!(fetched.position, loc.position);

        // The incremental counter advances only when explicitly bumped.
        assert_eq!(lookup.get_next_id().unwrap(), 2);
        lookup.increment_index().unwrap();
        assert_eq!(lookup.get_next_id().unwrap(), 3);
    }

    #[test]
    fn test_disk_lookup() {
        let dir = get_temp_dir();
        fs::create_dir_all(&dir).unwrap();

        let mut lookup = LookupTable::new(LookupConfig {
            size: 1000,
            keysize: 4,
            lookuppath: dir.to_string_lossy().to_string(),
            incremental_mode: true,
        })
        .unwrap();

        // Round-trip a single entry through the on-disk table.
        let loc = Location {
            file_nr: 0,
            position: 12345,
        };
        lookup.set(1, loc).unwrap();
        let fetched = lookup.get(1).unwrap();
        assert_eq!(fetched.file_nr, loc.file_nr);
        assert_eq!(fetched.position, loc.position);

        // Clean up the scratch directory.
        fs::remove_dir_all(dir).unwrap();
    }
}
|
||||||
369
ourdb/tests/integration_tests.rs
Normal file
369
ourdb/tests/integration_tests.rs
Normal file
@@ -0,0 +1,369 @@
|
|||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use rand;
|
||||||
|
use std::env::temp_dir;
|
||||||
|
use std::fs;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
// Helper function to create a unique temporary directory for tests
|
||||||
|
fn get_temp_dir() -> PathBuf {
|
||||||
|
let timestamp = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_nanos();
|
||||||
|
let random_part = rand::random::<u32>();
|
||||||
|
let dir = temp_dir().join(format!("ourdb_test_{}_{}", timestamp, random_part));
|
||||||
|
|
||||||
|
// Ensure the directory exists and is empty
|
||||||
|
if dir.exists() {
|
||||||
|
std::fs::remove_dir_all(&dir).unwrap();
|
||||||
|
}
|
||||||
|
std::fs::create_dir_all(&dir).unwrap();
|
||||||
|
|
||||||
|
dir
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
fn test_basic_operations() {
    let temp_dir = get_temp_dir();

    // Fresh database in auto-increment mode.
    let mut db = OurDB::new(OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    })
    .unwrap();

    // Insert and read back.
    let original = b"Hello, OurDB!";
    let id = db
        .set(OurDBSetArgs {
            id: None,
            data: original,
        })
        .unwrap();
    assert_eq!(db.get(id).unwrap(), original);

    // Overwrite the same record in place.
    let replacement = b"Updated data";
    db.set(OurDBSetArgs {
        id: Some(id),
        data: replacement,
    })
    .unwrap();
    assert_eq!(db.get(id).unwrap(), replacement);

    // History is returned newest-first.
    let history = db.get_history(id, 2).unwrap();
    assert_eq!(history.len(), 2);
    assert_eq!(history[0], replacement);
    assert_eq!(history[1], original);

    // Deleting makes the record unreachable.
    db.delete(id).unwrap();
    assert!(db.get(id).is_err());

    // Clean up.
    db.destroy().unwrap();
}
|
||||||
|
|
||||||
|
#[test]
fn test_key_value_mode() {
    let temp_dir = get_temp_dir();

    // Key-value mode: the caller supplies record IDs explicitly.
    let mut db = OurDB::new(OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: false,
        file_size: None,
        keysize: None,
        reset: None,
    })
    .unwrap();

    // Store under an explicit ID and read back.
    let payload = b"Key-value data";
    let chosen_id = 42;
    db.set(OurDBSetArgs {
        id: Some(chosen_id),
        data: payload,
    })
    .unwrap();
    assert_eq!(db.get(chosen_id).unwrap(), payload);

    // Auto-increment queries are rejected in this mode.
    assert!(db.get_next_id().is_err());

    // Clean up.
    db.destroy().unwrap();
}
|
||||||
|
|
||||||
|
#[test]
fn test_incremental_mode() {
    let temp_dir = get_temp_dir();

    // Fresh database in auto-increment mode.
    let mut db = OurDB::new(OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    })
    .unwrap();

    // Consecutive inserts receive consecutive IDs.
    let first = db
        .set(OurDBSetArgs {
            id: None,
            data: b"First record",
        })
        .unwrap();
    let second = db
        .set(OurDBSetArgs {
            id: None,
            data: b"Second record",
        })
        .unwrap();
    assert_eq!(second, first + 1);

    // The next free ID follows the last insert.
    assert_eq!(db.get_next_id().unwrap(), second + 1);

    // Clean up.
    db.destroy().unwrap();
}
|
||||||
|
|
||||||
|
#[test]
fn test_persistence() {
    let temp_dir = get_temp_dir();

    // First session: write one record, then close cleanly.
    {
        let mut db = OurDB::new(OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None,
        })
        .unwrap();

        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: b"Persistent data",
            })
            .unwrap();

        // Explicitly close the database.
        db.close().unwrap();

        // A fresh database hands out 1 as its first ID.
        assert_eq!(id, 1);
    }

    // Second session: the record and the counter must survive reopen.
    {
        let mut db = OurDB::new(OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None,
        })
        .unwrap();

        // Data written in the first session is still readable.
        assert_eq!(db.get(1).unwrap(), b"Persistent data");

        // The incremental counter persisted across the reopen.
        assert_eq!(db.get_next_id().unwrap(), 2);

        // Clean up.
        db.destroy().unwrap();
    }
}
|
||||||
|
|
||||||
|
#[test]
fn test_different_keysizes() {
    // Exercise the round-trip for every supported keysize.
    for &keysize in &[2, 3, 4, 6] {
        let temp_dir = get_temp_dir();

        // Ensure the directory exists before opening the database.
        std::fs::create_dir_all(&temp_dir).unwrap();

        let mut db = OurDB::new(OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: Some(keysize),
            reset: None,
        })
        .unwrap();

        // Basic insert/read round-trip under this keysize.
        let payload = b"Keysize test data";
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: payload,
            })
            .unwrap();
        assert_eq!(db.get(id).unwrap(), payload);

        // Clean up.
        db.destroy().unwrap();
    }
}
|
||||||
|
|
||||||
|
#[test]
fn test_large_data() {
    let temp_dir = get_temp_dir();

    let mut db = OurDB::new(OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    })
    .unwrap();

    // 60KB payload: large, but within the 64KB per-record limit.
    let payload = vec![b'X'; 60 * 1024];

    // Store and read the large payload back.
    let id = db
        .set(OurDBSetArgs {
            id: None,
            data: &payload,
        })
        .unwrap();
    let fetched = db.get(id).unwrap();

    assert_eq!(fetched.len(), payload.len());
    assert_eq!(fetched, payload);

    // Clean up.
    db.destroy().unwrap();
}
|
||||||
|
|
||||||
|
#[test]
fn test_exceed_size_limit() {
    let temp_dir = get_temp_dir();

    let mut db = OurDB::new(OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    })
    .unwrap();

    // 70KB payload: above the 64KB per-record limit.
    let oversized = vec![b'X'; 70 * 1024];

    // Storing it must be rejected rather than silently truncated.
    let outcome = db.set(OurDBSetArgs {
        id: None,
        data: &oversized,
    });
    assert!(
        outcome.is_err(),
        "Expected an error when storing data larger than 64KB"
    );

    // Clean up.
    db.destroy().unwrap();
}
|
||||||
|
|
||||||
|
#[test]
fn test_multiple_files() {
    let temp_dir = get_temp_dir();

    // Tiny file size plus 6-byte keys forces data to span several files.
    let mut db = OurDB::new(OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: Some(1024), // Very small file size (1KB)
        keysize: Some(6),      // 6-byte keysize for multiple files
        reset: None,
    })
    .unwrap();

    // Write ten 500-byte records.
    let record_len = 500;
    let payload = vec![b'A'; record_len];
    let mut stored_ids = Vec::new();
    for _ in 0..10 {
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: &payload,
            })
            .unwrap();
        stored_ids.push(id);
    }

    // Every record must read back with its original length.
    for &id in &stored_ids {
        assert_eq!(db.get(id).unwrap().len(), record_len);
    }

    // Count the .db files on disk to confirm the data was split.
    let db_file_count = fs::read_dir(&temp_dir)
        .unwrap()
        .filter_map(Result::ok)
        .filter(|entry| {
            let path = entry.path();
            path.is_file() && path.extension().map_or(false, |ext| ext == "db")
        })
        .count();

    assert!(
        db_file_count > 1,
        "Expected multiple database files, found {}",
        db_file_count
    );

    // Clean up.
    db.destroy().unwrap();
}
|
||||||
787
radixtree/ARCHITECTURE.md
Normal file
787
radixtree/ARCHITECTURE.md
Normal file
@@ -0,0 +1,787 @@
|
|||||||
|
# RadixTree: Architecture for V to Rust Port
|
||||||
|
|
||||||
|
## 1. Overview
|
||||||
|
|
||||||
|
RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This document outlines the architecture for porting the RadixTree module from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.
|
||||||
|
|
||||||
|
The Rust implementation will integrate with the existing OurDB Rust implementation for persistent storage.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TD
|
||||||
|
A[Client Code] --> B[RadixTree API]
|
||||||
|
B --> C[Node Management]
|
||||||
|
B --> D[Serialization]
|
||||||
|
B --> E[Tree Operations]
|
||||||
|
C --> F[OurDB]
|
||||||
|
D --> F
|
||||||
|
E --> C
|
||||||
|
```
|
||||||
|
|
||||||
|
## 2. Current Architecture (V Implementation)
|
||||||
|
|
||||||
|
The current V implementation of RadixTree consists of the following components:
|
||||||
|
|
||||||
|
### 2.1 Core Data Structures
|
||||||
|
|
||||||
|
#### Node
|
||||||
|
```v
|
||||||
|
struct Node {
|
||||||
|
mut:
|
||||||
|
key_segment string // The segment of the key stored at this node
|
||||||
|
value []u8 // Value stored at this node (empty if not a leaf)
|
||||||
|
children []NodeRef // References to child nodes
|
||||||
|
is_leaf bool // Whether this node is a leaf node
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### NodeRef
|
||||||
|
```v
|
||||||
|
struct NodeRef {
|
||||||
|
mut:
|
||||||
|
key_part string // The key segment for this child
|
||||||
|
node_id u32 // Database ID of the node
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### RadixTree
|
||||||
|
```v
|
||||||
|
@[heap]
|
||||||
|
pub struct RadixTree {
|
||||||
|
mut:
|
||||||
|
db &ourdb.OurDB // Database for persistent storage
|
||||||
|
root_id u32 // Database ID of the root node
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.2 Key Operations
|
||||||
|
|
||||||
|
1. **new()**: Creates a new radix tree with a specified database path
|
||||||
|
2. **set(key, value)**: Sets a key-value pair in the tree
|
||||||
|
3. **get(key)**: Retrieves a value by key
|
||||||
|
4. **update(prefix, new_value)**: Updates the value at a given key prefix
|
||||||
|
5. **delete(key)**: Removes a key from the tree
|
||||||
|
6. **list(prefix)**: Lists all keys with a given prefix
|
||||||
|
7. **getall(prefix)**: Gets all values for keys with a given prefix
|
||||||
|
|
||||||
|
### 2.3 Serialization
|
||||||
|
|
||||||
|
The V implementation uses a custom binary serialization format for nodes:
|
||||||
|
- Version byte (1 byte)
|
||||||
|
- Key segment (string)
|
||||||
|
- Value length (2 bytes) followed by value bytes
|
||||||
|
- Children count (2 bytes) followed by children
|
||||||
|
- Is leaf flag (1 byte)
|
||||||
|
|
||||||
|
Each child is serialized as:
|
||||||
|
- Key part (string)
|
||||||
|
- Node ID (4 bytes)
|
||||||
|
|
||||||
|
### 2.4 Integration with OurDB
|
||||||
|
|
||||||
|
The RadixTree uses OurDB for persistent storage:
|
||||||
|
- Each node is serialized and stored as a record in OurDB
|
||||||
|
- Node references use OurDB record IDs
|
||||||
|
- The tree maintains a root node ID for traversal
|
||||||
|
|
||||||
|
## 3. Proposed Rust Architecture
|
||||||
|
|
||||||
|
The Rust implementation will maintain the same overall architecture while leveraging Rust's type system, ownership model, and error handling.
|
||||||
|
|
||||||
|
### 3.1 Core Data Structures
|
||||||
|
|
||||||
|
#### Node
|
||||||
|
```rust
|
||||||
|
pub struct Node {
|
||||||
|
key_segment: String,
|
||||||
|
value: Vec<u8>,
|
||||||
|
children: Vec<NodeRef>,
|
||||||
|
is_leaf: bool,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### NodeRef
|
||||||
|
```rust
|
||||||
|
pub struct NodeRef {
|
||||||
|
key_part: String,
|
||||||
|
node_id: u32,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### RadixTree
|
||||||
|
```rust
|
||||||
|
pub struct RadixTree {
|
||||||
|
db: ourdb::OurDB,
|
||||||
|
root_id: u32,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.2 Public API
|
||||||
|
|
||||||
|
```rust
|
||||||
|
impl RadixTree {
|
||||||
|
/// Creates a new radix tree with the specified database path
|
||||||
|
pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets a key-value pair in the tree
|
||||||
|
pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets a value by key from the tree
|
||||||
|
pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Updates the value at a given key prefix
|
||||||
|
pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Deletes a key from the tree
|
||||||
|
pub fn delete(&mut self, key: &str) -> Result<(), Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Lists all keys with a given prefix
|
||||||
|
pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets all values for keys with a given prefix
|
||||||
|
pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.3 Error Handling
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("OurDB error: {0}")]
|
||||||
|
OurDB(#[from] ourdb::Error),
|
||||||
|
|
||||||
|
#[error("Key not found: {0}")]
|
||||||
|
KeyNotFound(String),
|
||||||
|
|
||||||
|
#[error("Prefix not found: {0}")]
|
||||||
|
PrefixNotFound(String),
|
||||||
|
|
||||||
|
#[error("Serialization error: {0}")]
|
||||||
|
Serialization(String),
|
||||||
|
|
||||||
|
#[error("Deserialization error: {0}")]
|
||||||
|
Deserialization(String),
|
||||||
|
|
||||||
|
#[error("Invalid operation: {0}")]
|
||||||
|
InvalidOperation(String),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.4 Serialization
|
||||||
|
|
||||||
|
The Rust implementation will maintain the same binary serialization format for compatibility:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
const VERSION: u8 = 1;
|
||||||
|
|
||||||
|
impl Node {
|
||||||
|
/// Serializes a node to bytes for storage
|
||||||
|
fn serialize(&self) -> Vec<u8> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Deserializes bytes to a node
|
||||||
|
fn deserialize(data: &[u8]) -> Result<Self, Error> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.5 Integration with OurDB
|
||||||
|
|
||||||
|
The Rust implementation will use the existing OurDB Rust implementation:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
impl RadixTree {
|
||||||
|
fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
|
||||||
|
let data = self.db.get(node_id)?;
|
||||||
|
Node::deserialize(&data)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
|
||||||
|
let data = node.serialize();
|
||||||
|
let args = ourdb::OurDBSetArgs {
|
||||||
|
id: node_id,
|
||||||
|
data: &data,
|
||||||
|
};
|
||||||
|
Ok(self.db.set(args)?)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 4. Implementation Strategy
|
||||||
|
|
||||||
|
### 4.1 Phase 1: Core Data Structures and Serialization
|
||||||
|
|
||||||
|
1. Implement the `Node` and `NodeRef` structs
|
||||||
|
2. Implement serialization and deserialization functions
|
||||||
|
3. Implement the `Error` enum for error handling
|
||||||
|
|
||||||
|
### 4.2 Phase 2: Basic Tree Operations
|
||||||
|
|
||||||
|
1. Implement the `RadixTree` struct with OurDB integration
|
||||||
|
2. Implement the `new()` function for creating a new tree
|
||||||
|
3. Implement the `get()` and `set()` functions for basic operations
|
||||||
|
|
||||||
|
### 4.3 Phase 3: Advanced Tree Operations
|
||||||
|
|
||||||
|
1. Implement the `delete()` function for removing keys
|
||||||
|
2. Implement the `update()` function for updating values
|
||||||
|
3. Implement the `list()` and `getall()` functions for prefix operations
|
||||||
|
|
||||||
|
### 4.4 Phase 4: Testing and Optimization
|
||||||
|
|
||||||
|
1. Port existing tests from V to Rust
|
||||||
|
2. Add new tests for Rust-specific functionality
|
||||||
|
3. Benchmark and optimize performance
|
||||||
|
4. Ensure compatibility with existing RadixTree data
|
||||||
|
|
||||||
|
## 5. Implementation Considerations
|
||||||
|
|
||||||
|
### 5.1 Memory Management
|
||||||
|
|
||||||
|
Leverage Rust's ownership model for safe and efficient memory management:
|
||||||
|
- Use `String` and `Vec<u8>` for data buffers instead of raw pointers
|
||||||
|
- Use references and borrows to avoid unnecessary copying
|
||||||
|
- Implement proper RAII for resource management
|
||||||
|
|
||||||
|
### 5.2 Error Handling
|
||||||
|
|
||||||
|
Use Rust's `Result` type for comprehensive error handling:
|
||||||
|
- Define custom error types for RadixTree-specific errors
|
||||||
|
- Propagate errors using the `?` operator
|
||||||
|
- Provide detailed error messages
|
||||||
|
- Implement proper error conversion using the `From` trait
|
||||||
|
|
||||||
|
### 5.3 Performance Optimizations
|
||||||
|
|
||||||
|
Identify opportunities for performance improvements:
|
||||||
|
- Use efficient string operations for prefix matching
|
||||||
|
- Minimize database operations by caching nodes when appropriate
|
||||||
|
- Use iterators for efficient traversal
|
||||||
|
- Consider using `Cow<str>` for string operations to avoid unnecessary cloning
|
||||||
|
|
||||||
|
### 5.4 Compatibility
|
||||||
|
|
||||||
|
Ensure compatibility with the V implementation:
|
||||||
|
- Maintain the same serialization format
|
||||||
|
- Ensure identical behavior for all operations
|
||||||
|
- Support reading existing RadixTree data
|
||||||
|
|
||||||
|
## 6. Testing Strategy
|
||||||
|
|
||||||
|
### 6.1 Unit Tests
|
||||||
|
|
||||||
|
Write comprehensive unit tests for each component:
|
||||||
|
- Test `Node` serialization/deserialization
|
||||||
|
- Test string operations (common prefix, etc.)
|
||||||
|
- Test error handling
|
||||||
|
|
||||||
|
### 6.2 Integration Tests
|
||||||
|
|
||||||
|
Write integration tests for the complete system:
|
||||||
|
- Test basic CRUD operations
|
||||||
|
- Test prefix operations
|
||||||
|
- Test edge cases (empty keys, very long keys, etc.)
|
||||||
|
- Test with large datasets
|
||||||
|
|
||||||
|
### 6.3 Compatibility Tests
|
||||||
|
|
||||||
|
Ensure compatibility with existing RadixTree data:
|
||||||
|
- Test reading existing V-created RadixTree data
|
||||||
|
- Test writing data that can be read by the V implementation
|
||||||
|
|
||||||
|
### 6.4 Performance Tests
|
||||||
|
|
||||||
|
Benchmark performance against the V implementation:
|
||||||
|
- Measure throughput for set/get operations
|
||||||
|
- Measure latency for different operations
|
||||||
|
- Test with different tree sizes and key distributions
|
||||||
|
|
||||||
|
## 7. Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
radixtree/
|
||||||
|
├── Cargo.toml
|
||||||
|
├── src/
|
||||||
|
│ ├── lib.rs # Public API and re-exports
|
||||||
|
│ ├── node.rs # Node and NodeRef implementations
|
||||||
|
│ ├── serialize.rs # Serialization and deserialization
|
||||||
|
│ ├── error.rs # Error types
|
||||||
|
│ └── operations.rs # Tree operations implementation
|
||||||
|
├── tests/
|
||||||
|
│ ├── basic_test.rs # Basic operations tests
|
||||||
|
│ ├── prefix_test.rs # Prefix operations tests
|
||||||
|
│ └── edge_cases.rs # Edge case tests
|
||||||
|
└── examples/
|
||||||
|
├── basic.rs # Basic usage example
|
||||||
|
├── prefix.rs # Prefix operations example
|
||||||
|
└── performance.rs # Performance benchmark
|
||||||
|
```
|
||||||
|
|
||||||
|
## 8. Dependencies
|
||||||
|
|
||||||
|
The Rust implementation will use the following dependencies:
|
||||||
|
|
||||||
|
- `ourdb` for persistent storage
|
||||||
|
- `thiserror` for error handling
|
||||||
|
- `log` for logging
|
||||||
|
- `criterion` for benchmarking (dev dependency)
|
||||||
|
|
||||||
|
## 9. Compatibility Considerations
|
||||||
|
|
||||||
|
To ensure compatibility with the V implementation:
|
||||||
|
|
||||||
|
1. Maintain the same serialization format for nodes
|
||||||
|
2. Ensure identical behavior for all operations
|
||||||
|
3. Support reading existing RadixTree data
|
||||||
|
4. Maintain the same performance characteristics
|
||||||
|
|
||||||
|
## 10. Future Extensions
|
||||||
|
|
||||||
|
Potential future extensions to consider:
|
||||||
|
|
||||||
|
1. Async API for non-blocking operations
|
||||||
|
2. Iterator interface for efficient traversal
|
||||||
|
3. Batch operations for improved performance
|
||||||
|
4. Custom serialization formats for specific use cases
|
||||||
|
5. Compression support for values
|
||||||
|
6. Concurrency support for parallel operations
|
||||||
|
|
||||||
|
## 11. Conclusion
|
||||||
|
|
||||||
|
This architecture provides a roadmap for porting RadixTree from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.
|
||||||
|
|
||||||
|
The Rust implementation aims to be:
|
||||||
|
- **Safe**: Leveraging Rust's ownership model for memory safety
|
||||||
|
- **Fast**: Maintaining or improving performance compared to V
|
||||||
|
- **Compatible**: Working with existing RadixTree data
|
||||||
|
- **Extensible**: Providing a foundation for future enhancements
|
||||||
|
- **Well-tested**: Including comprehensive test coverage
|
||||||
|
|
||||||
|
## 12. Implementation Files
|
||||||
|
|
||||||
|
### 12.1 Cargo.toml
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[package]
|
||||||
|
name = "radixtree"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "A persistent radix tree implementation using OurDB for storage"
|
||||||
|
authors = ["OurWorld Team"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
ourdb = { path = "../ourdb" }
|
||||||
|
thiserror = "1.0.40"
|
||||||
|
log = "0.4.17"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
criterion = "0.5.1"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "radixtree_benchmarks"
|
||||||
|
harness = false
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "basic_usage"
|
||||||
|
path = "examples/basic_usage.rs"
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "prefix_operations"
|
||||||
|
path = "examples/prefix_operations.rs"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 12.2 src/lib.rs
|
||||||
|
|
||||||
|
```rust
|
||||||
|
//! RadixTree is a space-optimized tree data structure that enables efficient string key operations
|
||||||
|
//! with persistent storage using OurDB as a backend.
|
||||||
|
//!
|
||||||
|
//! This implementation provides a persistent radix tree that can be used for efficient
|
||||||
|
//! prefix-based key operations, such as auto-complete, routing tables, and more.
|
||||||
|
|
||||||
|
mod error;
|
||||||
|
mod node;
|
||||||
|
mod operations;
|
||||||
|
mod serialize;
|
||||||
|
|
||||||
|
pub use error::Error;
|
||||||
|
pub use node::{Node, NodeRef};
|
||||||
|
|
||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
/// RadixTree represents a radix tree data structure with persistent storage.
|
||||||
|
pub struct RadixTree {
|
||||||
|
db: OurDB,
|
||||||
|
root_id: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RadixTree {
|
||||||
|
/// Creates a new radix tree with the specified database path.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `path` - The path to the database directory
|
||||||
|
/// * `reset` - Whether to reset the database if it exists
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A new `RadixTree` instance
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns an error if the database cannot be created or opened
|
||||||
|
pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets a key-value pair in the tree.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `key` - The key to set
|
||||||
|
/// * `value` - The value to set
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns an error if the operation fails
|
||||||
|
pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets a value by key from the tree.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `key` - The key to get
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// The value associated with the key
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns an error if the key is not found or the operation fails
|
||||||
|
pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Updates the value at a given key prefix.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `prefix` - The key prefix to update
|
||||||
|
/// * `new_value` - The new value to set
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns an error if the prefix is not found or the operation fails
|
||||||
|
pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Deletes a key from the tree.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `key` - The key to delete
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns an error if the key is not found or the operation fails
|
||||||
|
pub fn delete(&mut self, key: &str) -> Result<(), Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Lists all keys with a given prefix.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `prefix` - The prefix to search for
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A list of keys that start with the given prefix
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns an error if the operation fails
|
||||||
|
pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets all values for keys with a given prefix.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `prefix` - The prefix to search for
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A list of values for keys that start with the given prefix
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns an error if the operation fails
|
||||||
|
pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 12.3 src/error.rs
|
||||||
|
|
||||||
|
```rust
|
||||||
|
//! Error types for the RadixTree module.
|
||||||
|
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
/// Error type for RadixTree operations.
|
||||||
|
#[derive(Debug, Error)]
|
||||||
|
pub enum Error {
|
||||||
|
/// Error from OurDB operations.
|
||||||
|
#[error("OurDB error: {0}")]
|
||||||
|
OurDB(#[from] ourdb::Error),
|
||||||
|
|
||||||
|
/// Error when a key is not found.
|
||||||
|
#[error("Key not found: {0}")]
|
||||||
|
KeyNotFound(String),
|
||||||
|
|
||||||
|
/// Error when a prefix is not found.
|
||||||
|
#[error("Prefix not found: {0}")]
|
||||||
|
PrefixNotFound(String),
|
||||||
|
|
||||||
|
/// Error during serialization.
|
||||||
|
#[error("Serialization error: {0}")]
|
||||||
|
Serialization(String),
|
||||||
|
|
||||||
|
/// Error during deserialization.
|
||||||
|
#[error("Deserialization error: {0}")]
|
||||||
|
Deserialization(String),
|
||||||
|
|
||||||
|
/// Error for invalid operations.
|
||||||
|
#[error("Invalid operation: {0}")]
|
||||||
|
InvalidOperation(String),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 12.4 src/node.rs
|
||||||
|
|
||||||
|
```rust
|
||||||
|
//! Node types for the RadixTree module.
|
||||||
|
|
||||||
|
/// Represents a node in the radix tree.
|
||||||
|
pub struct Node {
|
||||||
|
/// The segment of the key stored at this node.
|
||||||
|
pub key_segment: String,
|
||||||
|
|
||||||
|
/// Value stored at this node (empty if not a leaf).
|
||||||
|
pub value: Vec<u8>,
|
||||||
|
|
||||||
|
/// References to child nodes.
|
||||||
|
pub children: Vec<NodeRef>,
|
||||||
|
|
||||||
|
/// Whether this node is a leaf node.
|
||||||
|
pub is_leaf: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reference to a node in the database.
|
||||||
|
pub struct NodeRef {
|
||||||
|
/// The key segment for this child.
|
||||||
|
pub key_part: String,
|
||||||
|
|
||||||
|
/// Database ID of the node.
|
||||||
|
pub node_id: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Node {
|
||||||
|
/// Creates a new node.
|
||||||
|
pub fn new(key_segment: String, value: Vec<u8>, is_leaf: bool) -> Self {
|
||||||
|
Self {
|
||||||
|
key_segment,
|
||||||
|
value,
|
||||||
|
children: Vec::new(),
|
||||||
|
is_leaf,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a new root node.
|
||||||
|
pub fn new_root() -> Self {
|
||||||
|
Self {
|
||||||
|
key_segment: String::new(),
|
||||||
|
value: Vec::new(),
|
||||||
|
children: Vec::new(),
|
||||||
|
is_leaf: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NodeRef {
|
||||||
|
/// Creates a new node reference.
|
||||||
|
pub fn new(key_part: String, node_id: u32) -> Self {
|
||||||
|
Self {
|
||||||
|
key_part,
|
||||||
|
node_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 12.5 src/serialize.rs
|
||||||
|
|
||||||
|
```rust
|
||||||
|
//! Serialization and deserialization for RadixTree nodes.
|
||||||
|
|
||||||
|
use crate::error::Error;
|
||||||
|
use crate::node::{Node, NodeRef};
|
||||||
|
|
||||||
|
/// Current binary format version.
|
||||||
|
const VERSION: u8 = 1;
|
||||||
|
|
||||||
|
impl Node {
|
||||||
|
/// Serializes a node to bytes for storage.
|
||||||
|
pub fn serialize(&self) -> Vec<u8> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Deserializes bytes to a node.
|
||||||
|
pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 12.6 src/operations.rs
|
||||||
|
|
||||||
|
```rust
|
||||||
|
//! Implementation of RadixTree operations.
|
||||||
|
|
||||||
|
use crate::error::Error;
|
||||||
|
use crate::node::{Node, NodeRef};
|
||||||
|
use crate::RadixTree;
|
||||||
|
|
||||||
|
impl RadixTree {
|
||||||
|
/// Helper function to get a node from the database.
|
||||||
|
pub(crate) fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to save a node to the database.
|
||||||
|
pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to find all keys with a given prefix.
|
||||||
|
fn find_keys_with_prefix(
|
||||||
|
&mut self,
|
||||||
|
node_id: u32,
|
||||||
|
current_path: &str,
|
||||||
|
prefix: &str,
|
||||||
|
result: &mut Vec<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to recursively collect all keys under a node.
|
||||||
|
fn collect_all_keys(
|
||||||
|
&mut self,
|
||||||
|
node_id: u32,
|
||||||
|
current_path: &str,
|
||||||
|
result: &mut Vec<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to get the common prefix of two strings.
|
||||||
|
fn get_common_prefix(a: &str, b: &str) -> String {
|
||||||
|
// Implementation will go here
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 12.7 examples/basic_usage.rs
|
||||||
|
|
||||||
|
```rust
|
||||||
|
//! Basic usage example for RadixTree.
|
||||||
|
|
||||||
|
use radixtree::RadixTree;
|
||||||
|
|
||||||
|
fn main() -> Result<(), radixtree::Error> {
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
let db_path = std::env::temp_dir().join("radixtree_example");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
println!("Creating radix tree at: {}", db_path.display());
|
||||||
|
|
||||||
|
// Create a new radix tree
|
||||||
|
let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;
|
||||||
|
|
||||||
|
// Store some data
|
||||||
|
tree.set("hello", b"world".to_vec())?;
|
||||||
|
tree.set("help", b"me".to_vec())?;
|
||||||
|
tree.set("helicopter", b"flying".to_vec())?;
|
||||||
|
|
||||||
|
// Retrieve and print the data
|
||||||
|
let value = tree.get("hello")?;
|
||||||
|
println!("hello: {}", String::from_utf8_lossy(&value));
|
||||||
|
|
||||||
|
// List keys with prefix
|
||||||
|
let keys = tree.list("hel")?;
|
||||||
|
println!("Keys with prefix 'hel': {:?}", keys);
|
||||||
|
|
||||||
|
// Get all values with prefix
|
||||||
|
let values = tree.getall("hel")?;
|
||||||
|
println!("Values with prefix 'hel':");
|
||||||
|
for (i, value) in values.iter().enumerate() {
|
||||||
|
println!(" {}: {}", i, String::from_utf8_lossy(value));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete a key
|
||||||
|
tree.delete("help")?;
|
||||||
|
println!("Deleted 'help'");
|
||||||
|
|
||||||
|
// Verify deletion
|
||||||
|
let keys_after = tree.list("hel")?;
|
||||||
|
println!("Keys with prefix 'hel' after deletion: {:?}", keys_after);
|
||||||
|
|
||||||
|
// Clean up (optional)
|
||||||
|
if std::env::var("KEEP_DB").is_err() {
|
||||||
|
std::fs::remove_dir_all(&db_path)?;
|
||||||
|
println!("Cleaned up database directory");
|
||||||
|
} else {
|
||||||
|
println!("Database kept at: {}", db_path.display());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
815
radixtree/Cargo.lock
generated
Normal file
815
radixtree/Cargo.lock
generated
Normal file
@@ -0,0 +1,815 @@
|
|||||||
|
# This file is automatically @generated by Cargo.
|
||||||
|
# It is not intended for manual editing.
|
||||||
|
version = 4
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "aho-corasick"
|
||||||
|
version = "1.1.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
|
||||||
|
dependencies = [
|
||||||
|
"memchr",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "anes"
|
||||||
|
version = "0.1.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "anstyle"
|
||||||
|
version = "1.0.10"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "autocfg"
|
||||||
|
version = "1.4.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bitflags"
|
||||||
|
version = "2.9.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bumpalo"
|
||||||
|
version = "3.17.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cast"
|
||||||
|
version = "0.3.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cfg-if"
|
||||||
|
version = "1.0.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ciborium"
|
||||||
|
version = "0.2.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
|
||||||
|
dependencies = [
|
||||||
|
"ciborium-io",
|
||||||
|
"ciborium-ll",
|
||||||
|
"serde",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ciborium-io"
|
||||||
|
version = "0.2.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ciborium-ll"
|
||||||
|
version = "0.2.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
|
||||||
|
dependencies = [
|
||||||
|
"ciborium-io",
|
||||||
|
"half",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap"
|
||||||
|
version = "4.5.35"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944"
|
||||||
|
dependencies = [
|
||||||
|
"clap_builder",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap_builder"
|
||||||
|
version = "4.5.35"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9"
|
||||||
|
dependencies = [
|
||||||
|
"anstyle",
|
||||||
|
"clap_lex",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap_lex"
|
||||||
|
version = "0.7.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crc32fast"
|
||||||
|
version = "1.4.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "criterion"
|
||||||
|
version = "0.5.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
|
||||||
|
dependencies = [
|
||||||
|
"anes",
|
||||||
|
"cast",
|
||||||
|
"ciborium",
|
||||||
|
"clap",
|
||||||
|
"criterion-plot",
|
||||||
|
"is-terminal",
|
||||||
|
"itertools",
|
||||||
|
"num-traits",
|
||||||
|
"once_cell",
|
||||||
|
"oorandom",
|
||||||
|
"plotters",
|
||||||
|
"rayon",
|
||||||
|
"regex",
|
||||||
|
"serde",
|
||||||
|
"serde_derive",
|
||||||
|
"serde_json",
|
||||||
|
"tinytemplate",
|
||||||
|
"walkdir",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "criterion-plot"
|
||||||
|
version = "0.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
|
||||||
|
dependencies = [
|
||||||
|
"cast",
|
||||||
|
"itertools",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crossbeam-deque"
|
||||||
|
version = "0.8.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
|
||||||
|
dependencies = [
|
||||||
|
"crossbeam-epoch",
|
||||||
|
"crossbeam-utils",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crossbeam-epoch"
|
||||||
|
version = "0.9.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
|
||||||
|
dependencies = [
|
||||||
|
"crossbeam-utils",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crossbeam-utils"
|
||||||
|
version = "0.8.21"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crunchy"
|
||||||
|
version = "0.2.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "either"
|
||||||
|
version = "1.15.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "errno"
|
||||||
|
version = "0.3.11"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "fastrand"
|
||||||
|
version = "2.3.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "getrandom"
|
||||||
|
version = "0.2.15"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"libc",
|
||||||
|
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "getrandom"
|
||||||
|
version = "0.3.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"libc",
|
||||||
|
"r-efi",
|
||||||
|
"wasi 0.14.2+wasi-0.2.4",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "half"
|
||||||
|
version = "2.6.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"crunchy",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "hermit-abi"
|
||||||
|
version = "0.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "is-terminal"
|
||||||
|
version = "0.4.16"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
|
||||||
|
dependencies = [
|
||||||
|
"hermit-abi",
|
||||||
|
"libc",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "itertools"
|
||||||
|
version = "0.10.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
|
||||||
|
dependencies = [
|
||||||
|
"either",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "itoa"
|
||||||
|
version = "1.0.15"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "js-sys"
|
||||||
|
version = "0.3.77"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
|
||||||
|
dependencies = [
|
||||||
|
"once_cell",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "libc"
|
||||||
|
version = "0.2.171"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "linux-raw-sys"
|
||||||
|
version = "0.9.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "log"
|
||||||
|
version = "0.4.27"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "memchr"
|
||||||
|
version = "2.7.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "num-traits"
|
||||||
|
version = "0.2.19"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
|
||||||
|
dependencies = [
|
||||||
|
"autocfg",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "once_cell"
|
||||||
|
version = "1.21.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "oorandom"
|
||||||
|
version = "11.1.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ourdb"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"crc32fast",
|
||||||
|
"log",
|
||||||
|
"rand",
|
||||||
|
"thiserror",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "plotters"
|
||||||
|
version = "0.3.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
|
||||||
|
dependencies = [
|
||||||
|
"num-traits",
|
||||||
|
"plotters-backend",
|
||||||
|
"plotters-svg",
|
||||||
|
"wasm-bindgen",
|
||||||
|
"web-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "plotters-backend"
|
||||||
|
version = "0.3.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "plotters-svg"
|
||||||
|
version = "0.3.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
|
||||||
|
dependencies = [
|
||||||
|
"plotters-backend",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ppv-lite86"
|
||||||
|
version = "0.2.21"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
|
||||||
|
dependencies = [
|
||||||
|
"zerocopy",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "proc-macro2"
|
||||||
|
version = "1.0.94"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
|
||||||
|
dependencies = [
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "quote"
|
||||||
|
version = "1.0.40"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "r-efi"
|
||||||
|
version = "5.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "radixtree"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"criterion",
|
||||||
|
"log",
|
||||||
|
"ourdb",
|
||||||
|
"tempfile",
|
||||||
|
"thiserror",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rand"
|
||||||
|
version = "0.8.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"rand_chacha",
|
||||||
|
"rand_core",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rand_chacha"
|
||||||
|
version = "0.3.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
|
||||||
|
dependencies = [
|
||||||
|
"ppv-lite86",
|
||||||
|
"rand_core",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rand_core"
|
||||||
|
version = "0.6.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
|
||||||
|
dependencies = [
|
||||||
|
"getrandom 0.2.15",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rayon"
|
||||||
|
version = "1.10.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
|
||||||
|
dependencies = [
|
||||||
|
"either",
|
||||||
|
"rayon-core",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rayon-core"
|
||||||
|
version = "1.12.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
|
||||||
|
dependencies = [
|
||||||
|
"crossbeam-deque",
|
||||||
|
"crossbeam-utils",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex"
|
||||||
|
version = "1.11.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
|
||||||
|
dependencies = [
|
||||||
|
"aho-corasick",
|
||||||
|
"memchr",
|
||||||
|
"regex-automata",
|
||||||
|
"regex-syntax",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex-automata"
|
||||||
|
version = "0.4.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
|
||||||
|
dependencies = [
|
||||||
|
"aho-corasick",
|
||||||
|
"memchr",
|
||||||
|
"regex-syntax",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex-syntax"
|
||||||
|
version = "0.8.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustix"
|
||||||
|
version = "1.0.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags",
|
||||||
|
"errno",
|
||||||
|
"libc",
|
||||||
|
"linux-raw-sys",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustversion"
|
||||||
|
version = "1.0.20"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ryu"
|
||||||
|
version = "1.0.20"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "same-file"
|
||||||
|
version = "1.0.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
|
||||||
|
dependencies = [
|
||||||
|
"winapi-util",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde"
|
||||||
|
version = "1.0.219"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
|
||||||
|
dependencies = [
|
||||||
|
"serde_derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde_derive"
|
||||||
|
version = "1.0.219"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde_json"
|
||||||
|
version = "1.0.140"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
|
||||||
|
dependencies = [
|
||||||
|
"itoa",
|
||||||
|
"memchr",
|
||||||
|
"ryu",
|
||||||
|
"serde",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "syn"
|
||||||
|
version = "2.0.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tempfile"
|
||||||
|
version = "3.19.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
|
||||||
|
dependencies = [
|
||||||
|
"fastrand",
|
||||||
|
"getrandom 0.3.2",
|
||||||
|
"once_cell",
|
||||||
|
"rustix",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror"
|
||||||
|
version = "1.0.69"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
|
||||||
|
dependencies = [
|
||||||
|
"thiserror-impl",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror-impl"
|
||||||
|
version = "1.0.69"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tinytemplate"
|
||||||
|
version = "1.2.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
|
||||||
|
dependencies = [
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "unicode-ident"
|
||||||
|
version = "1.0.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "walkdir"
|
||||||
|
version = "2.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
|
||||||
|
dependencies = [
|
||||||
|
"same-file",
|
||||||
|
"winapi-util",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasi"
|
||||||
|
version = "0.11.0+wasi-snapshot-preview1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasi"
|
||||||
|
version = "0.14.2+wasi-0.2.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
|
||||||
|
dependencies = [
|
||||||
|
"wit-bindgen-rt",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"once_cell",
|
||||||
|
"rustversion",
|
||||||
|
"wasm-bindgen-macro",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-backend"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
|
||||||
|
dependencies = [
|
||||||
|
"bumpalo",
|
||||||
|
"log",
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
"wasm-bindgen-shared",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-macro"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
|
||||||
|
dependencies = [
|
||||||
|
"quote",
|
||||||
|
"wasm-bindgen-macro-support",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-macro-support"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
"wasm-bindgen-backend",
|
||||||
|
"wasm-bindgen-shared",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-shared"
|
||||||
|
version = "0.2.100"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
|
||||||
|
dependencies = [
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "web-sys"
|
||||||
|
version = "0.3.77"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
|
||||||
|
dependencies = [
|
||||||
|
"js-sys",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "winapi-util"
|
||||||
|
version = "0.1.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||||
|
dependencies = [
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-sys"
|
||||||
|
version = "0.59.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
|
||||||
|
dependencies = [
|
||||||
|
"windows-targets",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-targets"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
|
||||||
|
dependencies = [
|
||||||
|
"windows_aarch64_gnullvm",
|
||||||
|
"windows_aarch64_msvc",
|
||||||
|
"windows_i686_gnu",
|
||||||
|
"windows_i686_gnullvm",
|
||||||
|
"windows_i686_msvc",
|
||||||
|
"windows_x86_64_gnu",
|
||||||
|
"windows_x86_64_gnullvm",
|
||||||
|
"windows_x86_64_msvc",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_gnullvm"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_msvc"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_gnu"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_gnullvm"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_msvc"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnu"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnullvm"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_msvc"
|
||||||
|
version = "0.52.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wit-bindgen-rt"
|
||||||
|
version = "0.39.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "zerocopy"
|
||||||
|
version = "0.8.24"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
|
||||||
|
dependencies = [
|
||||||
|
"zerocopy-derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "zerocopy-derive"
|
||||||
|
version = "0.8.24"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
27
radixtree/Cargo.toml
Normal file
27
radixtree/Cargo.toml
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
[package]
|
||||||
|
name = "radixtree"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "A persistent radix tree implementation using OurDB for storage"
|
||||||
|
authors = ["OurWorld Team"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
ourdb = { path = "../ourdb" }
|
||||||
|
thiserror = "1.0.40"
|
||||||
|
log = "0.4.17"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
criterion = "0.5.1"
|
||||||
|
tempfile = "3.8.0"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "radixtree_benchmarks"
|
||||||
|
harness = false
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "basic_usage"
|
||||||
|
path = "examples/basic_usage.rs"
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "prefix_operations"
|
||||||
|
path = "examples/prefix_operations.rs"
|
||||||
265
radixtree/MIGRATION.md
Normal file
265
radixtree/MIGRATION.md
Normal file
@@ -0,0 +1,265 @@
|
|||||||
|
# Migration Guide: V to Rust RadixTree
|
||||||
|
|
||||||
|
This document provides guidance for migrating from the V implementation of RadixTree to the Rust implementation.
|
||||||
|
|
||||||
|
## API Changes
|
||||||
|
|
||||||
|
The Rust implementation maintains API compatibility with the V implementation, but with some idiomatic Rust changes:
|
||||||
|
|
||||||
|
### V API
|
||||||
|
|
||||||
|
```v
|
||||||
|
// Create a new radix tree
|
||||||
|
mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true)!
|
||||||
|
|
||||||
|
// Set a key-value pair
|
||||||
|
rt.set('test', 'value1'.bytes())!
|
||||||
|
|
||||||
|
// Get a value by key
|
||||||
|
value := rt.get('test')!
|
||||||
|
|
||||||
|
// Update a value at a prefix
|
||||||
|
rt.update('prefix', 'new_value'.bytes())!
|
||||||
|
|
||||||
|
// Delete a key
|
||||||
|
rt.delete('test')!
|
||||||
|
|
||||||
|
// List keys with a prefix
|
||||||
|
keys := rt.list('prefix')!
|
||||||
|
|
||||||
|
// Get all values with a prefix
|
||||||
|
values := rt.getall('prefix')!
|
||||||
|
```
|
||||||
|
|
||||||
|
### Rust API
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Create a new radix tree
|
||||||
|
let mut tree = RadixTree::new("/tmp/radixtree_test", true)?;
|
||||||
|
|
||||||
|
// Set a key-value pair
|
||||||
|
tree.set("test", b"value1".to_vec())?;
|
||||||
|
|
||||||
|
// Get a value by key
|
||||||
|
let value = tree.get("test")?;
|
||||||
|
|
||||||
|
// Update a value at a prefix
|
||||||
|
tree.update("prefix", b"new_value".to_vec())?;
|
||||||
|
|
||||||
|
// Delete a key
|
||||||
|
tree.delete("test")?;
|
||||||
|
|
||||||
|
// List keys with a prefix
|
||||||
|
let keys = tree.list("prefix")?;
|
||||||
|
|
||||||
|
// Get all values with a prefix
|
||||||
|
let values = tree.getall("prefix")?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Differences
|
||||||
|
|
||||||
|
1. **Error Handling**: The Rust implementation uses Rust's `Result` type for error handling, while the V implementation uses V's `!` operator.
|
||||||
|
|
||||||
|
2. **String Handling**: The Rust implementation uses Rust's `&str` for string parameters and `String` for string return values, while the V implementation uses V's `string` type.
|
||||||
|
|
||||||
|
3. **Binary Data**: The Rust implementation uses Rust's `Vec<u8>` for binary data, while the V implementation uses V's `[]u8` type.
|
||||||
|
|
||||||
|
4. **Constructor**: The Rust implementation uses a constructor function with separate parameters, while the V implementation uses a struct with named parameters.
|
||||||
|
|
||||||
|
5. **Ownership**: The Rust implementation follows Rust's ownership model, requiring mutable references for methods that modify the tree.
|
||||||
|
|
||||||
|
## Data Compatibility
|
||||||
|
|
||||||
|
The Rust implementation maintains data compatibility with the V implementation:
|
||||||
|
|
||||||
|
- The same serialization format is used for nodes
|
||||||
|
- The same OurDB storage format is used
|
||||||
|
- Existing RadixTree data created with the V implementation can be read by the Rust implementation
|
||||||
|
|
||||||
|
## Migration Steps
|
||||||
|
|
||||||
|
1. **Update Dependencies**: Replace the V RadixTree dependency with the Rust RadixTree dependency in your project.
|
||||||
|
|
||||||
|
2. **Update Import Statements**: Replace V import statements with Rust use statements.
|
||||||
|
|
||||||
|
```v
|
||||||
|
// V
|
||||||
|
import freeflowuniverse.herolib.data.radixtree
|
||||||
|
```
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Rust
|
||||||
|
use radixtree::RadixTree;
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Update Constructor Calls**: Replace V constructor calls with Rust constructor calls.
|
||||||
|
|
||||||
|
```v
|
||||||
|
// V
|
||||||
|
mut rt := radixtree.new(path: '/path/to/db', reset: false)!
|
||||||
|
```
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Rust
|
||||||
|
let mut tree = RadixTree::new("/path/to/db", false)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Update Method Calls**: Replace V method calls with Rust method calls.
|
||||||
|
|
||||||
|
```v
|
||||||
|
// V
|
||||||
|
rt.set('key', 'value'.bytes())!
|
||||||
|
```
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Rust
|
||||||
|
tree.set("key", b"value".to_vec())?;
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Update Error Handling**: Replace V error handling with Rust error handling.
|
||||||
|
|
||||||
|
```v
|
||||||
|
// V
|
||||||
|
if value := rt.get('key') {
|
||||||
|
println('Found: ${value.bytestr()}')
|
||||||
|
} else {
|
||||||
|
println('Error: ${err}')
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Rust
|
||||||
|
match tree.get("key") {
|
||||||
|
Ok(value) => println!("Found: {}", String::from_utf8_lossy(&value)),
|
||||||
|
Err(e) => println!("Error: {}", e),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
6. **Update String Conversions**: Replace V string conversions with Rust string conversions.
|
||||||
|
|
||||||
|
```v
|
||||||
|
// V
|
||||||
|
value.bytestr() // Convert []u8 to string
|
||||||
|
```
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Rust
|
||||||
|
String::from_utf8_lossy(&value) // Convert Vec<u8> to string
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example Migration
|
||||||
|
|
||||||
|
### V Code
|
||||||
|
|
||||||
|
```v
|
||||||
|
module main
|
||||||
|
|
||||||
|
import freeflowuniverse.herolib.data.radixtree
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true) or {
|
||||||
|
println('Error creating RadixTree: ${err}')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rt.set('hello', 'world'.bytes()) or {
|
||||||
|
println('Error setting key: ${err}')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rt.set('help', 'me'.bytes()) or {
|
||||||
|
println('Error setting key: ${err}')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if value := rt.get('hello') {
|
||||||
|
println('hello: ${value.bytestr()}')
|
||||||
|
} else {
|
||||||
|
println('Error getting key: ${err}')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
keys := rt.list('hel') or {
|
||||||
|
println('Error listing keys: ${err}')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
println('Keys with prefix "hel": ${keys}')
|
||||||
|
|
||||||
|
values := rt.getall('hel') or {
|
||||||
|
println('Error getting all values: ${err}')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
println('Values with prefix "hel":')
|
||||||
|
for i, value in values {
|
||||||
|
println(' ${i}: ${value.bytestr()}')
|
||||||
|
}
|
||||||
|
|
||||||
|
rt.delete('help') or {
|
||||||
|
println('Error deleting key: ${err}')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
println('Deleted "help"')
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Rust Code
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use radixtree::RadixTree;
|
||||||
|
|
||||||
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let mut tree = RadixTree::new("/tmp/radixtree_test", true)
|
||||||
|
.map_err(|e| format!("Error creating RadixTree: {}", e))?;
|
||||||
|
|
||||||
|
tree.set("hello", b"world".to_vec())
|
||||||
|
.map_err(|e| format!("Error setting key: {}", e))?;
|
||||||
|
|
||||||
|
tree.set("help", b"me".to_vec())
|
||||||
|
.map_err(|e| format!("Error setting key: {}", e))?;
|
||||||
|
|
||||||
|
let value = tree.get("hello")
|
||||||
|
.map_err(|e| format!("Error getting key: {}", e))?;
|
||||||
|
println!("hello: {}", String::from_utf8_lossy(&value));
|
||||||
|
|
||||||
|
let keys = tree.list("hel")
|
||||||
|
.map_err(|e| format!("Error listing keys: {}", e))?;
|
||||||
|
println!("Keys with prefix \"hel\": {:?}", keys);
|
||||||
|
|
||||||
|
let values = tree.getall("hel")
|
||||||
|
.map_err(|e| format!("Error getting all values: {}", e))?;
|
||||||
|
println!("Values with prefix \"hel\":");
|
||||||
|
for (i, value) in values.iter().enumerate() {
|
||||||
|
println!(" {}: {}", i, String::from_utf8_lossy(value));
|
||||||
|
}
|
||||||
|
|
||||||
|
tree.delete("help")
|
||||||
|
.map_err(|e| format!("Error deleting key: {}", e))?;
|
||||||
|
println!("Deleted \"help\"");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance Considerations
|
||||||
|
|
||||||
|
The Rust implementation should provide similar or better performance compared to the V implementation. However, there are some considerations:
|
||||||
|
|
||||||
|
1. **Memory Usage**: The Rust implementation may have different memory usage patterns due to Rust's ownership model.
|
||||||
|
|
||||||
|
2. **Error Handling**: The Rust implementation uses Rust's `Result` type, which may have different performance characteristics compared to V's error handling.
|
||||||
|
|
||||||
|
3. **String Handling**: The Rust implementation uses Rust's string types, which may have different performance characteristics compared to V's string types.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
If you encounter issues during migration, check the following:
|
||||||
|
|
||||||
|
1. **Data Compatibility**: Ensure that the data format is compatible between the V and Rust implementations.
|
||||||
|
|
||||||
|
2. **API Usage**: Ensure that you're using the correct API for the Rust implementation.
|
||||||
|
|
||||||
|
3. **Error Handling**: Ensure that you're handling errors correctly in the Rust implementation.
|
||||||
|
|
||||||
|
4. **String Encoding**: Ensure that string encoding is consistent between the V and Rust implementations.
|
||||||
|
|
||||||
|
If you encounter any issues that are not covered in this guide, please report them to the project maintainers.
|
||||||
189
radixtree/README.md
Normal file
189
radixtree/README.md
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
# RadixTree
|
||||||
|
|
||||||
|
A persistent radix tree implementation in Rust using OurDB for storage.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This implementation provides a persistent radix tree that can be used for efficient prefix-based key operations, such as auto-complete, routing tables, and more.
|
||||||
|
|
||||||
|
A radix tree (also known as a patricia trie or radix trie) is a space-optimized tree data structure that enables efficient string key operations. Unlike a standard trie where each node represents a single character, a radix tree compresses paths by allowing nodes to represent multiple characters (key segments).
|
||||||
|
|
||||||
|
Key characteristics:
|
||||||
|
- Each node stores a segment of a key (not just a single character)
|
||||||
|
- Nodes can have multiple children, each representing a different branch
|
||||||
|
- Leaf nodes contain the actual values
|
||||||
|
- Optimizes storage by compressing common prefixes
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Efficient prefix-based key operations
|
||||||
|
- Persistent storage using OurDB backend
|
||||||
|
- Memory-efficient storage of strings with common prefixes
|
||||||
|
- Support for binary values
|
||||||
|
- Thread-safe operations through OurDB
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Add the dependency to your `Cargo.toml`:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[dependencies]
|
||||||
|
radixtree = { path = "../radixtree" }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use radixtree::RadixTree;
|
||||||
|
|
||||||
|
fn main() -> Result<(), radixtree::Error> {
|
||||||
|
// Create a new radix tree
|
||||||
|
let mut tree = RadixTree::new("/tmp/radix", false)?;
|
||||||
|
|
||||||
|
// Set key-value pairs
|
||||||
|
tree.set("hello", b"world".to_vec())?;
|
||||||
|
tree.set("help", b"me".to_vec())?;
|
||||||
|
|
||||||
|
// Get values by key
|
||||||
|
let value = tree.get("hello")?;
|
||||||
|
println!("hello: {}", String::from_utf8_lossy(&value)); // Prints: world
|
||||||
|
|
||||||
|
// List keys by prefix
|
||||||
|
let keys = tree.list("hel")?; // Returns ["hello", "help"]
|
||||||
|
println!("Keys with prefix 'hel': {:?}", keys);
|
||||||
|
|
||||||
|
// Get all values by prefix
|
||||||
|
let values = tree.getall("hel")?; // Returns [b"world", b"me"]
|
||||||
|
|
||||||
|
// Delete keys
|
||||||
|
tree.delete("help")?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## API
|
||||||
|
|
||||||
|
### Creating a RadixTree
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Create a new radix tree
|
||||||
|
let mut tree = RadixTree::new("/tmp/radix", false)?;
|
||||||
|
|
||||||
|
// Create a new radix tree and reset if it exists
|
||||||
|
let mut tree = RadixTree::new("/tmp/radix", true)?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setting Values
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Set a key-value pair
|
||||||
|
tree.set("key", b"value".to_vec())?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Getting Values
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Get a value by key
|
||||||
|
let value = tree.get("key")?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Updating Values
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Update a value at a given prefix
|
||||||
|
tree.update("prefix", b"new_value".to_vec())?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deleting Keys
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Delete a key
|
||||||
|
tree.delete("key")?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Listing Keys by Prefix
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// List all keys with a given prefix
|
||||||
|
let keys = tree.list("prefix")?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Getting All Values by Prefix
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Get all values for keys with a given prefix
|
||||||
|
let values = tree.getall("prefix")?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance Characteristics
|
||||||
|
|
||||||
|
- Search: O(k) where k is the key length
|
||||||
|
- Insert: O(k) for new keys, may require node splitting
|
||||||
|
- Delete: O(k) plus potential node cleanup
|
||||||
|
- Space: O(n) where n is the total length of all keys
|
||||||
|
|
||||||
|
## Use Cases
|
||||||
|
|
||||||
|
RadixTree is particularly useful for:
|
||||||
|
- Prefix-based searching
|
||||||
|
- IP routing tables
|
||||||
|
- Dictionary implementations
|
||||||
|
- Auto-complete systems
|
||||||
|
- File system paths
|
||||||
|
- Any application requiring efficient string key operations with persistence
|
||||||
|
|
||||||
|
## Implementation Details
|
||||||
|
|
||||||
|
The RadixTree implementation uses OurDB for persistent storage:
|
||||||
|
- Each node is serialized and stored as a record in OurDB
|
||||||
|
- Node references use OurDB record IDs
|
||||||
|
- The tree maintains a root node ID for traversal
|
||||||
|
- Node serialization includes version tracking for format evolution
|
||||||
|
|
||||||
|
For more detailed information about the implementation, see the [ARCHITECTURE.md](./ARCHITECTURE.md) file.
|
||||||
|
|
||||||
|
## Running Tests
|
||||||
|
|
||||||
|
The project includes a comprehensive test suite that verifies all functionality:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all tests
|
||||||
|
cargo test
|
||||||
|
|
||||||
|
# Run specific test file
|
||||||
|
cargo test --test basic_test
|
||||||
|
cargo test --test prefix_test
|
||||||
|
cargo test --test getall_test
|
||||||
|
cargo test --test serialize_test
|
||||||
|
```
|
||||||
|
|
||||||
|
## Running Examples
|
||||||
|
|
||||||
|
The project includes example applications that demonstrate how to use the RadixTree:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run the basic usage example
|
||||||
|
cargo run --example basic_usage
|
||||||
|
|
||||||
|
# Run the prefix operations example
|
||||||
|
cargo run --example prefix_operations
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benchmarking
|
||||||
|
|
||||||
|
The project includes benchmarks to measure performance:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all benchmarks
|
||||||
|
cargo bench
|
||||||
|
|
||||||
|
# Run specific benchmark
|
||||||
|
cargo bench -- set
|
||||||
|
cargo bench -- get
|
||||||
|
cargo bench -- prefix_operations
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This project is licensed under the same license as the HeroCode project.
|
||||||
141
radixtree/benches/radixtree_benchmarks.rs
Normal file
141
radixtree/benches/radixtree_benchmarks.rs
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||||
|
use radixtree::RadixTree;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use tempfile::tempdir;
|
||||||
|
|
||||||
|
fn criterion_benchmark(c: &mut Criterion) {
|
||||||
|
// Create a temporary directory for benchmarks
|
||||||
|
let temp_dir = tempdir().expect("Failed to create temp directory");
|
||||||
|
let db_path = temp_dir.path().to_str().unwrap();
|
||||||
|
|
||||||
|
// Benchmark set operation
|
||||||
|
c.bench_function("set", |b| {
|
||||||
|
let mut tree = RadixTree::new(db_path, true).unwrap();
|
||||||
|
let mut i = 0;
|
||||||
|
b.iter(|| {
|
||||||
|
let key = format!("benchmark_key_{}", i);
|
||||||
|
let value = format!("benchmark_value_{}", i).into_bytes();
|
||||||
|
tree.set(&key, value).unwrap();
|
||||||
|
i += 1;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Setup tree with data for get/list/delete benchmarks
|
||||||
|
let mut setup_tree = RadixTree::new(db_path, true).unwrap();
|
||||||
|
for i in 0..1000 {
|
||||||
|
let key = format!("benchmark_key_{}", i);
|
||||||
|
let value = format!("benchmark_value_{}", i).into_bytes();
|
||||||
|
setup_tree.set(&key, value).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark get operation
|
||||||
|
c.bench_function("get", |b| {
|
||||||
|
let mut tree = RadixTree::new(db_path, false).unwrap();
|
||||||
|
let mut i = 0;
|
||||||
|
b.iter(|| {
|
||||||
|
let key = format!("benchmark_key_{}", i % 1000);
|
||||||
|
let _value = tree.get(&key).unwrap();
|
||||||
|
i += 1;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Benchmark list operation
|
||||||
|
c.bench_function("list", |b| {
|
||||||
|
let mut tree = RadixTree::new(db_path, false).unwrap();
|
||||||
|
b.iter(|| {
|
||||||
|
let _keys = tree.list("benchmark_key_1").unwrap();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Benchmark getall operation
|
||||||
|
c.bench_function("getall", |b| {
|
||||||
|
let mut tree = RadixTree::new(db_path, false).unwrap();
|
||||||
|
b.iter(|| {
|
||||||
|
let _values = tree.getall("benchmark_key_1").unwrap();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Benchmark update operation
|
||||||
|
c.bench_function("update", |b| {
|
||||||
|
let mut tree = RadixTree::new(db_path, false).unwrap();
|
||||||
|
let mut i = 0;
|
||||||
|
b.iter(|| {
|
||||||
|
let key = format!("benchmark_key_{}", i % 1000);
|
||||||
|
let new_value = format!("updated_value_{}", i).into_bytes();
|
||||||
|
tree.update(&key, new_value).unwrap();
|
||||||
|
i += 1;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Benchmark delete operation
|
||||||
|
c.bench_function("delete", |b| {
|
||||||
|
// Create a fresh tree for deletion benchmarks
|
||||||
|
let delete_dir = tempdir().expect("Failed to create temp directory");
|
||||||
|
let delete_path = delete_dir.path().to_str().unwrap();
|
||||||
|
let mut tree = RadixTree::new(delete_path, true).unwrap();
|
||||||
|
|
||||||
|
// Setup keys to delete
|
||||||
|
for i in 0..1000 {
|
||||||
|
let key = format!("delete_key_{}", i);
|
||||||
|
let value = format!("delete_value_{}", i).into_bytes();
|
||||||
|
tree.set(&key, value).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut i = 0;
|
||||||
|
b.iter(|| {
|
||||||
|
let key = format!("delete_key_{}", i % 1000);
|
||||||
|
// Only try to delete if it exists
|
||||||
|
if tree.get(&key).is_ok() {
|
||||||
|
tree.delete(&key).unwrap();
|
||||||
|
}
|
||||||
|
i += 1;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Benchmark prefix operations with varying tree sizes
|
||||||
|
let mut group = c.benchmark_group("prefix_operations");
|
||||||
|
|
||||||
|
for &size in &[100, 1000, 10000] {
|
||||||
|
// Create a fresh tree for each size
|
||||||
|
let size_dir = tempdir().expect("Failed to create temp directory");
|
||||||
|
let size_path = size_dir.path().to_str().unwrap();
|
||||||
|
let mut tree = RadixTree::new(size_path, true).unwrap();
|
||||||
|
|
||||||
|
// Insert data with common prefixes
|
||||||
|
for i in 0..size {
|
||||||
|
let prefix = match i % 5 {
|
||||||
|
0 => "user",
|
||||||
|
1 => "post",
|
||||||
|
2 => "comment",
|
||||||
|
3 => "product",
|
||||||
|
_ => "category",
|
||||||
|
};
|
||||||
|
let key = format!("{}_{}", prefix, i);
|
||||||
|
let value = format!("value_{}", i).into_bytes();
|
||||||
|
tree.set(&key, value).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark list operation for this size
|
||||||
|
group.bench_function(format!("list_size_{}", size), |b| {
|
||||||
|
b.iter(|| {
|
||||||
|
for prefix in &["user", "post", "comment", "product", "category"] {
|
||||||
|
let _keys = tree.list(prefix).unwrap();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Benchmark getall operation for this size
|
||||||
|
group.bench_function(format!("getall_size_{}", size), |b| {
|
||||||
|
b.iter(|| {
|
||||||
|
for prefix in &["user", "post", "comment", "product", "category"] {
|
||||||
|
let _values = tree.getall(prefix).unwrap();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
group.finish();
|
||||||
|
}
|
||||||
|
|
||||||
|
criterion_group!(benches, criterion_benchmark);
|
||||||
|
criterion_main!(benches);
|
||||||
51
radixtree/examples/basic_usage.rs
Normal file
51
radixtree/examples/basic_usage.rs
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
use radixtree::RadixTree;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
fn main() -> Result<(), radixtree::Error> {
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
let db_path = std::env::temp_dir().join("radixtree_example");
|
||||||
|
std::fs::create_dir_all(&db_path)?;
|
||||||
|
|
||||||
|
println!("Creating radix tree at: {}", db_path.display());
|
||||||
|
|
||||||
|
// Create a new radix tree
|
||||||
|
let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;
|
||||||
|
|
||||||
|
// Store some data
|
||||||
|
println!("Storing data...");
|
||||||
|
tree.set("hello", b"world".to_vec())?;
|
||||||
|
tree.set("help", b"me".to_vec())?;
|
||||||
|
tree.set("helicopter", b"flying".to_vec())?;
|
||||||
|
|
||||||
|
// Retrieve and print the data
|
||||||
|
let value = tree.get("hello")?;
|
||||||
|
println!("hello: {}", String::from_utf8_lossy(&value));
|
||||||
|
|
||||||
|
// Update a value
|
||||||
|
println!("Updating value...");
|
||||||
|
tree.update("hello", b"updated world".to_vec())?;
|
||||||
|
|
||||||
|
// Retrieve the updated value
|
||||||
|
let updated_value = tree.get("hello")?;
|
||||||
|
println!("hello (updated): {}", String::from_utf8_lossy(&updated_value));
|
||||||
|
|
||||||
|
// Delete a key
|
||||||
|
println!("Deleting 'help'...");
|
||||||
|
tree.delete("help")?;
|
||||||
|
|
||||||
|
// Try to retrieve the deleted key (should fail)
|
||||||
|
match tree.get("help") {
|
||||||
|
Ok(value) => println!("Unexpected: help still exists with value: {}", String::from_utf8_lossy(&value)),
|
||||||
|
Err(e) => println!("As expected, help was deleted: {}", e),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clean up (optional)
|
||||||
|
if std::env::var("KEEP_DB").is_err() {
|
||||||
|
std::fs::remove_dir_all(&db_path)?;
|
||||||
|
println!("Cleaned up database directory");
|
||||||
|
} else {
|
||||||
|
println!("Database kept at: {}", db_path.display());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
121
radixtree/examples/large_scale_test.rs
Normal file
121
radixtree/examples/large_scale_test.rs
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
use radixtree::RadixTree;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
use std::io::{self, Write};
|
||||||
|
|
||||||
|
// Use much smaller batches to avoid hitting OurDB's size limit
|
||||||
|
const BATCH_SIZE: usize = 1_000;
|
||||||
|
const NUM_BATCHES: usize = 1_000; // Total records: 1,000,000
|
||||||
|
const PROGRESS_INTERVAL: usize = 100;
|
||||||
|
|
||||||
|
fn main() -> Result<(), radixtree::Error> {
|
||||||
|
// Overall metrics
|
||||||
|
let total_start_time = Instant::now();
|
||||||
|
let mut total_records_inserted = 0;
|
||||||
|
let mut batch_times = Vec::with_capacity(NUM_BATCHES);
|
||||||
|
|
||||||
|
println!("Will insert up to {} records in batches of {}",
|
||||||
|
BATCH_SIZE * NUM_BATCHES, BATCH_SIZE);
|
||||||
|
|
||||||
|
// Process in batches to avoid OurDB size limits
|
||||||
|
for batch in 0..NUM_BATCHES {
|
||||||
|
// Create a new database for each batch
|
||||||
|
let batch_path = std::env::temp_dir().join(format!("radixtree_batch_{}", batch));
|
||||||
|
|
||||||
|
// Clean up any existing database
|
||||||
|
if batch_path.exists() {
|
||||||
|
std::fs::remove_dir_all(&batch_path)?;
|
||||||
|
}
|
||||||
|
std::fs::create_dir_all(&batch_path)?;
|
||||||
|
|
||||||
|
println!("\nBatch {}/{}: Creating new radix tree...", batch + 1, NUM_BATCHES);
|
||||||
|
let mut tree = RadixTree::new(batch_path.to_str().unwrap(), true)?;
|
||||||
|
|
||||||
|
let batch_start_time = Instant::now();
|
||||||
|
let mut last_progress_time = Instant::now();
|
||||||
|
let mut last_progress_count = 0;
|
||||||
|
|
||||||
|
// Insert records for this batch
|
||||||
|
for i in 0..BATCH_SIZE {
|
||||||
|
let global_index = batch * BATCH_SIZE + i;
|
||||||
|
let key = format!("key:{:08}", global_index);
|
||||||
|
let value = format!("val{}", global_index).into_bytes();
|
||||||
|
|
||||||
|
tree.set(&key, value)?;
|
||||||
|
|
||||||
|
// Show progress at intervals
|
||||||
|
if (i + 1) % PROGRESS_INTERVAL == 0 || i == BATCH_SIZE - 1 {
|
||||||
|
let records_since_last = i + 1 - last_progress_count;
|
||||||
|
let time_since_last = last_progress_time.elapsed();
|
||||||
|
let records_per_second = records_since_last as f64 / time_since_last.as_secs_f64();
|
||||||
|
|
||||||
|
print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
|
||||||
|
i + 1, BATCH_SIZE,
|
||||||
|
(i + 1) as f64 / BATCH_SIZE as f64 * 100.0,
|
||||||
|
records_per_second);
|
||||||
|
io::stdout().flush().unwrap();
|
||||||
|
|
||||||
|
last_progress_time = Instant::now();
|
||||||
|
last_progress_count = i + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let batch_duration = batch_start_time.elapsed();
|
||||||
|
batch_times.push(batch_duration);
|
||||||
|
total_records_inserted += BATCH_SIZE;
|
||||||
|
|
||||||
|
println!("\nBatch {}/{} completed in {:?} ({:.2} records/sec)",
|
||||||
|
batch + 1, NUM_BATCHES,
|
||||||
|
batch_duration,
|
||||||
|
BATCH_SIZE as f64 / batch_duration.as_secs_f64());
|
||||||
|
|
||||||
|
// Test random access performance for this batch
|
||||||
|
println!("Testing access performance for batch {}...", batch + 1);
|
||||||
|
let mut total_get_time = Duration::new(0, 0);
|
||||||
|
let num_samples = 100;
|
||||||
|
|
||||||
|
// Use a simple distribution pattern
|
||||||
|
for i in 0..num_samples {
|
||||||
|
// Distribute samples across the batch
|
||||||
|
let sample_id = batch * BATCH_SIZE + (i * (BATCH_SIZE / num_samples));
|
||||||
|
let key = format!("key:{:08}", sample_id);
|
||||||
|
|
||||||
|
let get_start = Instant::now();
|
||||||
|
let _ = tree.get(&key)?;
|
||||||
|
total_get_time += get_start.elapsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("Average time to retrieve a record: {:?}",
|
||||||
|
total_get_time / num_samples as u32);
|
||||||
|
|
||||||
|
// Test prefix search performance
|
||||||
|
println!("Testing prefix search performance...");
|
||||||
|
let prefix = format!("key:{:02}", batch % 100);
|
||||||
|
|
||||||
|
let list_start = Instant::now();
|
||||||
|
let keys = tree.list(&prefix)?;
|
||||||
|
let list_duration = list_start.elapsed();
|
||||||
|
|
||||||
|
println!("Found {} keys with prefix '{}' in {:?}",
|
||||||
|
keys.len(), prefix, list_duration);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Overall performance summary
|
||||||
|
let total_duration = total_start_time.elapsed();
|
||||||
|
println!("\n\nPerformance Summary:");
|
||||||
|
println!("Total time to insert {} records: {:?}", total_records_inserted, total_duration);
|
||||||
|
println!("Average insertion rate: {:.2} records/second",
|
||||||
|
total_records_inserted as f64 / total_duration.as_secs_f64());
|
||||||
|
|
||||||
|
// Show performance trend
|
||||||
|
println!("\nPerformance Trend (batch number vs. time):");
|
||||||
|
for (i, duration) in batch_times.iter().enumerate() {
|
||||||
|
if i % 10 == 0 || i == batch_times.len() - 1 { // Only show every 10th point
|
||||||
|
println!(" Batch {}: {:?} ({:.2} records/sec)",
|
||||||
|
i + 1,
|
||||||
|
duration,
|
||||||
|
BATCH_SIZE as f64 / duration.as_secs_f64());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
134
radixtree/examples/performance_test.rs
Normal file
134
radixtree/examples/performance_test.rs
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
use radixtree::RadixTree;
use std::time::{Duration, Instant};
use std::io::{self, Write};

// Number of records to insert
const TOTAL_RECORDS: usize = 1_000_000;
// How often to report progress (every X records)
const PROGRESS_INTERVAL: usize = 10_000;
// How many records to use for performance sampling
const PERFORMANCE_SAMPLE_SIZE: usize = 1000;

/// Single-database throughput test: inserts TOTAL_RECORDS keys into one tree,
/// reporting progress and per-sample insertion timings, then measures point
/// lookup latency and prefix-search latency over the populated tree.
fn main() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("radixtree_performance_test");

    // Completely remove and recreate the directory to ensure a clean start
    if db_path.exists() {
        std::fs::remove_dir_all(&db_path)?;
    }
    std::fs::create_dir_all(&db_path)?;

    println!("Creating radix tree at: {}", db_path.display());
    println!("Will insert {} records and show progress...", TOTAL_RECORDS);

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;

    // Track overall time
    let start_time = Instant::now();

    // Track performance metrics: one (records-so-far, batch-duration) entry
    // per progress interval.
    let mut insertion_times = Vec::with_capacity(TOTAL_RECORDS / PROGRESS_INTERVAL);
    let mut last_batch_time = Instant::now();
    let mut last_batch_records = 0;

    // Insert records and track progress
    for i in 0..TOTAL_RECORDS {
        let key = format!("key:{:08}", i);
        // Use smaller values to avoid exceeding OurDB's size limit
        let value = format!("val{}", i).into_bytes();

        // Time the insertion of every Nth record for performance sampling
        if i % PERFORMANCE_SAMPLE_SIZE == 0 {
            let insert_start = Instant::now();
            tree.set(&key, value)?;
            let insert_duration = insert_start.elapsed();

            // Only print detailed timing for specific samples to avoid flooding output
            if i % (PERFORMANCE_SAMPLE_SIZE * 10) == 0 {
                println!("Record {}: Insertion took {:?}", i, insert_duration);
            }
        } else {
            tree.set(&key, value)?;
        }

        // Show progress at intervals
        if (i + 1) % PROGRESS_INTERVAL == 0 || i == TOTAL_RECORDS - 1 {
            let records_in_batch = i + 1 - last_batch_records;
            let batch_duration = last_batch_time.elapsed();
            let records_per_second = records_in_batch as f64 / batch_duration.as_secs_f64();

            insertion_times.push((i + 1, batch_duration));

            // \r rewrites the same console line instead of scrolling.
            print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
                i + 1, TOTAL_RECORDS,
                (i + 1) as f64 / TOTAL_RECORDS as f64 * 100.0,
                records_per_second);
            io::stdout().flush().unwrap();

            last_batch_time = Instant::now();
            last_batch_records = i + 1;
        }
    }

    let total_duration = start_time.elapsed();
    println!("\n\nPerformance Summary:");
    println!("Total time to insert {} records: {:?}", TOTAL_RECORDS, total_duration);
    println!("Average insertion rate: {:.2} records/second",
        TOTAL_RECORDS as f64 / total_duration.as_secs_f64());

    // Show performance trend
    println!("\nPerformance Trend (records inserted vs. time per batch):");
    for (i, (record_count, duration)) in insertion_times.iter().enumerate() {
        if i % 10 == 0 || i == insertion_times.len() - 1 { // Only show every 10th point to avoid too much output
            println!("  After {} records: {:?} for {} records ({:.2} records/sec)",
                record_count,
                duration,
                PROGRESS_INTERVAL,
                PROGRESS_INTERVAL as f64 / duration.as_secs_f64());
        }
    }

    // Test access performance with distributed samples
    println!("\nTesting access performance with distributed samples...");
    let mut total_get_time = Duration::new(0, 0);
    let num_samples = 1000;

    // Use a simple distribution pattern instead of random
    for i in 0..num_samples {
        // Distribute samples across the entire range
        let sample_id = (i * (TOTAL_RECORDS / num_samples)) % TOTAL_RECORDS;
        let key = format!("key:{:08}", sample_id);

        let get_start = Instant::now();
        let _ = tree.get(&key)?;
        total_get_time += get_start.elapsed();
    }

    println!("Average time to retrieve a record: {:?}",
        total_get_time / num_samples as u32);

    // Test prefix search performance
    println!("\nTesting prefix search performance...");
    let prefixes = ["key:0", "key:1", "key:5", "key:9"];

    for prefix in &prefixes {
        let list_start = Instant::now();
        let keys = tree.list(prefix)?;
        let list_duration = list_start.elapsed();

        println!("Found {} keys with prefix '{}' in {:?}",
            keys.len(), prefix, list_duration);
    }

    // Clean up (optional)
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }

    Ok(())
}
|
||||||
97
radixtree/examples/prefix_operations.rs
Normal file
97
radixtree/examples/prefix_operations.rs
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
use radixtree::RadixTree;
use std::path::PathBuf; // NOTE(review): unused in this example — candidate for removal.

/// Demonstrates prefix-oriented operations: list(), getall(), and bulk
/// updates over keys selected by prefix, using a "table:id:field" key scheme.
fn main() -> Result<(), radixtree::Error> {
    // Create a temporary directory for the database
    let db_path = std::env::temp_dir().join("radixtree_prefix_example");
    std::fs::create_dir_all(&db_path)?;

    println!("Creating radix tree at: {}", db_path.display());

    // Create a new radix tree
    let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;

    // Store data with common prefixes
    println!("Storing data with common prefixes...");

    // User data
    tree.set("user:1:name", b"Alice".to_vec())?;
    tree.set("user:1:email", b"alice@example.com".to_vec())?;
    tree.set("user:2:name", b"Bob".to_vec())?;
    tree.set("user:2:email", b"bob@example.com".to_vec())?;

    // Post data
    tree.set("post:1:title", b"First Post".to_vec())?;
    tree.set("post:1:content", b"Hello World!".to_vec())?;
    tree.set("post:2:title", b"Second Post".to_vec())?;
    tree.set("post:2:content", b"Another post content".to_vec())?;

    // Demonstrate listing keys with a prefix
    println!("\nListing keys with prefix 'user:1:'");
    let user1_keys = tree.list("user:1:")?;
    for key in &user1_keys {
        println!("  Key: {}", key);
    }

    println!("\nListing keys with prefix 'post:'");
    let post_keys = tree.list("post:")?;
    for key in &post_keys {
        println!("  Key: {}", key);
    }

    // Demonstrate getting all values with a prefix
    println!("\nGetting all values with prefix 'user:1:'");
    let user1_values = tree.getall("user:1:")?;
    for (i, value) in user1_values.iter().enumerate() {
        println!("  Value {}: {}", i + 1, String::from_utf8_lossy(value));
    }

    // Demonstrate finding all user names: list() takes only a literal prefix,
    // so the ":name" part is filtered client-side with ends_with().
    println!("\nFinding all user names (prefix 'user:*:name')");
    let mut user_names = Vec::new();
    let all_keys = tree.list("user:")?;
    for key in all_keys {
        if key.ends_with(":name") {
            if let Ok(value) = tree.get(&key) {
                user_names.push((key, String::from_utf8_lossy(&value).to_string()));
            }
        }
    }

    for (key, name) in user_names {
        println!("  {}: {}", key, name);
    }

    // Demonstrate updating values with a specific prefix
    println!("\nUpdating all post titles...");
    // Collect the title keys first so the mutable borrow of `tree` is released
    // before the update loop below.
    let post_title_keys = tree.list("post:")?.into_iter().filter(|k| k.ends_with(":title")).collect::<Vec<_>>();

    for key in post_title_keys {
        let old_value = tree.get(&key)?;
        let old_title = String::from_utf8_lossy(&old_value);
        let new_title = format!("UPDATED: {}", old_title);

        println!("  Updating '{}' to '{}'", old_title, new_title);
        tree.update(&key, new_title.as_bytes().to_vec())?;
    }

    // Verify updates
    println!("\nVerifying updates:");
    let post_keys = tree.list("post:")?;
    for key in post_keys {
        if key.ends_with(":title") {
            let value = tree.get(&key)?;
            println!("  {}: {}", key, String::from_utf8_lossy(&value));
        }
    }

    // Clean up (optional): keep the database only when KEEP_DB is set.
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }

    Ok(())
}
|
||||||
35
radixtree/src/error.rs
Normal file
35
radixtree/src/error.rs
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
//! Error types for the RadixTree module.

use thiserror::Error;

/// Error type for RadixTree operations.
///
/// The `#[from]` variants allow `?` to convert backend and I/O errors
/// into this type automatically.
#[derive(Debug, Error)]
pub enum Error {
    /// Error from OurDB operations.
    #[error("OurDB error: {0}")]
    OurDB(#[from] ourdb::Error),

    /// Error when a key is not found (payload is the key that was looked up).
    #[error("Key not found: {0}")]
    KeyNotFound(String),

    /// Error when a prefix is not found (payload is the prefix searched for).
    #[error("Prefix not found: {0}")]
    PrefixNotFound(String),

    /// Error during serialization.
    #[error("Serialization error: {0}")]
    Serialization(String),

    /// Error during deserialization.
    #[error("Deserialization error: {0}")]
    Deserialization(String),

    /// Error for invalid operations (e.g. an empty prefix where one is required).
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),

    /// Error for I/O operations.
    #[error("I/O error: {0}")]
    IO(#[from] std::io::Error),
}
|
||||||
133
radixtree/src/lib.rs
Normal file
133
radixtree/src/lib.rs
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
//! RadixTree is a space-optimized tree data structure that enables efficient string key operations
//! with persistent storage using OurDB as a backend.
//!
//! This implementation provides a persistent radix tree that can be used for efficient
//! prefix-based key operations, such as auto-complete, routing tables, and more.

mod error;
mod node;
mod operations;
mod serialize;

pub use error::Error;
pub use node::{Node, NodeRef};

use ourdb::OurDB;

/// RadixTree represents a radix tree data structure with persistent storage.
///
/// All methods take `&mut self` because reads go through the OurDB handle,
/// which requires mutable access.
pub struct RadixTree {
    /// Handle to the backing OurDB instance that persists the nodes.
    db: OurDB,
    /// Database ID of the root node (allocated on first creation; see `operations`).
    root_id: u32,
}

impl RadixTree {
    /// Creates a new radix tree with the specified database path.
    ///
    /// # Arguments
    ///
    /// * `path` - The path to the database directory
    /// * `reset` - Whether to reset the database if it exists
    ///
    /// # Returns
    ///
    /// A new `RadixTree` instance
    ///
    /// # Errors
    ///
    /// Returns an error if the database cannot be created or opened
    pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
        operations::new_radix_tree(path, reset)
    }

    /// Sets a key-value pair in the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to set
    /// * `value` - The value to set
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
        operations::set(self, key, value)
    }

    /// Gets a value by key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to get
    ///
    /// # Returns
    ///
    /// The value associated with the key
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
        operations::get(self, key)
    }

    /// Updates the value at a given key prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The key prefix to update
    /// * `new_value` - The new value to set
    ///
    /// # Errors
    ///
    /// Returns an error if the prefix is not found or the operation fails
    pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
        operations::update(self, prefix, new_value)
    }

    /// Deletes a key from the tree.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to delete
    ///
    /// # Errors
    ///
    /// Returns an error if the key is not found or the operation fails
    pub fn delete(&mut self, key: &str) -> Result<(), Error> {
        operations::delete(self, key)
    }

    /// Lists all keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
        operations::list(self, prefix)
    }

    /// Gets all values for keys with a given prefix.
    ///
    /// # Arguments
    ///
    /// * `prefix` - The prefix to search for
    ///
    /// # Returns
    ///
    /// A list of values for keys that start with the given prefix
    ///
    /// # Errors
    ///
    /// Returns an error if the operation fails
    pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
        operations::getall(self, prefix)
    }
}
|
||||||
59
radixtree/src/node.rs
Normal file
59
radixtree/src/node.rs
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
//! Node types for the RadixTree module.

/// A single node of the radix tree.
///
/// Each node carries one key segment, the value stored here (meaningful only
/// when `is_leaf` is true), and references to its children.
#[derive(Debug, Clone, PartialEq)]
pub struct Node {
    /// The segment of the key stored at this node.
    pub key_segment: String,

    /// Value stored at this node (empty if not a leaf).
    pub value: Vec<u8>,

    /// References to child nodes.
    pub children: Vec<NodeRef>,

    /// Whether this node is a leaf node.
    pub is_leaf: bool,
}

/// Lightweight pointer to a child node stored in the database.
#[derive(Debug, Clone, PartialEq)]
pub struct NodeRef {
    /// The key segment for this child.
    pub key_part: String,

    /// Database ID of the node.
    pub node_id: u32,
}

impl Node {
    /// Builds a childless node from its parts.
    pub fn new(key_segment: String, value: Vec<u8>, is_leaf: bool) -> Self {
        Node {
            key_segment,
            value,
            children: Vec::new(),
            is_leaf,
        }
    }

    /// Builds the tree root: an empty-segment, empty-value, non-leaf node.
    pub fn new_root() -> Self {
        Self::new(String::new(), Vec::new(), false)
    }
}

impl NodeRef {
    /// Pairs a key segment with the database ID it resolves to.
    pub fn new(key_part: String, node_id: u32) -> Self {
        NodeRef { key_part, node_id }
    }
}
|
||||||
508
radixtree/src/operations.rs
Normal file
508
radixtree/src/operations.rs
Normal file
@@ -0,0 +1,508 @@
|
|||||||
|
//! Implementation of RadixTree operations.
|
||||||
|
|
||||||
|
use crate::error::Error;
|
||||||
|
use crate::node::{Node, NodeRef};
|
||||||
|
use crate::RadixTree;
|
||||||
|
use crate::serialize::get_common_prefix;
|
||||||
|
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
|
||||||
|
/// Creates a new radix tree with the specified database path.
|
||||||
|
pub fn new_radix_tree(path: &str, reset: bool) -> Result<RadixTree, Error> {
|
||||||
|
let config = OurDBConfig {
|
||||||
|
path: PathBuf::from(path),
|
||||||
|
incremental_mode: true,
|
||||||
|
file_size: Some(1024 * 1024 * 10), // 10MB file size for better performance with large datasets
|
||||||
|
keysize: Some(6), // Use keysize=6 to support multiple files (file_nr + position)
|
||||||
|
reset: None, // Don't reset existing database
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut db = OurDB::new(config)?;
|
||||||
|
|
||||||
|
// If reset is true, we would clear the database
|
||||||
|
// Since OurDB doesn't have a reset method, we'll handle it by
|
||||||
|
// creating a fresh database when reset is true
|
||||||
|
// We'll implement this by checking if it's a new database (next_id == 1)
|
||||||
|
|
||||||
|
let root_id = if db.get_next_id()? == 1 {
|
||||||
|
// Create a new root node
|
||||||
|
let root = Node::new_root();
|
||||||
|
let root_id = db.set(OurDBSetArgs {
|
||||||
|
id: None,
|
||||||
|
data: &root.serialize(),
|
||||||
|
})?;
|
||||||
|
|
||||||
|
// First ID should be 1
|
||||||
|
assert_eq!(root_id, 1);
|
||||||
|
root_id
|
||||||
|
} else {
|
||||||
|
// Use existing root node
|
||||||
|
1 // Root node always has ID 1
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(RadixTree {
|
||||||
|
db,
|
||||||
|
root_id,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets a key-value pair in the tree.
///
/// Walks from the root, following the child whose `key_part` prefixes the
/// remaining key; creates a new leaf when no child matches, and splits a
/// child when only part of its segment matches.
///
/// NOTE(review): `key[offset..]` slices by byte offset; a key split inside a
/// multi-byte UTF-8 character would panic — confirm keys are expected ASCII.
pub fn set(tree: &mut RadixTree, key: &str, value: Vec<u8>) -> Result<(), Error> {
    let mut current_id = tree.root_id;
    let mut offset = 0;

    // Handle empty key case: the value lives directly on the root node.
    if key.is_empty() {
        let mut root_node = tree.get_node(current_id)?;
        root_node.is_leaf = true;
        root_node.value = value;
        tree.save_node(Some(current_id), &root_node)?;
        return Ok(());
    }

    while offset < key.len() {
        let mut node = tree.get_node(current_id)?;

        // Find matching child: first child whose segment prefixes the rest of the key.
        let mut matched_child = None;
        for (i, child) in node.children.iter().enumerate() {
            if key[offset..].starts_with(&child.key_part) {
                matched_child = Some((i, child.clone()));
                break;
            }
        }

        if matched_child.is_none() {
            // No matching child found, create new leaf node holding the
            // entire remaining key as one segment.
            let key_part = key[offset..].to_string();
            let new_node = Node {
                key_segment: key_part.clone(),
                value: value.clone(),
                children: Vec::new(),
                is_leaf: true,
            };

            let new_id = tree.save_node(None, &new_node)?;

            // Create new child reference and update parent node
            node.children.push(NodeRef {
                key_part,
                node_id: new_id,
            });

            tree.save_node(Some(current_id), &node)?;
            return Ok(());
        }

        let (child_index, mut child) = matched_child.unwrap();
        let common_prefix = get_common_prefix(&key[offset..], &child.key_part);

        if common_prefix.len() < child.key_part.len() {
            // Split existing node.
            // NOTE(review): the split copies the old child's value/children
            // into a node whose segment is only the *remainder* of the old
            // key_part, re-points the parent edge at it under `common_prefix`,
            // and leaves the old node at `child.node_id` orphaned — verify
            // this matches the intended radix-split layout.
            let child_node = tree.get_node(child.node_id)?;

            // Create new intermediate node
            let new_node = Node {
                key_segment: child.key_part[common_prefix.len()..].to_string(),
                value: child_node.value.clone(),
                children: child_node.children.clone(),
                is_leaf: child_node.is_leaf,
            };
            let new_id = tree.save_node(None, &new_node)?;

            // Update current node
            node.children[child_index] = NodeRef {
                key_part: common_prefix.to_string(),
                node_id: new_id,
            };
            tree.save_node(Some(current_id), &node)?;

            // Update child node reference so the descent below continues into
            // the newly written node.
            child.node_id = new_id;
        }

        if offset + common_prefix.len() == key.len() {
            // Update value at existing node: the key ends exactly here.
            let mut child_node = tree.get_node(child.node_id)?;
            child_node.value = value;
            child_node.is_leaf = true;
            tree.save_node(Some(child.node_id), &child_node)?;
            return Ok(());
        }

        // Consume the matched prefix and descend.
        offset += common_prefix.len();
        current_id = child.node_id;
    }

    Ok(())
}
|
||||||
|
|
||||||
|
/// Gets a value by key from the tree.
///
/// Descends from the root, consuming one matching child segment per step.
/// Returns `Error::KeyNotFound` when no child matches or the key ends on a
/// non-leaf node.
pub fn get(tree: &mut RadixTree, key: &str) -> Result<Vec<u8>, Error> {
    let mut current_id = tree.root_id;
    let mut offset = 0;

    // Handle empty key case: an empty key maps to the root's own value.
    if key.is_empty() {
        let root_node = tree.get_node(current_id)?;
        if root_node.is_leaf {
            return Ok(root_node.value.clone());
        }
        return Err(Error::KeyNotFound(key.to_string()));
    }

    while offset < key.len() {
        let node = tree.get_node(current_id)?;

        let mut found = false;
        for child in &node.children {
            if key[offset..].starts_with(&child.key_part) {
                // Exact end of key at this child: only a leaf counts as a hit.
                if offset + child.key_part.len() == key.len() {
                    let child_node = tree.get_node(child.node_id)?;
                    if child_node.is_leaf {
                        return Ok(child_node.value);
                    }
                }
                // Otherwise keep descending past this segment.
                current_id = child.node_id;
                offset += child.key_part.len();
                found = true;
                break;
            }
        }

        if !found {
            return Err(Error::KeyNotFound(key.to_string()));
        }
    }

    // Walked off the end of the key without landing on a leaf.
    Err(Error::KeyNotFound(key.to_string()))
}
|
||||||
|
|
||||||
|
/// Updates the value at a given key prefix.
///
/// Descends like `get`, but only succeeds when the prefix lands exactly on an
/// existing leaf node; empty prefixes are rejected outright.
pub fn update(tree: &mut RadixTree, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
    let mut current_id = tree.root_id;
    let mut offset = 0;

    // Handle empty prefix case: disallowed (unlike `set`, which writes to the root).
    if prefix.is_empty() {
        return Err(Error::InvalidOperation("Empty prefix not allowed".to_string()));
    }

    while offset < prefix.len() {
        let node = tree.get_node(current_id)?;

        let mut found = false;
        for child in &node.children {
            if prefix[offset..].starts_with(&child.key_part) {
                if offset + child.key_part.len() == prefix.len() {
                    // Found exact prefix match
                    let mut child_node = tree.get_node(child.node_id)?;
                    if child_node.is_leaf {
                        // Update the value in place and persist the node.
                        child_node.value = new_value;
                        tree.save_node(Some(child.node_id), &child_node)?;
                        return Ok(());
                    }
                }
                // Keep descending past this segment.
                current_id = child.node_id;
                offset += child.key_part.len();
                found = true;
                break;
            }
        }

        if !found {
            return Err(Error::PrefixNotFound(prefix.to_string()));
        }
    }

    // Exhausted the prefix without finding a matching leaf.
    Err(Error::PrefixNotFound(prefix.to_string()))
}
|
||||||
|
|
||||||
|
/// Deletes a key from the tree.
|
||||||
|
pub fn delete(tree: &mut RadixTree, key: &str) -> Result<(), Error> {
|
||||||
|
let mut current_id = tree.root_id;
|
||||||
|
let mut offset = 0;
|
||||||
|
let mut path = Vec::new();
|
||||||
|
|
||||||
|
// Handle empty key case
|
||||||
|
if key.is_empty() {
|
||||||
|
let mut root_node = tree.get_node(current_id)?;
|
||||||
|
if !root_node.is_leaf {
|
||||||
|
return Err(Error::KeyNotFound(key.to_string()));
|
||||||
|
}
|
||||||
|
// For the root node, we just mark it as non-leaf
|
||||||
|
root_node.is_leaf = false;
|
||||||
|
root_node.value = Vec::new();
|
||||||
|
tree.save_node(Some(current_id), &root_node)?;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the node to delete
|
||||||
|
while offset < key.len() {
|
||||||
|
let node = tree.get_node(current_id)?;
|
||||||
|
|
||||||
|
let mut found = false;
|
||||||
|
for child in &node.children {
|
||||||
|
if key[offset..].starts_with(&child.key_part) {
|
||||||
|
path.push(child.clone());
|
||||||
|
current_id = child.node_id;
|
||||||
|
offset += child.key_part.len();
|
||||||
|
found = true;
|
||||||
|
|
||||||
|
// Check if we've matched the full key
|
||||||
|
if offset == key.len() {
|
||||||
|
let child_node = tree.get_node(child.node_id)?;
|
||||||
|
if child_node.is_leaf {
|
||||||
|
found = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return Err(Error::KeyNotFound(key.to_string()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if path.is_empty() {
|
||||||
|
return Err(Error::KeyNotFound(key.to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the node to delete
|
||||||
|
let mut last_node = tree.get_node(path.last().unwrap().node_id)?;
|
||||||
|
|
||||||
|
// If the node has children, just mark it as non-leaf
|
||||||
|
if !last_node.children.is_empty() {
|
||||||
|
last_node.is_leaf = false;
|
||||||
|
last_node.value = Vec::new();
|
||||||
|
tree.save_node(Some(path.last().unwrap().node_id), &last_node)?;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// If node has no children, remove it from parent
|
||||||
|
if path.len() > 1 {
|
||||||
|
let parent_id = path[path.len() - 2].node_id;
|
||||||
|
let mut parent_node = tree.get_node(parent_id)?;
|
||||||
|
|
||||||
|
// Find and remove the child from parent
|
||||||
|
for i in 0..parent_node.children.len() {
|
||||||
|
if parent_node.children[i].node_id == path.last().unwrap().node_id {
|
||||||
|
parent_node.children.remove(i);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tree.save_node(Some(parent_id), &parent_node)?;
|
||||||
|
|
||||||
|
// Delete the node from the database
|
||||||
|
tree.db.delete(path.last().unwrap().node_id)?;
|
||||||
|
} else {
|
||||||
|
// If this is a direct child of the root, just mark it as non-leaf
|
||||||
|
last_node.is_leaf = false;
|
||||||
|
last_node.value = Vec::new();
|
||||||
|
tree.save_node(Some(path.last().unwrap().node_id), &last_node)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Lists all keys with a given prefix.
|
||||||
|
pub fn list(tree: &mut RadixTree, prefix: &str) -> Result<Vec<String>, Error> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
|
||||||
|
// Handle empty prefix case - will return all keys
|
||||||
|
if prefix.is_empty() {
|
||||||
|
collect_all_keys(tree, tree.root_id, "", &mut result)?;
|
||||||
|
return Ok(result);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start from the root and find all matching keys
|
||||||
|
find_keys_with_prefix(tree, tree.root_id, "", prefix, &mut result)?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to find all keys with a given prefix.
|
||||||
|
fn find_keys_with_prefix(
|
||||||
|
tree: &mut RadixTree,
|
||||||
|
node_id: u32,
|
||||||
|
current_path: &str,
|
||||||
|
prefix: &str,
|
||||||
|
result: &mut Vec<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let node = tree.get_node(node_id)?;
|
||||||
|
|
||||||
|
// If the current path already matches or exceeds the prefix length
|
||||||
|
if current_path.len() >= prefix.len() {
|
||||||
|
// Check if the current path starts with the prefix
|
||||||
|
if current_path.starts_with(prefix) {
|
||||||
|
// If this is a leaf node, add it to the results
|
||||||
|
if node.is_leaf {
|
||||||
|
result.push(current_path.to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect all keys from this subtree
|
||||||
|
for child in &node.children {
|
||||||
|
let child_path = format!("{}{}", current_path, child.key_part);
|
||||||
|
find_keys_with_prefix(tree, child.node_id, &child_path, prefix, result)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current path is shorter than the prefix, continue searching
|
||||||
|
for child in &node.children {
|
||||||
|
let child_path = format!("{}{}", current_path, child.key_part);
|
||||||
|
|
||||||
|
// Check if this child's path could potentially match the prefix
|
||||||
|
if prefix.starts_with(current_path) {
|
||||||
|
// The prefix starts with the current path, so we need to check if
|
||||||
|
// the child's key_part matches the next part of the prefix
|
||||||
|
let prefix_remainder = &prefix[current_path.len()..];
|
||||||
|
|
||||||
|
// If the prefix remainder starts with the child's key_part or vice versa
|
||||||
|
if prefix_remainder.starts_with(&child.key_part)
|
||||||
|
|| (child.key_part.starts_with(prefix_remainder)
|
||||||
|
&& child.key_part.len() >= prefix_remainder.len()) {
|
||||||
|
find_keys_with_prefix(tree, child.node_id, &child_path, prefix, result)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to recursively collect all keys under a node.
|
||||||
|
fn collect_all_keys(
|
||||||
|
tree: &mut RadixTree,
|
||||||
|
node_id: u32,
|
||||||
|
current_path: &str,
|
||||||
|
result: &mut Vec<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let node = tree.get_node(node_id)?;
|
||||||
|
|
||||||
|
// If this node is a leaf, add its path to the result
|
||||||
|
if node.is_leaf {
|
||||||
|
result.push(current_path.to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively collect keys from all children
|
||||||
|
for child in &node.children {
|
||||||
|
let child_path = format!("{}{}", current_path, child.key_part);
|
||||||
|
collect_all_keys(tree, child.node_id, &child_path, result)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets all values for keys with a given prefix.
|
||||||
|
pub fn getall(tree: &mut RadixTree, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
|
||||||
|
// Get all matching keys
|
||||||
|
let keys = list(tree, prefix)?;
|
||||||
|
|
||||||
|
// Get values for each key
|
||||||
|
let mut values = Vec::new();
|
||||||
|
for key in keys {
|
||||||
|
if let Ok(value) = get(tree, &key) {
|
||||||
|
values.push(value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(values)
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RadixTree {
|
||||||
|
/// Helper function to get a node from the database.
|
||||||
|
pub(crate) fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
|
||||||
|
let data = self.db.get(node_id)?;
|
||||||
|
Node::deserialize(&data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to save a node to the database.
|
||||||
|
pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
|
||||||
|
let data = node.serialize();
|
||||||
|
let args = OurDBSetArgs {
|
||||||
|
id: node_id,
|
||||||
|
data: &data,
|
||||||
|
};
|
||||||
|
Ok(self.db.set(args)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to find all keys with a given prefix.
|
||||||
|
fn find_keys_with_prefix(
|
||||||
|
&mut self,
|
||||||
|
node_id: u32,
|
||||||
|
current_path: &str,
|
||||||
|
prefix: &str,
|
||||||
|
result: &mut Vec<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let node = self.get_node(node_id)?;
|
||||||
|
|
||||||
|
// If the current path already matches or exceeds the prefix length
|
||||||
|
if current_path.len() >= prefix.len() {
|
||||||
|
// Check if the current path starts with the prefix
|
||||||
|
if current_path.starts_with(prefix) {
|
||||||
|
// If this is a leaf node, add it to the results
|
||||||
|
if node.is_leaf {
|
||||||
|
result.push(current_path.to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect all keys from this subtree
|
||||||
|
for child in &node.children {
|
||||||
|
let child_path = format!("{}{}", current_path, child.key_part);
|
||||||
|
self.find_keys_with_prefix(child.node_id, &child_path, prefix, result)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current path is shorter than the prefix, continue searching
|
||||||
|
for child in &node.children {
|
||||||
|
let child_path = format!("{}{}", current_path, child.key_part);
|
||||||
|
|
||||||
|
// Check if this child's path could potentially match the prefix
|
||||||
|
if prefix.starts_with(current_path) {
|
||||||
|
// The prefix starts with the current path, so we need to check if
|
||||||
|
// the child's key_part matches the next part of the prefix
|
||||||
|
let prefix_remainder = &prefix[current_path.len()..];
|
||||||
|
|
||||||
|
// If the prefix remainder starts with the child's key_part or vice versa
|
||||||
|
if prefix_remainder.starts_with(&child.key_part)
|
||||||
|
|| (child.key_part.starts_with(prefix_remainder)
|
||||||
|
&& child.key_part.len() >= prefix_remainder.len()) {
|
||||||
|
self.find_keys_with_prefix(child.node_id, &child_path, prefix, result)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper function to recursively collect all keys under a node.
|
||||||
|
fn collect_all_keys(
|
||||||
|
&mut self,
|
||||||
|
node_id: u32,
|
||||||
|
current_path: &str,
|
||||||
|
result: &mut Vec<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let node = self.get_node(node_id)?;
|
||||||
|
|
||||||
|
// If this node is a leaf, add its path to the result
|
||||||
|
if node.is_leaf {
|
||||||
|
result.push(current_path.to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively collect keys from all children
|
||||||
|
for child in &node.children {
|
||||||
|
let child_path = format!("{}{}", current_path, child.key_part);
|
||||||
|
self.collect_all_keys(child.node_id, &child_path, result)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
156
radixtree/src/serialize.rs
Normal file
156
radixtree/src/serialize.rs
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
//! Serialization and deserialization for RadixTree nodes.
|
||||||
|
|
||||||
|
use crate::error::Error;
|
||||||
|
use crate::node::{Node, NodeRef};
|
||||||
|
use std::io::{Cursor, Read};
|
||||||
|
use std::mem::size_of;
|
||||||
|
|
||||||
|
/// Current binary format version.
|
||||||
|
const VERSION: u8 = 1;
|
||||||
|
|
||||||
|
impl Node {
|
||||||
|
/// Serializes a node to bytes for storage.
|
||||||
|
pub fn serialize(&self) -> Vec<u8> {
|
||||||
|
let mut buffer = Vec::new();
|
||||||
|
|
||||||
|
// Add version byte
|
||||||
|
buffer.push(VERSION);
|
||||||
|
|
||||||
|
// Add key segment
|
||||||
|
write_string(&mut buffer, &self.key_segment);
|
||||||
|
|
||||||
|
// Add value as []u8
|
||||||
|
write_u16(&mut buffer, self.value.len() as u16);
|
||||||
|
buffer.extend_from_slice(&self.value);
|
||||||
|
|
||||||
|
// Add children
|
||||||
|
write_u16(&mut buffer, self.children.len() as u16);
|
||||||
|
for child in &self.children {
|
||||||
|
write_string(&mut buffer, &child.key_part);
|
||||||
|
write_u32(&mut buffer, child.node_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add leaf flag
|
||||||
|
buffer.push(if self.is_leaf { 1 } else { 0 });
|
||||||
|
|
||||||
|
buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Deserializes bytes to a node.
|
||||||
|
pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
|
||||||
|
if data.is_empty() {
|
||||||
|
return Err(Error::Deserialization("Empty data".to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut cursor = Cursor::new(data);
|
||||||
|
|
||||||
|
// Read and verify version
|
||||||
|
let mut version_byte = [0u8; 1];
|
||||||
|
cursor.read_exact(&mut version_byte)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read version byte: {}", e)))?;
|
||||||
|
|
||||||
|
if version_byte[0] != VERSION {
|
||||||
|
return Err(Error::Deserialization(
|
||||||
|
format!("Invalid version byte: expected {}, got {}", VERSION, version_byte[0])
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read key segment
|
||||||
|
let key_segment = read_string(&mut cursor)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read key segment: {}", e)))?;
|
||||||
|
|
||||||
|
// Read value as []u8
|
||||||
|
let value_len = read_u16(&mut cursor)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read value length: {}", e)))?;
|
||||||
|
|
||||||
|
let mut value = vec![0u8; value_len as usize];
|
||||||
|
cursor.read_exact(&mut value)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read value: {}", e)))?;
|
||||||
|
|
||||||
|
// Read children
|
||||||
|
let children_len = read_u16(&mut cursor)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read children length: {}", e)))?;
|
||||||
|
|
||||||
|
let mut children = Vec::with_capacity(children_len as usize);
|
||||||
|
for _ in 0..children_len {
|
||||||
|
let key_part = read_string(&mut cursor)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read child key part: {}", e)))?;
|
||||||
|
|
||||||
|
let node_id = read_u32(&mut cursor)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read child node ID: {}", e)))?;
|
||||||
|
|
||||||
|
children.push(NodeRef {
|
||||||
|
key_part,
|
||||||
|
node_id,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read leaf flag
|
||||||
|
let mut is_leaf_byte = [0u8; 1];
|
||||||
|
cursor.read_exact(&mut is_leaf_byte)
|
||||||
|
.map_err(|e| Error::Deserialization(format!("Failed to read leaf flag: {}", e)))?;
|
||||||
|
|
||||||
|
let is_leaf = is_leaf_byte[0] == 1;
|
||||||
|
|
||||||
|
Ok(Node {
|
||||||
|
key_segment,
|
||||||
|
value,
|
||||||
|
children,
|
||||||
|
is_leaf,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- encoding helpers ---------------------------------------------------

/// Appends a length-prefixed (u16 little-endian) UTF-8 string.
fn write_string(buffer: &mut Vec<u8>, s: &str) {
    let raw = s.as_bytes();
    write_u16(buffer, raw.len() as u16);
    buffer.extend_from_slice(raw);
}

/// Appends a `u16` in little-endian byte order.
fn write_u16(buffer: &mut Vec<u8>, value: u16) {
    buffer.extend_from_slice(&value.to_le_bytes());
}

/// Appends a `u32` in little-endian byte order.
fn write_u32(buffer: &mut Vec<u8>, value: u32) {
    buffer.extend_from_slice(&value.to_le_bytes());
}

// --- decoding helpers ---------------------------------------------------

/// Reads a length-prefixed UTF-8 string written by [`write_string`].
/// Non-UTF-8 payloads map to `ErrorKind::InvalidData`.
fn read_string(cursor: &mut Cursor<&[u8]>) -> std::io::Result<String> {
    let len = read_u16(cursor)? as usize;
    let mut raw = vec![0u8; len];
    cursor.read_exact(&mut raw)?;
    String::from_utf8(raw).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}

/// Reads a little-endian `u16` written by [`write_u16`].
fn read_u16(cursor: &mut Cursor<&[u8]>) -> std::io::Result<u16> {
    let mut raw = [0u8; size_of::<u16>()];
    cursor.read_exact(&mut raw)?;
    Ok(u16::from_le_bytes(raw))
}

/// Reads a little-endian `u32` written by [`write_u32`].
fn read_u32(cursor: &mut Cursor<&[u8]>) -> std::io::Result<u32> {
    let mut raw = [0u8; size_of::<u32>()];
    cursor.read_exact(&mut raw)?;
    Ok(u32::from_le_bytes(raw))
}
|
||||||
|
|
||||||
|
/// Returns the longest common prefix of `a` and `b`.
///
/// Comparison is performed character by character so the returned slice
/// always ends on a UTF-8 character boundary. The previous byte-wise scan
/// could stop in the middle of a multi-byte character — e.g. "é" (0xC3 0xA9)
/// and "è" (0xC3 0xA8) share their first byte — and then panic when slicing
/// `a[..i]` at a non-boundary index.
pub fn get_common_prefix(a: &str, b: &str) -> String {
    let mut end = 0;
    for ((idx, ca), cb) in a.char_indices().zip(b.chars()) {
        if ca != cb {
            break;
        }
        // Advance past the entire matched character, never into its middle.
        end = idx + ca.len_utf8();
    }
    a[..end].to_string()
}
|
||||||
144
radixtree/tests/basic_test.rs
Normal file
144
radixtree/tests/basic_test.rs
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
use radixtree::RadixTree;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use tempfile::tempdir;
|
||||||
|
|
||||||
|
#[test]
fn test_basic_operations() -> Result<(), radixtree::Error> {
    // Fresh on-disk tree in a throwaway directory.
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    // Round-trip a single key.
    let payload = b"test_value".to_vec();
    tree.set("test_key", payload.clone())?;
    assert_eq!(tree.get("test_key")?, payload);

    // Overwrite via update and read the new value back.
    let replacement = b"updated_value".to_vec();
    tree.update("test_key", replacement.clone())?;
    assert_eq!(tree.get("test_key")?, replacement);

    // After deletion the key must be unreadable.
    tree.delete("test_key")?;
    assert!(tree.get("test_key").is_err());

    Ok(())
}

#[test]
fn test_empty_key() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    // The empty key is a valid key and must round-trip as well.
    let payload = b"value_for_empty_key".to_vec();
    tree.set("", payload.clone())?;
    assert_eq!(tree.get("")?, payload);

    // Deleting the empty key makes it unreadable again.
    tree.delete("")?;
    assert!(tree.get("").is_err());

    Ok(())
}

#[test]
fn test_multiple_keys() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    let fixtures = [
        ("key1", b"value1".to_vec()),
        ("key2", b"value2".to_vec()),
        ("key3", b"value3".to_vec()),
    ];

    for (key, value) in &fixtures {
        tree.set(key, value.clone())?;
    }

    // Every inserted key must come back with its own value.
    for (key, expected) in &fixtures {
        assert_eq!(&tree.get(key)?, expected);
    }

    Ok(())
}

#[test]
fn test_shared_prefixes() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    // Keys that all extend the same "test" stem.
    let fixtures = [
        ("test", b"value_test".to_vec()),
        ("testing", b"value_testing".to_vec()),
        ("tested", b"value_tested".to_vec()),
    ];

    for (key, value) in &fixtures {
        tree.set(key, value.clone())?;
    }

    // Shared prefixes must not make keys shadow each other.
    for (key, expected) in &fixtures {
        assert_eq!(&tree.get(key)?, expected);
    }

    Ok(())
}

#[test]
fn test_persistence() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let db_path = dir.path().to_str().unwrap();

    // Write with one instance, then drop it so everything is flushed.
    {
        let mut writer = RadixTree::new(db_path, true)?;
        writer.set("persistent_key", b"persistent_value".to_vec())?;
    }

    // A second instance opened without reset must see the stored data.
    {
        let mut reader = RadixTree::new(db_path, false)?;
        assert_eq!(reader.get("persistent_key")?, b"persistent_value".to_vec());
    }

    Ok(())
}
|
||||||
153
radixtree/tests/getall_test.rs
Normal file
153
radixtree/tests/getall_test.rs
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
use radixtree::RadixTree;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use tempfile::tempdir;
|
||||||
|
|
||||||
|
#[test]
fn test_getall() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    // Keys grouped under three distinct prefixes.
    let fixtures: HashMap<&str, &str> = [
        ("user_1", "data1"),
        ("user_2", "data2"),
        ("user_3", "data3"),
        ("admin_1", "admin_data1"),
        ("admin_2", "admin_data2"),
        ("guest", "guest_data"),
    ]
    .iter()
    .cloned()
    .collect();

    for (key, value) in &fixtures {
        tree.set(key, value.as_bytes().to_vec())?;
    }

    // Decode byte values to strings so membership is easy to assert.
    fn as_strings(values: &[Vec<u8>]) -> Vec<String> {
        values
            .iter()
            .map(|v| String::from_utf8_lossy(v).to_string())
            .collect()
    }

    // 'user_' selects exactly the three user entries.
    let user_values = tree.getall("user_")?;
    assert_eq!(user_values.len(), 3);
    let user_strings = as_strings(&user_values);
    for expected in &["data1", "data2", "data3"] {
        assert!(user_strings.contains(&expected.to_string()));
    }

    // 'admin_' selects the two admin entries.
    let admin_values = tree.getall("admin_")?;
    assert_eq!(admin_values.len(), 2);
    let admin_strings = as_strings(&admin_values);
    for expected in &["admin_data1", "admin_data2"] {
        assert!(admin_strings.contains(&expected.to_string()));
    }

    // Empty prefix returns everything; an unknown prefix returns nothing.
    assert_eq!(tree.getall("")?.len(), fixtures.len());
    assert_eq!(tree.getall("xyz")?.len(), 0);

    Ok(())
}

#[test]
fn test_getall_with_updates() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    tree.set("key1", b"value1".to_vec())?;
    tree.set("key2", b"value2".to_vec())?;
    tree.set("key3", b"value3".to_vec())?;
    assert_eq!(tree.getall("key")?.len(), 3);

    // Updating one key must be reflected in the bulk fetch.
    tree.update("key2", b"updated_value2".to_vec())?;
    let values = tree.getall("key")?;
    assert_eq!(values.len(), 3);

    let strings: Vec<String> = values
        .iter()
        .map(|v| String::from_utf8_lossy(v).to_string())
        .collect();
    assert!(strings.contains(&"value1".to_string()));
    assert!(strings.contains(&"updated_value2".to_string()));
    assert!(strings.contains(&"value3".to_string()));

    Ok(())
}

#[test]
fn test_getall_with_deletions() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    tree.set("prefix_1", b"value1".to_vec())?;
    tree.set("prefix_2", b"value2".to_vec())?;
    tree.set("prefix_3", b"value3".to_vec())?;
    tree.set("other", b"other_value".to_vec())?;

    assert_eq!(tree.getall("prefix_")?.len(), 3);

    // Removing one key shrinks the bulk fetch by exactly one entry.
    tree.delete("prefix_2")?;
    let remaining = tree.getall("prefix_")?;
    assert_eq!(remaining.len(), 2);

    let strings: Vec<String> = remaining
        .iter()
        .map(|v| String::from_utf8_lossy(v).to_string())
        .collect();
    assert!(strings.contains(&"value1".to_string()));
    assert!(strings.contains(&"value3".to_string()));

    Ok(())
}
|
||||||
185
radixtree/tests/prefix_test.rs
Normal file
185
radixtree/tests/prefix_test.rs
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
use radixtree::RadixTree;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use tempfile::tempdir;
|
||||||
|
|
||||||
|
#[test]
fn test_list() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    // Keys covering several overlapping prefixes.
    let fixtures: HashMap<&str, &str> = [
        ("apple", "fruit1"),
        ("application", "software1"),
        ("apply", "verb1"),
        ("banana", "fruit2"),
        ("ball", "toy1"),
        ("cat", "animal1"),
        ("car", "vehicle1"),
        ("cargo", "shipping1"),
    ]
    .iter()
    .cloned()
    .collect();

    for (key, value) in &fixtures {
        tree.set(key, value.as_bytes().to_vec())?;
    }

    // 'app' matches apple, application, and apply.
    let app_keys = tree.list("app")?;
    assert_eq!(app_keys.len(), 3);
    for expected in &["apple", "application", "apply"] {
        assert!(app_keys.contains(&expected.to_string()));
    }

    // 'ba' matches banana and ball.
    let ba_keys = tree.list("ba")?;
    assert_eq!(ba_keys.len(), 2);
    assert!(ba_keys.contains(&"banana".to_string()));
    assert!(ba_keys.contains(&"ball".to_string()));

    // 'car' matches both the exact key and its extension.
    let car_keys = tree.list("car")?;
    assert_eq!(car_keys.len(), 2);
    assert!(car_keys.contains(&"car".to_string()));
    assert!(car_keys.contains(&"cargo".to_string()));

    // No key starts with 'z'.
    assert_eq!(tree.list("z")?.len(), 0);

    // Empty prefix yields the complete key set.
    let all_keys = tree.list("")?;
    assert_eq!(all_keys.len(), fixtures.len());
    for key in fixtures.keys() {
        assert!(all_keys.contains(&key.to_string()));
    }

    // A full key used as a prefix returns only itself.
    let exact = tree.list("apple")?;
    assert_eq!(exact.len(), 1);
    assert_eq!(exact[0], "apple");

    Ok(())
}

#[test]
fn test_list_with_deletion() -> Result<(), radixtree::Error> {
    let dir = tempdir().expect("Failed to create temp directory");
    let mut tree = RadixTree::new(dir.path().to_str().unwrap(), true)?;

    tree.set("test1", b"value1".to_vec())?;
    tree.set("test2", b"value2".to_vec())?;
    tree.set("test3", b"value3".to_vec())?;
    tree.set("other", b"value4".to_vec())?;

    // All three 'test*' keys are visible up front.
    let before = tree.list("test")?;
    assert_eq!(before.len(), 3);
    for expected in &["test1", "test2", "test3"] {
        assert!(before.contains(&expected.to_string()));
    }

    // After deleting test2, only its siblings remain under the prefix.
    tree.delete("test2")?;
    let after = tree.list("test")?;
    assert_eq!(after.len(), 2);
    assert!(after.contains(&"test1".to_string()));
    assert!(!after.contains(&"test2".to_string()));
    assert!(after.contains(&"test3".to_string()));

    // The unrelated key survives and the total drops to three.
    let all_keys = tree.list("")?;
    assert_eq!(all_keys.len(), 3);
    assert!(all_keys.contains(&"other".to_string()));

    Ok(())
}
|
||||||
|
|
||||||
|
#[test]
fn test_list_edge_cases() -> Result<(), radixtree::Error> {
    // Work inside a throwaway directory so the test leaves no state behind.
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    let mut tree = RadixTree::new(db_path, true)?;

    // Listing on an empty tree yields nothing for any prefix.
    assert_eq!(tree.list("any")?.len(), 0);

    tree.set("single", b"value".to_vec())?;

    // A prefix longer than every stored key matches nothing.
    assert_eq!(tree.list("singlelonger")?.len(), 0);

    // A partial prefix still locates the single key.
    let partial = tree.list("sing")?;
    assert_eq!(partial.len(), 1);
    assert_eq!(partial[0], "single");

    // Very long keys sharing a 100-character prefix are both found.
    let long_key1 = format!("{}key1", "a".repeat(100));
    let long_key2 = format!("{}key2", "a".repeat(100));
    tree.set(&long_key1, b"value1".to_vec())?;
    tree.set(&long_key2, b"value2".to_vec())?;

    let long_matches = tree.list(&"a".repeat(100))?;
    assert_eq!(long_matches.len(), 2);
    assert!(long_matches.contains(&long_key1));
    assert!(long_matches.contains(&long_key2));

    Ok(())
}
|
||||||
|
|
||||||
|
#[test]
fn test_list_performance() -> Result<(), radixtree::Error> {
    // Work inside a throwaway directory so the test leaves no state behind.
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    let mut tree = RadixTree::new(db_path, true)?;

    // 5 prefixes x 100 keys each = 500 entries total.
    let prefixes = ["user", "post", "comment", "like", "share"];
    for prefix in &prefixes {
        for i in 0..100 {
            let key = format!("{}_{}", prefix, i);
            let value = format!("value_{}", key).into_bytes();
            tree.set(&key, value)?;
        }
    }

    // Each prefix listing returns exactly its own 100 keys,
    // and every returned key really carries that prefix.
    for prefix in &prefixes {
        let keys = tree.list(prefix)?;
        assert_eq!(keys.len(), 100);
        assert!(keys.iter().all(|k| k.starts_with(prefix)));
    }

    // The empty prefix enumerates everything.
    assert_eq!(tree.list("")?.len(), 500);

    Ok(())
}
|
||||||
180
radixtree/tests/serialize_test.rs
Normal file
180
radixtree/tests/serialize_test.rs
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
use radixtree::{Node, NodeRef};
|
||||||
|
|
||||||
|
#[test]
fn test_node_serialization() {
    // Round-trip a node that carries a value and two child references.
    let original = Node {
        key_segment: "test".to_string(),
        value: b"test_value".to_vec(),
        children: vec![
            NodeRef {
                key_part: "child1".to_string(),
                node_id: 1,
            },
            NodeRef {
                key_part: "child2".to_string(),
                node_id: 2,
            },
        ],
        is_leaf: true,
    };

    let bytes = original.serialize();
    let restored = Node::deserialize(&bytes).expect("Failed to deserialize node");

    // Every field, including each child reference, must survive the round trip.
    assert_eq!(restored.key_segment, original.key_segment);
    assert_eq!(restored.value, original.value);
    assert_eq!(restored.is_leaf, original.is_leaf);
    assert_eq!(restored.children.len(), original.children.len());
    for (restored_child, child) in restored.children.iter().zip(&original.children) {
        assert_eq!(restored_child.key_part, child.key_part);
        assert_eq!(restored_child.node_id, child.node_id);
    }
}
|
||||||
|
|
||||||
|
#[test]
fn test_empty_node_serialization() {
    // A node with no segment, no value, and no children must round-trip cleanly.
    let original = Node {
        key_segment: String::new(),
        value: Vec::new(),
        children: Vec::new(),
        is_leaf: false,
    };

    let restored =
        Node::deserialize(&original.serialize()).expect("Failed to deserialize node");

    assert_eq!(restored.key_segment, original.key_segment);
    assert_eq!(restored.value, original.value);
    assert_eq!(restored.is_leaf, original.is_leaf);
    assert_eq!(restored.children.len(), original.children.len());
}
|
||||||
|
|
||||||
|
#[test]
fn test_node_with_many_children() {
    // 100 children exercises the child-list length encoding.
    let children: Vec<NodeRef> = (0..100)
        .map(|i| NodeRef {
            key_part: format!("child{}", i),
            node_id: i as u32,
        })
        .collect();

    let original = Node {
        key_segment: "parent".to_string(),
        value: b"parent_value".to_vec(),
        children,
        is_leaf: true,
    };

    let restored =
        Node::deserialize(&original.serialize()).expect("Failed to deserialize node");

    // Every field, including each of the 100 children, must survive the round trip.
    assert_eq!(restored.key_segment, original.key_segment);
    assert_eq!(restored.value, original.value);
    assert_eq!(restored.is_leaf, original.is_leaf);
    assert_eq!(restored.children.len(), original.children.len());
    for (restored_child, child) in restored.children.iter().zip(&original.children) {
        assert_eq!(restored_child.key_part, child.key_part);
        assert_eq!(restored_child.node_id, child.node_id);
    }
}
|
||||||
|
|
||||||
|
#[test]
fn test_node_with_large_value() {
    // A 4 KiB payload checks that the value length is not capped at a small size.
    let payload = vec![0u8; 4096];

    let original = Node {
        key_segment: "large_value".to_string(),
        value: payload,
        children: Vec::new(),
        is_leaf: true,
    };

    let restored =
        Node::deserialize(&original.serialize()).expect("Failed to deserialize node");

    assert_eq!(restored.key_segment, original.key_segment);
    assert_eq!(restored.value, original.value);
    assert_eq!(restored.is_leaf, original.is_leaf);
    assert_eq!(restored.children.len(), original.children.len());
}
|
||||||
|
|
||||||
|
#[test]
fn test_version_compatibility() {
    // Guards the wire format: byte 0 of a serialized node is the format
    // version, currently 1. If this assert breaks, the on-disk layout changed.
    let original = Node {
        key_segment: "test".to_string(),
        value: b"test_value".to_vec(),
        children: vec![NodeRef {
            key_part: "child".to_string(),
            node_id: 1,
        }],
        is_leaf: true,
    };

    let bytes = original.serialize();
    assert_eq!(bytes[0], 1);

    let restored = Node::deserialize(&bytes).expect("Failed to deserialize node");

    assert_eq!(restored.key_segment, original.key_segment);
    assert_eq!(restored.value, original.value);
    assert_eq!(restored.is_leaf, original.is_leaf);
    assert_eq!(restored.children.len(), original.children.len());
}
|
||||||
|
|
||||||
|
#[test]
fn test_invalid_serialization() {
    // Empty input cannot be a valid node.
    assert!(Node::deserialize(&[]).is_err());

    // An unknown version byte (2) must be rejected.
    assert!(Node::deserialize(&[2, 0, 0, 0, 0]).is_err());

    // Cutting a valid serialization in half must return an error, not panic.
    let node = Node {
        key_segment: "test".to_string(),
        value: b"test_value".to_vec(),
        children: Vec::new(),
        is_leaf: true,
    };
    let full = node.serialize();
    let truncated = &full[..full.len() / 2];
    assert!(Node::deserialize(truncated).is_err());
}
|
||||||
@@ -1,345 +0,0 @@
|
|||||||
|
|
||||||
### 2.1 Accounts
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity (non-negative), unique account id
|
|
||||||
* **pubkey**: `BYTEA` unique public key for signing/encryption
|
|
||||||
* **display\_name**: `TEXT` (optional)
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
### 2.2 Currencies
|
|
||||||
|
|
||||||
* **asset\_code**: `TEXT` PK (e.g., `USDC-ETH`, `EUR`, `LND`)
|
|
||||||
* **name**: `TEXT`
|
|
||||||
* **symbol**: `TEXT`
|
|
||||||
* **decimals**: `INT` (default 2)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 3) Services & Groups
|
|
||||||
|
|
||||||
### 3.1 Services
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity
|
|
||||||
* **name**: `TEXT` unique
|
|
||||||
* **description**: `TEXT`
|
|
||||||
* **default\_billing\_mode**: `ENUM('per_second','per_request')`
|
|
||||||
* **default\_price**: `NUMERIC(38,18)` (≥0)
|
|
||||||
* **default\_currency**: FK → `currencies(asset_code)`
|
|
||||||
* **max\_request\_seconds**: `INT` (>0 or `NULL`)
|
|
||||||
* **schema\_heroscript**: `TEXT`
|
|
||||||
* **schema\_json**: `JSONB`
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
#### Accepted Currencies (per service)
|
|
||||||
|
|
||||||
* **service\_id**: FK → `services(id)`
|
|
||||||
* **asset\_code**: FK → `currencies(asset_code)`
|
|
||||||
* **price\_override**: `NUMERIC(38,18)` (optional)
|
|
||||||
* **billing\_mode\_override**: `ENUM` (optional)
|
|
||||||
Primary key: `(service_id, asset_code)`
|
|
||||||
|
|
||||||
### 3.2 Service Groups
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity
|
|
||||||
* **name**: `TEXT` unique
|
|
||||||
* **description**: `TEXT`
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
#### Group Memberships
|
|
||||||
|
|
||||||
* **group\_id**: FK → `service_groups(id)`
|
|
||||||
* **service\_id**: FK → `services(id)`
|
|
||||||
Primary key: `(group_id, service_id)`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 4) Providers & Runners
|
|
||||||
|
|
||||||
### 4.1 Service Providers
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity
|
|
||||||
* **account\_id**: FK → `accounts(id)` (the owning account)
|
|
||||||
* **name**: `TEXT` unique
|
|
||||||
* **description**: `TEXT`
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
#### Providers Offer Groups
|
|
||||||
|
|
||||||
* **provider\_id**: FK → `service_providers(id)`
|
|
||||||
* **group\_id**: FK → `service_groups(id)`
|
|
||||||
Primary key: `(provider_id, group_id)`
|
|
||||||
|
|
||||||
#### Provider Pricing Overrides (optional)
|
|
||||||
|
|
||||||
* **provider\_id**: FK → `service_providers(id)`
|
|
||||||
* **service\_id**: FK → `services(id)`
|
|
||||||
* **asset\_code**: FK → `currencies(asset_code)` (nullable for currency-agnostic override)
|
|
||||||
* **price\_override**: `NUMERIC(38,18)` (optional)
|
|
||||||
* **billing\_mode\_override**: `ENUM` (optional)
|
|
||||||
* **max\_request\_seconds\_override**: `INT` (optional)
|
|
||||||
Primary key: `(provider_id, service_id, asset_code)`
|
|
||||||
|
|
||||||
### 4.2 Runners
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity
|
|
||||||
* **address**: `INET` (must be IPv6)
|
|
||||||
* **name**: `TEXT`
|
|
||||||
* **description**: `TEXT`
|
|
||||||
* **pubkey**: `BYTEA` (optional)
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
#### Runner Ownership (many-to-many)
|
|
||||||
|
|
||||||
* **runner\_id**: FK → `runners(id)`
|
|
||||||
* **provider\_id**: FK → `service_providers(id)`
|
|
||||||
Primary key: `(runner_id, provider_id)`
|
|
||||||
|
|
||||||
#### Routing (provider → service/service\_group → runners)
|
|
||||||
|
|
||||||
* **provider\_service\_runners**: `(provider_id, service_id, runner_id)` PK
|
|
||||||
* **provider\_service\_group\_runners**: `(provider_id, group_id, runner_id)` PK
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 5) Subscriptions & Spend Control
|
|
||||||
|
|
||||||
A subscription authorizes an **account** to use either a **service** **or** a **service group**, with optional spend limits and allowed providers.
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity
|
|
||||||
* **account\_id**: FK → `accounts(id)`
|
|
||||||
* **service\_id** *xor* **group\_id**: FK (exactly one must be set)
|
|
||||||
* **secret**: `BYTEA` (random, provided by subscriber; recommend storing a hash)
|
|
||||||
* **subscription\_data**: `JSONB` (free-form)
|
|
||||||
* **limit\_amount**: `NUMERIC(38,18)` (optional)
|
|
||||||
* **limit\_currency**: FK → `currencies(asset_code)` (optional)
|
|
||||||
* **limit\_period**: `ENUM('hour','day','month')` (optional)
|
|
||||||
* **active**: `BOOLEAN` default `TRUE`
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
#### Allowed Providers per Subscription
|
|
||||||
|
|
||||||
* **subscription\_id**: FK → `subscriptions(id)`
|
|
||||||
* **provider\_id**: FK → `service_providers(id)`
|
|
||||||
Primary key: `(subscription_id, provider_id)`
|
|
||||||
|
|
||||||
**Intended Use:**
|
|
||||||
|
|
||||||
* Subscribers cap their spending by amount, currency, and period.
|
|
||||||
* Merchant (provider) can claim charges for requests fulfilled under an active subscription, within limits, and only if listed in `subscription_providers`.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 6) Requests & Billing
|
|
||||||
|
|
||||||
### 6.1 Request Lifecycle
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity
|
|
||||||
* **account\_id**: FK → `accounts(id)`
|
|
||||||
* **subscription\_id**: FK → `subscriptions(id)`
|
|
||||||
* **provider\_id**: FK → `service_providers(id)`
|
|
||||||
* **service\_id**: FK → `services(id)`
|
|
||||||
* **runner\_id**: FK → `runners(id)` (nullable)
|
|
||||||
* **request\_schema**: `JSONB` (payload matching `schema_json`/`schema_heroscript`)
|
|
||||||
* **started\_at**, **ended\_at**: `TIMESTAMPTZ`
|
|
||||||
* **status**: `ENUM('pending','running','succeeded','failed','canceled')`
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
### 6.2 Billing Ledger (append-only)
|
|
||||||
|
|
||||||
* **id**: `BIGINT` identity
|
|
||||||
* **account\_id**: FK → `accounts(id)`
|
|
||||||
* **provider\_id**: FK → `service_providers(id)` (nullable)
|
|
||||||
* **service\_id**: FK → `services(id)` (nullable)
|
|
||||||
* **request\_id**: FK → `requests(id)` (nullable)
|
|
||||||
* **amount**: `NUMERIC(38,18)` (debit = positive, credit/refund = negative)
|
|
||||||
* **asset\_code**: FK → `currencies(asset_code)`
|
|
||||||
* **entry\_type**: `ENUM('debit','credit','adjustment')`
|
|
||||||
* **description**: `TEXT`
|
|
||||||
* **created\_at**: `TIMESTAMPTZ`
|
|
||||||
|
|
||||||
**Balances View (example):**
|
|
||||||
|
|
||||||
* `account_balances(account_id, asset_code, balance)` as a view over `billing_ledger`.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 7) Pricing Precedence
|
|
||||||
|
|
||||||
When computing the **effective** pricing, billing mode, and max duration for a `(provider, service, currency)`:
|
|
||||||
|
|
||||||
1. **Provider override for (service, asset\_code)** — if present, use it.
|
|
||||||
2. **Service accepted currency override** — if present, use it.
|
|
||||||
3. **Service defaults** — fallback.
|
|
||||||
|
|
||||||
If `billing_mode` or `max_request_seconds` are not overridden at steps (1) or (2), inherit from the next step down.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 8) Key Constraints & Validations
|
|
||||||
|
|
||||||
* All identity ids are non-negative (`CHECK (id >= 0)`).
|
|
||||||
* Runner IPv6 enforcement: `CHECK (family(address) = 6)`.
|
|
||||||
* Subscriptions must point to **exactly one** of `service_id` or `group_id`.
|
|
||||||
* Prices and limits must be non-negative if set.
|
|
||||||
* Unique natural keys where appropriate: service names, provider names, currency asset codes, account pubkeys.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 9) Mermaid Diagrams
|
|
||||||
|
|
||||||
### 9.1 Entity–Relationship Overview
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
erDiagram
|
|
||||||
ACCOUNTS ||--o{ SERVICE_PROVIDERS : "owns via account_id"
|
|
||||||
ACCOUNTS ||--o{ SUBSCRIPTIONS : has
|
|
||||||
CURRENCIES ||--o{ SERVICES : "default_currency"
|
|
||||||
CURRENCIES ||--o{ SERVICE_ACCEPTED_CURRENCIES : "asset_code"
|
|
||||||
CURRENCIES ||--o{ PROVIDER_SERVICE_OVERRIDES : "asset_code"
|
|
||||||
CURRENCIES ||--o{ BILLING_LEDGER : "asset_code"
|
|
||||||
|
|
||||||
SERVICES ||--o{ SERVICE_ACCEPTED_CURRENCIES : has
|
|
||||||
SERVICES ||--o{ SERVICE_GROUP_MEMBERS : member_of
|
|
||||||
SERVICE_GROUPS ||--o{ SERVICE_GROUP_MEMBERS : contains
|
|
||||||
|
|
||||||
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_GROUPS : offers
|
|
||||||
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_OVERRIDES : sets
|
|
||||||
SERVICE_PROVIDERS ||--o{ RUNNER_OWNERS : owns
|
|
||||||
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_RUNNERS : routes
|
|
||||||
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_GROUP_RUNNERS : routes
|
|
||||||
|
|
||||||
RUNNERS ||--o{ RUNNER_OWNERS : owned_by
|
|
||||||
RUNNERS ||--o{ PROVIDER_SERVICE_RUNNERS : executes
|
|
||||||
RUNNERS ||--o{ PROVIDER_SERVICE_GROUP_RUNNERS : executes
|
|
||||||
|
|
||||||
SUBSCRIPTIONS ||--o{ SUBSCRIPTION_PROVIDERS : allow
|
|
||||||
SERVICE_PROVIDERS ||--o{ SUBSCRIPTION_PROVIDERS : allowed
|
|
||||||
|
|
||||||
REQUESTS }o--|| ACCOUNTS : by
|
|
||||||
REQUESTS }o--|| SUBSCRIPTIONS : under
|
|
||||||
REQUESTS }o--|| SERVICE_PROVIDERS : via
|
|
||||||
REQUESTS }o--|| SERVICES : for
|
|
||||||
REQUESTS }o--o{ RUNNERS : executed_by
|
|
||||||
|
|
||||||
BILLING_LEDGER }o--|| ACCOUNTS : charges
|
|
||||||
BILLING_LEDGER }o--o{ SERVICES : reference
|
|
||||||
BILLING_LEDGER }o--o{ SERVICE_PROVIDERS : reference
|
|
||||||
BILLING_LEDGER }o--o{ REQUESTS : reference
|
|
||||||
```
|
|
||||||
|
|
||||||
### 9.2 Request Flow (Happy Path)
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
sequenceDiagram
|
|
||||||
autonumber
|
|
||||||
participant AC as Account
|
|
||||||
participant API as Broker/API
|
|
||||||
participant PR as Provider
|
|
||||||
participant RU as Runner
|
|
||||||
participant DB as PostgreSQL
|
|
||||||
|
|
||||||
AC->>API: Submit request (subscription_id, service_id, payload, secret)
|
|
||||||
API->>DB: Validate subscription (active, provider allowed, spend limits)
|
|
||||||
DB-->>API: OK + effective pricing (resolve precedence)
|
|
||||||
API->>PR: Dispatch request (service, payload)
|
|
||||||
PR->>DB: Select runner (provider_service_runners / group runners)
|
|
||||||
PR->>RU: Start job (payload)
|
|
||||||
RU-->>PR: Job started (started_at)
|
|
||||||
PR->>DB: Update REQUESTS (status=running, started_at)
|
|
||||||
RU-->>PR: Job finished (duration, result)
|
|
||||||
PR->>DB: Update REQUESTS (status=succeeded, ended_at)
|
|
||||||
API->>DB: Insert BILLING_LEDGER (debit per effective price)
|
|
||||||
DB-->>API: Ledger entry id
|
|
||||||
API-->>AC: Return result + charge info
|
|
||||||
```
|
|
||||||
|
|
||||||
### 9.3 Pricing Resolution
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
flowchart TD
|
|
||||||
A[Input: provider_id, service_id, asset_code] --> B{Provider override exists for (service, asset_code)?}
|
|
||||||
B -- Yes --> P1[Use provider price/mode/max]
|
|
||||||
B -- No --> C{Service accepted currency override exists?}
|
|
||||||
C -- Yes --> P2[Use service currency price/mode]
|
|
||||||
C -- No --> P3[Use service defaults]
|
|
||||||
P1 --> OUT[Effective pricing]
|
|
||||||
P2 --> OUT
|
|
||||||
P3 --> OUT
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 10) Operational Notes
|
|
||||||
|
|
||||||
* **Secrets:** store a hash (e.g., `digest(secret,'sha256')`) rather than raw `secret`. Keep the original only client-side.
|
|
||||||
* **Limits enforcement:** before insert of a debit ledger entry, compute period window (hour/day/month UTC or tenant TZ) and enforce `SUM(amount) + new_amount ≤ limit_amount`.
|
|
||||||
* **Durations:** enforce `max_request_seconds` (effective) at orchestration and/or via DB trigger on `REQUESTS` when transitioning to `running/succeeded`.
|
|
||||||
* **Routing:** prefer `provider_service_runners` when a request targets a service directly; otherwise use the union of runners from `provider_service_group_runners` for the group.
|
|
||||||
* **Balances:** serve balance queries via the `account_balances` view or a materialized cache updated by triggers/jobs.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 11) Example Effective Pricing Query (sketch)
|
|
||||||
|
|
||||||
```sql
|
|
||||||
-- Inputs: :provider_id, :service_id, :asset_code
|
|
||||||
WITH p AS (
|
|
||||||
SELECT price_override, billing_mode_override, max_request_seconds_override
|
|
||||||
FROM provider_service_overrides
|
|
||||||
WHERE provider_id = :provider_id
|
|
||||||
AND service_id = :service_id
|
|
||||||
AND (asset_code = :asset_code)
|
|
||||||
),
|
|
||||||
sac AS (
|
|
||||||
SELECT price_override, billing_mode_override
|
|
||||||
FROM service_accepted_currencies
|
|
||||||
WHERE service_id = :service_id AND asset_code = :asset_code
|
|
||||||
),
|
|
||||||
svc AS (
|
|
||||||
SELECT default_price AS price, default_billing_mode AS mode, max_request_seconds
|
|
||||||
FROM services WHERE id = :service_id
|
|
||||||
)
|
|
||||||
SELECT
  COALESCE(p.price_override, sac.price_override, svc.price)              AS effective_price,
  COALESCE(p.billing_mode_override, sac.billing_mode_override, svc.mode) AS effective_mode,
  COALESCE(p.max_request_seconds_override, svc.max_request_seconds)      AS effective_max_seconds
FROM svc
LEFT JOIN p   ON TRUE  -- CTEs may be empty; LEFT JOIN keeps the fallback row
LEFT JOIN sac ON TRUE;
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 12) Indices (non-exhaustive)
|
|
||||||
|
|
||||||
* `services(default_currency)`
|
|
||||||
* `service_accepted_currencies(service_id)`
|
|
||||||
* `provider_service_overrides(service_id, provider_id)`
|
|
||||||
* `requests(account_id)`, `requests(provider_id)`, `requests(service_id)`
|
|
||||||
* `billing_ledger(account_id, asset_code)`
|
|
||||||
* `subscriptions(account_id) WHERE active`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 13) Migration & Compatibility
|
|
||||||
|
|
||||||
* Prefer additive migrations (new columns/tables) to avoid downtime.
|
|
||||||
* Use `ENUM` via `CREATE TYPE`; when extending, plan for `ALTER TYPE ... ADD VALUE`.
|
|
||||||
* For high-write ledgers, consider partitioning `billing_ledger` by `created_at` (monthly) and indexing partitions.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 14) Non-Goals
|
|
||||||
|
|
||||||
* Wallet custody and on-chain settlement are out of scope.
|
|
||||||
* SLA tracking and detailed observability (metrics/log schema) are not part of this spec.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 15) Acceptance Criteria
|
|
||||||
|
|
||||||
* Can represent services, groups, and providers with currency-specific pricing.
|
|
||||||
* Can route requests to runners by service or group.
|
|
||||||
* Can authorize usage via subscriptions, enforce spend limits, and record charges.
|
|
||||||
* Can reconstruct balances and audit via append-only ledger.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**End of Spec**
|
|
||||||
@@ -1,225 +0,0 @@
|
|||||||
|
|
||||||
# Concept Note: Generic Billing & Tracking Framework
|
|
||||||
|
|
||||||
## 1) Purpose
|
|
||||||
|
|
||||||
The model is designed to support a **flexible, generic, and auditable** billing environment that can be applied across diverse services and providers — from compute time billing to per-request API usage, across multiple currencies, with dynamic provider-specific overrides.
|
|
||||||
|
|
||||||
It is **not tied to a single business domain** — the same framework can be used for:
|
|
||||||
|
|
||||||
* Cloud compute time (per second)
|
|
||||||
* API transactions (per request)
|
|
||||||
* Data transfer charges
|
|
||||||
* Managed service subscriptions
|
|
||||||
* Brokered third-party service reselling
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 2) Key Concepts
|
|
||||||
|
|
||||||
### 2.1 Accounts
|
|
||||||
|
|
||||||
An **account** represents an economic actor in the system — typically a customer or a service provider.
|
|
||||||
|
|
||||||
* Identified by a **public key** (for authentication & cryptographic signing).
|
|
||||||
* Every billing action traces back to an account.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2.2 Currencies & Asset Codes
|
|
||||||
|
|
||||||
The system supports **multiple currencies** (crypto or fiat) via **asset codes**.
|
|
||||||
|
|
||||||
* Asset codes identify the unit of billing (e.g. `USDC-ETH`, `EUR`, `LND`).
|
|
||||||
* Currencies are **decoupled from services** so you can add or remove supported assets at any time.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2.3 Services & Groups
|
|
||||||
|
|
||||||
* **Service** = a billable offering (e.g., "Speech-to-Text", "VM Hosting").
|
|
||||||
|
|
||||||
* Has a **billing mode** (`per_second` or `per_request`).
|
|
||||||
* Has a **default price** and **default currency**.
|
|
||||||
* Supports **multiple accepted currencies** with optional per-currency pricing overrides.
|
|
||||||
* Has execution constraints (e.g. `max_request_seconds`).
|
|
||||||
* Includes structured schemas for request payloads.
|
|
||||||
|
|
||||||
* **Service Group** = a logical grouping of services.
|
|
||||||
|
|
||||||
* Groups make it easy to **bundle related services** and manage them together.
|
|
||||||
* Providers can offer entire groups rather than individual services.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2.4 Service Providers
|
|
||||||
|
|
||||||
A **service provider** is an **account** that offers services or service groups.
|
|
||||||
They can:
|
|
||||||
|
|
||||||
* Override **pricing** for their offered services (per currency).
|
|
||||||
* Route requests to their own **runners** (execution agents).
|
|
||||||
* Manage multiple **service groups** under one provider identity.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2.5 Runners
|
|
||||||
|
|
||||||
A **runner** is an execution agent — a node, VM, or service endpoint that can fulfill requests.
|
|
||||||
|
|
||||||
* Identified by an **IPv6 address** (supports Mycelium or other overlay networks).
|
|
||||||
* Can be owned by one or multiple providers.
|
|
||||||
* Providers map **services/groups → runners** to define routing.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2.6 Subscriptions
|
|
||||||
|
|
||||||
A **subscription** is **the authorization mechanism** for usage and spending control:
|
|
||||||
|
|
||||||
* Links an **account** to a **service** or **service group**.
|
|
||||||
* Defines **spending limits** (amount, currency, period: hour/day/month).
|
|
||||||
* Restricts which **providers** are allowed to serve the subscription.
|
|
||||||
* Uses a **secret** chosen by the subscriber — providers use this to claim charges.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2.7 Requests
|
|
||||||
|
|
||||||
A **request** represents a single execution under a subscription:
|
|
||||||
|
|
||||||
* Tied to **account**, **subscription**, **provider**, **service**, and optionally **runner**.
|
|
||||||
* Has **status** (`pending`, `running`, `succeeded`, `failed`, `canceled`).
|
|
||||||
* Records start/end times for duration-based billing.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2.8 Billing Ledger
|
|
||||||
|
|
||||||
The **ledger** is **append-only** — the source of truth for all charges and credits.
|
|
||||||
|
|
||||||
* Each entry records:
|
|
||||||
|
|
||||||
* `amount` (positive = debit, negative = credit/refund)
|
|
||||||
* `asset_code`
|
|
||||||
* Links to `account`, `provider`, `service`, and/or `request`
|
|
||||||
* From the ledger, **balances** can be reconstructed at any time.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 3) How Billing Works — Step by Step
|
|
||||||
|
|
||||||
### 3.1 Setup
|
|
||||||
|
|
||||||
1. **Define services** with default pricing & schemas.
|
|
||||||
2. **Define currencies** and accepted currencies for services.
|
|
||||||
3. **Group services** into service groups.
|
|
||||||
4. **Onboard providers** (accounts) and associate them with service groups.
|
|
||||||
5. **Assign runners** to services or groups for execution routing.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 3.2 Subscription Creation
|
|
||||||
|
|
||||||
1. Customer **creates a subscription**:
|
|
||||||
|
|
||||||
* Chooses service or service group.
|
|
||||||
* Sets **spending limit** (amount, currency, period).
|
|
||||||
* Chooses **secret**.
|
|
||||||
* Selects **allowed providers**.
|
|
||||||
2. Subscription is stored in DB.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 3.3 Request Execution
|
|
||||||
|
|
||||||
1. Customer sends a request to broker/API with:
|
|
||||||
|
|
||||||
* `subscription_id`
|
|
||||||
* Target `service_id`
|
|
||||||
* Payload + signature using account pubkey.
|
|
||||||
2. Broker:
|
|
||||||
|
|
||||||
* Validates **subscription active**.
|
|
||||||
* Validates **provider allowed**.
|
|
||||||
* Checks **spend limit** hasn’t been exceeded for current period.
|
|
||||||
* Resolves **effective price** via:
|
|
||||||
|
|
||||||
1. Provider override (currency-specific)
|
|
||||||
2. Service accepted currency override
|
|
||||||
3. Service default
|
|
||||||
3. Broker selects **runner** from provider’s routing tables.
|
|
||||||
4. Runner executes request and returns result.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 3.4 Billing Entry
|
|
||||||
|
|
||||||
1. When the request completes:
|
|
||||||
|
|
||||||
* If `per_second` mode → calculate `duration × rate`.
|
|
||||||
* If `per_request` mode → apply flat rate.
|
|
||||||
2. Broker **inserts ledger entry**:
|
|
||||||
|
|
||||||
* Debit from customer account.
|
|
||||||
* Credit to provider account (can be separate entries or aggregated).
|
|
||||||
3. Ledger is append-only — historical billing cannot be altered.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 3.5 Balance & Tracking
|
|
||||||
|
|
||||||
* **Current balances** are a sum of all ledger entries per account+currency.
|
|
||||||
* Spend limits are enforced by **querying the ledger** for the current period before each charge.
|
|
||||||
* Audit trails are guaranteed via immutable ledger entries.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 4) Why This is Generic & Reusable
|
|
||||||
|
|
||||||
This design **decouples**:
|
|
||||||
|
|
||||||
* **Service definition** from **provider pricing** → multiple providers can sell the same service at different rates.
|
|
||||||
* **Execution agents** (runners) from **service definitions** → easy scaling or outsourcing of execution.
|
|
||||||
* **Billing rules** (per-second vs per-request) from **subscription limits** → same service can be sold in different billing modes.
|
|
||||||
* **Currencies** from the service → enabling multi-asset billing without changing the service definition.
|
|
||||||
|
|
||||||
Because of these separations, you can:
|
|
||||||
|
|
||||||
* Reuse the model for **compute**, **APIs**, **storage**, **SaaS features**, etc.
|
|
||||||
* Plug in different **payment backends** (on-chain, centralized payment processor, prepaid balance).
|
|
||||||
* Use the same model for **internal cost allocation** or **external customer billing**.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 5) Potential Extensions
|
|
||||||
|
|
||||||
* **Prepaid model**: enforce that ledger debits can’t exceed balance.
|
|
||||||
* **On-chain settlement**: periodically export ledger entries to blockchain transactions.
|
|
||||||
* **Discount models**: percentage or fixed-amount discounts per subscription.
|
|
||||||
* **Usage analytics**: aggregate requests/billing by time period, provider, or service.
|
|
||||||
* **SLAs**: link billing adjustments to performance metrics in requests.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 6) Conceptual Diagram — Billing Flow
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
sequenceDiagram
|
|
||||||
participant C as Customer Account
|
|
||||||
participant B as Broker/API
|
|
||||||
participant P as Provider
|
|
||||||
participant R as Runner
|
|
||||||
participant DB as Ledger DB
|
|
||||||
|
|
||||||
C->>B: Request(service, subscription, payload, secret)
|
|
||||||
B->>DB: Validate subscription & spend limit
|
|
||||||
DB-->>B: OK + effective pricing
|
|
||||||
B->>P: Forward request
|
|
||||||
P->>R: Execute request
|
|
||||||
R-->>P: Result + execution time
|
|
||||||
P->>B: Return result
|
|
||||||
B->>DB: Insert debit (customer) + credit (provider)
|
|
||||||
DB-->>B: Ledger updated
|
|
||||||
B-->>C: Return result + charge info
|
|
||||||
```
|
|
||||||
@@ -1,234 +0,0 @@
|
|||||||
-- Enable useful extensions (optional).
CREATE EXTENSION IF NOT EXISTS pgcrypto;   -- digests/hashes (e.g. for secret hashing)
CREATE EXTENSION IF NOT EXISTS btree_gist; -- GiST support for exclusion / partial indexes

-- =========================
-- Core: Accounts & Currency
-- =========================

-- An account is any party that can be billed or paid (customers and providers).
CREATE TABLE accounts (
    id           BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    pubkey       BYTEA NOT NULL,                   -- external identity / signing key
    display_name TEXT,
    created_at   TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT accounts_pubkey_key UNIQUE (pubkey),
    CONSTRAINT accounts_id_check CHECK (id >= 0)   -- identity is always positive; kept for defense in depth
);

-- Reference list of billable assets (fiat, tokens, internal units).
CREATE TABLE currencies (
    asset_code TEXT PRIMARY KEY,                   -- e.g. "USDC-ETH", "EUR", "LND"
    name       TEXT NOT NULL,
    symbol     TEXT,                               -- e.g. "$", "€"
    decimals   INT NOT NULL DEFAULT 2,             -- display precision (decimal places)
    CONSTRAINT currencies_name_key UNIQUE (name),
    CONSTRAINT currencies_decimals_check CHECK (decimals >= 0)  -- negative precision is meaningless
);
|
|
||||||
|
|
||||||
-- =========================
-- Services & Groups
-- =========================

CREATE TYPE billing_mode AS ENUM ('per_second', 'per_request');

-- A sellable service definition.  The pricing/mode/cap here are DEFAULTS;
-- they can be overridden per accepted currency and again per provider.
CREATE TABLE services (
    id                   BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    name                 TEXT NOT NULL,
    description          TEXT,
    default_billing_mode billing_mode NOT NULL,
    default_price        NUMERIC(38, 18) NOT NULL, -- price expressed in default_currency units
    default_currency     TEXT NOT NULL,
    max_request_seconds  INTEGER,                  -- NULL = no duration cap
    schema_heroscript    TEXT,                     -- request schema (heroscript form)
    schema_json          JSONB,                    -- request schema (JSON form)
    created_at           TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT services_name_key UNIQUE (name),
    CONSTRAINT services_default_currency_fk
        FOREIGN KEY (default_currency) REFERENCES currencies(asset_code) ON UPDATE CASCADE,
    CONSTRAINT services_id_check CHECK (id >= 0),
    CONSTRAINT services_default_price_check CHECK (default_price >= 0),
    CONSTRAINT services_max_request_seconds_check
        CHECK (max_request_seconds IS NULL OR max_request_seconds > 0)
);
|
|
||||||
|
|
||||||
-- Accepted currencies for a service (subset of currencies, with optional
-- per-currency price / billing-mode overrides).
CREATE TABLE service_accepted_currencies (
    service_id            BIGINT NOT NULL,
    asset_code            TEXT NOT NULL,
    price_override        NUMERIC(38, 18),  -- if set, overrides services.default_price for this currency
    billing_mode_override billing_mode,     -- if set, overrides services.default_billing_mode
    CONSTRAINT service_accepted_currencies_pk PRIMARY KEY (service_id, asset_code),
    CONSTRAINT service_accepted_currencies_service_id_fk
        FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
    CONSTRAINT service_accepted_currencies_asset_code_fk
        FOREIGN KEY (asset_code) REFERENCES currencies(asset_code) ON UPDATE CASCADE,
    CONSTRAINT service_accepted_currencies_price_check
        CHECK (price_override IS NULL OR price_override >= 0)
);

-- Named bundles of services that can be offered/subscribed to as a unit.
CREATE TABLE service_groups (
    id          BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    name        TEXT NOT NULL,
    description TEXT,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT service_groups_name_key UNIQUE (name),
    CONSTRAINT service_groups_id_check CHECK (id >= 0)
);

-- Group membership.  RESTRICT on service: a service cannot be dropped while
-- it is still part of a group.
CREATE TABLE service_group_members (
    group_id   BIGINT NOT NULL,
    service_id BIGINT NOT NULL,
    CONSTRAINT service_group_members_pk PRIMARY KEY (group_id, service_id),
    CONSTRAINT service_group_members_group_id_fk
        FOREIGN KEY (group_id) REFERENCES service_groups(id) ON DELETE CASCADE,
    CONSTRAINT service_group_members_service_id_fk
        FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE RESTRICT
);
|
|
||||||
|
|
||||||
-- =========================
-- Providers, Runners, Routing
-- =========================

-- A provider is an account that offers services.
CREATE TABLE service_providers (
    id          BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    account_id  BIGINT NOT NULL,   -- the provider's billing/identity account
    name        TEXT NOT NULL,
    description TEXT,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT service_providers_account_id_fk
        FOREIGN KEY (account_id) REFERENCES accounts(id) ON DELETE CASCADE,
    CONSTRAINT service_providers_name_key UNIQUE (name),
    CONSTRAINT service_providers_id_check CHECK (id >= 0)
);

-- Providers can offer whole groups (which imply their member services).
CREATE TABLE provider_service_groups (
    provider_id BIGINT NOT NULL,
    group_id    BIGINT NOT NULL,
    CONSTRAINT provider_service_groups_pk PRIMARY KEY (provider_id, group_id),
    CONSTRAINT provider_service_groups_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE CASCADE,
    CONSTRAINT provider_service_groups_group_id_fk
        FOREIGN KEY (group_id) REFERENCES service_groups(id) ON DELETE CASCADE
);
|
|
||||||
|
|
||||||
-- Providers may set per-service overrides (price/mode/max seconds), optionally
-- scoped to one currency; asset_code IS NULL means "applies to all currencies".
--
-- NOTE: the original composite PRIMARY KEY included asset_code, which
-- PostgreSQL forces to NOT NULL — making the "all currencies" (NULL) row
-- impossible.  A UNIQUE NULLS NOT DISTINCT constraint (PostgreSQL 15+) keeps
-- asset_code nullable while still allowing at most one wildcard row per
-- (provider, service).
CREATE TABLE provider_service_overrides (
    provider_id                  BIGINT NOT NULL,
    service_id                   BIGINT NOT NULL,
    asset_code                   TEXT,             -- NULL = override for every currency
    price_override               NUMERIC(38, 18),
    billing_mode_override        billing_mode,
    max_request_seconds_override INTEGER,
    CONSTRAINT provider_service_overrides_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE CASCADE,
    CONSTRAINT provider_service_overrides_service_id_fk
        FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
    CONSTRAINT provider_service_overrides_asset_code_fk
        FOREIGN KEY (asset_code) REFERENCES currencies(asset_code) ON UPDATE CASCADE,
    CONSTRAINT provider_service_overrides_key
        UNIQUE NULLS NOT DISTINCT (provider_id, service_id, asset_code),
    CONSTRAINT provider_service_overrides_price_check
        CHECK (price_override IS NULL OR price_override >= 0),
    CONSTRAINT provider_service_overrides_maxsec_check
        CHECK (max_request_seconds_override IS NULL OR max_request_seconds_override > 0)
);
|
|
||||||
|
|
||||||
-- Runners: execution endpoints, addressed by IPv6.
CREATE TABLE runners (
    id          BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    address     INET NOT NULL,   -- INET accepts v4 and v6; the CHECK below pins it to v6
    name        TEXT NOT NULL,
    description TEXT,
    pubkey      BYTEA,           -- optional signing key
    created_at  TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT runners_address_key UNIQUE (address),
    CONSTRAINT runners_id_check CHECK (id >= 0),
    CONSTRAINT runners_address_v6_check CHECK (family(address) = 6)  -- ensure IPv6
);

-- Runner ownership: a runner can be owned by multiple providers.
CREATE TABLE runner_owners (
    runner_id   BIGINT NOT NULL,
    provider_id BIGINT NOT NULL,
    CONSTRAINT runner_owners_pk PRIMARY KEY (runner_id, provider_id),
    CONSTRAINT runner_owners_runner_id_fk
        FOREIGN KEY (runner_id) REFERENCES runners(id) ON DELETE CASCADE,
    CONSTRAINT runner_owners_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE CASCADE
);
|
|
||||||
|
|
||||||
-- Routing: which runners serve a provider's individual services.
CREATE TABLE provider_service_runners (
    provider_id BIGINT NOT NULL,
    service_id  BIGINT NOT NULL,
    runner_id   BIGINT NOT NULL,
    CONSTRAINT provider_service_runners_pk
        PRIMARY KEY (provider_id, service_id, runner_id),
    CONSTRAINT provider_service_runners_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE CASCADE,
    CONSTRAINT provider_service_runners_service_id_fk
        FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
    CONSTRAINT provider_service_runners_runner_id_fk
        FOREIGN KEY (runner_id) REFERENCES runners(id) ON DELETE CASCADE
);

-- Routing: which runners serve a provider's service groups.
CREATE TABLE provider_service_group_runners (
    provider_id BIGINT NOT NULL,
    group_id    BIGINT NOT NULL,
    runner_id   BIGINT NOT NULL,
    CONSTRAINT provider_service_group_runners_pk
        PRIMARY KEY (provider_id, group_id, runner_id),
    CONSTRAINT provider_service_group_runners_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE CASCADE,
    CONSTRAINT provider_service_group_runners_group_id_fk
        FOREIGN KEY (group_id) REFERENCES service_groups(id) ON DELETE CASCADE,
    CONSTRAINT provider_service_group_runners_runner_id_fk
        FOREIGN KEY (runner_id) REFERENCES runners(id) ON DELETE CASCADE
);
|
|
||||||
|
|
||||||
-- =========================
-- Subscriptions & Spend Control
-- =========================

CREATE TYPE spend_period AS ENUM ('hour', 'day', 'month');

-- A subscription ties an account to exactly one service OR one service group,
-- with an optional spend limit and an optional provider allowlist.
CREATE TABLE subscriptions (
    id                BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    account_id        BIGINT NOT NULL,
    service_id        BIGINT,
    group_id          BIGINT,
    -- NOTE(review): plaintext secret at rest; prefer storing digest(secret, 'sha256')
    -- (pgcrypto is enabled above) and comparing hashes on use.
    secret            BYTEA NOT NULL,
    subscription_data JSONB,               -- arbitrary client-supplied info
    limit_amount      NUMERIC(38, 18),     -- allowed spend in limit_currency per limit_period
    limit_currency    TEXT,
    limit_period      spend_period,        -- period the limit applies to
    active            BOOLEAN NOT NULL DEFAULT TRUE,
    created_at        TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT subscriptions_account_id_fk
        FOREIGN KEY (account_id) REFERENCES accounts(id) ON DELETE CASCADE,
    CONSTRAINT subscriptions_service_id_fk
        FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
    CONSTRAINT subscriptions_group_id_fk
        FOREIGN KEY (group_id) REFERENCES service_groups(id) ON DELETE CASCADE,
    CONSTRAINT subscriptions_limit_currency_fk
        FOREIGN KEY (limit_currency) REFERENCES currencies(asset_code) ON UPDATE CASCADE,
    -- exactly one of service_id / group_id must be set
    CONSTRAINT subscriptions_target_check
        CHECK ((service_id IS NOT NULL) <> (group_id IS NOT NULL)),
    CONSTRAINT subscriptions_limit_amount_check
        CHECK (limit_amount IS NULL OR limit_amount >= 0),
    CONSTRAINT subscriptions_id_check CHECK (id >= 0)
);

-- Providers that are allowed to serve under a subscription
-- (presumably: no rows = no restriction; confirm against the broker logic).
CREATE TABLE subscription_providers (
    subscription_id BIGINT NOT NULL,
    provider_id     BIGINT NOT NULL,
    CONSTRAINT subscription_providers_pk PRIMARY KEY (subscription_id, provider_id),
    CONSTRAINT subscription_providers_subscription_id_fk
        FOREIGN KEY (subscription_id) REFERENCES subscriptions(id) ON DELETE CASCADE,
    CONSTRAINT subscription_providers_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE CASCADE
);
|
|
||||||
|
|
||||||
-- =========================
-- Usage, Requests & Billing
-- =========================

-- A request lifecycle record (auditing + max-duration enforcement).
CREATE TYPE request_status AS ENUM ('pending', 'running', 'succeeded', 'failed', 'canceled');

CREATE TABLE requests (
    id              BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    account_id      BIGINT NOT NULL,
    subscription_id BIGINT NOT NULL,
    provider_id     BIGINT NOT NULL,
    service_id      BIGINT NOT NULL,
    runner_id       BIGINT,
    -- Client-supplied key enabling INSERT ... ON CONFLICT (idempotency_key) DO NOTHING;
    -- the design doc's billing flow relies on it but the original table lacked it.
    idempotency_key TEXT,
    request_schema  JSONB,                 -- concrete task payload (conforms to the service schema)
    started_at      TIMESTAMPTZ,           -- set when status moves to 'running'
    ended_at        TIMESTAMPTZ,           -- set on a terminal status
    status          request_status NOT NULL DEFAULT 'pending',
    created_at      TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT requests_account_id_fk
        FOREIGN KEY (account_id) REFERENCES accounts(id) ON DELETE CASCADE,
    CONSTRAINT requests_subscription_id_fk
        FOREIGN KEY (subscription_id) REFERENCES subscriptions(id) ON DELETE RESTRICT,
    CONSTRAINT requests_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE RESTRICT,
    CONSTRAINT requests_service_id_fk
        FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE RESTRICT,
    CONSTRAINT requests_runner_id_fk
        FOREIGN KEY (runner_id) REFERENCES runners(id) ON DELETE SET NULL,
    CONSTRAINT requests_idempotency_key_key UNIQUE (idempotency_key),  -- NULLs allowed: key is optional
    CONSTRAINT requests_id_check CHECK (id >= 0),
    CONSTRAINT requests_time_order_check
        CHECK (ended_at IS NULL OR started_at IS NULL OR ended_at >= started_at)
);
|
|
||||||
|
|
||||||
-- Billing ledger: append-only debits/credits.
-- Sign convention: positive amount = debit to the account (charge),
-- negative = credit/refund; enforced per entry_type by the CHECK below.
CREATE TYPE ledger_entry_type AS ENUM ('debit', 'credit', 'adjustment');

CREATE TABLE billing_ledger (
    id          BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    account_id  BIGINT NOT NULL,
    provider_id BIGINT,
    service_id  BIGINT,
    request_id  BIGINT,
    amount      NUMERIC(38, 18) NOT NULL,  -- positive = debit, negative = credit
    asset_code  TEXT NOT NULL,
    entry_type  ledger_entry_type NOT NULL,
    description TEXT,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT billing_ledger_account_id_fk
        FOREIGN KEY (account_id) REFERENCES accounts(id) ON DELETE CASCADE,
    CONSTRAINT billing_ledger_provider_id_fk
        FOREIGN KEY (provider_id) REFERENCES service_providers(id) ON DELETE SET NULL,
    CONSTRAINT billing_ledger_service_id_fk
        FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE SET NULL,
    CONSTRAINT billing_ledger_request_id_fk
        FOREIGN KEY (request_id) REFERENCES requests(id) ON DELETE SET NULL,
    CONSTRAINT billing_ledger_asset_code_fk
        FOREIGN KEY (asset_code) REFERENCES currencies(asset_code) ON UPDATE CASCADE,
    CONSTRAINT billing_ledger_id_check CHECK (id >= 0),
    -- enforce the documented sign convention ('adjustment' may carry either sign)
    CONSTRAINT billing_ledger_sign_check CHECK (
        (entry_type = 'debit'      AND amount >= 0) OR
        (entry_type = 'credit'     AND amount <= 0) OR
        (entry_type = 'adjustment')
    )
);

-- Running balances per account/currency.  Plain view: always consistent but
-- recomputed per query — cache or materialize if it becomes hot.
CREATE VIEW account_balances AS
SELECT
    account_id,
    asset_code,
    SUM(amount) AS balance
FROM billing_ledger
GROUP BY account_id, asset_code;
|
|
||||||
|
|
||||||
-- =========================
-- Helpful Indexes
-- =========================

CREATE INDEX idx_services_default_currency ON services(default_currency);
CREATE INDEX idx_service_accepted_currencies_service ON service_accepted_currencies(service_id);
CREATE INDEX idx_provider_overrides_service ON provider_service_overrides(service_id);
CREATE INDEX idx_requests_account ON requests(account_id);
CREATE INDEX idx_requests_provider ON requests(provider_id);
CREATE INDEX idx_requests_service ON requests(service_id);
-- created_at included so the spend-window check (account + currency + period)
-- can be answered from the index alone.
CREATE INDEX idx_billing_account_currency ON billing_ledger(account_id, asset_code, created_at);
CREATE INDEX idx_subscriptions_account_active ON subscriptions(account_id) WHERE active;
|
|
||||||
@@ -1,266 +0,0 @@
|
|||||||
# Billing Logic — Whiteboard Version (for Devs)
|
|
||||||
|
|
||||||
## 1) Inputs You Always Need
|
|
||||||
|
|
||||||
* `account_id`, `subscription_id`
|
|
||||||
* `service_id` (or group → resolved to a service at dispatch)
|
|
||||||
* `provider_id`, `asset_code`
|
|
||||||
* `payload` (validated against service schema)
|
|
||||||
* (Optional) `runner_id`
|
|
||||||
* Idempotency key for the request (client-provided)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 2) Gatekeeping (Hard Checks)
|
|
||||||
|
|
||||||
1. **Subscription**
|
|
||||||
|
|
||||||
* Must be `active`.
|
|
||||||
* Must target **exactly one** of {service, group}.
|
|
||||||
* If group: ensure `service_id` is a member.
|
|
||||||
|
|
||||||
2. **Provider Allowlist**
|
|
||||||
|
|
||||||
* If `subscription_providers` exists → `provider_id` must be listed.
|
|
||||||
|
|
||||||
3. **Spend Limit** (if set)
|
|
||||||
|
|
||||||
* Compute window by `limit_period` (`hour`/`day`/`month`, UTC unless tenant TZ).
|
|
||||||
* Current period spend = `SUM(ledger.amount WHERE account & currency & period)`.
|
|
||||||
* `current_spend + estimated_charge ≤ limit_amount`.
|
|
||||||
|
|
||||||
4. **Max Duration** (effective; see §3):
|
|
||||||
|
|
||||||
* If billing mode is `per_second`, reject if requested/max exceeds effective cap.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 3) Effective Pricing (Single Resolution Function)
|
|
||||||
|
|
||||||
Inputs: `provider_id`, `service_id`, `asset_code`
|
|
||||||
|
|
||||||
Precedence:
|
|
||||||
|
|
||||||
1. `provider_service_overrides` for `(service_id, asset_code)`
|
|
||||||
2. `service_accepted_currencies` for `(service_id, asset_code)`
|
|
||||||
3. `services` defaults
|
|
||||||
|
|
||||||
Outputs:
|
|
||||||
|
|
||||||
* `effective_billing_mode ∈ {per_request, per_second}`
|
|
||||||
* `effective_price` (NUMERIC)
|
|
||||||
* `effective_max_request_seconds` (nullable)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 4) Request Lifecycle (States)
|
|
||||||
|
|
||||||
* `pending` → `running` → (`succeeded` | `failed` | `canceled`)
|
|
||||||
* Timestamps: set `started_at` on `running`, `ended_at` on terminal states.
|
|
||||||
* Enforce `ended_at ≥ started_at` and `duration ≤ effective_max_request_seconds` (if set).
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 5) Charging Rules
|
|
||||||
|
|
||||||
### A) Per Request
|
|
||||||
|
|
||||||
```
|
|
||||||
charge = effective_price
|
|
||||||
```
|
|
||||||
|
|
||||||
### B) Per Second
|
|
||||||
|
|
||||||
```
|
|
||||||
duration_seconds = ceil(extract(epoch from (ended_at - started_at)))
|
|
||||||
charge = duration_seconds * effective_price
|
|
||||||
```
|
|
||||||
|
|
||||||
* Cap with `effective_max_request_seconds` if present.
|
|
||||||
* If the request fails before `started_at` is ever set (it never started running): charge = 0.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 6) Idempotency & Atomicity
|
|
||||||
|
|
||||||
* **Idempotency key** per `(account_id, subscription_id, provider_id, service_id, request_external_id)`; store on `requests` and enforce unique index.
|
|
||||||
* **Single transaction** to:
|
|
||||||
|
|
||||||
1. finalize `REQUESTS` status + timestamps,
|
|
||||||
2. insert **one** debit entry into `billing_ledger`.
|
|
||||||
* Never mutate ledger entries; use compensating **credit** entries for adjustments/refunds.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 7) Spend-Limit Enforcement (Before Charging)
|
|
||||||
|
|
||||||
Pseudocode (SQL-ish):
|
|
||||||
|
|
||||||
```sql
|
|
||||||
-- "window" and "check" are reserved words in PostgreSQL and cannot be used as
-- bare CTE names; renamed.  tstzrange (not tsrange) matches timestamptz created_at.
WITH spend_window AS (
  SELECT tstzrange(period_start(:limit_period), period_end(:limit_period)) AS w
),
spent AS (
  SELECT COALESCE(SUM(amount), 0) AS total
  FROM billing_ledger, spend_window
  WHERE account_id = :account_id
    AND asset_code = :asset_code
    AND created_at <@ (SELECT w FROM spend_window)
),
limit_check AS (
  SELECT (spent.total + :estimated_charge) <= :limit_amount AS ok FROM spent
)
SELECT ok FROM limit_check;
|
|
||||||
```
|
|
||||||
|
|
||||||
* If not ok → reject before dispatch, or allow but **set hard cap** on max seconds and auto-stop at limit.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 8) Suggested DB Operations (Happy Path)
|
|
||||||
|
|
||||||
1. **Create request**
|
|
||||||
|
|
||||||
```sql
|
|
||||||
INSERT INTO requests (...)
|
|
||||||
VALUES (...)
|
|
||||||
ON CONFLICT (idempotency_key) DO NOTHING
|
|
||||||
RETURNING id;
|
|
||||||
```
|
|
||||||
|
|
||||||
2. **Start execution**
|
|
||||||
|
|
||||||
```sql
|
|
||||||
UPDATE requests
|
|
||||||
SET status='running', started_at=now()
|
|
||||||
WHERE id=:id AND status='pending';
|
|
||||||
```
|
|
||||||
|
|
||||||
3. **Finish & bill** (single transaction)
|
|
||||||
|
|
||||||
```sql
|
|
||||||
BEGIN;
|
|
||||||
|
|
||||||
-- lock for update to avoid double-billing
|
|
||||||
UPDATE requests
|
|
||||||
SET status=:final_status, ended_at=now()
|
|
||||||
WHERE id=:id AND status='running'
|
|
||||||
RETURNING started_at, ended_at;
|
|
||||||
|
|
||||||
-- compute charge in app (see §5), re-check spend window here
|
|
||||||
|
|
||||||
INSERT INTO billing_ledger (
|
|
||||||
account_id, provider_id, service_id, request_id,
|
|
||||||
amount, asset_code, entry_type, description
|
|
||||||
) VALUES (
|
|
||||||
:account_id, :provider_id, :service_id, :id,
|
|
||||||
:charge, :asset_code, 'debit', :desc
|
|
||||||
);
|
|
||||||
|
|
||||||
COMMIT;
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 9) Balances & Reporting
|
|
||||||
|
|
||||||
* **Current balance** = `SUM(billing_ledger.amount) GROUP BY account_id, asset_code`.
|
|
||||||
* Keep a **view** or **materialized view**; refresh asynchronously if needed.
|
|
||||||
* Never rely on cached balance for hard checks — re-check within the billing transaction if **prepaid** semantics are required.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 10) Error & Edge Rules
|
|
||||||
|
|
||||||
* If runner fails before `running` → no charge.
|
|
||||||
* If runner starts, then fails:
|
|
||||||
|
|
||||||
* **per\_second**: bill actual seconds (can be 0).
|
|
||||||
* **per\_request**: default is **no charge** unless policy says otherwise; if charging partials, document it.
|
|
||||||
* Partial refunds/adjustments → insert **negative** ledger entries (type `credit`/`adjustment`) tied to the original `request_id`.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 11) Minimal Pricing Resolver (Sketch)
|
|
||||||
|
|
||||||
```sql
|
|
||||||
WITH p AS (
  -- provider-level override (highest precedence)
  SELECT price_override AS price,
         billing_mode_override AS mode,
         max_request_seconds_override AS maxsec
  FROM provider_service_overrides
  WHERE provider_id = :pid AND service_id = :sid AND asset_code = :asset
  LIMIT 1
),
sac AS (
  -- per-currency service override
  SELECT price_override AS price,
         billing_mode_override AS mode
  FROM service_accepted_currencies
  WHERE service_id = :sid AND asset_code = :asset
  LIMIT 1
),
svc AS (
  -- service defaults (always one row for a valid :sid)
  SELECT default_price AS price,
         default_billing_mode AS mode,
         max_request_seconds AS maxsec
  FROM services WHERE id = :sid
)
SELECT
  COALESCE(p.price,  sac.price, svc.price) AS price,
  COALESCE(p.mode,   sac.mode,  svc.mode)  AS mode,
  COALESCE(p.maxsec, svc.maxsec)           AS max_seconds
FROM svc
LEFT JOIN p   ON TRUE   -- LEFT JOINs keep the row even when an override CTE is empty
LEFT JOIN sac ON TRUE;  -- (the original SELECT had no FROM clause and would not parse)
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 12) Mermaid — Decision Trees
|
|
||||||
|
|
||||||
### Pricing & Duration
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
flowchart TD
|
|
||||||
A[provider_id, service_id, asset_code] --> B{Provider override exists?}
|
|
||||||
B -- yes --> P[Use provider price/mode/max]
|
|
||||||
B -- no --> C{Service currency override?}
|
|
||||||
C -- yes --> S[Use service currency price/mode]
|
|
||||||
C -- no --> D[Use service defaults]
|
|
||||||
P --> OUT[effective price/mode/max]
|
|
||||||
S --> OUT
|
|
||||||
D --> OUT
|
|
||||||
```
|
|
||||||
|
|
||||||
### Spend Check & Charge
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
flowchart TD
|
|
||||||
S[Has subscription limit?] -->|No| D1[Dispatch]
|
|
||||||
S -->|Yes| C{current_spend + est_charge <= limit?}
|
|
||||||
C -->|No| REJ[Reject or cap duration]
|
|
||||||
C -->|Yes| D1[Dispatch]
|
|
||||||
D1 --> RUN[Run request]
|
|
||||||
RUN --> DONE[Finalize + insert ledger]
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 13) Security Posture
|
|
||||||
|
|
||||||
* Store **hash of subscription secret**; compare hash on use.
|
|
||||||
* Sign client requests with **account pubkey**; verify before dispatch.
|
|
||||||
* Limit **request schema** to validated fields; reject unknowns.
|
|
||||||
* Enforce **IPv6** for runners where required.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 14) What To Implement First
|
|
||||||
|
|
||||||
1. Pricing resolver (single function).
|
|
||||||
2. Spend-window checker (single query).
|
|
||||||
3. Request lifecycle + idempotency.
|
|
||||||
4. Ledger write (append-only) + balances view.
|
|
||||||
|
|
||||||
Everything else layers on top.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
If you want, I can turn this into a small **README.md** with code blocks you can paste into the repo (plus a couple of SQL functions and example tests).
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user