23 Commits

Author SHA1 Message Date
Timur Gordon
53e9a2d4f0 update grid4 & heroledger models 2025-09-16 14:18:08 +02:00
cb1fb0f0ec Merge pull request 'main' (#13) from main into development
Reviewed-on: #13
2025-08-28 14:05:57 +00:00
97c24d146b ... 2025-08-27 18:52:11 +02:00
6bff52e8b7 ... 2025-08-27 14:44:39 +02:00
7a999b7b6e Merge branch 'main' of git.ourworld.tf:herocode/db 2025-08-21 17:26:42 +02:00
095a4d0c69 ... 2025-08-21 17:26:40 +02:00
Timur Gordon
a7c978efd4 use git paths for deps instead 2025-08-21 14:35:07 +02:00
Timur Gordon
0b0d546b4e remove old cargo lock 2025-08-21 14:27:52 +02:00
Timur Gordon
2f5e18df98 reexport heroledger 2025-08-21 14:20:25 +02:00
Timur Gordon
77169c073c clean old deps 2025-08-21 14:17:37 +02:00
Timur Gordon
ce12f26a91 cargo fix & fix heroledger 2025-08-21 14:15:29 +02:00
Timur Gordon
130822b69b Merge branch 'development' of https://git.ourworld.tf/herocode/db into development 2025-08-21 14:06:40 +02:00
Timur Gordon
7439980b33 Merge branch 'main' into development 2025-08-21 14:05:57 +02:00
Timur Gordon
cedea2f305 move rhai wrappers of models from rhailib 2025-08-21 14:05:01 +02:00
Timur Gordon
58ed59cd12 Merge branch 'main' of https://git.ourworld.tf/herocode/db 2025-08-08 09:46:38 +02:00
Timur Gordon
6727c7498d add heroledger models 2025-08-08 09:46:30 +02:00
fc7e327f07 ... 2025-08-08 09:42:47 +02:00
993fa2adcd ... 2025-08-08 08:53:49 +02:00
33d7eafeac Merge branch 'main' of git.ourworld.tf:herocode/db 2025-08-06 13:44:23 +02:00
05259db53d ... 2025-08-06 13:44:21 +02:00
Maxime Van Hees
0cffda37a7 fixed dependencies issues 2025-08-05 13:00:09 +02:00
Timur Gordon
1a62fcacdd add heroledger models 2025-08-05 12:53:24 +02:00
7d9a6906c6 ... 2025-08-05 11:00:20 +02:00
236 changed files with 11519 additions and 11480 deletions

View File

@@ -17,6 +17,17 @@ version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
[[package]]
name = "ahash"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
dependencies = [
"getrandom 0.2.16",
"once_cell",
"version_check",
]
[[package]]
name = "ahash"
version = "0.8.12"
@@ -54,13 +65,13 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "async-trait"
version = "0.1.88"
version = "0.1.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5"
checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -112,9 +123,21 @@ dependencies = [
[[package]]
name = "bitflags"
version = "2.9.1"
version = "2.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29"
[[package]]
name = "bitvec"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
"funty",
"radium",
"tap",
"wyz",
]
[[package]]
name = "block-buffer"
@@ -125,12 +148,57 @@ dependencies = [
"generic-array",
]
[[package]]
name = "borsh"
version = "1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce"
dependencies = [
"borsh-derive",
"cfg_aliases",
]
[[package]]
name = "borsh-derive"
version = "1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3"
dependencies = [
"once_cell",
"proc-macro-crate",
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "bumpalo"
version = "3.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
[[package]]
name = "bytecheck"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2"
dependencies = [
"bytecheck_derive",
"ptr_meta",
"simdutf8",
]
[[package]]
name = "bytecheck_derive"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "byteorder"
version = "1.5.0"
@@ -145,18 +213,24 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
[[package]]
name = "cc"
version = "1.2.31"
version = "1.2.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2"
checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f"
dependencies = [
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.1"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "chrono"
@@ -268,6 +342,12 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8eb564c5c7423d25c886fb561d1e4ee69f72354d16918afa32c08811f6b6a55"
[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "futures-channel"
version = "0.3.31"
@@ -292,7 +372,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -363,9 +443,18 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "hashbrown"
version = "0.15.4"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash 0.7.8",
]
[[package]]
name = "hashbrown"
version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
[[package]]
name = "heck"
@@ -387,6 +476,8 @@ dependencies = [
"r2d2",
"r2d2_postgres",
"rhai",
"rhailib-macros",
"rust_decimal",
"serde",
"serde_json",
"strum",
@@ -400,9 +491,12 @@ dependencies = [
name = "heromodels-derive"
version = "0.1.0"
dependencies = [
"heromodels_core",
"proc-macro2",
"quote",
"syn",
"serde",
"serde_json",
"syn 2.0.106",
]
[[package]]
@@ -453,7 +547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
dependencies = [
"equivalent",
"hashbrown",
"hashbrown 0.15.5",
]
[[package]]
@@ -505,7 +599,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -535,9 +629,9 @@ dependencies = [
[[package]]
name = "jsonb"
version = "0.5.3"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96cbb4fba292867a2d86ed83dbe5f9d036f423bf6a491b7d884058b2fde42fcd"
checksum = "a452366d21e8d3cbca680c41388e01d6a88739afef7877961946a6da409f9ccd"
dependencies = [
"byteorder",
"ethnum",
@@ -555,9 +649,20 @@ dependencies = [
[[package]]
name = "libc"
version = "0.2.174"
version = "0.2.175"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
[[package]]
name = "libredox"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3"
dependencies = [
"bitflags",
"libc",
"redox_syscall",
]
[[package]]
name = "lock_api"
@@ -668,6 +773,7 @@ dependencies = [
[[package]]
name = "ourdb"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16"
dependencies = [
"crc32fast",
"log",
@@ -700,9 +806,9 @@ dependencies = [
[[package]]
name = "percent-encoding"
version = "2.3.1"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "phf"
@@ -804,14 +910,43 @@ dependencies = [
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
name = "proc-macro-crate"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
dependencies = [
"toml_edit",
]
[[package]]
name = "proc-macro2"
version = "1.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
dependencies = [
"unicode-ident",
]
[[package]]
name = "ptr_meta"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1"
dependencies = [
"ptr_meta_derive",
]
[[package]]
name = "ptr_meta_derive"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "quote"
version = "1.0.40"
@@ -848,6 +983,12 @@ dependencies = [
"r2d2",
]
[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
[[package]]
name = "rand"
version = "0.8.5"
@@ -916,13 +1057,22 @@ dependencies = [
"bitflags",
]
[[package]]
name = "rend"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c"
dependencies = [
"bytecheck",
]
[[package]]
name = "rhai"
version = "1.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2780e813b755850e50b178931aaf94ed24f6817f46aaaf5d21c13c12d939a249"
dependencies = [
"ahash",
"ahash 0.8.12",
"bitflags",
"instant",
"no-std-compat",
@@ -943,7 +1093,45 @@ checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
name = "rhailib-macros"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16"
dependencies = [
"rhai",
"serde",
]
[[package]]
name = "rkyv"
version = "0.7.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b"
dependencies = [
"bitvec",
"bytecheck",
"bytes",
"hashbrown 0.12.3",
"ptr_meta",
"rend",
"rkyv_derive",
"seahash",
"tinyvec",
"uuid",
]
[[package]]
name = "rkyv_derive"
version = "0.7.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
@@ -953,7 +1141,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b203a6425500a03e0919c42d3c47caca51e79f1132046626d2c8871c5092035d"
dependencies = [
"arrayvec",
"borsh",
"bytes",
"num-traits",
"rand 0.8.5",
"rkyv",
"serde",
"serde_json",
]
[[package]]
@@ -964,9 +1158,9 @@ checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
[[package]]
name = "rustversion"
version = "1.0.21"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "ryu"
@@ -989,6 +1183,12 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "seahash"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
[[package]]
name = "serde"
version = "1.0.219"
@@ -1006,14 +1206,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
name = "serde_json"
version = "1.0.142"
version = "1.0.143"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
dependencies = [
"indexmap",
"itoa",
@@ -1048,6 +1248,12 @@ dependencies = [
"libc",
]
[[package]]
name = "simdutf8"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
[[package]]
name = "siphasher"
version = "1.0.1"
@@ -1056,9 +1262,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
[[package]]
name = "slab"
version = "0.4.10"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d"
checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
[[package]]
name = "smallvec"
@@ -1136,7 +1342,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -1147,15 +1353,32 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
version = "2.0.104"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "thin-vec"
version = "0.2.14"
@@ -1179,7 +1402,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -1193,9 +1416,9 @@ dependencies = [
[[package]]
name = "tinyvec"
version = "1.9.0"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71"
checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa"
dependencies = [
"tinyvec_macros",
]
@@ -1234,7 +1457,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -1276,9 +1499,27 @@ dependencies = [
"tokio",
]
[[package]]
name = "toml_datetime"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
[[package]]
name = "toml_edit"
version = "0.22.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
dependencies = [
"indexmap",
"toml_datetime",
"winnow",
]
[[package]]
name = "tst"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16"
dependencies = [
"ourdb",
"thiserror",
@@ -1325,9 +1566,9 @@ checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae"
[[package]]
name = "uuid"
version = "1.17.0"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be"
dependencies = [
"getrandom 0.3.3",
"js-sys",
@@ -1389,7 +1630,7 @@ dependencies = [
"log",
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
"wasm-bindgen-shared",
]
@@ -1411,7 +1652,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -1437,11 +1678,11 @@ dependencies = [
[[package]]
name = "whoami"
version = "1.6.0"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7"
checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d"
dependencies = [
"redox_syscall",
"libredox",
"wasite",
"web-sys",
]
@@ -1467,7 +1708,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -1478,7 +1719,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]
[[package]]
@@ -1587,6 +1828,15 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "winnow"
version = "0.7.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95"
dependencies = [
"memchr",
]
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
@@ -1596,6 +1846,15 @@ dependencies = [
"bitflags",
]
[[package]]
name = "wyz"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
dependencies = [
"tap",
]
[[package]]
name = "zerocopy"
version = "0.8.26"
@@ -1613,5 +1872,5 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.106",
]

6
Cargo.toml Normal file
View File

@@ -0,0 +1,6 @@
# Cargo workspace root: builds the three model crates together so they
# share one lockfile and target directory.
[workspace]
members = [
"heromodels",
"heromodels_core",
"heromodels-derive",
]

246
do.sql Normal file
View File

@@ -0,0 +1,246 @@
-- --------------------------------------------------------------
-- do.sql — create tables for HeroLedger models (PostgreSQL)
-- --------------------------------------------------------------
-- Pattern used throughout: each table carries typed columns for the
-- fields that are indexed or queried directly, plus a `data` JSONB
-- column — presumably the full serialized model object; confirm
-- against the Rust model definitions.
BEGIN;
-- 1. DNSZONE
CREATE TABLE dnszone (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
domain TEXT, -- @[index]
administrators INTEGER[], -- array of user ids
status TEXT,
metadata JSONB,
soarecord JSONB, -- store array of SOARecord structs as JSONB
data JSONB NOT NULL
);
CREATE INDEX idx_dnszone_domain ON dnszone(domain);
-- 2. DNSRECORD
CREATE TABLE dnsrecord (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
subdomain TEXT,
record_type TEXT,
value TEXT,
priority INTEGER,
ttl INTEGER,
is_active BOOLEAN,
cat TEXT,
is_wildcard BOOLEAN,
data JSONB NOT NULL
);
-- No explicit index required; this table is rarely queried on its own
-- 3. GROUP ("group" is a reserved word, hence the quoted identifier)
CREATE TABLE "group" (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
name TEXT NOT NULL,
description TEXT,
dnsrecords INTEGER[], -- FK → dnsrecord.id (array)
administrators INTEGER[],
config JSONB, -- embedded GroupConfig struct
status TEXT,
visibility TEXT,
created_ts BIGINT,
updated_ts BIGINT,
data JSONB NOT NULL
);
CREATE UNIQUE INDEX idx_group_name ON "group"(name);
-- 4. USER_GROUP_MEMBERSHIP
CREATE TABLE user_group_membership (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
user_id INTEGER NOT NULL,
group_ids INTEGER[], -- array of group ids
data JSONB NOT NULL
);
CREATE INDEX idx_ugm_user_id ON user_group_membership(user_id);
-- GIN index enables fast "is member of group X" containment queries
CREATE INDEX idx_ugm_group_ids ON user_group_membership USING GIN (group_ids);
-- 5. MEMBER (circle/member.v)
CREATE TABLE member (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
user_id INTEGER NOT NULL,
role TEXT,
status TEXT,
joined_at BIGINT,
invited_by INTEGER,
permissions TEXT[],
data JSONB NOT NULL
);
CREATE INDEX idx_member_user_id ON member(user_id);
-- 6. ACCOUNT
CREATE TABLE account (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
owner_id INTEGER,
address TEXT NOT NULL,
balance DOUBLE PRECISION,
currency TEXT,
assetid INTEGER,
last_activity BIGINT,
administrators INTEGER[],
accountpolicy INTEGER,
data JSONB NOT NULL
);
CREATE UNIQUE INDEX idx_account_address ON account(address);
CREATE INDEX idx_account_assetid ON account(assetid);
-- 7. ASSET
CREATE TABLE asset (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
address TEXT NOT NULL,
assetid INTEGER NOT NULL,
asset_type TEXT,
issuer INTEGER,
supply DOUBLE PRECISION,
decimals SMALLINT,
is_frozen BOOLEAN,
metadata JSONB,
administrators INTEGER[],
min_signatures INTEGER,
data JSONB NOT NULL
);
CREATE UNIQUE INDEX idx_asset_address ON asset(address);
CREATE UNIQUE INDEX idx_asset_assetid ON asset(assetid);
CREATE INDEX idx_asset_issuer ON asset(issuer);
-- 8. ACCOUNT_POLICY (holds the AccountPolicyItem structs as JSONB blobs)
CREATE TABLE account_policy (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
transferpolicy JSONB,
adminpolicy JSONB,
clawbackpolicy JSONB,
freezepolicy JSONB,
data JSONB NOT NULL
);
-- 9. ACCOUNT_POLICY_ITEM (standalone if you ever need a table)
-- (optional: it is stored as JSONB inside account_policy, so no table is created)
-- 10. TRANSACTION
CREATE TABLE transaction (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
txid INTEGER NOT NULL,
source INTEGER,
destination INTEGER,
assetid INTEGER,
amount DOUBLE PRECISION,
timestamp BIGINT,
status TEXT,
memo TEXT,
tx_type TEXT,
signatures JSONB, -- array of Signature JSON objects
data JSONB NOT NULL
);
CREATE UNIQUE INDEX idx_transaction_txid ON transaction(txid);
CREATE INDEX idx_transaction_source ON transaction(source);
CREATE INDEX idx_transaction_destination ON transaction(destination);
CREATE INDEX idx_transaction_assetid ON transaction(assetid);
-- 11. SIGNATURE
CREATE TABLE signature (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
signature_id INTEGER NOT NULL,
user_id INTEGER NOT NULL,
value TEXT,
objectid INTEGER,
objecttype TEXT,
status TEXT,
timestamp BIGINT,
data JSONB NOT NULL
);
CREATE INDEX idx_signature_signature_id ON signature(signature_id);
CREATE INDEX idx_signature_user_id ON signature(user_id);
CREATE INDEX idx_signature_objectid ON signature(objectid);
-- 12. USER_KVS
CREATE TABLE user_kvs (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
userid INTEGER NOT NULL,
name TEXT,
data JSONB NOT NULL
);
CREATE INDEX idx_userkvs_userid ON user_kvs(userid);
-- 13. USER_KVS_ITEM
CREATE TABLE user_kvs_item (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
userkvs_id INTEGER NOT NULL,
key TEXT NOT NULL,
value TEXT,
secretbox JSONB,
timestamp BIGINT,
data JSONB NOT NULL
);
CREATE INDEX idx_userkvs_item_userkvs_id ON user_kvs_item(userkvs_id);
CREATE INDEX idx_userkvs_item_key ON user_kvs_item(key);
-- 14. USER ("user" is a reserved word, hence the quoted identifier)
CREATE TABLE "user" (
id SERIAL PRIMARY KEY,
created BIGINT,
updated BIGINT,
deleted BOOLEAN,
version INTEGER,
username TEXT NOT NULL,
pubkey TEXT NOT NULL,
email TEXT[] NOT NULL,
status TEXT,
userprofile JSONB,
kyc JSONB,
data JSONB NOT NULL
);
CREATE UNIQUE INDEX idx_user_username ON "user"(username);
CREATE UNIQUE INDEX idx_user_pubkey ON "user"(pubkey);
-- Email array index: use GIN for fast containment queries
CREATE INDEX idx_user_email ON "user" USING GIN (email);
COMMIT;

View File

@@ -14,4 +14,6 @@ quote = "1.0"
proc-macro2 = "1.0"
[dev-dependencies]
serde = { version = "1.0", features = ["derive"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
heromodels_core = { path = "../heromodels_core" }

View File

@@ -1,6 +1,6 @@
use proc_macro::TokenStream;
use quote::{format_ident, quote};
use syn::{Data, DeriveInput, Fields, parse_macro_input};
use syn::{parse_macro_input, Data, DeriveInput, Fields, Lit, Meta, MetaList, MetaNameValue};
/// Convert a string to snake_case
fn to_snake_case(s: &str) -> String {
@@ -47,86 +47,165 @@ pub fn model(_attr: TokenStream, item: TokenStream) -> TokenStream {
let db_prefix = to_snake_case(&name_str);
// Extract fields with #[index] attribute
let mut indexed_fields = Vec::new();
let mut custom_index_names = std::collections::HashMap::new();
// Supports both top-level (no args) and nested path-based indexes declared on a field
#[derive(Clone)]
enum IndexDecl {
TopLevel {
field_ident: syn::Ident,
field_ty: syn::Type,
},
NestedPath {
on_field_ident: syn::Ident,
path: String, // dotted path relative to the field
},
}
let mut index_decls: Vec<IndexDecl> = Vec::new();
if let Data::Struct(ref mut data_struct) = input.data {
if let Fields::Named(ref mut fields_named) = data_struct.fields {
for field in &mut fields_named.named {
let mut attr_idx = None;
let mut to_remove: Vec<usize> = Vec::new();
for (i, attr) in field.attrs.iter().enumerate() {
if attr.path().is_ident("index") {
attr_idx = Some(i);
if let Some(ref field_name) = field.ident {
// Check if the attribute has parameters
let mut custom_name = None;
if !attr.path().is_ident("index") {
continue;
}
to_remove.push(i);
// Parse attribute arguments if any
let meta = attr.meta.clone();
if let syn::Meta::List(list) = meta {
if let Ok(nested) = list.parse_args_with(syn::punctuated::Punctuated::<syn::Meta, syn::Token![,]>::parse_terminated) {
for meta in nested {
if let syn::Meta::NameValue(name_value) = meta {
if name_value.path.is_ident("name") {
if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Str(lit_str), .. }) = name_value.value {
custom_name = Some(lit_str.value());
}
if let Some(ref field_name) = field.ident {
match &attr.meta {
Meta::Path(_) => {
// Simple top-level index on this field
index_decls.push(IndexDecl::TopLevel {
field_ident: field_name.clone(),
field_ty: field.ty.clone(),
});
}
Meta::List(MetaList { .. }) => {
// Parse for path = "..."; name is assumed equal to path
// We support syntax: #[index(path = "a.b.c")]
if let Ok(nested) = attr.parse_args_with(
syn::punctuated::Punctuated::<Meta, syn::Token![,]>::parse_terminated,
) {
for meta in nested {
if let Meta::NameValue(MetaNameValue { path, value, .. }) = meta {
if path.is_ident("path") {
if let syn::Expr::Lit(syn::ExprLit { lit: Lit::Str(lit_str), .. }) = value {
let p = lit_str.value();
index_decls.push(IndexDecl::NestedPath {
on_field_ident: field_name.clone(),
path: p,
});
}
}
}
}
}
}
indexed_fields.push((field_name.clone(), field.ty.clone()));
if let Some(name) = custom_name {
custom_index_names.insert(field_name.to_string(), name);
}
_ => {}
}
}
}
if let Some(idx) = attr_idx {
// remove all #[index] attributes we processed
// remove from the back to keep indices valid
to_remove.sort_unstable();
to_remove.drain(..).rev().for_each(|idx| {
field.attrs.remove(idx);
}
});
}
}
}
// Generate Model trait implementation
let db_keys_impl = if indexed_fields.is_empty() {
let db_keys_impl = if index_decls.is_empty() {
quote! {
fn db_keys(&self) -> Vec<heromodels_core::IndexKey> {
Vec::new()
}
}
} else {
let field_keys = indexed_fields.iter().map(|(field_name, _)| {
let name_str = custom_index_names
.get(&field_name.to_string())
.cloned()
.unwrap_or(field_name.to_string());
quote! {
heromodels_core::IndexKey {
name: #name_str,
value: self.#field_name.to_string(),
// Build code for keys from each index declaration
let mut key_snippets: Vec<proc_macro2::TokenStream> = Vec::new();
for decl in &index_decls {
match decl.clone() {
IndexDecl::TopLevel { field_ident, .. } => {
let name_str = field_ident.to_string();
key_snippets.push(quote! {
keys.push(heromodels_core::IndexKey {
name: #name_str,
value: self.#field_ident.to_string(),
});
});
}
IndexDecl::NestedPath { on_field_ident, path } => {
// Name is equal to provided path
let name_str = path.clone();
// Generate traversal code using serde_json to support arrays and objects generically
// Split the path into static segs for iteration
let segs: Vec<String> = path.split('.').map(|s| s.to_string()).collect();
let segs_iter = segs.iter().map(|s| s.as_str());
let segs_array = quote! { [ #( #segs_iter ),* ] };
key_snippets.push(quote! {
// Serialize the target field to JSON for generic traversal
let __hm_json_val = ::serde_json::to_value(&self.#on_field_ident).unwrap_or(::serde_json::Value::Null);
let mut __hm_stack: Vec<&::serde_json::Value> = vec![&__hm_json_val];
for __hm_seg in #segs_array.iter() {
let mut __hm_next: Vec<&::serde_json::Value> = Vec::new();
for __hm_v in &__hm_stack {
match __hm_v {
::serde_json::Value::Array(arr) => {
for __hm_e in arr {
if let ::serde_json::Value::Object(map) = __hm_e {
if let Some(x) = map.get(*__hm_seg) { __hm_next.push(x); }
}
}
}
::serde_json::Value::Object(map) => {
if let Some(x) = map.get(*__hm_seg) { __hm_next.push(x); }
}
_ => {}
}
}
__hm_stack = __hm_next;
if __hm_stack.is_empty() { break; }
}
for __hm_leaf in __hm_stack {
match __hm_leaf {
::serde_json::Value::Null => {},
::serde_json::Value::Array(_) => {},
::serde_json::Value::Object(_) => {},
other => {
// Convert primitives to string without surrounding quotes for strings
let mut s = other.to_string();
if let ::serde_json::Value::String(_) = other { s = s.trim_matches('"').to_string(); }
keys.push(heromodels_core::IndexKey { name: #name_str, value: s });
}
}
}
});
}
}
});
}
quote! {
fn db_keys(&self) -> Vec<heromodels_core::IndexKey> {
vec![
#(#field_keys),*
]
let mut keys: Vec<heromodels_core::IndexKey> = Vec::new();
#(#key_snippets)*
keys
}
}
};
let indexed_field_names = indexed_fields
let indexed_field_names: Vec<String> = index_decls
.iter()
.map(|f| f.0.to_string())
.collect::<Vec<_>>();
.map(|d| match d {
IndexDecl::TopLevel { field_ident, .. } => field_ident.to_string(),
IndexDecl::NestedPath { path, .. } => path.clone(),
})
.collect();
let model_impl = quote! {
impl heromodels_core::Model for #struct_name {
@@ -152,51 +231,33 @@ pub fn model(_attr: TokenStream, item: TokenStream) -> TokenStream {
}
};
// Generate Index trait implementations
// Generate Index trait implementations only for top-level fields, keep existing behavior
let mut index_impls = proc_macro2::TokenStream::new();
for decl in &index_decls {
if let IndexDecl::TopLevel { field_ident, field_ty } = decl {
let name_str = field_ident.to_string();
let index_struct_name = format_ident!("{}", &name_str);
let field_type = field_ty.clone();
for (field_name, field_type) in &indexed_fields {
let name_str = field_name.to_string();
let index_impl = quote! {
pub struct #index_struct_name;
// Get custom index name if specified, otherwise use field name
let index_key = match custom_index_names.get(&name_str) {
Some(custom_name) => custom_name.clone(),
None => name_str.clone(),
};
impl heromodels_core::Index for #index_struct_name {
type Model = super::#struct_name;
type Key = #field_type;
// Convert field name to PascalCase for struct name
// let struct_name_str = to_pascal_case(&name_str);
// let index_struct_name = format_ident!("{}", struct_name_str);
let index_struct_name = format_ident!("{}", &name_str);
fn key() -> &'static str { #name_str }
// Default to str for key type
let index_impl = quote! {
pub struct #index_struct_name;
impl heromodels_core::Index for #index_struct_name {
type Model = super::#struct_name;
type Key = #field_type;
fn key() -> &'static str {
#index_key
fn field_name() -> &'static str { #name_str }
}
fn field_name() -> &'static str {
#name_str
}
}
};
index_impls.extend(index_impl);
};
index_impls.extend(index_impl);
}
}
if !index_impls.is_empty() {
let index_mod_name = format_ident!("{}_index", db_prefix);
index_impls = quote! {
pub mod #index_mod_name {
#index_impls
}
}
index_impls = quote! { pub mod #index_mod_name { #index_impls } };
}
// Combine the original struct with the generated implementations

View File

@@ -1,7 +1,38 @@
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
// Define the necessary structs and traits for testing
// Make the current crate visible as an extern crate named `heromodels_core`
extern crate self as heromodels_core;
extern crate serde_json; // ensure ::serde_json path resolves
// Mock the heromodels_core API at crate root (visible via the alias above)
/// A single `(name, value)` index entry, as produced by `Model::db_keys`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IndexKey {
// Static index name: a field name or a nested-path label (e.g. "cpu.cpu_brand").
pub name: &'static str,
// Stringified field value at the time the keys were computed.
pub value: String,
}
/// Contract implemented by every persistable model in this mock of the
/// `heromodels_core` API used by the derive-macro tests.
pub trait Model: std::fmt::Debug + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static {
/// Collection prefix used to namespace this model's records in the DB.
fn db_prefix() -> &'static str
where
Self: Sized;
/// Returns the model's numeric primary key.
fn get_id(&self) -> u32;
/// Mutable access to the embedded base data (id, timestamps, comments).
fn base_data_mut(&mut self) -> &mut BaseModelData;
/// Index keys derived from the current field values; defaults to none.
fn db_keys(&self) -> Vec<IndexKey> {
Vec::new()
}
/// Names of the indexed fields/paths; defaults to none.
fn indexed_fields() -> Vec<&'static str> {
Vec::new()
}
}
/// Typed handle describing one index that the derive macro generates for a model.
pub trait Index {
/// The model this index belongs to.
type Model: Model;
/// Key type stored in the index; `?Sized` permits `str`.
type Key: ToString + ?Sized;
/// Index name as stored in the DB (may be a custom name).
fn key() -> &'static str;
/// Name of the model field the index was derived from.
fn field_name() -> &'static str;
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BaseModelData {
pub id: u32,
@@ -11,41 +42,18 @@ pub struct BaseModelData {
}
impl BaseModelData {
pub fn new(id: u32) -> Self {
let now = 1000; // Mock timestamp
Self {
id,
created_at: now,
modified_at: now,
comments: Vec::new(),
}
pub fn new() -> Self {
let now = 1000;
Self { id: 0, created_at: now, modified_at: now, comments: Vec::new() }
}
pub fn update_modified(&mut self) { self.modified_at += 1; }
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IndexKey {
pub name: &'static str,
pub value: String,
}
pub trait Model: std::fmt::Debug + Clone {
fn db_prefix() -> &'static str;
fn get_id(&self) -> u32;
fn base_data_mut(&mut self) -> &mut BaseModelData;
fn db_keys(&self) -> Vec<IndexKey>;
}
pub trait Index {
type Model: Model;
type Key: ?Sized;
fn key() -> &'static str;
}
// Test struct using the model macro
// Top-level field index tests
#[derive(Debug, Clone, Serialize, Deserialize)]
#[model]
struct TestUser {
base_data: BaseModelData,
pub struct TestUser {
base_data: heromodels_core::BaseModelData,
#[index]
username: String,
@@ -54,25 +62,12 @@ struct TestUser {
is_active: bool,
}
// Test struct with custom index name
#[derive(Debug, Clone, Serialize, Deserialize)]
#[model]
struct TestUserWithCustomIndex {
base_data: BaseModelData,
#[index(name = "custom_username")]
username: String,
#[index]
is_active: bool,
}
#[test]
fn test_basic_model() {
assert_eq!(TestUser::db_prefix(), "test_user");
let user = TestUser {
base_data: BaseModelData::new(1),
base_data: heromodels_core::BaseModelData::new(),
username: "test".to_string(),
is_active: true,
};
@@ -85,22 +80,47 @@ fn test_basic_model() {
assert_eq!(keys[1].value, "true");
}
// Nested path index tests (including vector traversal)
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct GPU { gpu_brand: String }
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct CPU { cpu_brand: String }
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct DeviceInfo { vendor: String, cpu: Vec<CPU>, gpu: Vec<GPU> }
#[derive(Debug, Clone, Serialize, Deserialize)]
#[model]
pub struct NodeLike {
base_data: heromodels_core::BaseModelData,
#[index(path = "vendor")]
#[index(path = "cpu.cpu_brand")]
#[index(path = "gpu.gpu_brand")]
devices: DeviceInfo,
}
#[test]
fn test_custom_index_name() {
let user = TestUserWithCustomIndex {
base_data: BaseModelData::new(1),
username: "test".to_string(),
is_active: true,
fn test_nested_indexes() {
let n = NodeLike {
base_data: heromodels_core::BaseModelData::new(),
devices: DeviceInfo {
vendor: "SuperVendor".to_string(),
cpu: vec![CPU { cpu_brand: "Intel".into() }, CPU { cpu_brand: "AMD".into() }],
gpu: vec![GPU { gpu_brand: "NVIDIA".into() }, GPU { gpu_brand: "AMD".into() }],
},
};
// Check that the Username struct uses the custom index name
assert_eq!(Username::key(), "custom_username");
let mut keys = n.db_keys();
// Sort for deterministic assertions
keys.sort_by(|a,b| a.name.cmp(b.name).then(a.value.cmp(&b.value)));
// Check that the db_keys method returns the correct keys
let keys = user.db_keys();
assert_eq!(keys.len(), 2);
assert_eq!(keys[0].name, "custom_username");
assert_eq!(keys[0].value, "test");
assert_eq!(keys[1].name, "is_active");
assert_eq!(keys[1].value, "true");
// Expect 1 (vendor) + 2 (cpu brands) + 2 (gpu brands) = 5 keys
assert_eq!(keys.len(), 5);
assert!(keys.iter().any(|k| k.name == "vendor" && k.value == "SuperVendor"));
assert!(keys.iter().any(|k| k.name == "cpu.cpu_brand" && k.value == "Intel"));
assert!(keys.iter().any(|k| k.name == "cpu.cpu_brand" && k.value == "AMD"));
assert!(keys.iter().any(|k| k.name == "gpu.gpu_brand" && k.value == "NVIDIA"));
assert!(keys.iter().any(|k| k.name == "gpu.gpu_brand" && k.value == "AMD"));
}

View File

@@ -10,16 +10,18 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bincode = { version = "2", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
ourdb = { path = "../ourdb" }
tst = { path = "../tst" }
ourdb = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "ourdb" }
tst = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "tst" }
heromodels-derive = { path = "../heromodels-derive" }
heromodels_core = { path = "../heromodels_core" }
rhailib-macros = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "rhailib-macros" }
rhai = { version = "1.21.0", features = [
"std",
"sync",
"decimal",
"internals",
] } # Added "decimal" feature, sync for Arc<Mutex<>>
rust_decimal = { version = "1.36", features = ["serde"] }
strum = "0.26"
strum_macros = "0.26"
uuid = { version = "1.17.0", features = ["v4"] }
@@ -51,11 +53,19 @@ path = "examples/finance_example/main.rs"
name = "flow_example"
path = "examples/flow_example.rs"
[[example]]
name = "biz_rhai"
path = "examples/biz_rhai/example.rs"
required-features = ["rhai"]
# [[example]]
# name = "biz_rhai"
# path = "examples/biz_rhai/example.rs"
# required-features = ["rhai"]
[[example]]
name = "postgres_model_example"
path = "examples/postgres_example/example.rs"
[[example]]
name = "heroledger_example"
path = "examples/heroledger_example/example.rs"
[[example]]
name = "grid4_example"
path = "examples/grid4_example/example.rs"

View File

@@ -1,318 +0,0 @@
# Payment Model Usage Guide
This document provides comprehensive instructions for AI assistants on how to use the Payment model in the heromodels repository.
## Overview
The Payment model represents a payment transaction in the system, typically associated with company registration or subscription payments. It integrates with Stripe for payment processing and maintains comprehensive status tracking.
## Model Structure
```rust
pub struct Payment {
pub base_data: BaseModelData, // Auto-managed ID, timestamps, comments
pub payment_intent_id: String, // Stripe payment intent ID
pub company_id: u32, // Foreign key to Company
pub payment_plan: String, // "monthly", "yearly", "two_year"
pub setup_fee: f64, // One-time setup fee
pub monthly_fee: f64, // Recurring monthly fee
pub total_amount: f64, // Total amount paid
pub currency: String, // Currency code (defaults to "usd")
pub status: PaymentStatus, // Current payment status
pub stripe_customer_id: Option<String>, // Stripe customer ID (set on completion)
pub created_at: i64, // Payment creation timestamp
pub completed_at: Option<i64>, // Payment completion timestamp
}
pub enum PaymentStatus {
Pending, // Initial state - payment created but not processed
Processing, // Payment is being processed by Stripe
Completed, // Payment successfully completed
Failed, // Payment processing failed
Refunded, // Payment was refunded
}
```
## Basic Usage
### 1. Creating a New Payment
```rust
use heromodels::models::biz::{Payment, PaymentStatus};
// Create a new payment with required fields
let payment = Payment::new(
"pi_1234567890".to_string(), // Stripe payment intent ID
company_id, // Company ID from database
"monthly".to_string(), // Payment plan
100.0, // Setup fee
49.99, // Monthly fee
149.99, // Total amount
);
// Payment defaults:
// - status: PaymentStatus::Pending
// - currency: "usd"
// - stripe_customer_id: None
// - created_at: current timestamp
// - completed_at: None
```
### 2. Using Builder Pattern
```rust
let payment = Payment::new(
"pi_1234567890".to_string(),
company_id,
"yearly".to_string(),
500.0,
99.99,
1699.88,
)
.currency("eur".to_string())
.stripe_customer_id(Some("cus_existing_customer".to_string()));
```
### 3. Database Operations
```rust
use heromodels::db::Collection;
// Save payment to database
let db = get_db()?;
let (payment_id, saved_payment) = db.set(&payment)?;
// Retrieve payment by ID
let retrieved_payment: Payment = db.get_by_id(payment_id)?.unwrap();
// Update payment
let updated_payment = saved_payment.complete_payment(Some("cus_new_customer".to_string()));
let (_, final_payment) = db.set(&updated_payment)?;
```
## Payment Status Management
### Status Transitions
```rust
// 1. Start with Pending status (default)
let payment = Payment::new(/* ... */);
assert!(payment.is_pending());
// 2. Mark as processing when Stripe starts processing
let processing_payment = payment.process_payment();
assert!(processing_payment.is_processing());
// 3. Complete payment when Stripe confirms success
let completed_payment = processing_payment.complete_payment(Some("cus_123".to_string()));
assert!(completed_payment.is_completed());
assert!(completed_payment.completed_at.is_some());
// 4. Handle failure if payment fails
let failed_payment = processing_payment.fail_payment();
assert!(failed_payment.has_failed());
// 5. Refund if needed
let refunded_payment = completed_payment.refund_payment();
assert!(refunded_payment.is_refunded());
```
### Status Check Methods
```rust
// Check current status
if payment.is_pending() {
// Show "Payment Pending" UI
} else if payment.is_processing() {
// Show "Processing Payment" UI
} else if payment.is_completed() {
// Show "Payment Successful" UI
// Enable company features
} else if payment.has_failed() {
// Show "Payment Failed" UI
// Offer retry option
} else if payment.is_refunded() {
// Show "Payment Refunded" UI
}
```
## Integration with Company Model
### Complete Payment Flow
```rust
use heromodels::models::biz::{Company, CompanyStatus, Payment, PaymentStatus};
// 1. Create company with pending payment status
let company = Company::new(
"TechStart Inc.".to_string(),
"REG-TS-2024-001".to_string(),
chrono::Utc::now().timestamp(),
)
.email("contact@techstart.com".to_string())
.status(CompanyStatus::PendingPayment);
let (company_id, company) = db.set(&company)?;
// 2. Create payment for the company
let payment = Payment::new(
stripe_payment_intent_id,
company_id,
"yearly".to_string(),
500.0, // Setup fee
99.0, // Monthly fee
1688.0, // Total (setup + 12 months)
);
let (payment_id, payment) = db.set(&payment)?;
// 3. Process payment through Stripe
let processing_payment = payment.process_payment();
let (_, processing_payment) = db.set(&processing_payment)?;
// 4. On successful Stripe webhook
let completed_payment = processing_payment.complete_payment(Some(stripe_customer_id));
let (_, completed_payment) = db.set(&completed_payment)?;
// 5. Activate company
let active_company = company.status(CompanyStatus::Active);
let (_, active_company) = db.set(&active_company)?;
```
## Database Indexing
The Payment model provides custom indexes for efficient querying:
```rust
// Indexed fields for fast lookups:
// - payment_intent_id: Find payment by Stripe intent ID
// - company_id: Find all payments for a company
// - status: Find payments by status
// Example queries (conceptual - actual implementation depends on your query layer)
// let pending_payments = db.find_by_index("status", "Pending")?;
// let company_payments = db.find_by_index("company_id", company_id.to_string())?;
// let stripe_payment = db.find_by_index("payment_intent_id", "pi_1234567890")?;
```
## Error Handling Best Practices
```rust
use heromodels::db::DbError;
/// End-to-end example flow: create a payment, persist it, run it through
/// Stripe, and persist the final (completed or failed) state.
///
/// Note that both the Stripe success and failure branches return `Ok(..)`;
/// only database errors propagate as `Err`. A Stripe failure is *recorded*
/// on the payment (status = Failed) rather than treated as an error.
fn process_payment_flow(payment_intent_id: String, company_id: u32) -> Result<Payment, DbError> {
let db = get_db()?;
// Create payment (plan and fees are hard-coded for illustration)
let payment = Payment::new(
payment_intent_id,
company_id,
"monthly".to_string(),
100.0,
49.99,
149.99,
);
// Save to database; `set` returns the assigned id plus the stored value
// (`payment_id` is unused here — it is shown only for the API shape)
let (payment_id, payment) = db.set(&payment)?;
// Process through Stripe (external API call)
match process_stripe_payment(&payment.payment_intent_id) {
Ok(stripe_customer_id) => {
// Success: complete payment (sets status and `completed_at`)
let completed_payment = payment.complete_payment(Some(stripe_customer_id));
let (_, final_payment) = db.set(&completed_payment)?;
Ok(final_payment)
}
Err(_) => {
// Failure: mark as failed so the attempt is tracked, not lost
let failed_payment = payment.fail_payment();
let (_, final_payment) = db.set(&failed_payment)?;
Ok(final_payment)
}
}
}
```
## Testing
The Payment model includes comprehensive tests in `tests/payment.rs`. When working with payments:
1. **Always test status transitions**
2. **Verify timestamp handling**
3. **Test database persistence**
4. **Test integration with Company model**
5. **Test builder pattern methods**
```bash
# Run payment tests
cargo test payment
# Run specific test
cargo test test_payment_completion
```
## Common Patterns
### 1. Payment Retry Logic
```rust
/// Rebuilds a failed payment as a fresh `Pending` payment so it can be retried.
///
/// Non-failed payments are returned unchanged. All monetary fields, the
/// currency, and any Stripe customer id already attached to the failed
/// attempt are carried over; only the status (and timestamps managed by
/// `Payment::new`) are reset.
fn retry_failed_payment(payment: Payment) -> Payment {
if payment.has_failed() {
// Reset to pending for retry, preserving the original payment's data
Payment::new(
payment.payment_intent_id,
payment.company_id,
payment.payment_plan,
payment.setup_fee,
payment.monthly_fee,
payment.total_amount,
)
.currency(payment.currency)
// Keep the Stripe customer id (if any) so the retry reuses the same
// customer instead of silently dropping it.
.stripe_customer_id(payment.stripe_customer_id)
} else {
payment
}
}
```
### 2. Payment Summary
```rust
/// Renders a one-line, human-readable summary of a payment.
fn get_payment_summary(payment: &Payment) -> String {
// Upper-case the currency code once, then assemble the summary.
let currency_code = payment.currency.to_uppercase();
let intent = &payment.payment_intent_id;
format!(
"Payment {} for company {}: {} {} ({})",
intent, payment.company_id, payment.total_amount, currency_code, payment.status
)
}
```
### 3. Payment Validation
```rust
/// Validates a payment's required fields before it is persisted/processed.
///
/// Returns `Err` with a human-readable message for the first failed check,
/// `Ok(())` when all checks pass.
fn validate_payment(payment: &Payment) -> Result<(), String> {
// `!(x > 0.0)` (rather than `x <= 0.0`) also rejects NaN, whose
// comparisons are always false and would otherwise slip through.
if !(payment.total_amount > 0.0) {
return Err("Total amount must be positive".to_string());
}
if payment.payment_intent_id.is_empty() {
return Err("Payment intent ID is required".to_string());
}
if payment.company_id == 0 {
return Err("Valid company ID is required".to_string());
}
Ok(())
}
```
## Key Points for AI Assistants
1. **Always use auto-generated IDs** - Don't manually set IDs, let OurDB handle them
2. **Follow status flow** - Pending → Processing → Completed/Failed → (optionally) Refunded
3. **Update timestamps** - `completed_at` is automatically set when calling `complete_payment()`
4. **Use builder pattern** - For optional fields and cleaner code
5. **Test thoroughly** - Payment logic is critical, always verify with tests
6. **Handle errors gracefully** - Payment failures should be tracked, not ignored
7. **Integrate with Company** - Payments typically affect company status
8. **Use proper indexing** - Leverage indexed fields for efficient queries
This model follows the heromodels patterns and integrates seamlessly with the existing codebase architecture.

View File

@@ -0,0 +1,300 @@
# AI Prompt: Convert V Language Specs to Rust Hero Models
## Objective
Convert V language model specifications (`.v` files) to Rust hero models that integrate with the heromodels framework. The generated Rust models should follow the established patterns for base data embedding, indexing, fluent builder APIs, and Rhai scripting integration.
## V Language Input Structure Analysis
### V Spec Patterns to Recognize:
1. **Module Declaration**: `module circle` or `module group`
2. **Base Embedding**: `core.Base` - represents the base model data
3. **Index Fields**: Fields marked with `@[index]` comments
4. **Mutability**: Fields declared with `pub mut:`
5. **Enums**: `pub enum Status { active, inactive, suspended }`
6. **Nested Structs**: Embedded configuration or related data structures
7. **Collections**: `[]u32`, `[]string`, `map[string]string`
8. **References**: `u32` fields typically represent foreign key references
### Example V Spec Structure:
```v
module circle
import freeflowuniverse.herolib.hero.models.core
pub struct User {
core.Base
pub mut:
username string @[index] // Unique username
email []string @[index] // Multiple email addresses
status UserStatus // Enum reference
profile UserProfile // Nested struct
metadata map[string]string // Key-value pairs
}
pub enum UserStatus {
active
inactive
suspended
}
pub struct UserProfile {
pub mut:
full_name string
bio string
links map[string]string
}
```
## Rust Hero Model Conversion Rules
### 1. File Structure and Imports
```rust
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use rhai::CustomType;
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
```
### 2. Base Data Embedding
- **V**: `core.Base`
- **Rust**: `pub base_data: BaseModelData,`
### 3. Index Field Conversion
- **V**: `field_name string @[index]`
- **Rust**: `#[index] pub field_name: String,`
### 4. Type Mappings
| V Type | Rust Type |
|--------|-----------|
| `string` | `String` |
| `[]string` | `Vec<String>` |
| `[]u32` | `Vec<u32>` |
| `u32` | `u32` |
| `u64` | `u64` |
| `f64` | `f64` |
| `bool` | `bool` |
| `map[string]string` | `std::collections::HashMap<String, String>` |
### 5. Struct Declaration Pattern
```rust
/// Documentation comment describing the model
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType, Default, RhaiApi)]
pub struct ModelName {
/// Base model data
pub base_data: BaseModelData,
#[index]
pub indexed_field: String,
pub regular_field: String,
pub optional_field: Option<String>,
pub nested_struct: NestedType,
pub collection: Vec<u32>,
pub metadata: std::collections::HashMap<String, String>,
}
```
### 6. Enum Conversion
```rust
/// Rust translation of the V `UserStatus` enum; variants are PascalCased
/// per Rust convention (V uses lowercase variant names).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum UserStatus {
Active,
Inactive,
Suspended,
}
```
### 7. Fluent Builder Implementation
Every model must implement a fluent builder pattern:
```rust
// Fluent builder API: every setter takes `self` by value and returns it so
// calls chain; `build` is a no-op terminator kept for call-site readability.
impl ModelName {
/// Create a new instance
pub fn new(id: u32) -> Self {
Self {
base_data: BaseModelData::new(id),
indexed_field: String::new(),
regular_field: String::new(),
optional_field: None,
nested_struct: NestedType::new(),
collection: Vec::new(),
metadata: std::collections::HashMap::new(),
}
}
/// Set indexed field (fluent)
pub fn indexed_field(mut self, value: impl ToString) -> Self {
self.indexed_field = value.to_string();
self
}
/// Set regular field (fluent)
pub fn regular_field(mut self, value: impl ToString) -> Self {
self.regular_field = value.to_string();
self
}
/// Set optional field (fluent); wraps the value in `Some`
pub fn optional_field(mut self, value: impl ToString) -> Self {
self.optional_field = Some(value.to_string());
self
}
/// Set nested struct (fluent)
pub fn nested_struct(mut self, value: NestedType) -> Self {
self.nested_struct = value;
self
}
/// Add to collection (fluent); appends a single element
pub fn add_to_collection(mut self, value: u32) -> Self {
self.collection.push(value);
self
}
/// Set entire collection (fluent); replaces any previously added elements
pub fn collection(mut self, value: Vec<u32>) -> Self {
self.collection = value;
self
}
/// Add metadata entry (fluent); later inserts overwrite the same key
pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self {
self.metadata.insert(key.to_string(), value.to_string());
self
}
/// Build the final instance
pub fn build(self) -> Self {
self
}
}
```
### 8. Model Trait Implementation
```rust
// Example `Model` implementation for a converted struct: `db_prefix`
// namespaces the collection, and `db_keys` emits one entry per `#[index]` field.
impl Model for ModelName {
fn db_prefix() -> &'static str {
"modelname"
}
fn get_id(&self) -> u32 {
self.base_data.id
}
fn base_data_mut(&mut self) -> &mut BaseModelData {
&mut self.base_data
}
fn db_keys(&self) -> Vec<IndexKey> {
let mut keys = Vec::new();
// Add index keys for fields marked with #[index]
keys.push(IndexKey::new("indexed_field", &self.indexed_field));
// Add additional index keys as needed
keys
}
}
```
### 9. Nested Struct Builder Pattern
For embedded types, implement similar builder patterns:
```rust
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct NestedType {
pub field1: String,
pub field2: String,
}
impl NestedType {
pub fn new() -> Self {
Self {
field1: String::new(),
field2: String::new(),
}
}
pub fn field1(mut self, value: impl ToString) -> Self {
self.field1 = value.to_string();
self
}
pub fn field2(mut self, value: impl ToString) -> Self {
self.field2 = value.to_string();
self
}
pub fn build(self) -> Self {
self
}
}
```
## Conversion Steps
1. **Analyze V Spec Structure**
- Identify the module name and main structs
- Note which fields are marked with `@[index]`
- Identify nested structs and enums
- Map field types from V to Rust
2. **Create Rust File Structure**
- Add appropriate imports
- Convert enums first (they're often referenced by structs)
- Convert nested structs before main structs
3. **Implement Main Struct**
- Add `#[model]` macro and derives
- Embed `BaseModelData` as `base_data`
- Mark indexed fields with `#[index]`
- Convert field types according to mapping table
4. **Implement Builder Pattern**
- Add `new(id: u32)` constructor
- Add fluent setter methods for each field
- Handle optional fields appropriately
- Add collection manipulation methods
5. **Implement Model Trait**
- Define appropriate `db_prefix`
- Implement required trait methods
- Add index keys for searchable fields
6. **Add Documentation**
- Document the struct and its purpose
- Document each field's meaning
- Add usage examples in comments
## Example Usage After Conversion
```rust
let user = User::new(1)
.username("john_doe")
.add_email("john@example.com")
.add_email("john.doe@company.com")
.status(UserStatus::Active)
.profile(
UserProfile::new()
.full_name("John Doe")
.bio("Software developer")
.build()
)
.add_metadata("department", "engineering")
.build();
```
## Notes and Best Practices
1. **Field Naming**: Convert V snake_case to Rust snake_case (usually no change needed)
2. **Optional Fields**: Use `Option<T>` for fields that may be unset or absent in V, rather than relying on sentinel values such as empty strings
3. **Collections**: Always provide both `add_item` and `set_collection` methods
4. **Error Handling**: Builder methods should not panic; use appropriate defaults
5. **Documentation**: Include comprehensive documentation for public APIs
6. **Testing**: Consider adding unit tests for builder patterns
7. **Validation**: Add validation logic in builder methods if needed
## File Organization
Place the converted Rust models in the appropriate subdirectory under `heromodels/src/models/` based on the domain (e.g., `user/`, `finance/`, `governance/`, etc.).

View File

@@ -1,10 +1,25 @@
use chrono::{Duration, Utc};
use chrono::{Duration, Utc, NaiveDateTime};
use heromodels::db::{Collection, Db};
use heromodels::models::User;
use heromodels::models::calendar::{AttendanceStatus, Attendee, Calendar, Event, EventStatus};
use heromodels_core::Model;
fn main() {
// Helper to format i64 timestamps
let fmt_time = |ts: i64| -> String {
let ndt = NaiveDateTime::from_timestamp_opt(ts, 0)
.unwrap_or(NaiveDateTime::from_timestamp_opt(0, 0).unwrap());
chrono::DateTime::<Utc>::from_utc(ndt, Utc)
.format("%Y-%m-%d %H:%M")
.to_string()
};
let fmt_date = |ts: i64| -> String {
let ndt = NaiveDateTime::from_timestamp_opt(ts, 0)
.unwrap_or(NaiveDateTime::from_timestamp_opt(0, 0).unwrap());
chrono::DateTime::<Utc>::from_utc(ndt, Utc)
.format("%Y-%m-%d")
.to_string()
};
// Create a new DB instance, reset before every run
let db_path = "/tmp/ourdb_calendar_example";
let db = heromodels::db::hero::OurDB::new(db_path, true).expect("Can create DB");
@@ -47,50 +62,21 @@ fn main() {
println!("- User 2 (ID: {}): {}", user2_id, stored_user2.full_name);
println!("- User 3 (ID: {}): {}", user3_id, stored_user3.full_name);
// --- Create Attendees ---
// --- Create Attendees (embedded in events, not stored separately) ---
println!("\n--- Creating Attendees ---");
let attendee1 = Attendee::new(user1_id).status(AttendanceStatus::Accepted);
let attendee2 = Attendee::new(user2_id).status(AttendanceStatus::Tentative);
let attendee3 = Attendee::new(user3_id); // Default NoResponse
// Store attendees in database and get their IDs
let attendee_collection = db
.collection::<Attendee>()
.expect("can open attendee collection");
let (attendee1_id, stored_attendee1) = attendee_collection
.set(&attendee1)
.expect("can set attendee1");
let (attendee2_id, stored_attendee2) = attendee_collection
.set(&attendee2)
.expect("can set attendee2");
let (attendee3_id, stored_attendee3) = attendee_collection
.set(&attendee3)
.expect("can set attendee3");
println!("Created attendees:");
println!(
"- Attendee 1 (ID: {}): Contact ID {}, Status: {:?}",
attendee1_id, stored_attendee1.contact_id, stored_attendee1.status
);
println!(
"- Attendee 2 (ID: {}): Contact ID {}, Status: {:?}",
attendee2_id, stored_attendee2.contact_id, stored_attendee2.status
);
println!(
"- Attendee 3 (ID: {}): Contact ID {}, Status: {:?}",
attendee3_id, stored_attendee3.contact_id, stored_attendee3.status
);
// --- Create Events with Attendees ---
println!("\n--- Creating Events with Enhanced Features ---");
let now = Utc::now();
let event1_start = (now + Duration::hours(1)).timestamp();
let event1_end = (now + Duration::hours(2)).timestamp();
let event1 = Event::new(
"Team Meeting",
now + Duration::hours(1),
now + Duration::hours(2),
)
let event1 = Event::new()
.title("Team Meeting")
.reschedule(event1_start, event1_end)
.description("Weekly sync-up meeting to discuss project progress.")
.location("Conference Room A")
.color("#FF5722") // Red-orange color
@@ -99,14 +85,14 @@ fn main() {
.category("Work")
.reminder_minutes(15)
.timezone("UTC")
.add_attendee(attendee1_id)
.add_attendee(attendee2_id);
.add_attendee(attendee1.clone())
.add_attendee(attendee2.clone());
let event2 = Event::new(
"Project Brainstorm",
now + Duration::days(1),
now + Duration::days(1) + Duration::minutes(90),
)
let event2_start = (now + Duration::days(1)).timestamp();
let event2_end = (now + Duration::days(1) + Duration::minutes(90)).timestamp();
let event2 = Event::new()
.title("Project Brainstorm")
.reschedule(event2_start, event2_end)
.description("Brainstorming session for new project features.")
.location("Innovation Lab")
.color("#4CAF50") // Green color
@@ -115,28 +101,28 @@ fn main() {
.category("Planning")
.reminder_minutes(30)
.is_recurring(true)
.add_attendee(attendee1_id)
.add_attendee(attendee3_id);
.add_attendee(attendee1.clone())
.add_attendee(attendee3.clone());
let event3 = Event::new(
"Client Call",
now + Duration::days(2),
now + Duration::days(2) + Duration::hours(1),
)
let event3_start = (now + Duration::days(2)).timestamp();
let event3_end = (now + Duration::days(2) + Duration::hours(1)).timestamp();
let event3 = Event::new()
.title("Client Call")
.reschedule(event3_start, event3_end)
.description("Quarterly review with key client.")
.color("#9C27B0") // Purple color
.created_by(user3_id)
.status(EventStatus::Published)
.category("Client")
.reminder_minutes(60)
.add_attendee(attendee2_id);
.add_attendee(attendee2.clone());
// Create an all-day event
let event4 = Event::new(
"Company Holiday",
now + Duration::days(7),
now + Duration::days(7) + Duration::hours(24),
)
let event4_start = (now + Duration::days(7)).timestamp();
let event4_end = (now + Duration::days(7) + Duration::hours(24)).timestamp();
let event4 = Event::new()
.title("Company Holiday")
.reschedule(event4_start, event4_end)
.description("National holiday - office closed.")
.color("#FFC107") // Amber color
.all_day(true)
@@ -148,7 +134,7 @@ fn main() {
println!(
"- Event 1: '{}' at {} with {} attendees",
event1.title,
event1.start_time.format("%Y-%m-%d %H:%M"),
fmt_time(event1.start_time),
event1.attendees.len()
);
println!(
@@ -174,12 +160,19 @@ fn main() {
);
println!(" All-day: {}", event1.all_day);
println!(" Recurring: {}", event1.is_recurring);
println!(" Attendee IDs: {:?}", event1.attendees);
println!(
" Attendee IDs: {:?}",
event1
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
println!(
"- Event 2: '{}' at {} with {} attendees",
event2.title,
event2.start_time.format("%Y-%m-%d %H:%M"),
fmt_time(event2.start_time),
event2.attendees.len()
);
println!(
@@ -205,12 +198,19 @@ fn main() {
);
println!(" All-day: {}", event2.all_day);
println!(" Recurring: {}", event2.is_recurring);
println!(" Attendee IDs: {:?}", event2.attendees);
println!(
" Attendee IDs: {:?}",
event2
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
println!(
"- Event 3: '{}' at {} with {} attendees",
event3.title,
event3.start_time.format("%Y-%m-%d %H:%M"),
fmt_time(event3.start_time),
event3.attendees.len()
);
println!(
@@ -236,12 +236,19 @@ fn main() {
);
println!(" All-day: {}", event3.all_day);
println!(" Recurring: {}", event3.is_recurring);
println!(" Attendee IDs: {:?}", event3.attendees);
println!(
" Attendee IDs: {:?}",
event3
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
println!(
"- Event 4: '{}' at {} (All-day: {})",
event4.title,
event4.start_time.format("%Y-%m-%d"),
fmt_date(event4.start_time),
event4.all_day
);
println!(
@@ -262,25 +269,37 @@ fn main() {
let new_start = now + Duration::hours(2);
let new_end = now + Duration::hours(3);
let mut updated_event1 = event1.clone();
updated_event1 = updated_event1.reschedule(new_start, new_end);
updated_event1 = updated_event1.reschedule(new_start.timestamp(), new_end.timestamp());
println!(
"Rescheduled '{}' to {}",
updated_event1.title,
new_start.format("%Y-%m-%d %H:%M")
fmt_time(new_start.timestamp())
);
// Remove an attendee
updated_event1 = updated_event1.remove_attendee(attendee1_id);
updated_event1 = updated_event1.remove_attendee(user1_id);
println!(
"Removed attendee {} from '{}'. Remaining attendee IDs: {:?}",
attendee1_id, updated_event1.title, updated_event1.attendees
user1_id,
updated_event1.title,
updated_event1
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
// Add a new attendee
updated_event1 = updated_event1.add_attendee(attendee3_id);
updated_event1 = updated_event1.add_attendee(attendee3.clone());
println!(
"Added attendee {} to '{}'. Current attendee IDs: {:?}",
attendee3_id, updated_event1.title, updated_event1.attendees
user3_id,
updated_event1.title,
updated_event1
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
// --- Demonstrate Event Status Changes ---
@@ -300,11 +319,11 @@ fn main() {
println!("Cancelled event: '{}'", cancelled_event.title);
// Update event with new features
let enhanced_event = Event::new(
"Enhanced Meeting",
now + Duration::days(5),
now + Duration::days(5) + Duration::hours(2),
)
let enhanced_start = (now + Duration::days(5)).timestamp();
let enhanced_end = (now + Duration::days(5) + Duration::hours(2)).timestamp();
let enhanced_event = Event::new()
.title("Enhanced Meeting")
.reschedule(enhanced_start, enhanced_end)
.description("Meeting with all new features demonstrated.")
.location("Virtual - Zoom")
.color("#673AB7") // Deep purple
@@ -314,9 +333,9 @@ fn main() {
.reminder_minutes(45)
.timezone("America/New_York")
.is_recurring(true)
.add_attendee(attendee1_id)
.add_attendee(attendee2_id)
.add_attendee(attendee3_id);
.add_attendee(attendee1)
.add_attendee(attendee2)
.add_attendee(attendee3);
println!("Created enhanced event with all features:");
println!(" Title: {}", enhanced_event.title);
@@ -485,13 +504,13 @@ fn main() {
println!("\n--- Modifying Calendar ---");
// Create and store a new event
let new_event = Event::new(
"1-on-1 Meeting",
now + Duration::days(3),
now + Duration::days(3) + Duration::minutes(30),
)
.description("One-on-one meeting with team member.")
.location("Office");
let ne_start = (now + Duration::days(3)).timestamp();
let ne_end = (now + Duration::days(3) + Duration::minutes(30)).timestamp();
let new_event = Event::new()
.title("1-on-1 Meeting")
.reschedule(ne_start, ne_end)
.description("One-on-one meeting with team member.")
.location("Office");
let (new_event_id, _stored_new_event) =
event_collection.set(&new_event).expect("can set new event");
@@ -565,7 +584,7 @@ fn main() {
"- Event ID: {}, Title: '{}', Start: {}, Attendees: {}",
event.get_id(),
event.title,
event.start_time.format("%Y-%m-%d %H:%M"),
fmt_time(event.start_time),
event.attendees.len()
);
}
@@ -583,22 +602,16 @@ fn main() {
retrieved_event1.attendees.len()
);
// Look up attendee details for each attendee ID
for &attendee_id in &retrieved_event1.attendees {
if let Some(attendee) = attendee_collection
.get_by_id(attendee_id)
.expect("can try to get attendee")
// Look up attendee details directly from embedded attendees
for attendee in &retrieved_event1.attendees {
if let Some(user) = user_collection
.get_by_id(attendee.contact_id)
.expect("can try to get user")
{
// Look up user details for the attendee's contact_id
if let Some(user) = user_collection
.get_by_id(attendee.contact_id)
.expect("can try to get user")
{
println!(
" - Attendee ID {}: {} (User: {}, Status: {:?})",
attendee_id, user.full_name, attendee.contact_id, attendee.status
);
}
println!(
" - User {}: {} (Status: {:?})",
attendee.contact_id, user.full_name, attendee.status
);
}
}
}

View File

@@ -1,26 +1,26 @@
use circles_launcher::{new_launcher};
use heromodels::models::circle::circle::{new_circle};
use secp256k1::{Secp256k1, SecretKey, PublicKey};
use circles_launcher::new_launcher;
use heromodels::models::circle::circle::new_circle;
use rand::rngs::OsRng;
use secp256k1::{PublicKey, Secp256k1, SecretKey};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Generate valid secp256k1 keypairs for testing
let secp = Secp256k1::new();
let mut rng = OsRng;
let secret_key1 = SecretKey::new(&mut rng);
let public_key1 = PublicKey::from_secret_key(&secp, &secret_key1);
let pk1_hex = hex::encode(public_key1.serialize());
let secret_key2 = SecretKey::new(&mut rng);
let public_key2 = PublicKey::from_secret_key(&secp, &secret_key2);
let pk2_hex = hex::encode(public_key2.serialize());
let secret_key3 = SecretKey::new(&mut rng);
let public_key3 = PublicKey::from_secret_key(&secp, &secret_key3);
let pk3_hex = hex::encode(public_key3.serialize());
println!("Generated test public keys:");
println!(" PK1: {}", pk1_hex);
println!(" PK2: {}", pk2_hex);
@@ -36,4 +36,4 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
.save();
Ok(())
}
}

View File

@@ -0,0 +1,199 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{Bid, BidStatus, BillingPeriod};
use heromodels::models::grid4::bid::bid_index::customer_id;
use heromodels_core::Model;
// Helper function to print bid details
/// Render every field of a `Bid` as a labelled, human-readable report on stdout.
fn print_bid_details(bid: &Bid) {
    // Assemble the whole report first, then emit it with a single write.
    let report = [
        String::from("\n--- Bid Details ---"),
        format!("ID: {}", bid.get_id()),
        format!("Customer ID: {}", bid.customer_id),
        format!("Compute Slices: {}", bid.compute_slices_nr),
        format!("Compute Slice Price: ${:.2}", bid.compute_slice_price),
        format!("Storage Slices: {}", bid.storage_slices_nr),
        format!("Storage Slice Price: ${:.2}", bid.storage_slice_price),
        format!("Status: {:?}", bid.status),
        format!("Obligation: {}", bid.obligation),
        format!("Start Date: {}", bid.start_date),
        format!("End Date: {}", bid.end_date),
        format!("Billing Period: {:?}", bid.billing_period),
        format!("Signature User: {}", bid.signature_user),
        format!("Created At: {}", bid.base_data.created_at),
        format!("Modified At: {}", bid.base_data.modified_at),
    ];
    println!("{}", report.join("\n"));
}
/// Example entry point: demonstrates CRUD, an index lookup, and simple
/// aggregate analytics on the Grid4 `Bid` model backed by OurDB.
fn main() {
    // Create a new DB instance in /tmp/grid4_db, and reset before every run
    let db = heromodels::db::hero::OurDB::new("/tmp/grid4_db", true).expect("Can create DB");
    println!("Grid4 Bid Models - Basic Usage Example");
    println!("=====================================");
    // Create bids with different configurations
    // Bid 1 - Small compute request
    let bid1 = Bid::new()
        .customer_id(101)
        .compute_slices_nr(4)
        .compute_slice_price(0.05)
        .storage_slices_nr(10)
        .storage_slice_price(0.02)
        .status(BidStatus::Pending)
        .obligation(false)
        .start_date(1640995200) // 2022-01-01 (unix seconds)
        .end_date(1672531200) // 2023-01-01
        .billing_period(BillingPeriod::Monthly)
        .signature_user("sig_user_101_abc123".to_string());
    // Bid 2 - Large compute request with obligation
    let bid2 = Bid::new()
        .customer_id(102)
        .compute_slices_nr(16)
        .compute_slice_price(0.04)
        .storage_slices_nr(50)
        .storage_slice_price(0.015)
        .status(BidStatus::Confirmed)
        .obligation(true)
        .start_date(1640995200)
        .end_date(1704067200) // 2024-01-01
        .billing_period(BillingPeriod::Yearly)
        .signature_user("sig_user_102_def456".to_string());
    // Bid 3 - Storage-heavy request
    let bid3 = Bid::new()
        .customer_id(103)
        .compute_slices_nr(2)
        .compute_slice_price(0.06)
        .storage_slices_nr(100)
        .storage_slice_price(0.01)
        .status(BidStatus::Assigned)
        .obligation(true)
        .start_date(1640995200)
        .end_date(1672531200)
        .billing_period(BillingPeriod::Hourly)
        .signature_user("sig_user_103_ghi789".to_string());
    // Bid 4 - Cancelled bid
    let bid4 = Bid::new()
        .customer_id(104)
        .compute_slices_nr(8)
        .compute_slice_price(0.055)
        .storage_slices_nr(25)
        .storage_slice_price(0.018)
        .status(BidStatus::Cancelled)
        .obligation(false)
        .start_date(1640995200)
        .end_date(1672531200)
        .billing_period(BillingPeriod::Monthly)
        .signature_user("sig_user_104_jkl012".to_string());
    // Save all bids to database and get their assigned IDs and updated models.
    // `set` returns both the new id and the stored copy of the model.
    let (bid1_id, db_bid1) = db
        .collection()
        .expect("can open bid collection")
        .set(&bid1)
        .expect("can set bid");
    let (bid2_id, db_bid2) = db
        .collection()
        .expect("can open bid collection")
        .set(&bid2)
        .expect("can set bid");
    let (bid3_id, db_bid3) = db
        .collection()
        .expect("can open bid collection")
        .set(&bid3)
        .expect("can set bid");
    let (bid4_id, db_bid4) = db
        .collection()
        .expect("can open bid collection")
        .set(&bid4)
        .expect("can set bid");
    println!("Bid 1 assigned ID: {}", bid1_id);
    println!("Bid 2 assigned ID: {}", bid2_id);
    println!("Bid 3 assigned ID: {}", bid3_id);
    println!("Bid 4 assigned ID: {}", bid4_id);
    // Print all bids retrieved from database
    println!("\n--- Bids Retrieved from Database ---");
    println!("\n1. Small compute bid:");
    print_bid_details(&db_bid1);
    println!("\n2. Large compute bid with obligation:");
    print_bid_details(&db_bid2);
    println!("\n3. Storage-heavy bid:");
    print_bid_details(&db_bid3);
    println!("\n4. Cancelled bid:");
    print_bid_details(&db_bid4);
    // Demonstrate different ways to retrieve bids from the database
    println!("\n--- Retrieving Bids by Different Methods ---");
    // Secondary-index lookup: all bids whose customer_id == 102.
    println!("\n1. By Customer ID Index (Customer 102):");
    let customer_bids = db
        .collection::<Bid>()
        .expect("can open bid collection")
        .get::<customer_id, _>(&102u32)
        .expect("can load bids by customer");
    assert_eq!(customer_bids.len(), 1);
    print_bid_details(&customer_bids[0]);
    println!("\n2. Updating Bid Status:");
    // NOTE(review): calling `set` on a clone that keeps its id overwrites the
    // stored record in place — consistent with the count of 3 asserted below.
    let mut updated_bid = db_bid1.clone();
    updated_bid.status = BidStatus::Confirmed;
    let (_, confirmed_bid) = db
        .collection::<Bid>()
        .expect("can open bid collection")
        .set(&updated_bid)
        .expect("can update bid");
    println!("Updated bid status to Confirmed:");
    print_bid_details(&confirmed_bid);
    // 3. Delete a bid and show the updated results
    println!("\n3. After Deleting a Bid:");
    println!("Deleting bid with ID: {}", bid4_id);
    db.collection::<Bid>()
        .expect("can open bid collection")
        .delete_by_id(bid4_id)
        .expect("can delete existing bid");
    // Show remaining bids
    let all_bids = db
        .collection::<Bid>()
        .expect("can open bid collection")
        .get_all()
        .expect("can load all bids");
    println!("Remaining bids count: {}", all_bids.len());
    assert_eq!(all_bids.len(), 3);
    // Calculate total compute and storage requested across remaining bids
    println!("\n--- Bid Analytics ---");
    let total_compute_slices: i32 = all_bids.iter().map(|b| b.compute_slices_nr).sum();
    let total_storage_slices: i32 = all_bids.iter().map(|b| b.storage_slices_nr).sum();
    let avg_compute_price: f64 = all_bids.iter().map(|b| b.compute_slice_price).sum::<f64>() / all_bids.len() as f64;
    let avg_storage_price: f64 = all_bids.iter().map(|b| b.storage_slice_price).sum::<f64>() / all_bids.len() as f64;
    println!("Total Compute Slices Requested: {}", total_compute_slices);
    println!("Total Storage Slices Requested: {}", total_storage_slices);
    println!("Average Compute Price: ${:.3}", avg_compute_price);
    println!("Average Storage Price: ${:.3}", avg_storage_price);
    // Count bids by status
    let confirmed_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Confirmed)).count();
    let assigned_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Assigned)).count();
    let pending_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Pending)).count();
    println!("\nBids by Status:");
    println!(" Confirmed: {}", confirmed_count);
    println!(" Assigned: {}", assigned_count);
    println!(" Pending: {}", pending_count);
    println!("\n--- Model Information ---");
    println!("Bid DB Prefix: {}", Bid::db_prefix());
}

View File

@@ -0,0 +1,301 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{Contract, ContractStatus};
use heromodels::models::grid4::contract::contract_index::customer_id;
use heromodels_core::Model;
// Helper function to print contract details
/// Render a `Contract` as a labelled report, including detail lines for any
/// provisioned compute and storage slices.
fn print_contract_details(contract: &Contract) {
    // Flat summary fields, assembled up front and emitted in one write.
    let summary = [
        String::from("\n--- Contract Details ---"),
        format!("ID: {}", contract.get_id()),
        format!("Customer ID: {}", contract.customer_id),
        format!("Compute Slices: {}", contract.compute_slices.len()),
        format!("Storage Slices: {}", contract.storage_slices.len()),
        format!("Compute Slice Price: ${:.2}", contract.compute_slice_price),
        format!("Storage Slice Price: ${:.2}", contract.storage_slice_price),
        format!("Network Slice Price: ${:.2}", contract.network_slice_price),
        format!("Status: {:?}", contract.status),
        format!("Start Date: {}", contract.start_date),
        format!("End Date: {}", contract.end_date),
        format!("Billing Period: {:?}", contract.billing_period),
        format!("Signature User: {}", contract.signature_user),
        format!("Signature Hoster: {}", contract.signature_hoster),
        format!("Created At: {}", contract.base_data.created_at),
        format!("Modified At: {}", contract.base_data.modified_at),
    ];
    println!("{}", summary.join("\n"));
    // Per-slice detail lines, numbered from 1.
    if !contract.compute_slices.is_empty() {
        println!(" Compute Slices:");
        for (idx, cs) in (1..).zip(contract.compute_slices.iter()) {
            println!(" {}. Node: {}, ID: {}, Memory: {:.1}GB, Storage: {:.1}GB, Passmark: {}, vCores: {}",
                idx, cs.node_id, cs.id, cs.mem_gb, cs.storage_gb, cs.passmark, cs.vcores);
        }
    }
    if !contract.storage_slices.is_empty() {
        println!(" Storage Slices:");
        for (idx, ss) in (1..).zip(contract.storage_slices.iter()) {
            println!(" {}. Node: {}, ID: {}, Size: {}GB",
                idx, ss.node_id, ss.id, ss.storage_size_gb);
        }
    }
}
fn main() {
// Create a new DB instance in /tmp/grid4_contracts_db, and reset before every run
let db = heromodels::db::hero::OurDB::new("/tmp/grid4_contracts_db", true).expect("Can create DB");
println!("Grid4 Contract Models - Basic Usage Example");
println!("==========================================");
// Create compute slices for contracts
let compute_slice1 = ComputeSliceProvisioned::new()
.node_id(1001)
.id(1)
.mem_gb(2.0)
.storage_gb(20.0)
.passmark(2500)
.vcores(2)
.cpu_oversubscription(150)
.tags("web-server,production".to_string());
let compute_slice2 = ComputeSliceProvisioned::new()
.node_id(1002)
.id(2)
.mem_gb(4.0)
.storage_gb(40.0)
.passmark(5000)
.vcores(4)
.cpu_oversubscription(120)
.tags("database,high-performance".to_string());
let compute_slice3 = ComputeSliceProvisioned::new()
.node_id(1003)
.id(1)
.mem_gb(8.0)
.storage_gb(80.0)
.passmark(10000)
.vcores(8)
.cpu_oversubscription(100)
.tags("ml-training,gpu-enabled".to_string());
// Create storage slices for contracts
let storage_slice1 = StorageSliceProvisioned::new()
.node_id(2001)
.id(1)
.storage_size_gb(100)
.tags("backup,cold-storage".to_string());
let storage_slice2 = StorageSliceProvisioned::new()
.node_id(2002)
.id(2)
.storage_size_gb(500)
.tags("data-lake,analytics".to_string());
let storage_slice3 = StorageSliceProvisioned::new()
.node_id(2003)
.id(1)
.storage_size_gb(1000)
.tags("archive,long-term".to_string());
// Create contracts with different configurations
// Contract 1 - Small web hosting contract
let contract1 = Contract::new()
.customer_id(201)
.add_compute_slice(compute_slice1.clone())
.add_storage_slice(storage_slice1.clone())
.compute_slice_price(0.05)
.storage_slice_price(0.02)
.network_slice_price(0.01)
.status(ContractStatus::Active)
.start_date(1640995200) // 2022-01-01
.end_date(1672531200) // 2023-01-01
.billing_period(BillingPeriod::Monthly)
.signature_user("contract_user_201_abc123".to_string())
.signature_hoster("hoster_node1001_xyz789".to_string());
// Contract 2 - Database hosting contract
let contract2 = Contract::new()
.customer_id(202)
.add_compute_slice(compute_slice2.clone())
.add_storage_slice(storage_slice2.clone())
.compute_slice_price(0.04)
.storage_slice_price(0.015)
.network_slice_price(0.008)
.status(ContractStatus::Active)
.start_date(1640995200)
.end_date(1704067200) // 2024-01-01
.billing_period(BillingPeriod::Yearly)
.signature_user("contract_user_202_def456".to_string())
.signature_hoster("hoster_node1002_uvw123".to_string());
// Contract 3 - ML training contract (paused)
let contract3 = Contract::new()
.customer_id(203)
.add_compute_slice(compute_slice3.clone())
.add_storage_slice(storage_slice3.clone())
.compute_slice_price(0.08)
.storage_slice_price(0.01)
.network_slice_price(0.015)
.status(ContractStatus::Paused)
.start_date(1640995200)
.end_date(1672531200)
.billing_period(BillingPeriod::Hourly)
.signature_user("contract_user_203_ghi789".to_string())
.signature_hoster("hoster_node1003_rst456".to_string());
// Contract 4 - Multi-slice enterprise contract
let contract4 = Contract::new()
.customer_id(204)
.add_compute_slice(compute_slice1.clone())
.add_compute_slice(compute_slice2.clone())
.add_storage_slice(storage_slice1.clone())
.add_storage_slice(storage_slice2.clone())
.compute_slice_price(0.045)
.storage_slice_price(0.018)
.network_slice_price(0.012)
.status(ContractStatus::Active)
.start_date(1640995200)
.end_date(1735689600) // 2025-01-01
.billing_period(BillingPeriod::Monthly)
.signature_user("contract_user_204_jkl012".to_string())
.signature_hoster("hoster_enterprise_mno345".to_string());
// Save all contracts to database and get their assigned IDs and updated models
let (contract1_id, db_contract1) = db
.collection()
.expect("can open contract collection")
.set(&contract1)
.expect("can set contract");
let (contract2_id, db_contract2) = db
.collection()
.expect("can open contract collection")
.set(&contract2)
.expect("can set contract");
let (contract3_id, db_contract3) = db
.collection()
.expect("can open contract collection")
.set(&contract3)
.expect("can set contract");
let (contract4_id, db_contract4) = db
.collection()
.expect("can open contract collection")
.set(&contract4)
.expect("can set contract");
println!("Contract 1 assigned ID: {}", contract1_id);
println!("Contract 2 assigned ID: {}", contract2_id);
println!("Contract 3 assigned ID: {}", contract3_id);
println!("Contract 4 assigned ID: {}", contract4_id);
// Print all contracts retrieved from database
println!("\n--- Contracts Retrieved from Database ---");
println!("\n1. Web hosting contract:");
print_contract_details(&db_contract1);
println!("\n2. Database hosting contract:");
print_contract_details(&db_contract2);
println!("\n3. ML training contract (paused):");
print_contract_details(&db_contract3);
println!("\n4. Enterprise multi-slice contract:");
print_contract_details(&db_contract4);
// Demonstrate different ways to retrieve contracts from the database
// 1. Retrieve by customer ID index
println!("\n--- Retrieving Contracts by Different Methods ---");
println!("\n1. By Customer ID Index (Customer 202):");
let customer_contracts = db
.collection::<Contract>()
.expect("can open contract collection")
.get::<customer_id, _>(&202u32)
.expect("can load contracts by customer");
assert_eq!(customer_contracts.len(), 1);
print_contract_details(&customer_contracts[0]);
// 2. Update contract status
println!("\n2. Resuming Paused Contract:");
let mut updated_contract = db_contract3.clone();
updated_contract.status = ContractStatus::Active;
let (_, resumed_contract) = db
.collection::<Contract>()
.expect("can open contract collection")
.set(&updated_contract)
.expect("can update contract");
println!("Updated contract status to Active:");
print_contract_details(&resumed_contract);
// 3. Cancel a contract
println!("\n3. Cancelling a Contract:");
let mut cancelled_contract = db_contract1.clone();
cancelled_contract.status = ContractStatus::Cancelled;
let (_, final_contract) = db
.collection::<Contract>()
.expect("can open contract collection")
.set(&cancelled_contract)
.expect("can update contract");
println!("Cancelled contract:");
print_contract_details(&final_contract);
// Show remaining active contracts
let all_contracts = db
.collection::<Contract>()
.expect("can open contract collection")
.get_all()
.expect("can load all contracts");
println!("\n--- Contract Analytics ---");
let active_contracts: Vec<_> = all_contracts.iter()
.filter(|c| matches!(c.status, ContractStatus::Active))
.collect();
let paused_contracts: Vec<_> = all_contracts.iter()
.filter(|c| matches!(c.status, ContractStatus::Paused))
.collect();
let cancelled_contracts: Vec<_> = all_contracts.iter()
.filter(|c| matches!(c.status, ContractStatus::Cancelled))
.collect();
println!("Total Contracts: {}", all_contracts.len());
println!("Active Contracts: {}", active_contracts.len());
println!("Paused Contracts: {}", paused_contracts.len());
println!("Cancelled Contracts: {}", cancelled_contracts.len());
// Calculate total provisioned resources
let total_compute_slices: usize = all_contracts.iter().map(|c| c.compute_slices.len()).sum();
let total_storage_slices: usize = all_contracts.iter().map(|c| c.storage_slices.len()).sum();
let total_memory_gb: f64 = all_contracts.iter()
.flat_map(|c| &c.compute_slices)
.map(|s| s.mem_gb)
.sum();
let total_storage_gb: i32 = all_contracts.iter()
.flat_map(|c| &c.storage_slices)
.map(|s| s.storage_size_gb)
.sum();
println!("\nProvisioned Resources:");
println!(" Total Compute Slices: {}", total_compute_slices);
println!(" Total Storage Slices: {}", total_storage_slices);
println!(" Total Memory: {:.1} GB", total_memory_gb);
println!(" Total Storage: {} GB", total_storage_gb);
// Calculate average pricing
let avg_compute_price: f64 = all_contracts.iter().map(|c| c.compute_slice_price).sum::<f64>() / all_contracts.len() as f64;
let avg_storage_price: f64 = all_contracts.iter().map(|c| c.storage_slice_price).sum::<f64>() / all_contracts.len() as f64;
let avg_network_price: f64 = all_contracts.iter().map(|c| c.network_slice_price).sum::<f64>() / all_contracts.len() as f64;
println!("\nAverage Pricing:");
println!(" Compute: ${:.3} per slice", avg_compute_price);
println!(" Storage: ${:.3} per slice", avg_storage_price);
println!(" Network: ${:.3} per slice", avg_network_price);
println!("\n--- Model Information ---");
println!("Contract DB Prefix: {}", Contract::db_prefix());
}

View File

@@ -0,0 +1,12 @@
# Grid4 Node Example (OurDB)
This example demonstrates how to use the Grid4 `Node` model against the embedded OurDB backend.
- Creates an in-memory/on-disk OurDB under `/tmp`.
- Demonstrates CRUD and simple index lookups on `country`, `nodegroupid`, and `pubkey`.
Run it:
```bash
cargo run -p heromodels --example grid4_example
```

View File

@@ -0,0 +1,66 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey};
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use std::sync::Arc;
/// End-to-end CRUD walkthrough for the Grid4 `Node` model on an OurDB store.
fn main() {
    // Unique scratch directory per run, derived from the current time.
    let stamp = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let db_dir = format!("/tmp/grid4_example_{}", stamp);
    let _ = std::fs::remove_dir_all(&db_dir);
    let database = Arc::new(OurDB::new(&db_dir, true).expect("create OurDB"));
    let node_col = database.collection::<Node>().expect("open node collection");

    // Assemble a candidate node: one compute slice plus vendor metadata.
    let compute = ComputeSlice::new()
        .nodeid(1)
        .slice_id(1)
        .mem_gb(64.0)
        .storage_gb(1024.0)
        .passmark(8000)
        .vcores(24)
        .gpus(2)
        .price_cc(0.5);
    let device = DeviceInfo {
        vendor: "ACME".into(),
        ..Default::default()
    };
    let candidate = Node::new()
        .nodegroupid(7)
        .uptime(98)
        .add_compute_slice(compute)
        .devices(device)
        .country("BE")
        .pubkey("PUB_NODE_X")
        .build();

    // Create.
    let (id, saved) = node_col.set(&candidate).expect("store node");
    println!("Stored node id={id} pubkey={} country={}", saved.pubkey, saved.country);

    // Read back through each secondary index.
    let in_be = node_col.get::<country, _>("BE").expect("query country");
    println!("Found {} nodes in country=BE", in_be.len());
    let in_group = node_col.get::<nodegroupid, _>(&7).expect("query group");
    println!("Found {} nodes in group=7", in_group.len());
    let with_key = node_col.get::<pubkey, _>("PUB_NODE_X").expect("query pubkey");
    println!("Found {} with pubkey PUB_NODE_X", with_key.len());

    // Update: relocate the node and persist the change.
    let relocated = saved.clone().country("NL");
    let (_, persisted) = node_col.set(&relocated).expect("update node");
    println!("Updated node country={}", persisted.country);

    // Delete.
    node_col.delete_by_id(id).expect("delete node");
    println!("Deleted node id={id}");
}

View File

@@ -0,0 +1,390 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::{country, nodegroupid};
// Device/capacity types used by the struct literals in `main` were missing
// from the imports; the `node` submodule exposes them (see grid4_example).
use heromodels::models::grid4::node::{
    CPUDevice, DeviceInfo, GPUDevice, MemoryDevice, NetworkDevice, NodeCapacity, StorageDevice,
};
use heromodels::models::grid4::{ComputeSlice, Node, NodeDevice, StorageSlice};
use heromodels_core::Model;
// Helper function to print node details
/// Print a full human-readable report for a `Node`: identity, capacity,
/// device inventory, and any provisioned compute/storage slices.
fn print_node_details(node: &Node) {
    // Flat header fields, assembled up front and emitted in one write.
    let header = [
        String::from("\n--- Node Details ---"),
        format!("ID: {}", node.get_id()),
        format!("NodeGroup ID: {}", node.nodegroupid),
        format!("Uptime: {}%", node.uptime),
        format!("Country: {}", node.country),
        format!("Birth Time: {}", node.birthtime),
        format!("Public Key: {}", node.pubkey),
        format!("Compute Slices: {}", node.computeslices.len()),
        format!("Storage Slices: {}", node.storageslices.len()),
        format!("Created At: {}", node.base_data.created_at),
        format!("Modified At: {}", node.base_data.modified_at),
    ];
    println!("{}", header.join("\n"));
    // Aggregate hardware capacity.
    println!(" Capacity:");
    println!(" Storage: {:.1} GB", node.capacity.storage_gb);
    println!(" Memory: {:.1} GB", node.capacity.mem_gb);
    println!(" GPU Memory: {:.1} GB", node.capacity.mem_gb_gpu);
    println!(" Passmark: {}", node.capacity.passmark);
    println!(" vCores: {}", node.capacity.vcores);
    // Device inventory counts by category.
    println!(" Devices:");
    println!(" Vendor: {}", node.devices.vendor);
    println!(" CPUs: {}", node.devices.cpu.len());
    println!(" GPUs: {}", node.devices.gpu.len());
    println!(" Memory: {}", node.devices.memory.len());
    println!(" Storage: {}", node.devices.storage.len());
    println!(" Network: {}", node.devices.network.len());
    // Per-slice detail lines, numbered from 1.
    if !node.computeslices.is_empty() {
        println!(" Compute Slices:");
        for (idx, cs) in (1..).zip(node.computeslices.iter()) {
            println!(" {}. ID: {}, Memory: {:.1}GB, Storage: {:.1}GB, vCores: {}, GPUs: {}",
                idx, cs.id, cs.mem_gb, cs.storage_gb, cs.vcores, cs.gpus);
        }
    }
    if !node.storageslices.is_empty() {
        println!(" Storage Slices:");
        for (idx, ss) in (1..).zip(node.storageslices.iter()) {
            println!(" {}. ID: {}", idx, ss.id);
        }
    }
}
/// Example entry point: builds three fully-populated `Node` models (devices,
/// capacity, slices), stores them in OurDB, queries them via secondary
/// indexes, updates one, and prints aggregate analytics.
fn main() {
    // Create a new DB instance in /tmp/grid4_nodes_db, and reset before every run
    let db = heromodels::db::hero::OurDB::new("/tmp/grid4_nodes_db", true).expect("Can create DB");
    println!("Grid4 Node Models - Basic Usage Example");
    println!("======================================");
    // Create device components for nodes
    // CPU devices
    let cpu1 = CPUDevice {
        id: "cpu_intel_i7_12700k".to_string(),
        cores: 12,
        passmark: 28500,
        description: "Intel Core i7-12700K".to_string(),
        cpu_brand: "Intel".to_string(),
        cpu_version: "12th Gen".to_string(),
    };
    let cpu2 = CPUDevice {
        id: "cpu_amd_ryzen_9_5900x".to_string(),
        cores: 12,
        passmark: 32000,
        description: "AMD Ryzen 9 5900X".to_string(),
        cpu_brand: "AMD".to_string(),
        cpu_version: "Zen 3".to_string(),
    };
    // GPU devices
    let gpu1 = GPUDevice {
        id: "gpu_rtx_3080".to_string(),
        cores: 8704,
        memory_gb: 10.0,
        description: "NVIDIA GeForce RTX 3080".to_string(),
        gpu_brand: "NVIDIA".to_string(),
        gpu_version: "RTX 30 Series".to_string(),
    };
    let gpu2 = GPUDevice {
        id: "gpu_rtx_4090".to_string(),
        cores: 16384,
        memory_gb: 24.0,
        description: "NVIDIA GeForce RTX 4090".to_string(),
        gpu_brand: "NVIDIA".to_string(),
        gpu_version: "RTX 40 Series".to_string(),
    };
    // Memory devices
    let memory1 = MemoryDevice {
        id: "mem_ddr4_32gb".to_string(),
        size_gb: 32.0,
        description: "DDR4-3200 32GB Kit".to_string(),
    };
    let memory2 = MemoryDevice {
        id: "mem_ddr5_64gb".to_string(),
        size_gb: 64.0,
        description: "DDR5-5600 64GB Kit".to_string(),
    };
    // Storage devices
    let storage1 = StorageDevice {
        id: "ssd_nvme_1tb".to_string(),
        size_gb: 1000.0,
        description: "NVMe SSD 1TB".to_string(),
    };
    let storage2 = StorageDevice {
        id: "hdd_sata_4tb".to_string(),
        size_gb: 4000.0,
        description: "SATA HDD 4TB".to_string(),
    };
    // Network devices
    let network1 = NetworkDevice {
        id: "eth_1gbit".to_string(),
        speed_mbps: 1000,
        description: "Gigabit Ethernet".to_string(),
    };
    let network2 = NetworkDevice {
        id: "eth_10gbit".to_string(),
        speed_mbps: 10000,
        description: "10 Gigabit Ethernet".to_string(),
    };
    // Create device info configurations (two vendor profiles reused below)
    let devices1 = DeviceInfo {
        vendor: "Dell".to_string(),
        cpu: vec![cpu1.clone()],
        gpu: vec![gpu1.clone()],
        memory: vec![memory1.clone()],
        storage: vec![storage1.clone(), storage2.clone()],
        network: vec![network1.clone()],
    };
    let devices2 = DeviceInfo {
        vendor: "HP".to_string(),
        cpu: vec![cpu2.clone()],
        gpu: vec![gpu2.clone()],
        memory: vec![memory2.clone()],
        storage: vec![storage1.clone()],
        network: vec![network2.clone()],
    };
    // Create node capacities
    let capacity1 = NodeCapacity {
        storage_gb: 5000.0,
        mem_gb: 32.0,
        mem_gb_gpu: 10.0,
        passmark: 28500,
        vcores: 24,
    };
    let capacity2 = NodeCapacity {
        storage_gb: 1000.0,
        mem_gb: 64.0,
        mem_gb_gpu: 24.0,
        passmark: 32000,
        vcores: 24,
    };
    // Create compute slices
    let compute_slice1 = ComputeSlice::new()
        .id(1)
        .mem_gb(4.0)
        .storage_gb(100.0)
        .passmark(3000)
        .vcores(2)
        .cpu_oversubscription(150)
        .storage_oversubscription(120)
        .gpus(0);
    let compute_slice2 = ComputeSlice::new()
        .id(2)
        .mem_gb(8.0)
        .storage_gb(200.0)
        .passmark(6000)
        .vcores(4)
        .cpu_oversubscription(130)
        .storage_oversubscription(110)
        .gpus(1);
    let compute_slice3 = ComputeSlice::new()
        .id(1)
        .mem_gb(16.0)
        .storage_gb(400.0)
        .passmark(12000)
        .vcores(8)
        .cpu_oversubscription(110)
        .storage_oversubscription(100)
        .gpus(1);
    // Create storage slices
    let storage_slice1 = StorageSlice::new().id(1);
    let storage_slice2 = StorageSlice::new().id(2);
    let storage_slice3 = StorageSlice::new().id(3);
    // Create nodes with different configurations
    // Node 1 - Web hosting node
    let node1 = Node::new()
        .nodegroupid(1001)
        .uptime(98)
        .add_compute_slice(compute_slice1.clone())
        .add_compute_slice(compute_slice2.clone())
        .add_storage_slice(storage_slice1.clone())
        .add_storage_slice(storage_slice2.clone())
        .devices(devices1.clone())
        .country("US".to_string())
        .capacity(capacity1.clone())
        .birthtime(1640995200) // 2022-01-01 (unix seconds)
        .pubkey("node1_pubkey_abc123xyz789".to_string())
        .signature_node("node1_signature_def456".to_string())
        .signature_farmer("farmer1_signature_ghi789".to_string());
    // Node 2 - High-performance computing node
    let node2 = Node::new()
        .nodegroupid(1002)
        .uptime(99)
        .add_compute_slice(compute_slice3.clone())
        .add_storage_slice(storage_slice3.clone())
        .devices(devices2.clone())
        .country("DE".to_string())
        .capacity(capacity2.clone())
        .birthtime(1672531200) // 2023-01-01
        .pubkey("node2_pubkey_jkl012mno345".to_string())
        .signature_node("node2_signature_pqr678".to_string())
        .signature_farmer("farmer2_signature_stu901".to_string());
    // Node 3 - Storage-focused node
    let node3 = Node::new()
        .nodegroupid(1001)
        .uptime(95)
        .add_storage_slice(storage_slice1.clone())
        .add_storage_slice(storage_slice2.clone())
        .add_storage_slice(storage_slice3.clone())
        .devices(devices1.clone())
        .country("NL".to_string())
        .capacity(capacity1.clone())
        .birthtime(1704067200) // 2024-01-01
        .pubkey("node3_pubkey_vwx234yzab567".to_string())
        .signature_node("node3_signature_cde890".to_string())
        .signature_farmer("farmer1_signature_fgh123".to_string());
    // Save all nodes to database and get their assigned IDs and updated models
    let (node1_id, db_node1) = db
        .collection()
        .expect("can open node collection")
        .set(&node1)
        .expect("can set node");
    let (node2_id, db_node2) = db
        .collection()
        .expect("can open node collection")
        .set(&node2)
        .expect("can set node");
    let (node3_id, db_node3) = db
        .collection()
        .expect("can open node collection")
        .set(&node3)
        .expect("can set node");
    println!("Node 1 assigned ID: {}", node1_id);
    println!("Node 2 assigned ID: {}", node2_id);
    println!("Node 3 assigned ID: {}", node3_id);
    // Print all nodes retrieved from database
    println!("\n--- Nodes Retrieved from Database ---");
    println!("\n1. Web hosting node:");
    print_node_details(&db_node1);
    println!("\n2. High-performance computing node:");
    print_node_details(&db_node2);
    println!("\n3. Storage-focused node:");
    print_node_details(&db_node3);
    // Demonstrate different ways to retrieve nodes from the database
    // 1. Retrieve by nodegroup ID index (nodes 1 and 3 share group 1001)
    println!("\n--- Retrieving Nodes by Different Methods ---");
    println!("\n1. By NodeGroup ID Index (NodeGroup 1001):");
    let nodegroup_nodes = db
        .collection::<Node>()
        .expect("can open node collection")
        .get::<nodegroupid, _>(&1001i32)
        .expect("can load nodes by nodegroup");
    assert_eq!(nodegroup_nodes.len(), 2);
    for (i, node) in nodegroup_nodes.iter().enumerate() {
        println!(" Node {}: ID {}, Country: {}, Uptime: {}%",
            i + 1, node.get_id(), node.country, node.uptime);
    }
    // 2. Retrieve by country index
    println!("\n2. By Country Index (Germany - DE):");
    let country_nodes = db
        .collection::<Node>()
        .expect("can open node collection")
        .get::<country, _>("DE")
        .expect("can load nodes by country");
    assert_eq!(country_nodes.len(), 1);
    print_node_details(&country_nodes[0]);
    // 3. Update node uptime (same id => overwrites the stored record)
    println!("\n3. Updating Node Uptime:");
    let mut updated_node = db_node1.clone();
    updated_node.uptime = 99;
    let (_, uptime_updated_node) = db
        .collection::<Node>()
        .expect("can open node collection")
        .set(&updated_node)
        .expect("can update node");
    println!("Updated node uptime to 99%:");
    println!(" Node ID: {}, New Uptime: {}%", uptime_updated_node.get_id(), uptime_updated_node.uptime);
    // Show all nodes and calculate analytics
    let all_nodes = db
        .collection::<Node>()
        .expect("can open node collection")
        .get_all()
        .expect("can load all nodes");
    println!("\n--- Node Analytics ---");
    println!("Total Nodes: {}", all_nodes.len());
    // Calculate total capacity across the fleet
    let total_storage_gb: f64 = all_nodes.iter().map(|n| n.capacity.storage_gb).sum();
    let total_memory_gb: f64 = all_nodes.iter().map(|n| n.capacity.mem_gb).sum();
    let total_gpu_memory_gb: f64 = all_nodes.iter().map(|n| n.capacity.mem_gb_gpu).sum();
    let total_vcores: i32 = all_nodes.iter().map(|n| n.capacity.vcores).sum();
    let avg_uptime: f64 = all_nodes.iter().map(|n| n.uptime as f64).sum::<f64>() / all_nodes.len() as f64;
    println!("Total Capacity:");
    println!(" Storage: {:.1} GB", total_storage_gb);
    println!(" Memory: {:.1} GB", total_memory_gb);
    println!(" GPU Memory: {:.1} GB", total_gpu_memory_gb);
    println!(" vCores: {}", total_vcores);
    println!(" Average Uptime: {:.1}%", avg_uptime);
    // Count nodes by country
    let mut country_counts = std::collections::HashMap::new();
    for node in &all_nodes {
        *country_counts.entry(&node.country).or_insert(0) += 1;
    }
    println!("\nNodes by Country:");
    // NOTE: HashMap iteration order is unspecified, so these lines may print
    // in a different order between runs.
    for (country, count) in country_counts {
        println!(" {}: {}", country, count);
    }
    // Count total slices
    let total_compute_slices: usize = all_nodes.iter().map(|n| n.computeslices.len()).sum();
    let total_storage_slices: usize = all_nodes.iter().map(|n| n.storageslices.len()).sum();
    println!("\nTotal Slices:");
    println!(" Compute Slices: {}", total_compute_slices);
    println!(" Storage Slices: {}", total_storage_slices);
    // Vendor distribution
    let mut vendor_counts = std::collections::HashMap::new();
    for node in &all_nodes {
        *vendor_counts.entry(&node.devices.vendor).or_insert(0) += 1;
    }
    println!("\nNodes by Vendor:");
    for (vendor, count) in vendor_counts {
        println!(" {}: {}", vendor, count);
    }
    println!("\n--- Model Information ---");
    println!("Node DB Prefix: {}", Node::db_prefix());
}

View File

@@ -0,0 +1,284 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{NodeGroup, PricingPolicy, SLAPolicy};
use heromodels_core::Model;
/// Print a human-readable summary of a [`NodeGroup`], including its nested
/// SLA and pricing policy details, in the fixed order the example expects.
fn print_nodegroup_details(nodegroup: &NodeGroup) {
    println!("\n--- NodeGroup Details ---");
    println!("ID: {}", nodegroup.get_id());
    println!("Farmer ID: {}", nodegroup.farmerid);
    println!("Description: {}", nodegroup.description);
    println!("Secret: {}", nodegroup.secret);
    println!("Compute Slice Pricing (CC): {:.4}", nodegroup.compute_slice_normalized_pricing_cc);
    println!("Storage Slice Pricing (CC): {:.4}", nodegroup.storage_slice_normalized_pricing_cc);
    println!("Signature Farmer: {}", nodegroup.signature_farmer);
    println!("Created At: {}", nodegroup.base_data.created_at);
    println!("Modified At: {}", nodegroup.base_data.modified_at);

    // Bind the nested SLA policy locally to keep the field accesses short.
    let sla = &nodegroup.slapolicy;
    println!(" SLA Policy:");
    println!(" Uptime: {}%", sla.sla_uptime);
    println!(" Bandwidth: {} Mbit/s", sla.sla_bandwidth_mbit);
    println!(" Penalty: {}%", sla.sla_penalty);

    // Same for the nested pricing policy.
    let pricing = &nodegroup.pricingpolicy;
    println!(" Pricing Policy:");
    println!(" Marketplace Year Discounts: {:?}%", pricing.marketplace_year_discounts);
}
/// Example entry point: exercises the Grid4 `NodeGroup` model end to end —
/// builds four nodegroups with different SLA/pricing profiles, persists them,
/// queries them back, updates two of them, and prints aggregate analytics.
fn main() {
    // Create a new DB instance in /tmp/grid4_nodegroups_db, and reset before every run
    // (the `true` flag appears to request a reset — TODO confirm against OurDB::new docs).
    let db = heromodels::db::hero::OurDB::new("/tmp/grid4_nodegroups_db", true).expect("Can create DB");
    println!("Grid4 NodeGroup Models - Basic Usage Example");
    println!("===========================================");
    // Create SLA policies for three service tiers (premium / standard / basic).
    let sla_policy_premium = SLAPolicy {
        sla_uptime: 99,
        sla_bandwidth_mbit: 1000,
        sla_penalty: 200,
    };
    let sla_policy_standard = SLAPolicy {
        sla_uptime: 95,
        sla_bandwidth_mbit: 100,
        sla_penalty: 100,
    };
    let sla_policy_basic = SLAPolicy {
        sla_uptime: 90,
        sla_bandwidth_mbit: 50,
        sla_penalty: 50,
    };
    // Create pricing policies; entries are per-year marketplace discount percentages.
    let pricing_policy_aggressive = PricingPolicy {
        marketplace_year_discounts: vec![40, 50, 60],
    };
    let pricing_policy_standard = PricingPolicy {
        marketplace_year_discounts: vec![30, 40, 50],
    };
    let pricing_policy_conservative = PricingPolicy {
        marketplace_year_discounts: vec![20, 30, 40],
    };
    // Create nodegroups with different configurations
    // NodeGroup 1 - Premium hosting provider
    let nodegroup1 = NodeGroup::new()
        .farmerid(501)
        .secret("encrypted_boot_secret_premium_abc123".to_string())
        .description("Premium hosting with 99% uptime SLA and high-speed connectivity".to_string())
        .slapolicy(sla_policy_premium.clone())
        .pricingpolicy(pricing_policy_aggressive.clone())
        .compute_slice_normalized_pricing_cc(0.0450)
        .storage_slice_normalized_pricing_cc(0.0180)
        .signature_farmer("farmer_501_premium_signature_xyz789".to_string());
    // NodeGroup 2 - Standard business provider
    let nodegroup2 = NodeGroup::new()
        .farmerid(502)
        .secret("encrypted_boot_secret_standard_def456".to_string())
        .description("Standard business hosting with reliable performance".to_string())
        .slapolicy(sla_policy_standard.clone())
        .pricingpolicy(pricing_policy_standard.clone())
        .compute_slice_normalized_pricing_cc(0.0350)
        .storage_slice_normalized_pricing_cc(0.0150)
        .signature_farmer("farmer_502_standard_signature_uvw012".to_string());
    // NodeGroup 3 - Budget-friendly provider
    let nodegroup3 = NodeGroup::new()
        .farmerid(503)
        .secret("encrypted_boot_secret_budget_ghi789".to_string())
        .description("Cost-effective hosting for development and testing".to_string())
        .slapolicy(sla_policy_basic.clone())
        .pricingpolicy(pricing_policy_conservative.clone())
        .compute_slice_normalized_pricing_cc(0.0250)
        .storage_slice_normalized_pricing_cc(0.0120)
        .signature_farmer("farmer_503_budget_signature_rst345".to_string());
    // NodeGroup 4 - Enterprise provider
    let nodegroup4 = NodeGroup::new()
        .farmerid(504)
        .secret("encrypted_boot_secret_enterprise_jkl012".to_string())
        .description("Enterprise-grade infrastructure with maximum reliability".to_string())
        .slapolicy(sla_policy_premium.clone())
        .pricingpolicy(pricing_policy_standard.clone())
        .compute_slice_normalized_pricing_cc(0.0500)
        .storage_slice_normalized_pricing_cc(0.0200)
        .signature_farmer("farmer_504_enterprise_signature_mno678".to_string());
    // Save all nodegroups to database and get their assigned IDs and updated models.
    // `set` returns an (id, stored_model) pair, so the DB-assigned id is captured here.
    let (nodegroup1_id, db_nodegroup1) = db
        .collection()
        .expect("can open nodegroup collection")
        .set(&nodegroup1)
        .expect("can set nodegroup");
    let (nodegroup2_id, db_nodegroup2) = db
        .collection()
        .expect("can open nodegroup collection")
        .set(&nodegroup2)
        .expect("can set nodegroup");
    let (nodegroup3_id, db_nodegroup3) = db
        .collection()
        .expect("can open nodegroup collection")
        .set(&nodegroup3)
        .expect("can set nodegroup");
    let (nodegroup4_id, db_nodegroup4) = db
        .collection()
        .expect("can open nodegroup collection")
        .set(&nodegroup4)
        .expect("can set nodegroup");
    println!("NodeGroup 1 assigned ID: {}", nodegroup1_id);
    println!("NodeGroup 2 assigned ID: {}", nodegroup2_id);
    println!("NodeGroup 3 assigned ID: {}", nodegroup3_id);
    println!("NodeGroup 4 assigned ID: {}", nodegroup4_id);
    // Print all nodegroups retrieved from database
    println!("\n--- NodeGroups Retrieved from Database ---");
    println!("\n1. Premium hosting provider:");
    print_nodegroup_details(&db_nodegroup1);
    println!("\n2. Standard business provider:");
    print_nodegroup_details(&db_nodegroup2);
    println!("\n3. Budget-friendly provider:");
    print_nodegroup_details(&db_nodegroup3);
    println!("\n4. Enterprise provider:");
    print_nodegroup_details(&db_nodegroup4);
    // Demonstrate different ways to retrieve nodegroups from the database
    // 1. Retrieve by farmer ID index; only farmer 502 owns exactly one group above.
    println!("\n--- Retrieving NodeGroups by Different Methods ---");
    println!("\n1. By Farmer ID Index (Farmer 502):");
    let farmer_nodegroups = db
        .collection::<NodeGroup>()
        .expect("can open nodegroup collection")
        .get_by_index("farmerid", &502u32)
        .expect("can load nodegroups by farmer");
    assert_eq!(farmer_nodegroups.len(), 1);
    print_nodegroup_details(&farmer_nodegroups[0]);
    // 2. Update nodegroup pricing: clone the stored model, mutate it, and re-`set` it.
    println!("\n2. Updating NodeGroup Pricing:");
    let mut updated_nodegroup = db_nodegroup3.clone();
    updated_nodegroup.compute_slice_normalized_pricing_cc = 0.0280;
    updated_nodegroup.storage_slice_normalized_pricing_cc = 0.0130;
    let (_, price_updated_nodegroup) = db
        .collection::<NodeGroup>()
        .expect("can open nodegroup collection")
        .set(&updated_nodegroup)
        .expect("can update nodegroup");
    println!("Updated pricing for budget provider:");
    println!(" Compute: {:.4} CC", price_updated_nodegroup.compute_slice_normalized_pricing_cc);
    println!(" Storage: {:.4} CC", price_updated_nodegroup.storage_slice_normalized_pricing_cc);
    // 3. Update SLA policy
    println!("\n3. Updating SLA Policy:");
    let mut sla_updated_nodegroup = db_nodegroup2.clone();
    sla_updated_nodegroup.slapolicy.sla_uptime = 98;
    sla_updated_nodegroup.slapolicy.sla_bandwidth_mbit = 500;
    // Shadowing the mutable binding with the stored result is intentional here.
    let (_, sla_updated_nodegroup) = db
        .collection::<NodeGroup>()
        .expect("can open nodegroup collection")
        .set(&sla_updated_nodegroup)
        .expect("can update nodegroup");
    println!("Updated SLA policy for standard provider:");
    println!(" Uptime: {}%", sla_updated_nodegroup.slapolicy.sla_uptime);
    println!(" Bandwidth: {} Mbit/s", sla_updated_nodegroup.slapolicy.sla_bandwidth_mbit);
    // Show all nodegroups and calculate analytics
    let all_nodegroups = db
        .collection::<NodeGroup>()
        .expect("can open nodegroup collection")
        .get_all()
        .expect("can load all nodegroups");
    println!("\n--- NodeGroup Analytics ---");
    println!("Total NodeGroups: {}", all_nodegroups.len());
    // Calculate pricing statistics (means plus a min/max range for compute pricing).
    let avg_compute_price: f64 = all_nodegroups.iter()
        .map(|ng| ng.compute_slice_normalized_pricing_cc)
        .sum::<f64>() / all_nodegroups.len() as f64;
    let avg_storage_price: f64 = all_nodegroups.iter()
        .map(|ng| ng.storage_slice_normalized_pricing_cc)
        .sum::<f64>() / all_nodegroups.len() as f64;
    let min_compute_price = all_nodegroups.iter()
        .map(|ng| ng.compute_slice_normalized_pricing_cc)
        .fold(f64::INFINITY, f64::min);
    let max_compute_price = all_nodegroups.iter()
        .map(|ng| ng.compute_slice_normalized_pricing_cc)
        .fold(f64::NEG_INFINITY, f64::max);
    println!("Pricing Statistics:");
    println!(" Average Compute Price: {:.4} CC", avg_compute_price);
    println!(" Average Storage Price: {:.4} CC", avg_storage_price);
    println!(" Compute Price Range: {:.4} - {:.4} CC", min_compute_price, max_compute_price);
    // Calculate SLA statistics
    let avg_uptime: f64 = all_nodegroups.iter()
        .map(|ng| ng.slapolicy.sla_uptime as f64)
        .sum::<f64>() / all_nodegroups.len() as f64;
    let avg_bandwidth: f64 = all_nodegroups.iter()
        .map(|ng| ng.slapolicy.sla_bandwidth_mbit as f64)
        .sum::<f64>() / all_nodegroups.len() as f64;
    let avg_penalty: f64 = all_nodegroups.iter()
        .map(|ng| ng.slapolicy.sla_penalty as f64)
        .sum::<f64>() / all_nodegroups.len() as f64;
    println!("\nSLA Statistics:");
    println!(" Average Uptime Guarantee: {:.1}%", avg_uptime);
    println!(" Average Bandwidth Guarantee: {:.0} Mbit/s", avg_bandwidth);
    println!(" Average Penalty Rate: {:.0}%", avg_penalty);
    // Count farmers via a HashSet of farmer ids (duplicates collapse automatically).
    let unique_farmers: std::collections::HashSet<_> = all_nodegroups.iter()
        .map(|ng| ng.farmerid)
        .collect();
    println!("\nFarmer Statistics:");
    println!(" Unique Farmers: {}", unique_farmers.len());
    println!(" NodeGroups per Farmer: {:.1}", all_nodegroups.len() as f64 / unique_farmers.len() as f64);
    // Analyze discount policies
    let total_discount_tiers: usize = all_nodegroups.iter()
        .map(|ng| ng.pricingpolicy.marketplace_year_discounts.len())
        .sum();
    let avg_discount_tiers: f64 = total_discount_tiers as f64 / all_nodegroups.len() as f64;
    println!("\nDiscount Policy Statistics:");
    println!(" Average Discount Tiers: {:.1}", avg_discount_tiers);
    // Find best value providers (high SLA, low price): score = uptime / compute price.
    println!("\n--- Provider Rankings ---");
    let mut providers_with_scores: Vec<_> = all_nodegroups.iter()
        .map(|ng| {
            let value_score = (ng.slapolicy.sla_uptime as f64) / ng.compute_slice_normalized_pricing_cc;
            (ng, value_score)
        })
        .collect();
    // Descending sort by score; `unwrap` on partial_cmp is safe as long as no
    // score is NaN (prices above are non-zero finite constants).
    providers_with_scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
    println!("Best Value Providers (Uptime/Price ratio):");
    for (i, (ng, score)) in providers_with_scores.iter().enumerate() {
        println!(" {}. Farmer {}: {:.0} ({}% uptime, {:.4} CC)",
            i + 1, ng.farmerid, score, ng.slapolicy.sla_uptime, ng.compute_slice_normalized_pricing_cc);
    }
    println!("\n--- Model Information ---");
    println!("NodeGroup DB Prefix: {}", NodeGroup::db_prefix());
}

View File

@@ -0,0 +1,311 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{NodeGroupReputation, NodeReputation};
use heromodels_core::Model;
/// Print a human-readable summary of a [`NodeGroupReputation`], listing each
/// tracked node's reputation/uptime and, when nodes exist, their averages.
fn print_nodegroup_reputation_details(reputation: &NodeGroupReputation) {
    println!("\n--- NodeGroup Reputation Details ---");
    println!("ID: {}", reputation.get_id());
    println!("NodeGroup ID: {}", reputation.nodegroup_id);
    println!("Reputation Score: {}/100", reputation.reputation);
    println!("Uptime: {}%", reputation.uptime);
    println!("Node Count: {}", reputation.nodes.len());
    println!("Created At: {}", reputation.base_data.created_at);
    println!("Modified At: {}", reputation.base_data.modified_at);

    // Nothing further to report for an empty node list (also avoids a
    // divide-by-zero in the averages below).
    if reputation.nodes.is_empty() {
        return;
    }

    println!(" Individual Node Reputations:");
    for (idx, node_rep) in reputation.nodes.iter().enumerate() {
        println!(" {}. Node {}: Reputation {}/100, Uptime {}%",
            idx + 1, node_rep.node_id, node_rep.reputation, node_rep.uptime);
    }

    // Averages over all tracked nodes, computed from separate running sums.
    let node_count = reputation.nodes.len() as f64;
    let reputation_sum: f64 = reputation.nodes.iter().map(|n| n.reputation as f64).sum();
    let uptime_sum: f64 = reputation.nodes.iter().map(|n| n.uptime as f64).sum();
    println!(" Average Node Reputation: {:.1}/100", reputation_sum / node_count);
    println!(" Average Node Uptime: {:.1}%", uptime_sum / node_count);
}
/// Example entry point: exercises the Grid4 reputation models — builds
/// per-node and per-nodegroup reputation records, persists them, queries and
/// updates them, then prints aggregate reputation analytics and rankings.
fn main() {
    // Create a new DB instance in /tmp/grid4_reputation_db, and reset before every run
    let db = heromodels::db::hero::OurDB::new("/tmp/grid4_reputation_db", true).expect("Can create DB");
    println!("Grid4 Reputation Models - Basic Usage Example");
    println!("============================================");
    // Create individual node reputations
    // High-performing nodes
    let node_rep1 = NodeReputation::new()
        .node_id(1001)
        .reputation(85)
        .uptime(99);
    let node_rep2 = NodeReputation::new()
        .node_id(1002)
        .reputation(92)
        .uptime(98);
    let node_rep3 = NodeReputation::new()
        .node_id(1003)
        .reputation(78)
        .uptime(97);
    // Medium-performing nodes
    let node_rep4 = NodeReputation::new()
        .node_id(2001)
        .reputation(65)
        .uptime(94);
    let node_rep5 = NodeReputation::new()
        .node_id(2002)
        .reputation(72)
        .uptime(96);
    // Lower-performing nodes
    let node_rep6 = NodeReputation::new()
        .node_id(3001)
        .reputation(45)
        .uptime(88);
    let node_rep7 = NodeReputation::new()
        .node_id(3002)
        .reputation(38)
        .uptime(85);
    // New nodes with default reputation (uptime 0 marks "no history yet" and is
    // filtered out of the uptime averages below).
    let node_rep8 = NodeReputation::new()
        .node_id(4001)
        .reputation(50) // default
        .uptime(0); // just started
    let node_rep9 = NodeReputation::new()
        .node_id(4002)
        .reputation(50) // default
        .uptime(0); // just started
    // Create nodegroup reputations with different performance profiles
    // NodeGroup 1 - High-performance provider
    let nodegroup_rep1 = NodeGroupReputation::new()
        .nodegroup_id(1001)
        .reputation(85) // high reputation earned over time
        .uptime(98) // excellent uptime
        .add_node_reputation(node_rep1.clone())
        .add_node_reputation(node_rep2.clone())
        .add_node_reputation(node_rep3.clone());
    // NodeGroup 2 - Medium-performance provider
    let nodegroup_rep2 = NodeGroupReputation::new()
        .nodegroup_id(1002)
        .reputation(68) // decent reputation
        .uptime(95) // good uptime
        .add_node_reputation(node_rep4.clone())
        .add_node_reputation(node_rep5.clone());
    // NodeGroup 3 - Struggling provider
    let nodegroup_rep3 = NodeGroupReputation::new()
        .nodegroup_id(1003)
        .reputation(42) // below average reputation
        .uptime(87) // poor uptime
        .add_node_reputation(node_rep6.clone())
        .add_node_reputation(node_rep7.clone());
    // NodeGroup 4 - New provider (default reputation)
    let nodegroup_rep4 = NodeGroupReputation::new()
        .nodegroup_id(1004)
        .reputation(50) // default starting reputation
        .uptime(0) // no history yet
        .add_node_reputation(node_rep8.clone())
        .add_node_reputation(node_rep9.clone());
    // Save all nodegroup reputations to database; `set` returns (id, stored_model).
    let (rep1_id, db_rep1) = db
        .collection()
        .expect("can open reputation collection")
        .set(&nodegroup_rep1)
        .expect("can set reputation");
    let (rep2_id, db_rep2) = db
        .collection()
        .expect("can open reputation collection")
        .set(&nodegroup_rep2)
        .expect("can set reputation");
    let (rep3_id, db_rep3) = db
        .collection()
        .expect("can open reputation collection")
        .set(&nodegroup_rep3)
        .expect("can set reputation");
    let (rep4_id, db_rep4) = db
        .collection()
        .expect("can open reputation collection")
        .set(&nodegroup_rep4)
        .expect("can set reputation");
    println!("NodeGroup Reputation 1 assigned ID: {}", rep1_id);
    println!("NodeGroup Reputation 2 assigned ID: {}", rep2_id);
    println!("NodeGroup Reputation 3 assigned ID: {}", rep3_id);
    println!("NodeGroup Reputation 4 assigned ID: {}", rep4_id);
    // Print all reputation records retrieved from database
    println!("\n--- Reputation Records Retrieved from Database ---");
    println!("\n1. High-performance provider:");
    print_nodegroup_reputation_details(&db_rep1);
    println!("\n2. Medium-performance provider:");
    print_nodegroup_reputation_details(&db_rep2);
    println!("\n3. Struggling provider:");
    print_nodegroup_reputation_details(&db_rep3);
    println!("\n4. New provider:");
    print_nodegroup_reputation_details(&db_rep4);
    // Demonstrate different ways to retrieve reputation records from the database
    // 1. Retrieve by nodegroup ID index
    println!("\n--- Retrieving Reputation by Different Methods ---");
    println!("\n1. By NodeGroup ID Index (NodeGroup 1002):");
    let nodegroup_reps = db
        .collection::<NodeGroupReputation>()
        .expect("can open reputation collection")
        .get_by_index("nodegroup_id", &1002u32)
        .expect("can load reputation by nodegroup");
    assert_eq!(nodegroup_reps.len(), 1);
    print_nodegroup_reputation_details(&nodegroup_reps[0]);
    // 2. Update reputation scores (simulate performance improvement)
    println!("\n2. Updating Reputation Scores (Performance Improvement):");
    let mut improved_rep = db_rep3.clone();
    improved_rep.reputation = 55; // improved from 42
    improved_rep.uptime = 92; // improved from 87
    // Also improve individual node reputations
    for node_rep in &mut improved_rep.nodes {
        node_rep.reputation += 10; // boost each node's reputation
        node_rep.uptime += 5; // improve uptime
    }
    let (_, updated_rep) = db
        .collection::<NodeGroupReputation>()
        .expect("can open reputation collection")
        .set(&improved_rep)
        .expect("can update reputation");
    println!("Updated reputation for struggling provider:");
    print_nodegroup_reputation_details(&updated_rep);
    // 3. Add new node to existing nodegroup reputation
    println!("\n3. Adding New Node to Existing NodeGroup:");
    let new_node_rep = NodeReputation::new()
        .node_id(1004)
        .reputation(88)
        .uptime(99);
    let mut expanded_rep = db_rep1.clone();
    expanded_rep.add_node_reputation(new_node_rep);
    // Recalculate nodegroup reputation as the integer mean of its node
    // reputations (integer division truncates toward zero).
    let total_node_rep: i32 = expanded_rep.nodes.iter().map(|n| n.reputation).sum();
    expanded_rep.reputation = total_node_rep / expanded_rep.nodes.len() as i32;
    // Shadowing the mutable binding with the stored result is intentional here.
    let (_, expanded_rep) = db
        .collection::<NodeGroupReputation>()
        .expect("can open reputation collection")
        .set(&expanded_rep)
        .expect("can update reputation");
    println!("Added new high-performing node to top provider:");
    print_nodegroup_reputation_details(&expanded_rep);
    // Show all reputation records and calculate analytics
    let all_reps = db
        .collection::<NodeGroupReputation>()
        .expect("can open reputation collection")
        .get_all()
        .expect("can load all reputations");
    println!("\n--- Reputation Analytics ---");
    println!("Total NodeGroup Reputations: {}", all_reps.len());
    // Calculate overall statistics. The uptime average deliberately excludes
    // new providers (uptime == 0); note it divides by the filtered count, so it
    // would be 0/0 (NaN) if every provider were new — fine for this example.
    let avg_nodegroup_reputation: f64 = all_reps.iter()
        .map(|r| r.reputation as f64)
        .sum::<f64>() / all_reps.len() as f64;
    let avg_nodegroup_uptime: f64 = all_reps.iter()
        .filter(|r| r.uptime > 0) // exclude new providers with 0 uptime
        .map(|r| r.uptime as f64)
        .sum::<f64>() / all_reps.iter().filter(|r| r.uptime > 0).count() as f64;
    println!("Overall Statistics:");
    println!(" Average NodeGroup Reputation: {:.1}/100", avg_nodegroup_reputation);
    println!(" Average NodeGroup Uptime: {:.1}%", avg_nodegroup_uptime);
    // Count reputation tiers
    let excellent_reps = all_reps.iter().filter(|r| r.reputation >= 80).count();
    let good_reps = all_reps.iter().filter(|r| r.reputation >= 60 && r.reputation < 80).count();
    let average_reps = all_reps.iter().filter(|r| r.reputation >= 40 && r.reputation < 60).count();
    let poor_reps = all_reps.iter().filter(|r| r.reputation < 40).count();
    println!("\nReputation Distribution:");
    println!(" Excellent (80-100): {}", excellent_reps);
    println!(" Good (60-79): {}", good_reps);
    println!(" Average (40-59): {}", average_reps);
    println!(" Poor (0-39): {}", poor_reps);
    // Calculate total nodes and their statistics; node uptime average likewise
    // skips nodes that have not run yet.
    let total_nodes: usize = all_reps.iter().map(|r| r.nodes.len()).sum();
    let all_node_reps: Vec<i32> = all_reps.iter()
        .flat_map(|r| &r.nodes)
        .map(|n| n.reputation)
        .collect();
    let all_node_uptimes: Vec<i32> = all_reps.iter()
        .flat_map(|r| &r.nodes)
        .filter(|n| n.uptime > 0)
        .map(|n| n.uptime)
        .collect();
    let avg_node_reputation: f64 = all_node_reps.iter().sum::<i32>() as f64 / all_node_reps.len() as f64;
    let avg_node_uptime: f64 = all_node_uptimes.iter().sum::<i32>() as f64 / all_node_uptimes.len() as f64;
    println!("\nNode-Level Statistics:");
    println!(" Total Nodes: {}", total_nodes);
    println!(" Average Node Reputation: {:.1}/100", avg_node_reputation);
    println!(" Average Node Uptime: {:.1}%", avg_node_uptime);
    // Find best and worst performing nodegroups; `unwrap` is safe because four
    // records were inserted above, so `all_reps` is non-empty.
    let best_nodegroup = all_reps.iter().max_by_key(|r| r.reputation).unwrap();
    let worst_nodegroup = all_reps.iter().min_by_key(|r| r.reputation).unwrap();
    println!("\nPerformance Leaders:");
    println!(" Best NodeGroup: {} (Reputation: {}, Uptime: {}%)",
        best_nodegroup.nodegroup_id, best_nodegroup.reputation, best_nodegroup.uptime);
    println!(" Worst NodeGroup: {} (Reputation: {}, Uptime: {}%)",
        worst_nodegroup.nodegroup_id, worst_nodegroup.reputation, worst_nodegroup.uptime);
    // Rank nodegroups by reputation, highest first.
    let mut ranked_nodegroups: Vec<_> = all_reps.iter().collect();
    ranked_nodegroups.sort_by(|a, b| b.reputation.cmp(&a.reputation));
    println!("\nNodeGroup Rankings (by Reputation):");
    for (i, rep) in ranked_nodegroups.iter().enumerate() {
        // NOTE(review): a reputation above 100 would fall into the `_` arm and
        // print as "Poor" — confirm the model clamps reputation to 0..=100.
        let status = match rep.reputation {
            80..=100 => "Excellent",
            60..=79 => "Good",
            40..=59 => "Average",
            _ => "Poor",
        };
        println!(" {}. NodeGroup {}: {} ({}/100, {}% uptime)",
            i + 1, rep.nodegroup_id, status, rep.reputation, rep.uptime);
    }
    println!("\n--- Model Information ---");
    println!("NodeGroupReputation DB Prefix: {}", NodeGroupReputation::db_prefix());
}

View File

@@ -0,0 +1,53 @@
// heroledger.rhai - Demonstration of HeroLedger models in Rhai
// Each model follows the same builder pattern: a `new_*()` constructor,
// chained setter calls, and a terminal `save_*()` that persists the record
// and returns the stored object (the saved object exposes `get_id()`,
// so ids shown below are assigned at save time).
print("=== HeroLedger Models Demo ===");
// Create a new user
print("\n--- Creating User ---");
let new_user = new_user()
.name("Alice Johnson")
.email("alice@herocode.com")
.pubkey("0x1234567890abcdef")
.status("Active")
.save_user();
print("Created user: " + new_user.get_name());
print("User ID: " + new_user.get_id());
print("User email: " + new_user.get_email());
print("User pubkey: " + new_user.get_pubkey());
// Create a new group
print("\n--- Creating Group ---");
let new_group = new_group()
.name("HeroCode Developers")
.description("A group for HeroCode development team members")
.visibility("Public")
.save_group();
print("Created group: " + new_group.get_name());
print("Group ID: " + new_group.get_id());
print("Group description: " + new_group.get_description());
// Create a new account
print("\n--- Creating Account ---");
let new_account = new_account()
.name("Alice's Main Account")
.description("Primary account for Alice Johnson")
.currency("USD")
.save_account();
print("Created account: " + new_account.get_name());
print("Account ID: " + new_account.get_id());
print("Account currency: " + new_account.get_currency());
// Create a new DNS zone
print("\n--- Creating DNS Zone ---");
let new_dns_zone = new_dns_zone()
.name("herocode.com")
.description("Main domain for HeroCode")
.save_dns_zone();
print("Created DNS zone: " + new_dns_zone.get_name());
print("DNS zone ID: " + new_dns_zone.get_id());
print("\n=== Demo Complete ===");

View File

@@ -0,0 +1,50 @@
use heromodels::models::heroledger::rhai::register_heroledger_rhai_modules;
use heromodels_core::db::hero::OurDB;
use rhai::{Dynamic, Engine};
use std::sync::Arc;
use std::{fs, path::Path};
const CALLER_ID: &str = "example_caller";
/// Sets up a Rhai engine with the heroledger modules registered, runs the
/// bundled `heroledger.rhai` demo script, and removes the temporary database
/// directory afterwards.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Fresh Rhai engine for this run.
    let mut engine = Engine::new();

    // The backing store lives in a throwaway directory; drop any leftovers
    // from a previous run before creating it.
    let db_path = "temp_heroledger_db";
    if Path::new(db_path).exists() {
        fs::remove_dir_all(db_path)?;
    }
    let _db = Arc::new(OurDB::new(db_path, true).expect("Failed to create database"));

    // Make the heroledger model API visible to scripts.
    register_heroledger_rhai_modules(&mut engine);

    // Scripts read their configuration from the engine's default tag.
    let mut tag_map = rhai::Map::new();
    tag_map.insert("DB_PATH".into(), db_path.into());
    tag_map.insert("CALLER_ID".into(), CALLER_ID.into());
    tag_map.insert("CONTEXT_ID".into(), CALLER_ID.into());
    engine.set_default_tag(Dynamic::from(tag_map)); // Or pass via CallFnOptions

    // Locate the demo script relative to the crate root.
    let script_path = Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("examples")
        .join("heroledger")
        .join("heroledger.rhai");
    println!("Script path: {}", script_path.display());
    let script = fs::read_to_string(&script_path)?;

    println!("--- Running HeroLedger Rhai Script ---");
    if let Err(e) = engine.eval::<()>(&script) {
        eprintln!("\n--- Script execution failed: {} ---", e);
    } else {
        println!("\n--- Script executed successfully! ---");
    }

    // Remove the temporary database directory regardless of script outcome.
    fs::remove_dir_all(db_path)?;
    println!("--- Cleaned up temporary database. ---");
    Ok(())
}

View File

@@ -0,0 +1,15 @@
# Heroledger Postgres Example
This example demonstrates how to use the Heroledger `User` model against Postgres using the `heromodels::db::postgres` backend.
- Connects to Postgres with user `postgres` and password `test123` on `localhost:5432`.
- Creates the table and indexes automatically on first use.
- Shows basic CRUD and an index lookup on `username`.
Run it:
```bash
cargo run -p heromodels --example heroledger_example
```
Make sure Postgres is running locally and accessible with the credentials above.

View File

@@ -0,0 +1,54 @@
use heromodels::db::postgres::{Config, Postgres};
use heromodels::db::{Collection, Db};
use heromodels::models::heroledger::user::user_index::username;
use heromodels::models::heroledger::user::{SecretBox, User};
/// Demonstrates CRUD plus a `username` index lookup for the heroledger
/// `User` model against a local Postgres instance
/// (user `postgres`, password `test123`, `localhost:5432`).
fn main() {
    // Connect with the credentials documented in the example README.
    let db = Postgres::new(
        Config::new()
            .user(Some("postgres".into()))
            .password(Some("test123".into()))
            .host(Some("localhost".into()))
            .port(Some(5432)),
    )
    .expect("Can connect to Postgres");
    println!("Heroledger User - Postgres Example");
    println!("==================================");

    let users = db.collection::<User>().expect("open user collection");

    // Start from a clean slate: best-effort delete of rows left behind by
    // earlier runs (individual delete failures are ignored on purpose).
    for leftover in users.get_all().unwrap_or_default() {
        let _ = users.delete_by_id(leftover.get_id());
    }

    // Build and persist a user carrying an encrypted profile blob.
    let profile = SecretBox::new().data(vec![1, 2, 3]).nonce(vec![9, 9, 9]).build();
    let alice = User::new(0)
        .username("alice")
        .pubkey("PUBKEY_A")
        .add_email("alice@example.com")
        .add_userprofile(profile)
        .build();
    let (id, stored) = users.set(&alice).expect("store user");
    println!("Stored user id={id} username={} pubkey={}", stored.username, stored.pubkey);

    // Look the user up via the `username` index, then by primary id.
    let by_idx = users.get::<username, _>("alice").expect("by username");
    println!("Found {} user(s) with username=alice", by_idx.len());
    let fetched = users.get_by_id(id).expect("get by id").expect("exists");
    println!("Fetched by id={} username={} emails={:?}", id, fetched.username, fetched.email);

    // Update
    let updated = fetched.clone().add_email("work@alice.example");
    let (_, back) = users.set(&updated).expect("update user");
    println!("Updated emails = {:?}", back.email);

    // Delete
    users.delete_by_id(id).expect("delete user");
    println!("Deleted user id={id}");
}

View File

@@ -73,7 +73,7 @@ fn main() {
// The `#[model]` derive handles `created_at` and `updated_at` in `base_data`.
// `base_data.touch()` might be called internally by setters or needs explicit call if fields are set directly.
// For builder pattern, the final state of `base_data.updated_at` reflects the time of the last builder call if `touch()` is implicit.
// For builder pattern, the final state of `base_data.modified_at` reflects the time of the last builder call if `touch()` is implicit.
// If not, one might call `contract.base_data.touch()` after building.
println!("\n--- Initial Contract Details ---");

View File

@@ -1,8 +1,11 @@
use heromodels::db::postgres::Config;
use heromodels::db::{Collection, Db};
use heromodels::models::userexample::user::user_index::{is_active, username};
use heromodels::models::userexample::user::user_index::{email, username};
use heromodels::models::{Comment, User};
use heromodels_core::Model;
// For demonstrating embedded/nested indexes
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use heromodels::models::grid4::node::node_index::{country as node_country, pubkey as node_pubkey};
// Helper function to print user details
fn print_user_details(user: &User) {
@@ -37,14 +40,31 @@ fn main() {
)
.expect("Can connect to postgress");
// Unique suffix to avoid collisions with legacy rows from prior runs
use std::time::{SystemTime, UNIX_EPOCH};
let ts = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
let user1_name = format!("johndoe_{}", ts);
let user2_name = format!("janesmith_{}", ts);
let user3_name = format!("willism_{}", ts);
let user4_name = format!("carrols_{}", ts);
let user1_email = format!("john.doe+{}@example.com", ts);
let user2_email = format!("jane.smith+{}@example.com", ts);
let user3_email = format!("willis.masters+{}@example.com", ts);
let user4_email = format!("carrol.smith+{}@example.com", ts);
println!("Hero Models - Basic Usage Example");
println!("================================");
// Clean up any existing data to ensure consistent results
println!("Cleaning up existing data...");
let user_collection = db.collection::<User>().expect("can open user collection");
let comment_collection = db.collection::<Comment>().expect("can open comment collection");
let comment_collection = db
.collection::<Comment>()
.expect("can open comment collection");
// Clear all existing users and comments
if let Ok(existing_users) = user_collection.get_all() {
for user in existing_users {
@@ -62,32 +82,32 @@ fn main() {
// User 1
let user1 = User::new()
.username("johndoe")
.email("john.doe@example.com")
.username(&user1_name)
.email(&user1_email)
.full_name("John Doe")
.is_active(false)
.build();
// User 2
let user2 = User::new()
.username("janesmith")
.email("jane.smith@example.com")
.username(&user2_name)
.email(&user2_email)
.full_name("Jane Smith")
.is_active(true)
.build();
// User 3
let user3 = User::new()
.username("willism")
.email("willis.masters@example.com")
.username(&user3_name)
.email(&user3_email)
.full_name("Willis Masters")
.is_active(true)
.build();
// User 4
let user4 = User::new()
.username("carrols")
.email("carrol.smith@example.com")
.username(&user4_name)
.email(&user4_email)
.full_name("Carrol Smith")
.is_active(false)
.build();
@@ -143,66 +163,95 @@ fn main() {
let stored_users = db
.collection::<User>()
.expect("can open user collection")
.get::<username, _>("johndoe")
.get::<username, _>(&user1_name)
.expect("can load stored user");
assert_eq!(stored_users.len(), 1);
print_user_details(&stored_users[0]);
// 2. Retrieve by active status
println!("\n2. By Active Status (Active = true):");
let active_users = db
// 2. Retrieve by email index
println!("\n2. By Email Index:");
let by_email = db
.collection::<User>()
.expect("can open user collection")
.get::<is_active, _>(&true)
.expect("can load stored users");
assert_eq!(active_users.len(), 2);
for active_user in active_users.iter() {
print_user_details(active_user);
}
.get::<email, _>(&user2_email)
.expect("can load stored user by email");
assert_eq!(by_email.len(), 1);
print_user_details(&by_email[0]);
// 3. Delete a user and show the updated results
println!("\n3. After Deleting a User:");
let user_to_delete_id = active_users[0].get_id();
let user_to_delete_id = stored_users[0].get_id();
println!("Deleting user with ID: {user_to_delete_id}");
db.collection::<User>()
.expect("can open user collection")
.delete_by_id(user_to_delete_id)
.expect("can delete existing user");
// Show remaining active users
let active_users = db
// Verify deletion by querying the same username again
let should_be_empty = db
.collection::<User>()
.expect("can open user collection")
.get::<is_active, _>(&true)
.expect("can load stored users");
println!(" a. Remaining Active Users:");
assert_eq!(active_users.len(), 1);
for active_user in active_users.iter() {
print_user_details(active_user);
}
// Show inactive users
let inactive_users = db
.collection::<User>()
.expect("can open user collection")
.get::<is_active, _>(&false)
.expect("can load stored users");
println!(" b. Inactive Users:");
assert_eq!(inactive_users.len(), 2);
for inactive_user in inactive_users.iter() {
print_user_details(inactive_user);
}
.get::<username, _>(&user1_name)
.expect("can query by username after delete");
println!(" a. Query by username '{}' after delete -> {} results", user1_name, should_be_empty.len());
assert_eq!(should_be_empty.len(), 0);
// Delete a user based on an index for good measure
db.collection::<User>()
.expect("can open user collection")
.delete::<username, _>("janesmith")
.delete::<username, _>(&user4_name)
.expect("can delete existing user");
// Demonstrate embedded/nested indexes with Grid4 Node
println!("\n--- Demonstrating Embedded/Nested Indexes (Grid4::Node) ---");
println!("Node indexed fields: {:?}", Node::indexed_fields());
// Build a minimal node with nested data and persist it
let cs = ComputeSlice::new()
.nodeid(42)
.slice_id(1)
.mem_gb(32.0)
.storage_gb(512.0)
.passmark(6000)
.vcores(16)
.gpus(1)
.price_cc(0.33);
let dev = DeviceInfo { vendor: "ACME".into(), ..Default::default() };
let node = Node::new()
.nodegroupid(101)
.uptime(99)
.add_compute_slice(cs)
.devices(dev)
.country("BE")
.pubkey("EX_NODE_PK_1")
.build();
let (node_id, _stored_node) = db
.collection::<Node>()
.expect("can open node collection")
.set(&node)
.expect("can set node");
println!("Stored node id: {}", node_id);
// Query by top-level indexes
let be_nodes = db
.collection::<Node>()
.expect("can open node collection")
.get::<node_country, _>("BE")
.expect("can query nodes by country");
println!("Nodes in BE (count may include legacy rows): {}", be_nodes.len());
let by_pk = db
.collection::<Node>()
.expect("can open node collection")
.get::<node_pubkey, _>("EX_NODE_PK_1")
.expect("can query node by pubkey");
assert!(by_pk.iter().any(|n| n.get_id() == node_id));
// Note: Nested path indexes (e.g., devices.vendor, computeslices.passmark) are created and used
// for DB-side indexing, but are not yet exposed as typed Index keys in the API. They appear in
// Node::indexed_fields() and contribute to Model::db_keys(), enabling performant JSONB GIN indexes.
println!("\n--- User Model Information ---");
println!("User DB Prefix: {}", User::db_prefix());
@@ -212,7 +261,7 @@ fn main() {
// 1. Create and save a comment
println!("\n1. Creating a Comment:");
let comment = Comment::new()
.user_id(db_user1.get_id()) // commenter's user ID
.user_id(db_user2.get_id()) // commenter's user ID (use an existing user)
.content("This is a comment on the user")
.build();
@@ -230,7 +279,7 @@ fn main() {
// 3. Associate the comment with a user
println!("\n2. Associating Comment with User:");
let mut updated_user = db_user1.clone();
let mut updated_user = db_user2.clone();
updated_user.base_data.add_comment(db_comment.get_id());
// Save the updated user and get the new version

View File

@@ -8,8 +8,8 @@ use std::{
collections::HashSet,
path::PathBuf,
sync::{
atomic::{AtomicU32, Ordering},
Arc, Mutex,
atomic::{AtomicU32, Ordering},
},
};

View File

@@ -0,0 +1,148 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use std::sync::Arc;
use heromodels::models::access::Access;
type RhaiAccess = Access;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
/// Rhai scripting bindings for the `Access` model: a builder-style API
/// (`new_access` plus chainable setters) and read-only getters, mirroring
/// the other model wrapper modules.
#[export_module]
mod rhai_access_module {
    // --- Constructor ---

    /// Creates an empty `Access` record for the script to populate.
    #[rhai_fn(name = "new_access", return_raw)]
    pub fn new_access() -> Result<RhaiAccess, Box<EvalAltResult>> {
        Ok(Access::new())
    }

    // --- Builder-style setters ---
    // Each setter moves the value out of the script-owned slot with
    // `std::mem::take`, runs the typed builder method, writes the result
    // back, and returns a clone so Rhai scripts can keep chaining.

    /// Sets the id of the object this access entry refers to.
    #[rhai_fn(name = "object_id", return_raw)]
    pub fn set_object_id(
        access: &mut RhaiAccess,
        object_id: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        // Rhai integers are i64; model ids are u32, so convert with a range check.
        let id = macros::id_from_i64_to_u32(object_id)?;
        *access = std::mem::take(access).object_id(id);
        Ok(access.clone())
    }

    /// Sets the public key of the circle this entry belongs to.
    #[rhai_fn(name = "circle_public_key", return_raw)]
    pub fn set_circle_pk(
        access: &mut RhaiAccess,
        circle_pk: String,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        *access = std::mem::take(access).circle_pk(circle_pk);
        Ok(access.clone())
    }

    /// Sets the group id (range-checked i64 -> u32).
    #[rhai_fn(name = "group_id", return_raw)]
    pub fn set_group_id(
        access: &mut RhaiAccess,
        group_id: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        let id = macros::id_from_i64_to_u32(group_id)?;
        *access = std::mem::take(access).group_id(id);
        Ok(access.clone())
    }

    /// Sets the contact id (range-checked i64 -> u32).
    #[rhai_fn(name = "contact_id", return_raw)]
    pub fn set_contact_id(
        access: &mut RhaiAccess,
        contact_id: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        let id = macros::id_from_i64_to_u32(contact_id)?;
        *access = std::mem::take(access).contact_id(id);
        Ok(access.clone())
    }

    /// Sets the expiration timestamp (stored as-is; units follow the model).
    #[rhai_fn(name = "expires_at", return_raw)]
    pub fn set_expires_at(
        access: &mut RhaiAccess,
        expires_at: i64,
    ) -> Result<RhaiAccess, Box<EvalAltResult>> {
        *access = std::mem::take(access).expires_at(expires_at);
        Ok(access.clone())
    }

    // --- Getters (u32 fields are widened to Rhai's i64) ---

    #[rhai_fn(name = "get_access_id")]
    pub fn get_access_id(access: &mut RhaiAccess) -> i64 {
        access.base.id as i64
    }

    #[rhai_fn(name = "get_access_object_id")]
    pub fn get_access_object_id(access: &mut RhaiAccess) -> i64 {
        access.object_id as i64
    }

    #[rhai_fn(name = "get_access_circle_pk")]
    pub fn get_access_circle_pk(access: &mut RhaiAccess) -> String {
        access.circle_pk.clone()
    }

    #[rhai_fn(name = "get_access_group_id")]
    pub fn get_access_group_id(access: &mut RhaiAccess) -> i64 {
        access.group_id as i64
    }

    #[rhai_fn(name = "get_access_contact_id")]
    pub fn get_access_contact_id(access: &mut RhaiAccess) -> i64 {
        access.contact_id as i64
    }

    #[rhai_fn(name = "get_access_expires_at")]
    pub fn get_access_expires_at(access: &mut RhaiAccess) -> i64 {
        access.expires_at
    }

    #[rhai_fn(name = "get_access_created_at")]
    pub fn get_access_created_at(access: &mut RhaiAccess) -> i64 {
        access.base.created_at
    }

    #[rhai_fn(name = "get_access_modified_at")]
    pub fn get_access_modified_at(access: &mut RhaiAccess) -> i64 {
        access.base.modified_at
    }
}
/// Registers the `Access` scripting API on `engine`: the builder/getter
/// module above plus authorization-checked CRUD entry points
/// (`save_access`, `get_access`, `delete_access`).
pub fn register_access_rhai_module(engine: &mut Engine) {
    let mut m = exported_module!(rhai_access_module);

    // Macro-generated wrappers that enforce caller authorization before
    // touching the database.
    register_authorized_create_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "save_access",
        resource_type_str: "Access",
        rhai_return_rust_type: heromodels::models::access::Access
    );
    register_authorized_get_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "get_access",
        resource_type_str: "Access",
        rhai_return_rust_type: heromodels::models::access::Access
    );
    register_authorized_delete_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "delete_access",
        resource_type_str: "Access",
        rhai_return_rust_type: heromodels::models::access::Access
    );

    engine.register_global_module(m.into());
}

View File

@@ -0,0 +1,422 @@
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Engine, EvalAltResult, Module, Position, FLOAT, INT};
use std::mem;
use std::sync::Arc;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
use heromodels::models::biz::product::{Product, ProductComponent, ProductStatus, ProductType};
use heromodels::models::biz::company::{BusinessType, Company, CompanyStatus};
use heromodels::models::biz::sale::{Sale, SaleItem, SaleStatus};
use heromodels::models::biz::shareholder::{Shareholder, ShareholderType};
type RhaiProduct = Product;
type RhaiProductComponent = ProductComponent;
type RhaiCompany = Company;
type RhaiSale = Sale;
type RhaiSaleItem = SaleItem;
type RhaiShareholder = Shareholder;
/// Rhai bindings for `ProductComponent`: builder-style setters plus
/// read-only getters.
#[export_module]
mod rhai_product_component_module {
    use super::{RhaiProductComponent, INT};

    /// Creates an empty component for the script to fill in.
    #[rhai_fn(name = "new_product_component", return_raw)]
    pub fn new_product_component() -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        Ok(ProductComponent::new())
    }

    /// Sets the component name (chainable).
    #[rhai_fn(name = "name", return_raw)]
    pub fn set_name(
        component: &mut RhaiProductComponent,
        name: String,
    ) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        *component = std::mem::take(component).name(name);
        Ok(component.clone())
    }

    /// Sets the component description (chainable).
    #[rhai_fn(name = "description", return_raw)]
    pub fn set_description(
        component: &mut RhaiProductComponent,
        description: String,
    ) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        *component = std::mem::take(component).description(description);
        Ok(component.clone())
    }

    /// Sets the quantity; Rhai's INT is i64 while the model stores u32.
    #[rhai_fn(name = "quantity", return_raw)]
    pub fn set_quantity(
        component: &mut RhaiProductComponent,
        quantity: INT,
    ) -> Result<RhaiProductComponent, Box<EvalAltResult>> {
        *component = std::mem::take(component).quantity(quantity as u32);
        Ok(component.clone())
    }

    // --- Getters ---

    #[rhai_fn(name = "get_name")]
    pub fn get_name(component: &mut RhaiProductComponent) -> String {
        component.name.clone()
    }

    #[rhai_fn(name = "get_description")]
    pub fn get_description(component: &mut RhaiProductComponent) -> String {
        component.description.clone()
    }

    #[rhai_fn(name = "get_quantity")]
    pub fn get_quantity(component: &mut RhaiProductComponent) -> INT {
        component.quantity as INT
    }
}
/// Rhai bindings for the `Product` model: a chainable builder API,
/// enum setters that accept case-insensitive strings, and read-only
/// getters (enums are rendered via their `Debug` form).
#[export_module]
mod rhai_product_module {
    use super::{Array, ProductStatus, ProductType, RhaiProduct, RhaiProductComponent, FLOAT, INT};

    /// Creates an empty `Product` for the script to populate.
    #[rhai_fn(name = "new_product", return_raw)]
    pub fn new_product() -> Result<RhaiProduct, Box<EvalAltResult>> {
        Ok(Product::new())
    }

    // --- Builder-style setters ---
    // Each setter takes the product out of the script slot with
    // `std::mem::take`, applies the typed builder method, writes it back
    // and returns a clone for chaining.

    /// Sets the product name.
    #[rhai_fn(name = "name", return_raw)]
    pub fn set_name(
        product: &mut RhaiProduct,
        name: String,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).name(name);
        Ok(product.clone())
    }

    /// Sets the product description.
    #[rhai_fn(name = "description", return_raw)]
    pub fn set_description(
        product: &mut RhaiProduct,
        description: String,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).description(description);
        Ok(product.clone())
    }

    /// Sets the product price.
    #[rhai_fn(name = "price", return_raw)]
    pub fn set_price(
        product: &mut RhaiProduct,
        price: FLOAT,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).price(price);
        Ok(product.clone())
    }

    /// Sets the product category.
    #[rhai_fn(name = "category", return_raw)]
    pub fn set_category(
        product: &mut RhaiProduct,
        category: String,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).category(category);
        Ok(product.clone())
    }

    /// Sets the maximum purchasable amount (Rhai i64 narrowed to u32).
    #[rhai_fn(name = "max_amount", return_raw)]
    pub fn set_max_amount(
        product: &mut RhaiProduct,
        max_amount: INT,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).max_amount(max_amount as u32);
        Ok(product.clone())
    }

    /// Sets the timestamp until which the product can be purchased.
    #[rhai_fn(name = "purchase_till", return_raw)]
    pub fn set_purchase_till(
        product: &mut RhaiProduct,
        purchase_till: INT,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).purchase_till(purchase_till);
        Ok(product.clone())
    }

    /// Sets the timestamp until which the product stays active.
    #[rhai_fn(name = "active_till", return_raw)]
    pub fn set_active_till(
        product: &mut RhaiProduct,
        active_till: INT,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).active_till(active_till);
        Ok(product.clone())
    }

    /// Sets the product type from a case-insensitive string; rejects
    /// anything outside the known variants.
    #[rhai_fn(name = "type", return_raw)]
    pub fn set_type(
        product: &mut RhaiProduct,
        type_str: String,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        let product_type = match type_str.to_lowercase().as_str() {
            "physical" => ProductType::Physical,
            "digital" => ProductType::Digital,
            "service" => ProductType::Service,
            "subscription" => ProductType::Subscription,
            _ => {
                return Err(EvalAltResult::ErrorSystem(
                    "Invalid ProductType".to_string(),
                    "Must be one of: Physical, Digital, Service, Subscription".into(),
                )
                .into())
            }
        };
        *product = std::mem::take(product).product_type(product_type);
        Ok(product.clone())
    }

    /// Sets the product status from a case-insensitive string.
    #[rhai_fn(name = "status", return_raw)]
    pub fn set_status(
        product: &mut RhaiProduct,
        status_str: String,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        let status = match status_str.to_lowercase().as_str() {
            "active" => ProductStatus::Active,
            "inactive" => ProductStatus::Inactive,
            "discontinued" => ProductStatus::Discontinued,
            _ => {
                return Err(EvalAltResult::ErrorSystem(
                    "Invalid ProductStatus".to_string(),
                    "Must be one of: Active, Inactive, Discontinued".into(),
                )
                .into())
            }
        };
        *product = std::mem::take(product).status(status);
        Ok(product.clone())
    }

    /// Appends a single component to the product.
    #[rhai_fn(name = "add_component", return_raw)]
    pub fn add_component(
        product: &mut RhaiProduct,
        component: RhaiProductComponent,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        *product = std::mem::take(product).add_component(component);
        Ok(product.clone())
    }

    /// Replaces the component list; every array element must cast to a
    /// `ProductComponent`, otherwise the whole call errors out.
    #[rhai_fn(name = "set_components", return_raw)]
    pub fn set_components(
        product: &mut RhaiProduct,
        components: Array,
    ) -> Result<RhaiProduct, Box<EvalAltResult>> {
        let mut parsed = Vec::with_capacity(components.len());
        for entry in components {
            if let Ok(component) = entry.try_cast::<RhaiProductComponent>() {
                parsed.push(component);
            } else {
                return Err(EvalAltResult::ErrorSystem(
                    "Invalid component type".to_string(),
                    "All components must be ProductComponent objects".into(),
                )
                .into());
            }
        }
        *product = std::mem::take(product).components(parsed);
        Ok(product.clone())
    }

    // --- Getters (u32 fields widened to Rhai's INT) ---

    #[rhai_fn(name = "get_id")]
    pub fn get_id(product: &mut RhaiProduct) -> i64 {
        product.base.id as i64
    }

    #[rhai_fn(name = "get_name")]
    pub fn get_name(product: &mut RhaiProduct) -> String {
        product.name.clone()
    }

    #[rhai_fn(name = "get_description")]
    pub fn get_description(product: &mut RhaiProduct) -> String {
        product.description.clone()
    }

    #[rhai_fn(name = "get_price")]
    pub fn get_price(product: &mut RhaiProduct) -> FLOAT {
        product.price
    }

    #[rhai_fn(name = "get_category")]
    pub fn get_category(product: &mut RhaiProduct) -> String {
        product.category.clone()
    }

    #[rhai_fn(name = "get_max_amount")]
    pub fn get_max_amount(product: &mut RhaiProduct) -> INT {
        product.max_amount as INT
    }

    #[rhai_fn(name = "get_purchase_till")]
    pub fn get_purchase_till(product: &mut RhaiProduct) -> INT {
        product.purchase_till
    }

    #[rhai_fn(name = "get_active_till")]
    pub fn get_active_till(product: &mut RhaiProduct) -> INT {
        product.active_till
    }

    /// Debug rendering of the type enum (e.g. "Physical").
    #[rhai_fn(name = "get_type")]
    pub fn get_type(product: &mut RhaiProduct) -> String {
        format!("{:?}", product.product_type)
    }

    /// Debug rendering of the status enum (e.g. "Active").
    #[rhai_fn(name = "get_status")]
    pub fn get_status(product: &mut RhaiProduct) -> String {
        format!("{:?}", product.status)
    }

    /// Returns the components as a Rhai array of owned clones.
    #[rhai_fn(name = "get_components")]
    pub fn get_components(product: &mut RhaiProduct) -> Array {
        product
            .components
            .iter()
            .cloned()
            .map(rhai::Dynamic::from)
            .collect()
    }
}
/// Registers the product scripting API on `engine`: the `Product` and
/// `ProductComponent` modules plus authorization-checked CRUD entry
/// points (`save_product`, `get_product`, `delete_product`).
pub fn register_product_rhai_module(engine: &mut Engine) {
    let mut product_module = exported_module!(rhai_product_module);
    // No authorized fns are registered on the component module, so it
    // does not need to be mutable.
    let component_module = exported_module!(rhai_product_component_module);

    // Fix: every other invocation of these macros in this crate labels
    // the target-module argument `module:`; the previous `product_module:`
    // label did not match the macros' parameter name.
    register_authorized_create_by_id_fn!(
        module: &mut product_module,
        rhai_fn_name: "save_product",
        resource_type_str: "Product",
        rhai_return_rust_type: heromodels::models::biz::product::Product
    );
    register_authorized_get_by_id_fn!(
        module: &mut product_module,
        rhai_fn_name: "get_product",
        resource_type_str: "Product",
        rhai_return_rust_type: heromodels::models::biz::product::Product
    );
    register_authorized_delete_by_id_fn!(
        module: &mut product_module,
        rhai_fn_name: "delete_product",
        resource_type_str: "Product",
        rhai_return_rust_type: heromodels::models::biz::product::Product
    );

    engine.register_global_module(product_module.into());
    engine.register_global_module(component_module.into());
}
// Company Rhai wrapper functions
/// Rhai bindings for the `Company` model: builder-style setters and
/// read-only getters.
#[export_module]
mod rhai_company_module {
    // NOTE(review): BusinessType and CompanyStatus appear unused in this
    // module body — confirm against the generated macro output before pruning.
    use super::{BusinessType, CompanyStatus, RhaiCompany};

    /// Creates an empty `Company` for the script to populate.
    #[rhai_fn(name = "new_company", return_raw)]
    pub fn new_company() -> Result<RhaiCompany, Box<EvalAltResult>> {
        Ok(Company::new())
    }

    /// Sets the company name (chainable).
    #[rhai_fn(name = "name", return_raw)]
    pub fn set_name(
        company: &mut RhaiCompany,
        name: String,
    ) -> Result<RhaiCompany, Box<EvalAltResult>> {
        *company = std::mem::take(company).name(name);
        Ok(company.clone())
    }

    /// Returns the company's database id widened to Rhai's i64.
    #[rhai_fn(name = "get_company_id")]
    pub fn get_company_id(company: &mut RhaiCompany) -> i64 {
        company.id() as i64
    }

    /// Returns an owned copy of the company name.
    #[rhai_fn(name = "get_company_name")]
    pub fn get_company_name(company: &mut RhaiCompany) -> String {
        company.name().clone()
    }
}
/// Registers the `Company` scripting API on `engine`: the module above
/// plus authorized `save_company` / `get_company` entry points.
/// (No delete function is registered for companies.)
pub fn register_company_rhai_module(engine: &mut Engine) {
    let mut m = exported_module!(rhai_company_module);

    register_authorized_create_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "save_company",
        resource_type_str: "Company",
        rhai_return_rust_type: heromodels::models::biz::company::Company
    );
    register_authorized_get_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "get_company",
        resource_type_str: "Company",
        rhai_return_rust_type: heromodels::models::biz::company::Company
    );

    engine.register_global_module(m.into());
}
// Sale Rhai wrapper functions
#[export_module]
mod rhai_sale_module {
use super::{RhaiSale, RhaiSaleItem, SaleStatus};
#[rhai_fn(name = "new_sale", return_raw)]
pub fn new_sale() -> Result<RhaiSale, Box<EvalAltResult>> {
Ok(Sale::new())
}
#[rhai_fn(name = "new_sale_item", return_raw)]
pub fn new_sale_item() -> Result<RhaiSaleItem, Box<EvalAltResult>> {
Ok(SaleItem::new())
}
#[rhai_fn(name = "company_id", return_raw)]
pub fn set_sale_company_id(sale: &mut RhaiSale, company_id: i64) -> Result<RhaiSale, Box<EvalAltResult>> {
let owned = std::mem::take(sale);
*sale = owned.company_id(company_id as u32);
Ok(sale.clone())
}
#[rhai_fn(name = "total_amount", return_raw)]
pub fn set_sale_total_amount(sale: &mut RhaiSale, total_amount: f64) -> Result<RhaiSale, Box<EvalAltResult>> {
let owned = std::mem::take(sale);
*sale = owned.total_amount(total_amount);
Ok(sale.clone())
}
#[rhai_fn(name = "get_sale_id")]
pub fn get_sale_id(sale: &mut RhaiSale) -> i64 {
sale.id() as i64
}
#[rhai_fn(name = "get_sale_total_amount")]
pub fn get_sale_total_amount(sale: &mut RhaiSale) -> f64 {
sale.total_amount()
}
}
/// Registers the `Sale` scripting API on `engine`: the module above plus
/// authorized `save_sale` / `get_sale` entry points.
pub fn register_sale_rhai_module(engine: &mut Engine) {
    let mut m = exported_module!(rhai_sale_module);

    register_authorized_create_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "save_sale",
        resource_type_str: "Sale",
        rhai_return_rust_type: heromodels::models::biz::sale::Sale
    );
    register_authorized_get_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "get_sale",
        resource_type_str: "Sale",
        rhai_return_rust_type: heromodels::models::biz::sale::Sale
    );

    engine.register_global_module(m.into());
}

View File

@@ -0,0 +1,246 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use std::sync::Arc;
use crate::models::calendar::{AttendanceStatus, Attendee, Calendar, Event};
type RhaiCalendar = Calendar;
type RhaiEvent = Event;
type RhaiAttendee = Attendee;
use crate::db::hero::OurDB;
use crate::db::Collection;
/// Rhai bindings for the calendar domain: builders for `Attendee`,
/// `Event` and `Calendar`, plus read-only getters for each.
#[export_module]
mod rhai_calendar_module {
    use super::{AttendanceStatus, RhaiAttendee, RhaiCalendar, RhaiEvent};

    // --- Attendee builder ---

    /// Creates an attendee for the given contact id.
    // NOTE(review): `contact_id as u32` truncates negative/oversized
    // values silently — confirm whether a checked conversion is wanted.
    #[rhai_fn(name = "new_attendee", return_raw)]
    pub fn new_attendee(contact_id: i64) -> Result<RhaiAttendee, Box<EvalAltResult>> {
        Ok(Attendee::new(contact_id as u32))
    }

    /// Sets the attendance status from a case-insensitive string.
    #[rhai_fn(name = "status", return_raw)]
    pub fn set_attendee_status(
        attendee: &mut RhaiAttendee,
        status_str: String,
    ) -> Result<RhaiAttendee, Box<EvalAltResult>> {
        let status = match status_str.to_lowercase().as_str() {
            "accepted" => AttendanceStatus::Accepted,
            "declined" => AttendanceStatus::Declined,
            "tentative" => AttendanceStatus::Tentative,
            "noresponse" => AttendanceStatus::NoResponse,
            _ => {
                return Err(EvalAltResult::ErrorSystem(
                    "Invalid Status".to_string(),
                    "Must be one of: Accepted, Declined, Tentative, NoResponse".into(),
                )
                .into())
            }
        };
        *attendee = std::mem::take(attendee).status(status);
        Ok(attendee.clone())
    }

    // --- Event builder ---
    // Setters take the value out of the script slot with `std::mem::take`,
    // apply the typed builder method, and return a clone for chaining.

    /// Creates an empty `Event`.
    #[rhai_fn(name = "new_event", return_raw)]
    pub fn new_event() -> Result<RhaiEvent, Box<EvalAltResult>> {
        Ok(Event::new())
    }

    /// Sets the event title.
    #[rhai_fn(name = "title", return_raw)]
    pub fn set_event_title(
        event: &mut RhaiEvent,
        title: String,
    ) -> Result<RhaiEvent, Box<EvalAltResult>> {
        *event = std::mem::take(event).title(title);
        Ok(event.clone())
    }

    /// Sets the event description.
    #[rhai_fn(name = "description", return_raw)]
    pub fn set_event_description(
        event: &mut RhaiEvent,
        description: String,
    ) -> Result<RhaiEvent, Box<EvalAltResult>> {
        *event = std::mem::take(event).description(description);
        Ok(event.clone())
    }

    /// Sets the event location.
    #[rhai_fn(name = "location", return_raw)]
    pub fn set_event_location(
        event: &mut RhaiEvent,
        location: String,
    ) -> Result<RhaiEvent, Box<EvalAltResult>> {
        *event = std::mem::take(event).location(location);
        Ok(event.clone())
    }

    /// Appends an attendee to the event.
    #[rhai_fn(name = "add_attendee", return_raw)]
    pub fn add_event_attendee(
        event: &mut RhaiEvent,
        attendee: RhaiAttendee,
    ) -> Result<RhaiEvent, Box<EvalAltResult>> {
        *event = std::mem::take(event).add_attendee(attendee);
        Ok(event.clone())
    }

    /// Moves the event to a new start/end time window.
    #[rhai_fn(name = "reschedule", return_raw)]
    pub fn reschedule_event(
        event: &mut RhaiEvent,
        start_time: i64,
        end_time: i64,
    ) -> Result<RhaiEvent, Box<EvalAltResult>> {
        *event = std::mem::take(event).reschedule(start_time, end_time);
        Ok(event.clone())
    }

    // --- Calendar builder ---

    /// Creates a calendar with the given name.
    #[rhai_fn(name = "new_calendar", return_raw)]
    pub fn new_calendar(name: String) -> Result<RhaiCalendar, Box<EvalAltResult>> {
        Ok(Calendar::new().name(name))
    }

    /// Renames the calendar.
    #[rhai_fn(name = "calendar_name", return_raw)]
    pub fn set_calendar_name(
        calendar: &mut RhaiCalendar,
        name: String,
    ) -> Result<RhaiCalendar, Box<EvalAltResult>> {
        *calendar = std::mem::take(calendar).name(name);
        Ok(calendar.clone())
    }

    /// Sets the calendar description.
    #[rhai_fn(name = "calendar_description", return_raw)]
    pub fn set_calendar_description(
        calendar: &mut RhaiCalendar,
        description: String,
    ) -> Result<RhaiCalendar, Box<EvalAltResult>> {
        *calendar = std::mem::take(calendar).description(description);
        Ok(calendar.clone())
    }

    /// Adds an event (by id) to the calendar.
    #[rhai_fn(name = "add_event", return_raw)]
    pub fn add_calendar_event(
        calendar: &mut RhaiCalendar,
        event_id: i64,
    ) -> Result<RhaiCalendar, Box<EvalAltResult>> {
        *calendar = std::mem::take(calendar).add_event(event_id as u32);
        Ok(calendar.clone())
    }

    // --- Calendar getters ---

    #[rhai_fn(name = "get_calendar_id")]
    pub fn get_calendar_id(calendar: &mut RhaiCalendar) -> i64 {
        calendar.base.id as i64
    }

    #[rhai_fn(name = "get_calendar_name")]
    pub fn get_calendar_name(calendar: &mut RhaiCalendar) -> String {
        calendar.name.clone()
    }

    #[rhai_fn(name = "get_calendar_description")]
    pub fn get_calendar_description(calendar: &mut RhaiCalendar) -> Option<String> {
        calendar.description.clone()
    }

    /// Returns the calendar's event ids as a Rhai array of i64.
    #[rhai_fn(name = "get_calendar_events")]
    pub fn get_calendar_events(calendar: &mut RhaiCalendar) -> Array {
        calendar
            .events
            .iter()
            .copied()
            .map(|id| Dynamic::from(id as i64))
            .collect()
    }

    // --- Event getters ---

    #[rhai_fn(name = "get_event_id")]
    pub fn get_event_id(event: &mut RhaiEvent) -> i64 {
        event.base.id as i64
    }

    #[rhai_fn(name = "get_event_title")]
    pub fn get_event_title(event: &mut RhaiEvent) -> String {
        event.title.clone()
    }

    #[rhai_fn(name = "get_event_description")]
    pub fn get_event_description(event: &mut RhaiEvent) -> Option<String> {
        event.description.clone()
    }

    #[rhai_fn(name = "get_event_start_time")]
    pub fn get_event_start_time(event: &mut RhaiEvent) -> i64 {
        event.start_time
    }

    #[rhai_fn(name = "get_event_end_time")]
    pub fn get_event_end_time(event: &mut RhaiEvent) -> i64 {
        event.end_time
    }

    /// Returns the event's attendees as a Rhai array of owned clones.
    #[rhai_fn(name = "get_event_attendees")]
    pub fn get_event_attendees(event: &mut RhaiEvent) -> Array {
        event.attendees.iter().cloned().map(Dynamic::from).collect()
    }

    #[rhai_fn(name = "get_event_location")]
    pub fn get_event_location(event: &mut RhaiEvent) -> Option<String> {
        event.location.clone()
    }

    // --- Attendee getters ---

    #[rhai_fn(name = "get_attendee_contact_id")]
    pub fn get_attendee_contact_id(attendee: &mut RhaiAttendee) -> i64 {
        attendee.contact_id as i64
    }

    /// Debug rendering of the status enum (e.g. "Accepted").
    #[rhai_fn(name = "get_attendee_status")]
    pub fn get_attendee_status(attendee: &mut RhaiAttendee) -> String {
        format!("{:?}", attendee.status)
    }
}
/// Registers the calendar scripting API on `engine`: the module above
/// plus authorized CRUD entry points for both `Calendar`
/// (`save_calendar` / `get_calendar` / `delete_calendar`) and `Event`
/// (`save_event` / `get_event` / `delete_event`).
pub fn register_calendar_rhai_module(engine: &mut Engine) {
    let mut m = exported_module!(rhai_calendar_module);

    // Calendar CRUD.
    register_authorized_create_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "save_calendar",
        resource_type_str: "Calendar",
        rhai_return_rust_type: heromodels::models::calendar::Calendar
    );
    register_authorized_get_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "get_calendar",
        resource_type_str: "Calendar",
        rhai_return_rust_type: heromodels::models::calendar::Calendar
    );
    register_authorized_delete_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "delete_calendar",
        resource_type_str: "Calendar",
        rhai_return_rust_type: heromodels::models::calendar::Calendar
    );

    // Event CRUD.
    register_authorized_create_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "save_event",
        resource_type_str: "Event",
        rhai_return_rust_type: heromodels::models::calendar::Event
    );
    register_authorized_get_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "get_event",
        resource_type_str: "Event",
        rhai_return_rust_type: heromodels::models::calendar::Event
    );
    register_authorized_delete_by_id_fn!(
        module: &mut m,
        rhai_fn_name: "delete_event",
        resource_type_str: "Event",
        rhai_return_rust_type: heromodels::models::calendar::Event
    );

    engine.register_global_module(m.into());
}

View File

@@ -119,4 +119,4 @@ impl Circle {
/// Creates a new circle builder
pub fn new_circle() -> Circle {
Circle::new()
}
}

View File

@@ -1,412 +1,156 @@
use crate::db::Db;
use rhai::plugin::*;
use rhai::{Array, CustomType, Dynamic, Engine, EvalAltResult, INT, Module, Position};
use std::mem;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module};
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use std::collections::HashMap;
use std::sync::Arc;
use super::circle::{Circle, ThemeData};
use crate::models::circle::Circle;
type RhaiCircle = Circle;
type RhaiThemeData = ThemeData;
use crate::db::Collection;
use crate::db::hero::OurDB;
use serde::Serialize;
use serde_json;
/// Registers a `.json()` method for any type `T` that implements the required traits.
fn register_json_method<T>(engine: &mut Engine)
where
T: CustomType + Clone + Serialize,
{
let to_json_fn = |obj: &mut T| -> Result<String, Box<EvalAltResult>> {
serde_json::to_string(obj).map_err(|e| e.to_string().into())
};
engine.build_type::<T>().register_fn("json", to_json_fn);
}
// Helper to convert i64 from Rhai to u32 for IDs
fn id_from_i64_to_u32(id_i64: i64) -> Result<u32, Box<EvalAltResult>> {
u32::try_from(id_i64).map_err(|_| {
Box::new(EvalAltResult::ErrorArithmetic(
format!("Failed to convert ID '{}' to u32", id_i64).into(),
Position::NONE,
))
})
}
#[export_module]
mod rhai_theme_data_module {
#[rhai_fn(name = "new_theme_data")]
pub fn new_theme_data() -> RhaiThemeData {
ThemeData::default()
}
// --- Setters for ThemeData ---
#[rhai_fn(name = "primary_color", return_raw, global, pure)]
pub fn set_primary_color(
theme: &mut RhaiThemeData,
color: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.primary_color = color;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "background_color", return_raw, global, pure)]
pub fn set_background_color(
theme: &mut RhaiThemeData,
color: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.background_color = color;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "background_pattern", return_raw, global, pure)]
pub fn set_background_pattern(
theme: &mut RhaiThemeData,
pattern: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.background_pattern = pattern;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "logo_symbol", return_raw, global, pure)]
pub fn set_logo_symbol(
theme: &mut RhaiThemeData,
symbol: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.logo_symbol = symbol;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "logo_url", return_raw, global, pure)]
pub fn set_logo_url(
theme: &mut RhaiThemeData,
url: String,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.logo_url = url;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "nav_dashboard_visible", return_raw, global, pure)]
pub fn set_nav_dashboard_visible(
theme: &mut RhaiThemeData,
visible: bool,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.nav_dashboard_visible = visible;
*theme = owned_theme;
Ok(theme.clone())
}
#[rhai_fn(name = "nav_timeline_visible", return_raw, global, pure)]
pub fn set_nav_timeline_visible(
theme: &mut RhaiThemeData,
visible: bool,
) -> Result<RhaiThemeData, Box<EvalAltResult>> {
let mut owned_theme = mem::take(theme);
owned_theme.nav_timeline_visible = visible;
*theme = owned_theme;
Ok(theme.clone())
}
// --- Getters for ThemeData ---
#[rhai_fn(name = "get_primary_color", pure)]
pub fn get_primary_color(theme: &mut RhaiThemeData) -> String {
theme.primary_color.clone()
}
#[rhai_fn(name = "get_background_color", pure)]
pub fn get_background_color(theme: &mut RhaiThemeData) -> String {
theme.background_color.clone()
}
#[rhai_fn(name = "get_background_pattern", pure)]
pub fn get_background_pattern(theme: &mut RhaiThemeData) -> String {
theme.background_pattern.clone()
}
#[rhai_fn(name = "get_logo_symbol", pure)]
pub fn get_logo_symbol(theme: &mut RhaiThemeData) -> String {
theme.logo_symbol.clone()
}
#[rhai_fn(name = "get_logo_url", pure)]
pub fn get_logo_url(theme: &mut RhaiThemeData) -> String {
theme.logo_url.clone()
}
#[rhai_fn(name = "get_nav_dashboard_visible", pure)]
pub fn get_nav_dashboard_visible(theme: &mut RhaiThemeData) -> bool {
theme.nav_dashboard_visible
}
#[rhai_fn(name = "get_nav_timeline_visible", pure)]
pub fn get_nav_timeline_visible(theme: &mut RhaiThemeData) -> bool {
theme.nav_timeline_visible
}
}
use crate::models::circle::ThemeData;
#[export_module]
mod rhai_circle_module {
// --- Circle Functions ---
#[rhai_fn(name = "new_circle")]
pub fn new_circle() -> RhaiCircle {
Circle::new()
use super::RhaiCircle;
// this one configures the users own circle
#[rhai_fn(name = "configure", return_raw)]
pub fn configure() -> Result<RhaiCircle, Box<EvalAltResult>> {
Ok(Circle::new())
}
/// Sets the circle title
#[rhai_fn(name = "title", return_raw, global, pure)]
pub fn circle_title(
#[rhai_fn(name = "new_circle", return_raw)]
pub fn new_circle() -> Result<RhaiCircle, Box<EvalAltResult>> {
Ok(Circle::new())
}
#[rhai_fn(name = "set_title", return_raw)]
pub fn set_title(
circle: &mut RhaiCircle,
title: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.title(title);
let owned = std::mem::take(circle);
*circle = owned.title(title);
Ok(circle.clone())
}
/// Sets the circle ws_url
#[rhai_fn(name = "ws_url", return_raw, global, pure)]
pub fn circle_ws_url(
#[rhai_fn(name = "set_ws_url", return_raw)]
pub fn set_ws_url(
circle: &mut RhaiCircle,
ws_url: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.ws_url(ws_url);
let owned = std::mem::take(circle);
*circle = owned.ws_url(ws_url);
Ok(circle.clone())
}
/// Sets the circle description
#[rhai_fn(name = "description", return_raw, global, pure)]
pub fn circle_description(
#[rhai_fn(name = "set_description", return_raw)]
pub fn set_description(
circle: &mut RhaiCircle,
description: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.description(description);
let owned = std::mem::take(circle);
*circle = owned.description(description);
Ok(circle.clone())
}
/// Sets the circle logo
#[rhai_fn(name = "logo", return_raw, global, pure)]
pub fn circle_logo(
#[rhai_fn(name = "set_logo", return_raw)]
pub fn set_logo(
circle: &mut RhaiCircle,
logo: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.logo(logo);
let owned = std::mem::take(circle);
*circle = owned.logo(logo);
Ok(circle.clone())
}
/// Sets the circle theme
#[rhai_fn(name = "theme", return_raw, global, pure)]
pub fn circle_theme(
#[rhai_fn(name = "set_theme", return_raw)]
pub fn set_theme(
circle: &mut RhaiCircle,
theme: RhaiThemeData,
theme: ThemeData,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.theme(theme);
let owned = std::mem::take(circle);
*circle = owned.theme(theme);
Ok(circle.clone())
}
/// Adds an attendee to the circle
#[rhai_fn(name = "add_circle", return_raw, global, pure)]
pub fn circle_add_circle(
#[rhai_fn(name = "add_circle", return_raw)]
pub fn add_circle(
circle: &mut RhaiCircle,
added_circle: String,
new_circle: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.add_circle(added_circle);
let owned = std::mem::take(circle);
*circle = owned.add_circle(new_circle);
Ok(circle.clone())
}
/// Adds an attendee to the circle
#[rhai_fn(name = "add_member", return_raw, global, pure)]
pub fn circle_add_member(
#[rhai_fn(name = "add_member", return_raw)]
pub fn add_member(
circle: &mut RhaiCircle,
added_member: String,
member: String,
) -> Result<RhaiCircle, Box<EvalAltResult>> {
let owned_circle = mem::take(circle);
*circle = owned_circle.add_member(added_member);
let owned = std::mem::take(circle);
*circle = owned.add_member(member);
Ok(circle.clone())
}
// Circle Getters
#[rhai_fn(name = "get_id", pure)]
pub fn get_circle_id(circle: &mut RhaiCircle) -> i64 {
circle.base_data.id as i64
// --- Getters ---
#[rhai_fn(name = "get_id")]
pub fn get_id(c: &mut RhaiCircle) -> i64 {
c.base_data.id as i64
}
#[rhai_fn(name = "get_created_at", pure)]
pub fn get_circle_created_at(circle: &mut RhaiCircle) -> i64 {
circle.base_data.created_at
#[rhai_fn(name = "get_title")]
pub fn get_title(c: &mut RhaiCircle) -> String {
c.title.clone()
}
#[rhai_fn(name = "get_modified_at", pure)]
pub fn get_circle_modified_at(circle: &mut RhaiCircle) -> i64 {
circle.base_data.modified_at
#[rhai_fn(name = "get_ws_url")]
pub fn get_ws_url(c: &mut RhaiCircle) -> String {
c.ws_url.clone()
}
#[rhai_fn(name = "get_title", pure)]
pub fn get_circle_title(circle: &mut RhaiCircle) -> String {
circle.title.clone()
#[rhai_fn(name = "get_description")]
pub fn get_description(c: &mut RhaiCircle) -> Option<String> {
c.description.clone()
}
#[rhai_fn(name = "get_description", pure)]
pub fn get_circle_description(circle: &mut RhaiCircle) -> Option<String> {
circle.description.clone()
#[rhai_fn(name = "get_logo")]
pub fn get_logo(c: &mut RhaiCircle) -> Option<String> {
c.logo.clone()
}
#[rhai_fn(name = "get_circles", pure)]
pub fn get_circle_circles(circle: &mut RhaiCircle) -> Vec<String> {
circle.circles.clone()
#[rhai_fn(name = "get_circles")]
pub fn get_circles(c: &mut RhaiCircle) -> Array {
c.circles.iter().map(|s| Dynamic::from(s.clone())).collect()
}
#[rhai_fn(name = "get_ws_url", pure)]
pub fn get_circle_ws_url(circle: &mut RhaiCircle) -> String {
circle.ws_url.clone()
}
#[rhai_fn(name = "get_logo", pure)]
pub fn get_circle_logo(circle: &mut RhaiCircle) -> Option<String> {
circle.logo.clone()
}
#[rhai_fn(name = "get_theme", pure)]
pub fn get_circle_theme(circle: &mut RhaiCircle) -> RhaiThemeData {
circle.theme.clone()
#[rhai_fn(name = "get_members")]
pub fn get_members(c: &mut RhaiCircle) -> Array {
c.members.iter().map(|s| Dynamic::from(s.clone())).collect()
}
}
pub fn register_circle_rhai_module(engine: &mut Engine, db: Arc<OurDB>) {
engine.build_type::<RhaiCircle>();
engine.build_type::<RhaiThemeData>();
pub fn register_circle_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_circle_module);
let mut db_module = Module::new();
let circle_module = exported_module!(rhai_circle_module);
let theme_data_module = exported_module!(rhai_theme_data_module);
engine.register_global_module(circle_module.into());
engine.register_global_module(theme_data_module.into());
register_json_method::<Circle>(engine);
register_json_method::<ThemeData>(engine);
// Manually register database functions as they need to capture 'db'
let db_clone_set_circle = db.clone();
db_module.set_native_fn(
"save_circle",
move |circle: Circle| -> Result<Circle, Box<EvalAltResult>> {
let result = db_clone_set_circle.set(&circle).map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error set_circle: {}", e).into(),
Position::NONE,
))
})?;
Ok(result.1)
},
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_circle",
resource_type_str: "Circle",
rhai_return_rust_type: crate::models::circle::Circle
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_circle",
resource_type_str: "Circle",
rhai_return_rust_type: crate::models::circle::Circle
);
register_authorized_delete_by_id_fn!(
module: &mut module,
rhai_fn_name: "delete_circle",
resource_type_str: "Circle",
rhai_return_rust_type: crate::models::circle::Circle
);
let db_clone_delete_circle = db.clone();
db_module.set_native_fn(
"delete_circle",
move |circle: Circle| -> Result<(), Box<EvalAltResult>> {
let result = db_clone_delete_circle
.collection::<Circle>()
.expect("can open circle collection")
.delete_by_id(circle.base_data.id)
.expect("can delete circle");
Ok(result)
},
);
let db_clone_get_circle = db.clone();
db_module.set_native_fn(
"get_circle",
move || -> Result<Circle, Box<EvalAltResult>> {
let all_circles: Vec<Circle> = db_clone_get_circle.get_all().map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error get_circle: {}", e).into(),
Position::NONE,
))
})?;
if let Some(first_circle) = all_circles.first() {
Ok(first_circle.clone())
} else {
Err(Box::new(EvalAltResult::ErrorRuntime(
"Circle not found".into(),
Position::NONE,
)))
}
},
);
// --- Collection DB Functions ---
let db_clone = db.clone();
db_module.set_native_fn(
"save_circle",
move |circle: RhaiCircle| -> Result<RhaiCircle, Box<EvalAltResult>> {
let result = db_clone.set(&circle).map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error: {:?}", e).into(),
Position::NONE,
))
})?;
Ok(result.1)
},
);
let db_clone_get_circle_by_id = db.clone();
db_module.set_native_fn(
"get_circle_by_id",
move |id_i64: INT| -> Result<Circle, Box<EvalAltResult>> {
let id_u32 = id_from_i64_to_u32(id_i64)?;
db_clone_get_circle_by_id
.get_by_id(id_u32)
.map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("DB Error get_circle_by_id: {}", e).into(),
Position::NONE,
))
})?
.ok_or_else(|| {
Box::new(EvalAltResult::ErrorRuntime(
format!("Circle with ID {} not found", id_u32).into(),
Position::NONE,
))
})
},
);
let db_clone_list_circles = db.clone();
db_module.set_native_fn(
"list_circles",
move || -> Result<Dynamic, Box<EvalAltResult>> {
let collection = db_clone_list_circles.collection::<Circle>().map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to get circle collection: {:?}", e).into(),
Position::NONE,
))
})?;
let circles = collection.get_all().map_err(|e| {
Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to get all circles: {:?}", e).into(),
Position::NONE,
))
})?;
let mut array = Array::new();
for circle in circles {
array.push(Dynamic::from(circle));
}
Ok(Dynamic::from(array))
},
);
engine.register_global_module(db_module.into());
println!("Successfully registered circle Rhai module using export_module approach.");
engine.register_global_module(module.into());
}

View File

@@ -0,0 +1,232 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use std::sync::Arc;
use crate::models::contact::{Contact, Group};
type RhaiContact = Contact;
type RhaiGroup = Group;
use crate::db::hero::OurDB;
use crate::db::Collection;
#[export_module]
mod rhai_contact_module {
    use super::{RhaiContact, RhaiGroup};

    // --- Contact builder ---

    /// Creates a new, empty contact.
    #[rhai_fn(name = "new_contact", return_raw)]
    pub fn new_contact() -> Result<RhaiContact, Box<EvalAltResult>> {
        Ok(RhaiContact::new())
    }

    /// Builder: sets the contact's name.
    #[rhai_fn(name = "name", return_raw)]
    pub fn set_contact_name(
        contact: &mut RhaiContact,
        name: String,
    ) -> Result<RhaiContact, Box<EvalAltResult>> {
        *contact = std::mem::take(contact).name(name);
        Ok(contact.clone())
    }

    /// Builder: sets the contact's description.
    #[rhai_fn(name = "description", return_raw)]
    pub fn set_contact_description(
        contact: &mut RhaiContact,
        description: String,
    ) -> Result<RhaiContact, Box<EvalAltResult>> {
        *contact = std::mem::take(contact).description(description);
        Ok(contact.clone())
    }

    /// Builder: sets the contact's postal address.
    #[rhai_fn(name = "address", return_raw)]
    pub fn set_contact_address(
        contact: &mut RhaiContact,
        address: String,
    ) -> Result<RhaiContact, Box<EvalAltResult>> {
        *contact = std::mem::take(contact).address(address);
        Ok(contact.clone())
    }

    /// Builder: sets the contact's phone number.
    #[rhai_fn(name = "phone", return_raw)]
    pub fn set_contact_phone(
        contact: &mut RhaiContact,
        phone: String,
    ) -> Result<RhaiContact, Box<EvalAltResult>> {
        *contact = std::mem::take(contact).phone(phone);
        Ok(contact.clone())
    }

    /// Builder: sets the contact's email address.
    #[rhai_fn(name = "email", return_raw)]
    pub fn set_contact_email(
        contact: &mut RhaiContact,
        email: String,
    ) -> Result<RhaiContact, Box<EvalAltResult>> {
        *contact = std::mem::take(contact).email(email);
        Ok(contact.clone())
    }

    /// Builder: sets free-form notes on the contact.
    #[rhai_fn(name = "notes", return_raw)]
    pub fn set_contact_notes(
        contact: &mut RhaiContact,
        notes: String,
    ) -> Result<RhaiContact, Box<EvalAltResult>> {
        *contact = std::mem::take(contact).notes(notes);
        Ok(contact.clone())
    }

    /// Builder: sets the circle this contact belongs to.
    #[rhai_fn(name = "circle", return_raw)]
    pub fn set_contact_circle(
        contact: &mut RhaiContact,
        circle: String,
    ) -> Result<RhaiContact, Box<EvalAltResult>> {
        *contact = std::mem::take(contact).circle(circle);
        Ok(contact.clone())
    }

    // --- Group builder ---

    /// Creates a new, empty group.
    #[rhai_fn(name = "new_group", return_raw)]
    pub fn new_group() -> Result<RhaiGroup, Box<EvalAltResult>> {
        Ok(RhaiGroup::new())
    }

    /// Builder: sets the group's name.
    #[rhai_fn(name = "group_name", return_raw)]
    pub fn set_group_name(
        group: &mut RhaiGroup,
        name: String,
    ) -> Result<RhaiGroup, Box<EvalAltResult>> {
        *group = std::mem::take(group).name(name);
        Ok(group.clone())
    }

    /// Builder: sets the group's description.
    #[rhai_fn(name = "group_description", return_raw)]
    pub fn set_group_description(
        group: &mut RhaiGroup,
        description: String,
    ) -> Result<RhaiGroup, Box<EvalAltResult>> {
        *group = std::mem::take(group).description(description);
        Ok(group.clone())
    }

    /// Builder: adds a contact id to the group (narrowed to u32 for storage).
    #[rhai_fn(name = "add_contact", return_raw)]
    pub fn add_group_contact(
        group: &mut RhaiGroup,
        contact_id: i64,
    ) -> Result<RhaiGroup, Box<EvalAltResult>> {
        *group = std::mem::take(group).add_contact(contact_id as u32);
        Ok(group.clone())
    }

    // --- Contact getters ---

    /// Returns the contact's database id.
    #[rhai_fn(name = "get_contact_id")]
    pub fn get_contact_id(c: &mut RhaiContact) -> i64 {
        c.base.id as i64
    }

    /// Returns a copy of the contact's name.
    #[rhai_fn(name = "get_contact_name")]
    pub fn get_contact_name(c: &mut RhaiContact) -> String {
        c.name.to_owned()
    }

    /// Returns a copy of the optional description.
    #[rhai_fn(name = "get_contact_description")]
    pub fn get_contact_description(c: &mut RhaiContact) -> Option<String> {
        c.description.clone()
    }

    /// Returns a copy of the postal address.
    #[rhai_fn(name = "get_contact_address")]
    pub fn get_contact_address(c: &mut RhaiContact) -> String {
        c.address.to_owned()
    }

    /// Returns a copy of the phone number.
    #[rhai_fn(name = "get_contact_phone")]
    pub fn get_contact_phone(c: &mut RhaiContact) -> String {
        c.phone.to_owned()
    }

    /// Returns a copy of the email address.
    #[rhai_fn(name = "get_contact_email")]
    pub fn get_contact_email(c: &mut RhaiContact) -> String {
        c.email.to_owned()
    }

    /// Returns a copy of the optional notes.
    #[rhai_fn(name = "get_contact_notes")]
    pub fn get_contact_notes(c: &mut RhaiContact) -> Option<String> {
        c.notes.clone()
    }

    /// Returns a copy of the owning circle.
    #[rhai_fn(name = "get_contact_circle")]
    pub fn get_contact_circle(c: &mut RhaiContact) -> String {
        c.circle.to_owned()
    }

    // --- Group getters ---

    /// Returns the group's database id.
    #[rhai_fn(name = "get_group_id")]
    pub fn get_group_id(g: &mut RhaiGroup) -> i64 {
        g.base.id as i64
    }

    /// Returns a copy of the group's name.
    #[rhai_fn(name = "get_group_name")]
    pub fn get_group_name(g: &mut RhaiGroup) -> String {
        g.name.to_owned()
    }

    /// Returns a copy of the optional group description.
    #[rhai_fn(name = "get_group_description")]
    pub fn get_group_description(g: &mut RhaiGroup) -> Option<String> {
        g.description.clone()
    }

    /// Returns the member contact ids as a Rhai array of i64.
    #[rhai_fn(name = "get_group_contacts")]
    pub fn get_group_contacts(g: &mut RhaiGroup) -> Array {
        g.contacts
            .iter()
            .copied()
            .map(|id| Dynamic::from(id as i64))
            .collect()
    }
}
/// Registers the contact/group Rhai module plus authorized CRUD wrappers.
///
/// NOTE(review): the module above uses `crate::models::contact` types while these
/// macros name `heromodels::models::contact` — confirm both paths resolve to the
/// same types in this crate layout.
pub fn register_contact_rhai_module(engine: &mut Engine) {
    let mut module = exported_module!(rhai_contact_module);
    // Contact CRUD: create/save, read, delete — each guarded by authorization.
    register_authorized_create_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "save_contact",
        resource_type_str: "Contact",
        rhai_return_rust_type: heromodels::models::contact::Contact
    );
    register_authorized_get_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "get_contact",
        resource_type_str: "Contact",
        rhai_return_rust_type: heromodels::models::contact::Contact
    );
    register_authorized_delete_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "delete_contact",
        resource_type_str: "Contact",
        rhai_return_rust_type: heromodels::models::contact::Contact
    );
    // Group CRUD: same trio as Contact.
    register_authorized_create_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "save_group",
        resource_type_str: "Group",
        rhai_return_rust_type: heromodels::models::contact::Group
    );
    register_authorized_get_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "get_group",
        resource_type_str: "Group",
        rhai_return_rust_type: heromodels::models::contact::Group
    );
    register_authorized_delete_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "delete_group",
        resource_type_str: "Group",
        rhai_return_rust_type: heromodels::models::contact::Group
    );
    // Expose everything globally to scripts.
    engine.register_global_module(module.into());
}

View File

@@ -0,0 +1,86 @@
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Engine, EvalAltResult, Module, INT};
use std::mem;
use std::sync::Arc;
use heromodels::models::core::comment::Comment;
type RhaiComment = Comment;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
#[export_module]
mod rhai_comment_module {
    use super::{RhaiComment, INT};

    /// Creates a new, empty comment.
    #[rhai_fn(name = "new_comment", return_raw)]
    pub fn new_comment() -> Result<RhaiComment, Box<EvalAltResult>> {
        Ok(RhaiComment::new())
    }

    /// Builder: sets the authoring user's id (narrowed to u32 for storage).
    #[rhai_fn(name = "user_id", return_raw)]
    pub fn set_user_id(
        comment: &mut RhaiComment,
        user_id: i64,
    ) -> Result<RhaiComment, Box<EvalAltResult>> {
        *comment = std::mem::take(comment).user_id(user_id as u32);
        Ok(comment.clone())
    }

    /// Builder: sets the comment body text.
    #[rhai_fn(name = "content", return_raw)]
    pub fn set_content(
        comment: &mut RhaiComment,
        content: String,
    ) -> Result<RhaiComment, Box<EvalAltResult>> {
        *comment = std::mem::take(comment).content(content);
        Ok(comment.clone())
    }

    /// Returns the comment's database id.
    #[rhai_fn(name = "get_comment_id")]
    pub fn get_comment_id(comment: &mut RhaiComment) -> i64 {
        comment.id() as i64
    }

    /// Returns the authoring user's id.
    #[rhai_fn(name = "get_comment_user_id")]
    pub fn get_comment_user_id(comment: &mut RhaiComment) -> i64 {
        comment.user_id() as i64
    }

    /// Returns a copy of the comment body.
    #[rhai_fn(name = "get_comment_content")]
    pub fn get_comment_content(comment: &mut RhaiComment) -> String {
        comment.content().clone()
    }
}
/// Registers the comment Rhai module plus authorized CRUD wrappers
/// (save/get/delete, each guarded by authorization).
pub fn register_comment_rhai_module(engine: &mut Engine) {
    let mut module = exported_module!(rhai_comment_module);
    register_authorized_create_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "save_comment",
        resource_type_str: "Comment",
        rhai_return_rust_type: heromodels::models::core::comment::Comment
    );
    register_authorized_get_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "get_comment",
        resource_type_str: "Comment",
        rhai_return_rust_type: heromodels::models::core::comment::Comment
    );
    register_authorized_delete_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "delete_comment",
        resource_type_str: "Comment",
        rhai_return_rust_type: heromodels::models::core::comment::Comment
    );
    // Expose everything globally to scripts.
    engine.register_global_module(module.into());
}

View File

@@ -0,0 +1,80 @@
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Engine, EvalAltResult, Module, INT};
use std::mem;
use std::sync::Arc;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection;
use heromodels::models::finance::account::Account;
type RhaiAccount = Account;
#[export_module]
mod rhai_account_module {
    use super::{Array, RhaiAccount, INT};

    /// Creates a new, empty account.
    #[rhai_fn(name = "new_account", return_raw)]
    pub fn new_account() -> Result<RhaiAccount, Box<EvalAltResult>> {
        Ok(RhaiAccount::new())
    }

    /// Builder: sets the account name.
    #[rhai_fn(name = "name", return_raw)]
    pub fn set_name(
        account: &mut RhaiAccount,
        name: String,
    ) -> Result<RhaiAccount, Box<EvalAltResult>> {
        *account = std::mem::take(account).name(name);
        Ok(account.clone())
    }

    /// Builder: sets the owning user's id (narrowed to u32 for storage).
    #[rhai_fn(name = "user_id", return_raw)]
    pub fn set_user_id(
        account: &mut RhaiAccount,
        user_id: INT,
    ) -> Result<RhaiAccount, Box<EvalAltResult>> {
        *account = std::mem::take(account).user_id(user_id as u32);
        Ok(account.clone())
    }

    /// Returns the account's database id.
    #[rhai_fn(name = "get_account_id")]
    pub fn get_account_id(account: &mut RhaiAccount) -> i64 {
        account.id() as i64
    }

    /// Returns a copy of the account name.
    #[rhai_fn(name = "get_account_name")]
    pub fn get_account_name(account: &mut RhaiAccount) -> String {
        account.name().clone()
    }

    /// Returns the owning user's id.
    #[rhai_fn(name = "get_account_user_id")]
    pub fn get_account_user_id(account: &mut RhaiAccount) -> INT {
        account.user_id() as INT
    }
}
pub fn register_account_rhai_module(engine: &mut Engine) {
let mut module = exported_module!(rhai_account_module);
register_authorized_create_by_id_fn!(
module: &mut module,
rhai_fn_name: "save_account",
resource_type_str: "Account",
rhai_return_rust_type: heromodels::models::finance::account::Account
);
register_authorized_get_by_id_fn!(
module: &mut module,
rhai_fn_name: "get_account",
resource_type_str: "Account",
rhai_return_rust_type: heromodels::models::finance::account::Account
);
engine.register_global_module(module.into());
}

View File

@@ -0,0 +1,128 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// Bid status enumeration
// NOTE(review): variant names suggest a Pending -> Confirmed -> Assigned ->
// Done/Cancelled lifecycle — confirm against the code that transitions bids.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum BidStatus {
    /// Initial state for a newly created bid.
    #[default]
    Pending,
    Confirmed,
    Assigned,
    Cancelled,
    Done,
}
/// Billing period enumeration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum BillingPeriod {
    /// Default billing granularity.
    #[default]
    Hourly,
    Monthly,
    Yearly,
    Biannually,
    Triannually,
}
/// I can bid for infra, and optionally get accepted
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Bid {
    /// Shared model bookkeeping (id, timestamps) provided by `#[model]`.
    pub base_data: BaseModelData,
    /// links back to customer for this capacity (user on ledger)
    #[index]
    pub customer_id: u32,
    /// nr of slices I need in 1 machine
    pub compute_slices_nr: i32,
    /// price per 1 GB slice I want to accept
    pub compute_slice_price: f64,
    /// nr of storage slices needed
    pub storage_slices_nr: i32,
    /// price per 1 GB storage slice I want to accept
    pub storage_slice_price: f64,
    /// Current lifecycle state (defaults to `Pending`).
    pub status: BidStatus,
    /// if obligation then will be charged and money needs to be in escrow, otherwise its an intent
    pub obligation: bool,
    /// epoch timestamp
    // NOTE(review): u32 epoch seconds overflows in 2106 — confirm acceptable.
    pub start_date: u32,
    /// epoch timestamp
    pub end_date: u32,
    /// signature as done by a user/consumer to validate their identity and intent
    pub signature_user: String,
    /// Billing granularity for this bid (defaults to `Hourly`).
    pub billing_period: BillingPeriod,
}
impl Bid {
    /// Creates an empty bid: fresh base model data, all other fields defaulted
    /// (zeros, empty strings, `Pending`/`Hourly`).
    pub fn new() -> Self {
        Self {
            base_data: BaseModelData::new(),
            ..Self::default()
        }
    }

    /// Sets the customer (ledger user) id.
    pub fn customer_id(mut self, v: u32) -> Self { self.customer_id = v; self }

    /// Sets the number of compute slices wanted in one machine.
    pub fn compute_slices_nr(mut self, v: i32) -> Self { self.compute_slices_nr = v; self }

    /// Sets the accepted price per 1 GB compute slice.
    pub fn compute_slice_price(mut self, v: f64) -> Self { self.compute_slice_price = v; self }

    /// Sets the number of storage slices wanted.
    pub fn storage_slices_nr(mut self, v: i32) -> Self { self.storage_slices_nr = v; self }

    /// Sets the accepted price per 1 GB storage slice.
    pub fn storage_slice_price(mut self, v: f64) -> Self { self.storage_slice_price = v; self }

    /// Sets the lifecycle status.
    pub fn status(mut self, v: BidStatus) -> Self { self.status = v; self }

    /// Marks the bid as a charged obligation (true) or an intent (false).
    pub fn obligation(mut self, v: bool) -> Self { self.obligation = v; self }

    /// Sets the start date (epoch seconds).
    pub fn start_date(mut self, v: u32) -> Self { self.start_date = v; self }

    /// Sets the end date (epoch seconds).
    pub fn end_date(mut self, v: u32) -> Self { self.end_date = v; self }

    /// Sets the user's identity/intent signature.
    pub fn signature_user(mut self, v: impl ToString) -> Self {
        self.signature_user = v.to_string();
        self
    }

    /// Sets the billing granularity.
    pub fn billing_period(mut self, v: BillingPeriod) -> Self { self.billing_period = v; self }
}

View File

@@ -0,0 +1,39 @@
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// SLA policy matching the V spec `SLAPolicy`
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct SLAPolicy {
    /// should +90
    pub sla_uptime: i32,
    /// minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee
    pub sla_bandwidth_mbit: i32,
    /// 0-100, percent of money given back in relation to month if sla breached,
    /// e.g. 200 means we return 2 months worth of rev if sla missed
    // NOTE(review): the doc says "0-100" but the example uses 200 — one of the
    // two is stale; confirm the allowed range.
    pub sla_penalty: i32,
}
impl SLAPolicy {
    /// Creates a policy with every field zeroed.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the guaranteed uptime percentage.
    pub fn sla_uptime(mut self, v: i32) -> Self {
        self.sla_uptime = v;
        self
    }

    /// Sets the minimal average bandwidth in Mbit (0 = no guarantee).
    pub fn sla_bandwidth_mbit(mut self, v: i32) -> Self {
        self.sla_bandwidth_mbit = v;
        self
    }

    /// Sets the penalty percentage applied on SLA breach.
    pub fn sla_penalty(mut self, v: i32) -> Self {
        self.sla_penalty = v;
        self
    }

    /// Finalizes the builder (identity; kept for fluent-API symmetry).
    pub fn build(self) -> Self {
        self
    }
}
/// Pricing policy matching the V spec `PricingPolicy`
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct PricingPolicy {
    /// e.g. 30,40,50 means if user has more CC in wallet than 1 year utilization
    /// then this provider gives 30%, 2Y 40%, ...
    pub marketplace_year_discounts: Vec<i32>,
    /// e.g. 10,20,30
    // NOTE(review): thresholds that trigger each volume tier are not defined
    // here — document them where the discounts are applied.
    pub volume_discounts: Vec<i32>,
}
impl PricingPolicy {
    /// Creates a policy pre-filled with the standard discount ladders
    /// (year discounts 30/40/50, volume discounts 10/20/30).
    pub fn new() -> Self {
        Self {
            marketplace_year_discounts: vec![30, 40, 50],
            volume_discounts: vec![10, 20, 30],
        }
    }

    /// Replaces the yearly marketplace discount ladder.
    pub fn marketplace_year_discounts(mut self, v: Vec<i32>) -> Self {
        self.marketplace_year_discounts = v;
        self
    }

    /// Replaces the volume discount ladder.
    pub fn volume_discounts(mut self, v: Vec<i32>) -> Self {
        self.volume_discounts = v;
        self
    }

    /// Finalizes the builder (identity; kept for fluent-API symmetry).
    pub fn build(self) -> Self {
        self
    }
}

View File

@@ -0,0 +1,219 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
use super::bid::BillingPeriod;
/// Contract status enumeration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum ContractStatus {
    /// Default state for a newly created contract.
    #[default]
    Active,
    Cancelled,
    Error,
    Paused,
}
/// Compute slice provisioned for a contract
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct ComputeSliceProvisioned {
    /// Node hosting this slice.
    pub node_id: u32,
    /// the id of the slice in the node
    pub id: u16,
    /// Memory allocated to the slice, in GB.
    pub mem_gb: f64,
    /// Storage allocated to the slice, in GB.
    pub storage_gb: f64,
    /// CPU benchmark (passmark) share for the slice.
    pub passmark: i32,
    /// Virtual cores allocated.
    pub vcores: i32,
    /// CPU oversubscription factor.
    pub cpu_oversubscription: i32,
    /// Free-form tags string.
    pub tags: String,
}
/// Storage slice provisioned for a contract
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct StorageSliceProvisioned {
    /// Node hosting this slice.
    pub node_id: u32,
    /// the id of the slice in the node, are tracked in the node itself
    pub id: u16,
    /// Slice size in GB.
    pub storage_size_gb: i32,
    /// Free-form tags string.
    pub tags: String,
}
/// Contract for provisioned infrastructure
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Contract {
    /// Shared model bookkeeping (id, timestamps) provided by `#[model]`.
    pub base_data: BaseModelData,
    /// links back to customer for this capacity (user on ledger)
    #[index]
    pub customer_id: u32,
    /// Compute slices actually provisioned under this contract.
    pub compute_slices: Vec<ComputeSliceProvisioned>,
    /// Storage slices actually provisioned under this contract.
    pub storage_slices: Vec<StorageSliceProvisioned>,
    /// price per 1 GB agreed upon
    pub compute_slice_price: f64,
    /// price per 1 GB agreed upon
    pub storage_slice_price: f64,
    /// price per 1 GB agreed upon (transfer)
    pub network_slice_price: f64,
    /// Lifecycle state (defaults to `Active`).
    pub status: ContractStatus,
    /// epoch timestamp
    pub start_date: u32,
    /// epoch timestamp
    pub end_date: u32,
    /// signature as done by a user/consumer to validate their identity and intent
    pub signature_user: String,
    /// signature as done by the hoster
    pub signature_hoster: String,
    /// Billing granularity for this contract.
    pub billing_period: BillingPeriod,
}
impl Contract {
    /// Creates an empty contract: fresh base model data, all other fields
    /// defaulted (zeros, empty vectors/strings, `Active`/`Hourly`).
    pub fn new() -> Self {
        Self {
            base_data: BaseModelData::new(),
            ..Self::default()
        }
    }

    /// Sets the customer (ledger user) id.
    pub fn customer_id(mut self, v: u32) -> Self { self.customer_id = v; self }

    /// Appends a provisioned compute slice.
    pub fn add_compute_slice(mut self, slice: ComputeSliceProvisioned) -> Self {
        self.compute_slices.push(slice);
        self
    }

    /// Appends a provisioned storage slice.
    pub fn add_storage_slice(mut self, slice: StorageSliceProvisioned) -> Self {
        self.storage_slices.push(slice);
        self
    }

    /// Sets the agreed price per 1 GB compute.
    pub fn compute_slice_price(mut self, v: f64) -> Self { self.compute_slice_price = v; self }

    /// Sets the agreed price per 1 GB storage.
    pub fn storage_slice_price(mut self, v: f64) -> Self { self.storage_slice_price = v; self }

    /// Sets the agreed price per 1 GB network transfer.
    pub fn network_slice_price(mut self, v: f64) -> Self { self.network_slice_price = v; self }

    /// Sets the lifecycle status.
    pub fn status(mut self, v: ContractStatus) -> Self { self.status = v; self }

    /// Sets the start date (epoch seconds).
    pub fn start_date(mut self, v: u32) -> Self { self.start_date = v; self }

    /// Sets the end date (epoch seconds).
    pub fn end_date(mut self, v: u32) -> Self { self.end_date = v; self }

    /// Sets the user's identity/intent signature.
    pub fn signature_user(mut self, v: impl ToString) -> Self {
        self.signature_user = v.to_string();
        self
    }

    /// Sets the hoster's signature.
    pub fn signature_hoster(mut self, v: impl ToString) -> Self {
        self.signature_hoster = v.to_string();
        self
    }

    /// Sets the billing granularity.
    pub fn billing_period(mut self, v: BillingPeriod) -> Self { self.billing_period = v; self }
}
impl ComputeSliceProvisioned {
    /// Creates a slice with every field defaulted.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the hosting node id.
    pub fn node_id(mut self, v: u32) -> Self { self.node_id = v; self }

    /// Sets the slice id within its node.
    pub fn id(mut self, v: u16) -> Self { self.id = v; self }

    /// Sets the memory allocation in GB.
    pub fn mem_gb(mut self, v: f64) -> Self { self.mem_gb = v; self }

    /// Sets the storage allocation in GB.
    pub fn storage_gb(mut self, v: f64) -> Self { self.storage_gb = v; self }

    /// Sets the passmark share.
    pub fn passmark(mut self, v: i32) -> Self { self.passmark = v; self }

    /// Sets the virtual core count.
    pub fn vcores(mut self, v: i32) -> Self { self.vcores = v; self }

    /// Sets the CPU oversubscription factor.
    pub fn cpu_oversubscription(mut self, v: i32) -> Self { self.cpu_oversubscription = v; self }

    /// Sets the free-form tags string.
    pub fn tags(mut self, v: impl ToString) -> Self {
        self.tags = v.to_string();
        self
    }
}
impl StorageSliceProvisioned {
    /// Creates a slice with every field defaulted.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the hosting node id.
    pub fn node_id(mut self, v: u32) -> Self { self.node_id = v; self }

    /// Sets the slice id within its node.
    pub fn id(mut self, v: u16) -> Self { self.id = v; self }

    /// Sets the slice size in GB.
    pub fn storage_size_gb(mut self, v: i32) -> Self { self.storage_size_gb = v; self }

    /// Sets the free-form tags string.
    pub fn tags(mut self, v: impl ToString) -> Self {
        self.tags = v.to_string();
        self
    }
}

View File

@@ -0,0 +1,18 @@
// Grid4 marketplace models: bids, contracts, nodes, node groups, reputation
// and reservations, plus shared policy types (`common`).
pub mod bid;
pub mod common;
pub mod contract;
pub mod node;
pub mod nodegroup;
pub mod reputation;
pub mod reservation;
// Re-export the model types at the module root for convenient imports.
pub use bid::{Bid, BidStatus, BillingPeriod};
pub use common::{PricingPolicy, SLAPolicy};
pub use contract::{Contract, ContractStatus, ComputeSliceProvisioned, StorageSliceProvisioned};
pub use node::{
    CPUDevice, ComputeSlice, DeviceInfo, GPUDevice, MemoryDevice, NetworkDevice, Node,
    NodeCapacity, StorageDevice, StorageSlice,
};
pub use nodegroup::NodeGroup;
pub use reputation::{NodeGroupReputation, NodeReputation};
pub use reservation::{Reservation, ReservationStatus};

View File

@@ -0,0 +1,281 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
use super::common::{PricingPolicy, SLAPolicy};
/// Storage device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct StorageDevice {
    /// can be used in node
    // NOTE(review): "can be used in node" likely means "identifier referenced by
    // slices in the node" — confirm and tighten the wording.
    pub id: String,
    /// Size of the storage device in gigabytes
    pub size_gb: f64,
    /// Description of the storage device
    pub description: String,
}
/// Memory device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct MemoryDevice {
    /// can be used in node
    pub id: String,
    /// Size of the memory device in gigabytes
    pub size_gb: f64,
    /// Description of the memory device
    pub description: String,
}
/// CPU device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct CPUDevice {
    /// can be used in node
    pub id: String,
    /// Number of CPU cores
    pub cores: i32,
    /// Passmark score
    pub passmark: i32,
    /// Description of the CPU
    pub description: String,
    /// Brand of the CPU
    pub cpu_brand: String,
    /// Version of the CPU
    pub cpu_version: String,
}
/// GPU device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct GPUDevice {
    /// can be used in node
    pub id: String,
    /// Number of GPU cores
    pub cores: i32,
    /// Size of the GPU memory in gigabytes
    pub memory_gb: f64,
    /// Description of the GPU
    pub description: String,
    /// Brand of the GPU
    pub gpu_brand: String,
    /// Version of the GPU
    pub gpu_version: String,
}
/// Network device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NetworkDevice {
    /// can be used in node
    pub id: String,
    /// Network speed in Mbps
    pub speed_mbps: i32,
    /// Description of the network device
    pub description: String,
}
/// Aggregated device info for a node
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct DeviceInfo {
    /// Hardware vendor name.
    pub vendor: String,
    /// All storage devices present in the node.
    pub storage: Vec<StorageDevice>,
    /// All memory devices present in the node.
    pub memory: Vec<MemoryDevice>,
    /// All CPUs present in the node.
    pub cpu: Vec<CPUDevice>,
    /// All GPUs present in the node.
    pub gpu: Vec<GPUDevice>,
    /// All network interfaces present in the node.
    pub network: Vec<NetworkDevice>,
}
/// NodeCapacity represents the hardware capacity details of a node.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeCapacity {
    /// Total storage in gigabytes
    pub storage_gb: f64,
    /// Total memory in gigabytes
    pub mem_gb: f64,
    /// Total GPU memory in gigabytes
    pub mem_gb_gpu: f64,
    /// Passmark score for the node
    pub passmark: i32,
    /// Total virtual cores
    pub vcores: i32,
}
// PricingPolicy and SLAPolicy moved to `common.rs` to be shared across models.
/// Compute slice (typically represents a base unit of compute)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct ComputeSlice {
    /// the id of the slice in the node
    pub id: u16,
    /// Memory allocated to the slice, in GB.
    pub mem_gb: f64,
    /// Storage allocated to the slice, in GB.
    pub storage_gb: f64,
    /// Passmark share of the slice.
    pub passmark: i32,
    /// Virtual cores allocated.
    pub vcores: i32,
    /// CPU oversubscription factor.
    pub cpu_oversubscription: i32,
    /// Storage oversubscription factor.
    pub storage_oversubscription: i32,
    /// nr of GPU's see node to know what GPU's are
    pub gpus: u8,
}
impl ComputeSlice {
    /// Creates a slice with all fields zeroed.
    ///
    /// All-zero is exactly what the derived `Default` produces, so delegate to
    /// it instead of spelling every field out — this keeps `new` in sync if
    /// fields are ever added.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the slice id within its node.
    pub fn id(mut self, id: u16) -> Self {
        self.id = id;
        self
    }

    /// Sets the memory allocation in GB.
    pub fn mem_gb(mut self, v: f64) -> Self {
        self.mem_gb = v;
        self
    }

    /// Sets the storage allocation in GB.
    pub fn storage_gb(mut self, v: f64) -> Self {
        self.storage_gb = v;
        self
    }

    /// Sets the passmark share.
    pub fn passmark(mut self, v: i32) -> Self {
        self.passmark = v;
        self
    }

    /// Sets the virtual core count.
    pub fn vcores(mut self, v: i32) -> Self {
        self.vcores = v;
        self
    }

    /// Sets the CPU oversubscription factor.
    pub fn cpu_oversubscription(mut self, v: i32) -> Self {
        self.cpu_oversubscription = v;
        self
    }

    /// Sets the storage oversubscription factor.
    pub fn storage_oversubscription(mut self, v: i32) -> Self {
        self.storage_oversubscription = v;
        self
    }

    /// Sets the number of GPUs attached to the slice.
    pub fn gpus(mut self, v: u8) -> Self {
        self.gpus = v;
        self
    }
}
/// Storage slice (typically 1GB of storage)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct StorageSlice {
    /// the id of the slice in the node, are tracked in the node itself
    pub id: u16,
}
impl StorageSlice {
    /// Create a slice with id 0 (identical to `Default`).
    pub fn new() -> Self {
        Self::default()
    }
    /// Set the slice id within the node (fluent).
    pub fn id(mut self, id: u16) -> Self { self.id = id; self }
}
/// Grid4 Node model: a single machine in the grid, owned by a node group.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Node {
    /// Common model data (id etc.) shared by all heromodels types
    pub base_data: BaseModelData,
    /// Link to node group
    #[index]
    pub nodegroupid: i32,
    /// Uptime percentage 0..100
    pub uptime: i32,
    /// Compute slices carved out of this node
    pub computeslices: Vec<ComputeSlice>,
    /// Storage slices carved out of this node
    pub storageslices: Vec<StorageSlice>,
    /// Hardware devices discovered on this node
    pub devices: DeviceInfo,
    /// 2 letter code as specified in lib/data/countries/data/countryInfo.txt
    #[index]
    pub country: String,
    /// Hardware capacity details
    pub capacity: NodeCapacity,
    /// first time node was active
    pub birthtime: u32,
    /// node public key
    #[index]
    pub pubkey: String,
    /// signature done on node to validate pubkey with privkey
    pub signature_node: String,
    /// signature as done by farmers to validate their identity
    pub signature_farmer: String,
}
impl Node {
    /// Create a node with freshly initialised base data; every other field is
    /// zero/empty, exactly as the derived `Default` produces them.
    pub fn new() -> Self {
        Self {
            base_data: BaseModelData::new(),
            ..Default::default()
        }
    }
    /// Set the owning node group id (fluent).
    pub fn nodegroupid(mut self, v: i32) -> Self { self.nodegroupid = v; self }
    /// Set the uptime percentage 0..100 (fluent).
    pub fn uptime(mut self, v: i32) -> Self { self.uptime = v; self }
    /// Append a compute slice (fluent).
    pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self { self.computeslices.push(s); self }
    /// Append a storage slice (fluent).
    pub fn add_storage_slice(mut self, s: StorageSlice) -> Self { self.storageslices.push(s); self }
    /// Set the hardware device inventory (fluent).
    pub fn devices(mut self, d: DeviceInfo) -> Self { self.devices = d; self }
    /// Set the 2-letter country code (fluent).
    pub fn country(mut self, c: impl ToString) -> Self { self.country = c.to_string(); self }
    /// Set the aggregated hardware capacity (fluent).
    pub fn capacity(mut self, c: NodeCapacity) -> Self { self.capacity = c; self }
    /// Set the first-active timestamp (fluent).
    pub fn birthtime(mut self, t: u32) -> Self { self.birthtime = t; self }
    /// Set the node public key (fluent).
    pub fn pubkey(mut self, v: impl ToString) -> Self { self.pubkey = v.to_string(); self }
    /// Set the node signature (fluent).
    pub fn signature_node(mut self, v: impl ToString) -> Self { self.signature_node = v.to_string(); self }
    /// Set the farmer signature (fluent).
    pub fn signature_farmer(mut self, v: impl ToString) -> Self { self.signature_farmer = v.to_string(); self }
    /// Placeholder for capacity recalculation out of the devices on the Node
    pub fn check(self) -> Self {
        // TODO: calculate NodeCapacity out of the devices on the Node
        self
    }
}

View File

@@ -0,0 +1,52 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
use super::common::{PricingPolicy, SLAPolicy};
/// Grid4 NodeGroup model (root object for farmer configuration)
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeGroup {
    /// Common model data (id etc.) shared by all heromodels types
    pub base_data: BaseModelData,
    /// link back to farmer who owns the nodegroup, is a user?
    #[index]
    pub farmerid: u32,
    /// only visible by farmer, in future encrypted, used to boot a node
    pub secret: String,
    /// Free-form description of the group
    pub description: String,
    /// SLA commitments the farmer makes for this group
    pub slapolicy: SLAPolicy,
    /// Discount/pricing rules applied to this group
    pub pricingpolicy: PricingPolicy,
    /// pricing in CC - cloud credit, per 2GB node slice
    pub compute_slice_normalized_pricing_cc: f64,
    /// pricing in CC - cloud credit, per 1GB storage slice
    pub storage_slice_normalized_pricing_cc: f64,
    /// signature as done by farmers to validate that they created this group
    pub signature_farmer: String,
}
impl NodeGroup {
    /// Create a node group with fresh base data. The pricing policy starts
    /// from `PricingPolicy::new()`; all remaining fields are zero/empty.
    pub fn new() -> Self {
        Self {
            base_data: BaseModelData::new(),
            pricingpolicy: PricingPolicy::new(),
            ..Default::default()
        }
    }
    /// Set the owning farmer id (fluent).
    pub fn farmerid(mut self, v: u32) -> Self {
        self.farmerid = v;
        self
    }
    /// Set the boot secret (fluent).
    pub fn secret(mut self, v: impl ToString) -> Self {
        self.secret = v.to_string();
        self
    }
    /// Set the description (fluent).
    pub fn description(mut self, v: impl ToString) -> Self {
        self.description = v.to_string();
        self
    }
    /// Set the SLA policy (fluent).
    pub fn slapolicy(mut self, v: SLAPolicy) -> Self {
        self.slapolicy = v;
        self
    }
    /// Set the pricing policy (fluent).
    pub fn pricingpolicy(mut self, v: PricingPolicy) -> Self {
        self.pricingpolicy = v;
        self
    }
    /// Set the per-2GB compute slice price in cloud credits (fluent).
    pub fn compute_slice_normalized_pricing_cc(mut self, v: f64) -> Self {
        self.compute_slice_normalized_pricing_cc = v;
        self
    }
    /// Set the per-1GB storage slice price in cloud credits (fluent).
    pub fn storage_slice_normalized_pricing_cc(mut self, v: f64) -> Self {
        self.storage_slice_normalized_pricing_cc = v;
        self
    }
    /// Set the farmer signature (fluent).
    pub fn signature_farmer(mut self, v: impl ToString) -> Self {
        self.signature_farmer = v.to_string();
        self
    }
}

View File

@@ -0,0 +1,85 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// Node reputation information (embedded inside `NodeGroupReputation`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeReputation {
    /// Id of the node this reputation entry refers to
    pub node_id: u32,
    /// between 0 and 100, earned over time
    pub reputation: i32,
    /// between 0 and 100, set by system, farmer has no ability to set this
    pub uptime: i32,
}
/// NodeGroup reputation model (root object; aggregates per-node entries).
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeGroupReputation {
    /// Common model data (id etc.) shared by all heromodels types
    pub base_data: BaseModelData,
    /// Id of the node group this reputation record refers to
    #[index]
    pub nodegroup_id: u32,
    /// between 0 and 100, earned over time
    pub reputation: i32,
    /// between 0 and 100, set by system, farmer has no ability to set this
    pub uptime: i32,
    /// Per-node reputation entries for the group's nodes
    pub nodes: Vec<NodeReputation>,
}
impl NodeGroupReputation {
    /// New record: reputation starts at 50 (default as per spec), uptime 0,
    /// no node entries, fresh base data.
    pub fn new() -> Self {
        Self {
            base_data: BaseModelData::new(),
            reputation: 50, // default as per spec
            ..Default::default()
        }
    }
    /// Set the node group id (fluent).
    pub fn nodegroup_id(mut self, v: u32) -> Self { self.nodegroup_id = v; self }
    /// Set the reputation 0..100 (fluent).
    pub fn reputation(mut self, v: i32) -> Self { self.reputation = v; self }
    /// Set the uptime 0..100 (fluent).
    pub fn uptime(mut self, v: i32) -> Self { self.uptime = v; self }
    /// Append one per-node reputation entry (fluent).
    pub fn add_node_reputation(mut self, node_rep: NodeReputation) -> Self { self.nodes.push(node_rep); self }
}
impl NodeReputation {
    /// New per-node record: reputation starts at 50 (default as per spec).
    pub fn new() -> Self {
        Self {
            reputation: 50, // default as per spec
            ..Default::default()
        }
    }
    /// Set the node id (fluent).
    pub fn node_id(mut self, v: u32) -> Self { self.node_id = v; self }
    /// Set the reputation 0..100 (fluent).
    pub fn reputation(mut self, v: i32) -> Self { self.reputation = v; self }
    /// Set the uptime 0..100 (fluent).
    pub fn uptime(mut self, v: i32) -> Self { self.uptime = v; self }
}

View File

@@ -0,0 +1,58 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// Reservation status as per V spec
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum ReservationStatus {
    /// Created but not yet confirmed (the default)
    #[default]
    Pending,
    /// Accepted; capacity will be assigned
    Confirmed,
    /// Slices have been assigned
    Assigned,
    /// Cancelled before completion
    Cancelled,
    /// Finished
    Done,
}
/// Grid4 Reservation model: a customer's claim on compute/storage slices.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Reservation {
    /// Common model data (id etc.) shared by all heromodels types
    pub base_data: BaseModelData,
    /// links back to customer for this capacity
    #[index]
    pub customer_id: u32,
    /// Ids of the reserved compute slices
    pub compute_slices: Vec<u32>,
    /// Ids of the reserved storage slices
    pub storage_slices: Vec<u32>,
    /// Current lifecycle state of the reservation
    pub status: ReservationStatus,
    /// if obligation then will be charged and money needs to be in escrow, otherwise its an intent
    pub obligation: bool,
    /// epoch
    pub start_date: u32,
    /// epoch end of the reservation window
    pub end_date: u32,
}
impl Reservation {
    /// Create a pending, non-obligating reservation with fresh base data;
    /// all other fields are zero/empty as produced by the derived `Default`.
    pub fn new() -> Self {
        Self {
            base_data: BaseModelData::new(),
            ..Default::default()
        }
    }
    /// Set the customer id (fluent).
    pub fn customer_id(mut self, v: u32) -> Self {
        self.customer_id = v;
        self
    }
    /// Append one compute slice id (fluent).
    pub fn add_compute_slice(mut self, id: u32) -> Self {
        self.compute_slices.push(id);
        self
    }
    /// Replace all compute slice ids (fluent).
    pub fn compute_slices(mut self, v: Vec<u32>) -> Self {
        self.compute_slices = v;
        self
    }
    /// Append one storage slice id (fluent).
    pub fn add_storage_slice(mut self, id: u32) -> Self {
        self.storage_slices.push(id);
        self
    }
    /// Replace all storage slice ids (fluent).
    pub fn storage_slices(mut self, v: Vec<u32>) -> Self {
        self.storage_slices = v;
        self
    }
    /// Set the reservation status (fluent).
    pub fn status(mut self, v: ReservationStatus) -> Self {
        self.status = v;
        self
    }
    /// Set the obligation flag (fluent).
    pub fn obligation(mut self, v: bool) -> Self {
        self.obligation = v;
        self
    }
    /// Set the start date, epoch seconds (fluent).
    pub fn start_date(mut self, v: u32) -> Self {
        self.start_date = v;
        self
    }
    /// Set the end date, epoch seconds (fluent).
    pub fn end_date(mut self, v: u32) -> Self {
        self.end_date = v;
        self
    }
}

View File

@@ -0,0 +1,194 @@
# Grid4 Data Model
This module defines data models for nodes, groups, and slices in a cloud/grid infrastructure. Each root object is marked with `@[heap]` and can be indexed for efficient querying.
## Root Objects Overview
| Object | Description | Index Fields |
| ----------- | --------------------------------------------- | ------------------------------ |
| `Node` | Represents a single node in the grid | `id`, `nodegroupid`, `country` |
| `NodeGroup` | Represents a group of nodes owned by a farmer | `id`, `farmerid` |
---
## Node
Represents a single node in the grid with slices, devices, and capacity.
| Field | Type | Description | Indexed |
| --------------- | ---------------- | -------------------------------------------- | ------- |
| `id` | `int` | Unique node ID | ✅ |
| `nodegroupid` | `int` | ID of the owning node group | ✅ |
| `uptime` | `int` | Uptime percentage (0-100) | ✅ |
| `computeslices` | `[]ComputeSlice` | List of compute slices | ❌ |
| `storageslices` | `[]StorageSlice` | List of storage slices | ❌ |
| `devices` | `DeviceInfo` | Hardware device info (storage, memory, etc.) | ❌ |
| `country` | `string` | 2-letter country code | ✅ |
| `capacity` | `NodeCapacity` | Aggregated hardware capacity | ❌ |
| `birthtime`     | `u32`            | First time the node was active (epoch)       | ❌ |
---
## NodeGroup
Represents a group of nodes owned by a farmer, with policies.
| Field | Type | Description | Indexed |
| ------------------------------------- | --------------- | ---------------------------------------------- | ------- |
| `id` | `u32` | Unique group ID | ✅ |
| `farmerid` | `u32` | Farmer/user ID | ✅ |
| `secret` | `string` | Encrypted secret for booting nodes | ❌ |
| `description` | `string` | Group description | ❌ |
| `slapolicy` | `SLAPolicy` | SLA policy details | ❌ |
| `pricingpolicy` | `PricingPolicy` | Pricing policy details | ❌ |
| `compute_slice_normalized_pricing_cc` | `f64` | Pricing per 2GB compute slice in cloud credits | ❌ |
| `storage_slice_normalized_pricing_cc` | `f64` | Pricing per 1GB storage slice in cloud credits | ❌ |
| `reputation` | `int` | Reputation (0-100) | ✅ |
| `uptime` | `int` | Uptime (0-100) | ✅ |
---
## ComputeSlice
Represents a compute slice (e.g., 1GB memory unit).
| Field | Type | Description |
| -------------------------- | --------------- | -------------------------------- |
| `nodeid` | `u32` | Owning node ID |
| `id` | `int` | Slice ID in node |
| `mem_gb` | `f64` | Memory in GB |
| `storage_gb` | `f64` | Storage in GB |
| `passmark` | `int` | Passmark score |
| `vcores` | `int` | Virtual cores |
| `cpu_oversubscription` | `int` | CPU oversubscription ratio |
| `storage_oversubscription` | `int` | Storage oversubscription ratio |
| `price_range` | `[]f64` | Price range [min, max] |
| `gpus` | `u8` | Number of GPUs |
| `price_cc` | `f64` | Price per slice in cloud credits |
| `pricing_policy` | `PricingPolicy` | Pricing policy |
| `sla_policy` | `SLAPolicy` | SLA policy |
---
## StorageSlice
Represents a 1GB storage slice.
| Field | Type | Description |
| ---------------- | --------------- | -------------------------------- |
| `nodeid` | `u32` | Owning node ID |
| `id` | `int` | Slice ID in node |
| `price_cc` | `f64` | Price per slice in cloud credits |
| `pricing_policy` | `PricingPolicy` | Pricing policy |
| `sla_policy` | `SLAPolicy` | SLA policy |
---
## DeviceInfo
Hardware device information for a node.
| Field | Type | Description |
| --------- | ----------------- | ----------------------- |
| `vendor` | `string` | Vendor of the node |
| `storage` | `[]StorageDevice` | List of storage devices |
| `memory` | `[]MemoryDevice` | List of memory devices |
| `cpu` | `[]CPUDevice` | List of CPU devices |
| `gpu` | `[]GPUDevice` | List of GPU devices |
| `network` | `[]NetworkDevice` | List of network devices |
---
## StorageDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `size_gb` | `f64` | Size in GB |
| `description` | `string` | Description of device |
---
## MemoryDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `size_gb` | `f64` | Size in GB |
| `description` | `string` | Description of device |
---
## CPUDevice
| Field | Type | Description |
| ------------- | -------- | ------------------------ |
| `id` | `string` | Unique ID for device |
| `cores` | `int` | Number of CPU cores |
| `passmark` | `int` | Passmark benchmark score |
| `description` | `string` | Description of device |
| `cpu_brand` | `string` | Brand of the CPU |
| `cpu_version` | `string` | Version of the CPU |
---
## GPUDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `cores` | `int` | Number of GPU cores |
| `memory_gb` | `f64` | GPU memory in GB |
| `description` | `string` | Description of device |
| `gpu_brand` | `string` | Brand of the GPU |
| `gpu_version` | `string` | Version of the GPU |
---
## NetworkDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `speed_mbps` | `int` | Network speed in Mbps |
| `description` | `string` | Description of device |
---
## NodeCapacity
Aggregated hardware capacity for a node.
| Field | Type | Description |
| ------------ | ----- | ---------------------- |
| `storage_gb` | `f64` | Total storage in GB |
| `mem_gb` | `f64` | Total memory in GB |
| `mem_gb_gpu` | `f64` | Total GPU memory in GB |
| `passmark` | `int` | Total passmark score |
| `vcores` | `int` | Total virtual cores |
---
## SLAPolicy
Service Level Agreement policy for slices or node groups.
| Field | Type | Description |
| -------------------- | ----- | --------------------------------------- |
| `sla_uptime` | `int` | Required uptime % (e.g., 90) |
| `sla_bandwidth_mbit` | `int` | Guaranteed bandwidth in Mbps (0 = none) |
| `sla_penalty` | `int` | Penalty % if SLA is breached (0-100) |
---
## PricingPolicy
Pricing policy for slices or node groups.
| Field | Type | Description |
| ---------------------------- | ------- | --------------------------------------------------------- |
| `marketplace_year_discounts` | `[]int` | Discounts for 1Y, 2Y, 3Y prepaid usage (e.g. [30,40,50]) |
| `volume_discounts` | `[]int` | Volume discounts based on purchase size (e.g. [10,20,30]) |

View File

@@ -0,0 +1,37 @@
module datamodel
// I can bid for infra, and optionally get accepted
@[heap]
pub struct Bid {
pub mut:
	id                  u32
	customer_id         u32 // links back to customer for this capacity (user on ledger)
	compute_slices_nr   int // nr of slices I need in 1 machine
	compute_slice_price f64 // price per 1 GB slice I want to accept
	storage_slices_nr   int // nr of storage slices I need
	storage_slice_price f64 // price per 1 GB storage slice I want to accept
	status              BidStatus
	obligation          bool // if obligation then will be charged and money needs to be in escrow, otherwise its an intent
	start_date          u32 // epoch
	end_date            u32
	signature_user      string // signature as done by a user/consumer to validate their identity and intent
	billing_period      BillingPeriod
}
// Lifecycle of a bid, from creation to completion.
pub enum BidStatus {
	pending
	confirmed
	assigned
	cancelled
	done
}
// Billing cadence for a bid or contract.
pub enum BillingPeriod {
	hourly
	monthly
	yearly
	biannually
	triannually
}

View File

@@ -0,0 +1,52 @@
module datamodel
// Agreed contract for provisioned capacity, signed by both customer and hoster
@[heap]
pub struct Contract {
pub mut:
	id                  u32
	customer_id         u32 // links back to customer for this capacity (user on ledger)
	compute_slices      []ComputeSliceProvisioned
	storage_slices      []StorageSliceProvisioned
	compute_slice_price f64 // price per 1 GB agreed upon
	storage_slice_price f64 // price per 1 GB agreed upon
	network_slice_price f64 // price per 1 GB agreed upon (transfer)
	status              ContractStatus
	start_date          u32 // epoch
	end_date            u32
	signature_user      string // signature as done by a user/consumer to validate their identity and intent
	signature_hoster    string // signature as done by the hoster
	billing_period      BillingPeriod
}
// Lifecycle state of a contract. Name fixed from the misspelled
// `ConctractStatus` so it matches the `status ContractStatus` field on Contract.
pub enum ContractStatus {
	active
	cancelled
	error
	paused
}
// typically 1GB of memory, but can be adjusted based on size of machine
pub struct ComputeSliceProvisioned {
pub mut:
	node_id              u32 // node hosting this slice
	id                   u16 // the id of the slice in the node
	mem_gb               f64 // memory allocated to the slice, in GB
	storage_gb           f64 // storage allocated to the slice, in GB
	passmark             int // passmark score attributed to the slice
	vcores               int // virtual cores attributed to the slice
	cpu_oversubscription int // CPU oversubscription ratio
	tags                 string // free-form tags
}
// 1GB of storage
pub struct StorageSliceProvisioned {
pub mut:
	node_id         u32 // node hosting this slice
	id              u16 // the id of the slice in the node, are tracked in the node itself
	storage_size_gb int // allocated size, in GB
	tags            string // free-form tags
}

View File

@@ -0,0 +1,104 @@
module datamodel
// ACCESS ONLY TF
// A single machine in the grid, owned by a NodeGroup (farmer).
@[heap]
pub struct Node {
pub mut:
	id               int
	nodegroupid      int // link to the owning NodeGroup
	uptime           int // 0..100
	computeslices    []ComputeSlice
	storageslices    []StorageSlice
	devices          DeviceInfo
	country          string // 2 letter code as specified in lib/data/countries/data/countryInfo.txt, use that library for validation
	capacity         NodeCapacity // Hardware capacity details
	birthtime        u32 // first time node was active
	pubkey           string
	signature_node   string // signature done on node to validate pubkey with privkey
	signature_farmer string // signature as done by farmers to validate their identity
}
// Aggregated device inventory for a node: vendor plus per-category hardware lists.
pub struct DeviceInfo {
pub mut:
	vendor  string
	storage []StorageDevice
	memory  []MemoryDevice
	cpu     []CPUDevice
	gpu     []GPUDevice
	network []NetworkDevice
}
// A physical storage device (disk/SSD) in a node.
pub struct StorageDevice {
pub mut:
	id          string // can be used in node
	size_gb     f64 // Size of the storage device in gigabytes
	description string // Description of the storage device
}
// A physical memory module in a node.
pub struct MemoryDevice {
pub mut:
	id          string // can be used in node
	size_gb     f64 // Size of the memory device in gigabytes
	description string // Description of the memory device
}
// A CPU package in a node.
pub struct CPUDevice {
pub mut:
	id          string // can be used in node
	cores       int // Number of CPU cores
	passmark    int // Passmark benchmark score
	description string // Description of the CPU
	cpu_brand   string // Brand of the CPU
	cpu_version string // Version of the CPU
}
// A GPU in a node.
pub struct GPUDevice {
pub mut:
	id          string // can be used in node
	cores       int // Number of GPU cores
	memory_gb   f64 // Size of the GPU memory in gigabytes
	description string // Description of the GPU
	gpu_brand   string
	gpu_version string
}
// A network interface in a node.
pub struct NetworkDevice {
pub mut:
	id          string // can be used in node
	speed_mbps  int // Network speed in Mbps
	description string // Description of the network device
}
// NodeCapacity represents the hardware capacity details of a node.
pub struct NodeCapacity {
pub mut:
	storage_gb f64 // Total storage in gigabytes
	mem_gb     f64 // Total memory in gigabytes
	mem_gb_gpu f64 // Total GPU memory in gigabytes
	passmark   int // Passmark score for the node
	vcores     int // Total virtual cores
}
// typically 1GB of memory, but can be adjusted based on size of machine
// NOTE: the slice id field was garbled as `u16 int` (a type name used as a
// field name, which does not compile); fixed to `id u16` to match the Rust
// port of this model.
pub struct ComputeSlice {
pub mut:
	id                       u16 // the id of the slice in the node
	mem_gb                   f64
	storage_gb               f64
	passmark                 int
	vcores                   int
	cpu_oversubscription     int
	storage_oversubscription int
	gpus                     u8 // nr of GPU's see node to know what GPU's are
}
// 1GB of storage
// NOTE: the slice id field was garbled as `u16 int`; fixed to `id u16` to
// match the Rust port of this model.
pub struct StorageSlice {
pub mut:
	id u16 // the id of the slice in the node, are tracked in the node itself
}
// check recalculates the aggregated NodeCapacity from the node's devices.
// Currently a stub — the calculation is not implemented yet.
fn (mut n Node) check() ! {
	// todo calculate NodeCapacity out of the devices on the Node
}

View File

@@ -0,0 +1,33 @@
module datamodel
// is a root object, is the only obj farmer needs to configure in the UI, this defines how slices will be created
@[heap]
pub struct NodeGroup {
pub mut:
	id                                  u32
	farmerid                            u32 // link back to farmer who owns the nodegroup, is a user?
	secret                              string // only visible by farmer, in future encrypted, used to boot a node
	description                         string
	slapolicy                           SLAPolicy
	pricingpolicy                       PricingPolicy
	compute_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 2GB node slice
	storage_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 1GB storage slice
	signature_farmer                    string // signature as done by farmers to validate that they created this group
}
// Service level agreement a farmer commits to for a node group.
pub struct SLAPolicy {
pub mut:
	sla_uptime         int // should +90
	sla_bandwidth_mbit int // minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee
	sla_penalty        int // 0-100, percent of money given back in relation to month if sla breached, e.g. 200 means we return 2 months worth of rev if sla missed
}
// Discount rules applied to purchases from a node group.
pub struct PricingPolicy {
pub mut:
	marketplace_year_discounts []int = [30, 40, 50] // e.g. 30,40,50 means if user has more CC in wallet than 1 year utilization on all his purchases then this provider gives 30%, 2Y 40%, ...
	// volume_discounts []int = [10, 20, 30] // e.g. 10,20,30
}

View File

@@ -0,0 +1,19 @@
// Reputation record for a node group, aggregating per-node entries.
@[heap]
pub struct NodeGroupReputation {
pub mut:
	nodegroup_id u32
	reputation   int = 50 // between 0 and 100, earned over time
	uptime       int // between 0 and 100, set by system, farmer has no ability to set this
	nodes        []NodeReputation
}
// Reputation record for a single node.
pub struct NodeReputation {
pub mut:
	node_id    u32
	reputation int = 50 // between 0 and 100, earned over time
	uptime     int // between 0 and 100, set by system, farmer has no ability to set this
}

View File

@@ -0,0 +1,302 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Defines the supported DNS record types
// The manual `impl Default` is replaced by the derived `Default` with a
// `#[default]` variant, the same pattern already used elsewhere in this
// codebase (e.g. `ReservationStatus`). Behavior is unchanged: `A` stays the
// default.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum NameType {
    /// IPv4 address record — the default variant
    #[default]
    A,
    AAAA,
    CNAME,
    MX,
    TXT,
    SRV,
    PTR,
    NS,
}
/// Category of the DNS record
// Manual `impl Default` replaced by derived `Default` + `#[default]`
// (same pattern as `ReservationStatus`); `IPv4` remains the default.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum NameCat {
    /// Default category
    #[default]
    IPv4,
    IPv6,
    Mycelium,
}
/// Status of a DNS zone
// Manual `impl Default` replaced by derived `Default` + `#[default]`
// (same pattern as `ReservationStatus`); `Active` remains the default.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum DNSZoneStatus {
    /// Default status for new zones
    #[default]
    Active,
    Suspended,
    Archived,
}
/// Represents a DNS record configuration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DNSRecord {
    /// Subdomain this record applies to (empty for the apex)
    pub subdomain: String,
    /// DNS record type (A, AAAA, CNAME, ...)
    pub record_type: NameType,
    /// Record value (address, target name, text, ...)
    pub value: String,
    /// Priority (used by types such as MX/SRV)
    pub priority: u32,
    /// Time-to-live in seconds
    pub ttl: u32,
    /// Whether the record is currently served
    pub is_active: bool,
    /// Address category of the record
    pub cat: NameCat,
    /// Whether the record is a wildcard entry
    pub is_wildcard: bool,
}
impl DNSRecord {
    /// Create an empty, active record with a TTL of 3600 seconds and the
    /// default type/category.
    pub fn new() -> Self {
        Self {
            subdomain: String::new(),
            record_type: NameType::default(),
            value: String::new(),
            priority: 0,
            ttl: 3600,
            is_active: true,
            cat: NameCat::default(),
            is_wildcard: false,
        }
    }
    /// Set the subdomain (fluent).
    pub fn subdomain(mut self, subdomain: impl ToString) -> Self { self.subdomain = subdomain.to_string(); self }
    /// Set the record type (fluent).
    pub fn record_type(mut self, record_type: NameType) -> Self { self.record_type = record_type; self }
    /// Set the record value (fluent).
    pub fn value(mut self, value: impl ToString) -> Self { self.value = value.to_string(); self }
    /// Set the priority (fluent).
    pub fn priority(mut self, priority: u32) -> Self { self.priority = priority; self }
    /// Set the TTL in seconds (fluent).
    pub fn ttl(mut self, ttl: u32) -> Self { self.ttl = ttl; self }
    /// Toggle whether the record is served (fluent).
    pub fn is_active(mut self, is_active: bool) -> Self { self.is_active = is_active; self }
    /// Set the record category (fluent).
    pub fn cat(mut self, cat: NameCat) -> Self { self.cat = cat; self }
    /// Toggle wildcard matching (fluent).
    pub fn is_wildcard(mut self, is_wildcard: bool) -> Self { self.is_wildcard = is_wildcard; self }
    /// Finalize the builder chain (identity).
    pub fn build(self) -> Self { self }
}
/// SOA (Start of Authority) record for a DNS zone
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SOARecord {
    /// Id of the zone this SOA belongs to
    pub zone_id: u32,
    /// Primary nameserver for the zone
    pub primary_ns: String,
    /// Administrator contact email
    pub admin_email: String,
    /// Zone serial number
    pub serial: u64,
    /// Refresh interval in seconds
    pub refresh: u32,
    /// Retry interval in seconds
    pub retry: u32,
    /// Expire time in seconds
    pub expire: u32,
    /// Minimum TTL in seconds
    pub minimum_ttl: u32,
    /// Whether this SOA record is active
    pub is_active: bool,
}
impl SOARecord {
    /// Create an active SOA with conventional timer defaults:
    /// refresh 3600s, retry 600s, expire 604800s, minimum TTL 3600s.
    pub fn new() -> Self {
        Self {
            zone_id: 0,
            primary_ns: String::new(),
            admin_email: String::new(),
            serial: 0,
            refresh: 3600,
            retry: 600,
            expire: 604800,
            minimum_ttl: 3600,
            is_active: true,
        }
    }
    /// Set the owning zone id (fluent).
    pub fn zone_id(mut self, zone_id: u32) -> Self { self.zone_id = zone_id; self }
    /// Set the primary nameserver (fluent).
    pub fn primary_ns(mut self, primary_ns: impl ToString) -> Self { self.primary_ns = primary_ns.to_string(); self }
    /// Set the admin email (fluent).
    pub fn admin_email(mut self, admin_email: impl ToString) -> Self { self.admin_email = admin_email.to_string(); self }
    /// Set the serial number (fluent).
    pub fn serial(mut self, serial: u64) -> Self { self.serial = serial; self }
    /// Set the refresh interval in seconds (fluent).
    pub fn refresh(mut self, refresh: u32) -> Self { self.refresh = refresh; self }
    /// Set the retry interval in seconds (fluent).
    pub fn retry(mut self, retry: u32) -> Self { self.retry = retry; self }
    /// Set the expire time in seconds (fluent).
    pub fn expire(mut self, expire: u32) -> Self { self.expire = expire; self }
    /// Set the minimum TTL in seconds (fluent).
    pub fn minimum_ttl(mut self, minimum_ttl: u32) -> Self { self.minimum_ttl = minimum_ttl; self }
    /// Toggle the active flag (fluent).
    pub fn is_active(mut self, is_active: bool) -> Self { self.is_active = is_active; self }
    /// Finalize the builder chain (identity).
    pub fn build(self) -> Self { self }
}
/// Represents a DNS zone with its configuration and records
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct DNSZone {
    /// Base model data
    pub base_data: BaseModelData,
    /// Fully-qualified domain name of the zone
    #[index]
    pub domain: String,
    /// DNS records in the zone; indexed by subdomain and record type
    #[index(path = "subdomain")]
    #[index(path = "record_type")]
    pub dnsrecords: Vec<DNSRecord>,
    /// User ids allowed to administer the zone
    pub administrators: Vec<u32>,
    /// Current zone status
    pub status: DNSZoneStatus,
    /// Arbitrary key/value metadata for the zone
    pub metadata: HashMap<String, String>,
    /// SOA records; indexed by primary nameserver
    #[index(path = "primary_ns")]
    pub soarecord: Vec<SOARecord>,
}
impl DNSZone {
    /// Create a zone with the given id; all other fields start empty/default.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            ..Default::default()
        }
    }
    /// Set the domain name (fluent).
    pub fn domain(mut self, domain: impl ToString) -> Self { self.domain = domain.to_string(); self }
    /// Append a DNS record (fluent).
    pub fn add_dnsrecord(mut self, record: DNSRecord) -> Self { self.dnsrecords.push(record); self }
    /// Replace all DNS records (fluent).
    pub fn dnsrecords(mut self, dnsrecords: Vec<DNSRecord>) -> Self { self.dnsrecords = dnsrecords; self }
    /// Append an administrator id (fluent).
    pub fn add_administrator(mut self, admin_id: u32) -> Self { self.administrators.push(admin_id); self }
    /// Replace all administrators (fluent).
    pub fn administrators(mut self, administrators: Vec<u32>) -> Self { self.administrators = administrators; self }
    /// Set the zone status (fluent).
    pub fn status(mut self, status: DNSZoneStatus) -> Self { self.status = status; self }
    /// Insert a single metadata entry (fluent).
    pub fn add_metadata(mut self, key: impl ToString, value: impl ToString) -> Self {
        self.metadata.insert(key.to_string(), value.to_string());
        self
    }
    /// Replace all metadata (fluent).
    pub fn metadata(mut self, metadata: HashMap<String, String>) -> Self { self.metadata = metadata; self }
    /// Append an SOA record (fluent).
    pub fn add_soarecord(mut self, soa: SOARecord) -> Self { self.soarecord.push(soa); self }
    /// Replace all SOA records (fluent).
    pub fn soarecord(mut self, soarecord: Vec<SOARecord>) -> Self { self.soarecord = soarecord; self }
    /// Finalize the builder chain (identity).
    pub fn build(self) -> Self { self }
}

View File

@@ -0,0 +1,232 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
/// Defines the lifecycle of a group
// Manual `impl Default` replaced by derived `Default` + `#[default]`
// (same pattern as `ReservationStatus`); `Active` remains the default.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum GroupStatus {
    /// Default state for new groups
    #[default]
    Active,
    Inactive,
    Suspended,
    Archived,
}
/// Visibility controls who can discover or view the group
// Manual `impl Default` replaced by derived `Default` + `#[default]`
// (same pattern as `ReservationStatus`); `Public` remains the default.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum Visibility {
    /// Anyone can see and request to join (the default)
    #[default]
    Public,
    /// Only invited users can see the group
    Private,
    /// Not visible in search; only accessible by direct link or DNS
    Unlisted,
}
/// GroupConfig holds rules that govern group membership and behavior
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct GroupConfig {
    /// Maximum number of members (0 appears to mean unlimited — TODO confirm)
    pub max_members: u32,
    /// Whether guest (non-member) access is allowed
    pub allow_guests: bool,
    /// Whether join requests are approved automatically
    pub auto_approve: bool,
    /// Whether joining requires an invitation
    pub require_invite: bool,
}
impl GroupConfig {
    /// Create a config with every limit and flag zeroed (identical to `Default`).
    pub fn new() -> Self {
        Self::default()
    }
    /// Set the member limit (fluent).
    pub fn max_members(mut self, max_members: u32) -> Self { self.max_members = max_members; self }
    /// Toggle guest access (fluent).
    pub fn allow_guests(mut self, allow_guests: bool) -> Self { self.allow_guests = allow_guests; self }
    /// Toggle automatic approval of join requests (fluent).
    pub fn auto_approve(mut self, auto_approve: bool) -> Self { self.auto_approve = auto_approve; self }
    /// Toggle invite-only joining (fluent).
    pub fn require_invite(mut self, require_invite: bool) -> Self { self.require_invite = require_invite; self }
    /// Finalize the builder chain (identity).
    pub fn build(self) -> Self { self }
}
/// Represents a collaborative or access-controlled unit within the system
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Group {
    /// Base model data
    pub base_data: BaseModelData,
    /// Display name of the group
    #[index]
    pub name: String,
    /// Free-form description
    pub description: String,
    /// Ids of DNS records associated with the group
    pub dnsrecords: Vec<u32>,
    /// User ids with administrative rights
    pub administrators: Vec<u32>,
    /// Membership/behavior rules for the group
    pub config: GroupConfig,
    /// Current lifecycle status
    pub status: GroupStatus,
    /// Who can discover or view the group
    pub visibility: Visibility,
    /// Creation timestamp (epoch; units not shown here — TODO confirm)
    pub created: u64,
    /// Last-update timestamp (epoch; units not shown here — TODO confirm)
    pub updated: u64,
}
impl Group {
    /// Create a group with the given id; all other fields start empty/default
    /// (name/description empty, no records or admins, default config/status/
    /// visibility, timestamps 0).
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            ..Default::default()
        }
    }
    /// Set the group name (fluent).
    pub fn name(mut self, name: impl ToString) -> Self { self.name = name.to_string(); self }
    /// Set the group description (fluent).
    pub fn description(mut self, description: impl ToString) -> Self { self.description = description.to_string(); self }
    /// Append a DNS record id (fluent).
    pub fn add_dnsrecord(mut self, dnsrecord_id: u32) -> Self { self.dnsrecords.push(dnsrecord_id); self }
    /// Replace all DNS record ids (fluent).
    pub fn dnsrecords(mut self, dnsrecords: Vec<u32>) -> Self { self.dnsrecords = dnsrecords; self }
    /// Append an administrator user id (fluent).
    pub fn add_administrator(mut self, user_id: u32) -> Self { self.administrators.push(user_id); self }
    /// Replace all administrator user ids (fluent).
    pub fn administrators(mut self, administrators: Vec<u32>) -> Self { self.administrators = administrators; self }
    /// Set the group configuration (fluent).
    pub fn config(mut self, config: GroupConfig) -> Self { self.config = config; self }
    /// Set the group status (fluent).
    pub fn status(mut self, status: GroupStatus) -> Self { self.status = status; self }
    /// Set the group visibility (fluent).
    pub fn visibility(mut self, visibility: Visibility) -> Self { self.visibility = visibility; self }
    /// Set the created timestamp (fluent).
    pub fn created(mut self, created: u64) -> Self { self.created = created; self }
    /// Set the updated timestamp (fluent).
    pub fn updated(mut self, updated: u64) -> Self { self.updated = updated; self }
    /// Finalize the builder chain (identity).
    pub fn build(self) -> Self { self }
}
/// Represents the membership relationship between users and groups
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct UserGroupMembership {
    /// Base model data
    pub base_data: BaseModelData,
    /// The user this membership record belongs to
    #[index]
    pub user_id: u32,
    /// Ids of all groups the user belongs to
    pub group_ids: Vec<u32>,
}
impl UserGroupMembership {
    /// Create a membership record with the given id; no user or groups yet.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            ..Default::default()
        }
    }
    /// Set the user id (fluent).
    pub fn user_id(mut self, user_id: u32) -> Self { self.user_id = user_id; self }
    /// Append one group id (fluent).
    pub fn add_group_id(mut self, group_id: u32) -> Self { self.group_ids.push(group_id); self }
    /// Replace all group ids (fluent).
    pub fn group_ids(mut self, group_ids: Vec<u32>) -> Self { self.group_ids = group_ids; self }
    /// Finalize the builder chain (identity).
    pub fn build(self) -> Self { self }
}

View File

@@ -0,0 +1,113 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
/// Defines the possible roles a member can have.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum MemberRole {
    Owner,
    Admin,
    Moderator,
    Member,
    Guest,
}
impl Default for MemberRole {
    /// New members default to the ordinary `Member` role.
    fn default() -> Self {
        MemberRole::Member
    }
}
/// Represents the current status of membership.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum MemberStatus {
    Active,
    Pending,
    Suspended,
    Removed,
}
impl Default for MemberStatus {
    /// Memberships start out `Pending` (e.g. awaiting acceptance — confirm).
    fn default() -> Self {
        MemberStatus::Pending
    }
}
/// Represents a member within a circle.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Member {
    /// Base model data
    pub base_data: BaseModelData,
    // The underlying user (indexed for lookup).
    #[index]
    pub user_id: u32,
    pub role: MemberRole,
    pub status: MemberStatus,
    // Raw u64 timestamp — presumably epoch seconds; confirm against callers.
    pub joined_at: u64,
    // User ID of the inviter (0 when unset).
    pub invited_by: u32,
    // Free-form permission strings; semantics defined by consumers.
    pub permissions: Vec<String>,
}
impl Member {
    /// Create a member record with the given ID and zeroed/default fields.
    pub fn new(id: u32) -> Self {
        let mut member = Self {
            base_data: BaseModelData::new(),
            user_id: 0,
            role: MemberRole::default(),
            status: MemberStatus::default(),
            joined_at: 0,
            invited_by: 0,
            permissions: Vec::new(),
        };
        member.base_data.update_id(id);
        member
    }

    /// Set the user ID (fluent).
    pub fn user_id(self, user_id: u32) -> Self {
        Self { user_id, ..self }
    }

    /// Set the member role (fluent).
    pub fn role(self, role: MemberRole) -> Self {
        Self { role, ..self }
    }

    /// Set the member status (fluent).
    pub fn status(self, status: MemberStatus) -> Self {
        Self { status, ..self }
    }

    /// Set the joined timestamp (fluent).
    pub fn joined_at(self, joined_at: u64) -> Self {
        Self { joined_at, ..self }
    }

    /// Record which user invited this member (fluent).
    pub fn invited_by(self, invited_by: u32) -> Self {
        Self { invited_by, ..self }
    }

    /// Append one permission string (fluent).
    pub fn add_permission(self, permission: impl ToString) -> Self {
        let mut member = self;
        member.permissions.push(permission.to_string());
        member
    }

    /// Replace the whole permission list (fluent).
    pub fn permissions(self, permissions: Vec<String>) -> Self {
        Self { permissions, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}

View File

@@ -0,0 +1,10 @@
// Export all heroledger model modules
pub mod dnsrecord;
pub mod group;
pub mod membership;
pub mod money;
pub mod rhai;
pub mod secretbox;
pub mod signature;
pub mod user;
pub mod user_kvs;

View File

@@ -0,0 +1,507 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Represents the status of an account.
///
/// NOTE(review): not referenced by any struct visible in this module —
/// confirm it is used elsewhere before relying on it.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum AccountStatus {
    Active,
    Inactive,
    Suspended,
    Archived,
}
impl Default for AccountStatus {
    /// Accounts start out `Active`.
    fn default() -> Self {
        AccountStatus::Active
    }
}
/// Represents the type of transaction (see `Transaction::tx_type`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum TransactionType {
    Transfer,
    Clawback,
    Freeze,
    Unfreeze,
    Issue,
    Burn,
}
impl Default for TransactionType {
    /// Plain `Transfer` is the default transaction type.
    fn default() -> Self {
        TransactionType::Transfer
    }
}
/// Represents a signature for transactions (embedded in
/// `Transaction::signatures`).
///
/// NOTE(review): a separate, indexed `Signature` model exists in
/// `signature.rs`; this is the lightweight inline variant.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Signature {
    pub signer_id: u32,
    pub signature: String,
    pub timestamp: u64,
}

impl Signature {
    /// Create an empty signature (all fields zeroed/blank).
    pub fn new() -> Self {
        Self {
            signer_id: 0,
            signature: String::new(),
            timestamp: 0,
        }
    }

    /// Set the signer's user ID (fluent).
    pub fn signer_id(self, signer_id: u32) -> Self {
        Self { signer_id, ..self }
    }

    /// Set the signature payload (fluent).
    pub fn signature(self, signature: impl ToString) -> Self {
        Self { signature: signature.to_string(), ..self }
    }

    /// Set the signing timestamp (fluent).
    pub fn timestamp(self, timestamp: u64) -> Self {
        Self { timestamp, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Policy item for account operations.
///
/// Describes which signers may authorize an operation and how many
/// signatures are required; `threshold` and `recipient` semantics depend
/// on the policy slot it occupies (see `AccountPolicy`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct AccountPolicyItem {
    pub signers: Vec<u32>,
    pub min_signatures: u32,
    pub enabled: bool,
    pub threshold: f64,
    pub recipient: u32,
}

impl AccountPolicyItem {
    /// Create a disabled, empty policy item.
    pub fn new() -> Self {
        Self {
            signers: Vec::new(),
            min_signatures: 0,
            enabled: false,
            threshold: 0.0,
            recipient: 0,
        }
    }

    /// Append one authorized signer (fluent).
    pub fn add_signer(self, signer_id: u32) -> Self {
        let mut item = self;
        item.signers.push(signer_id);
        item
    }

    /// Replace the whole signer list (fluent).
    pub fn signers(self, signers: Vec<u32>) -> Self {
        Self { signers, ..self }
    }

    /// Set the minimum number of signatures required (fluent).
    pub fn min_signatures(self, min_signatures: u32) -> Self {
        Self { min_signatures, ..self }
    }

    /// Enable or disable this policy item (fluent).
    pub fn enabled(self, enabled: bool) -> Self {
        Self { enabled, ..self }
    }

    /// Set the threshold value (fluent).
    pub fn threshold(self, threshold: f64) -> Self {
        Self { threshold, ..self }
    }

    /// Set the recipient account/user ID (fluent).
    pub fn recipient(self, recipient: u32) -> Self {
        Self { recipient, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents an account in the financial system.
///
/// Accounts are keyed by their blockchain `address` (indexed) and carry a
/// `balance` denominated in `currency` for asset `assetid`.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Account {
    /// Base model data
    pub base_data: BaseModelData,
    pub owner_id: u32,
    #[index]
    pub address: String,
    pub balance: f64,
    pub currency: String,
    pub assetid: u32,
    pub last_activity: u64,
    pub administrators: Vec<u32>,
    pub accountpolicy: u32,
}

impl Account {
    /// Create an account with the given ID; all other fields start
    /// zeroed/blank.
    pub fn new(id: u32) -> Self {
        let mut account = Self {
            base_data: BaseModelData::new(),
            owner_id: 0,
            address: String::new(),
            balance: 0.0,
            currency: String::new(),
            assetid: 0,
            last_activity: 0,
            administrators: Vec::new(),
            accountpolicy: 0,
        };
        account.base_data.update_id(id);
        account
    }

    /// Set the owner ID (fluent).
    pub fn owner_id(self, owner_id: u32) -> Self {
        Self { owner_id, ..self }
    }

    /// Set the blockchain address (fluent).
    pub fn address(self, address: impl ToString) -> Self {
        Self { address: address.to_string(), ..self }
    }

    /// Set the balance (fluent).
    pub fn balance(self, balance: f64) -> Self {
        Self { balance, ..self }
    }

    /// Set the currency (fluent).
    pub fn currency(self, currency: impl ToString) -> Self {
        Self { currency: currency.to_string(), ..self }
    }

    /// Set the asset ID (fluent).
    pub fn assetid(self, assetid: u32) -> Self {
        Self { assetid, ..self }
    }

    /// Set the last-activity timestamp (fluent).
    pub fn last_activity(self, last_activity: u64) -> Self {
        Self { last_activity, ..self }
    }

    /// Append one administrator user ID (fluent); duplicates are not
    /// filtered.
    pub fn add_administrator(self, admin_id: u32) -> Self {
        let mut account = self;
        account.administrators.push(admin_id);
        account
    }

    /// Replace the whole administrator list (fluent).
    pub fn administrators(self, administrators: Vec<u32>) -> Self {
        Self { administrators, ..self }
    }

    /// Set the account policy ID (fluent).
    pub fn accountpolicy(self, accountpolicy: u32) -> Self {
        Self { accountpolicy, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents an asset in the financial system.
///
/// Assets are keyed by their blockchain `address` (indexed); `supply` and
/// `decimals` describe the issued quantity and its precision.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Asset {
    /// Base model data
    pub base_data: BaseModelData,
    #[index]
    pub address: String,
    pub assetid: u32,
    pub asset_type: String,
    pub issuer: u32,
    pub supply: f64,
    pub decimals: u8,
    pub is_frozen: bool,
    pub metadata: HashMap<String, String>,
    pub administrators: Vec<u32>,
    pub min_signatures: u32,
}

impl Asset {
    /// Create an asset with the given ID; all other fields start
    /// zeroed/blank.
    pub fn new(id: u32) -> Self {
        let mut asset = Self {
            base_data: BaseModelData::new(),
            address: String::new(),
            assetid: 0,
            asset_type: String::new(),
            issuer: 0,
            supply: 0.0,
            decimals: 0,
            is_frozen: false,
            metadata: HashMap::new(),
            administrators: Vec::new(),
            min_signatures: 0,
        };
        asset.base_data.update_id(id);
        asset
    }

    /// Set the blockchain address (fluent).
    pub fn address(self, address: impl ToString) -> Self {
        Self { address: address.to_string(), ..self }
    }

    /// Set the asset ID (fluent).
    pub fn assetid(self, assetid: u32) -> Self {
        Self { assetid, ..self }
    }

    /// Set the asset type (fluent).
    pub fn asset_type(self, asset_type: impl ToString) -> Self {
        Self { asset_type: asset_type.to_string(), ..self }
    }

    /// Set the issuer user ID (fluent).
    pub fn issuer(self, issuer: u32) -> Self {
        Self { issuer, ..self }
    }

    /// Set the total supply (fluent).
    pub fn supply(self, supply: f64) -> Self {
        Self { supply, ..self }
    }

    /// Set the number of decimal places (fluent).
    pub fn decimals(self, decimals: u8) -> Self {
        Self { decimals, ..self }
    }

    /// Set the frozen flag (fluent).
    pub fn is_frozen(self, is_frozen: bool) -> Self {
        Self { is_frozen, ..self }
    }

    /// Insert one metadata key/value pair (fluent); an existing key is
    /// overwritten.
    pub fn add_metadata(self, key: impl ToString, value: impl ToString) -> Self {
        let mut asset = self;
        asset.metadata.insert(key.to_string(), value.to_string());
        asset
    }

    /// Replace the whole metadata map (fluent).
    pub fn metadata(self, metadata: HashMap<String, String>) -> Self {
        Self { metadata, ..self }
    }

    /// Append one administrator user ID (fluent); duplicates are not
    /// filtered.
    pub fn add_administrator(self, admin_id: u32) -> Self {
        let mut asset = self;
        asset.administrators.push(admin_id);
        asset
    }

    /// Replace the whole administrator list (fluent).
    pub fn administrators(self, administrators: Vec<u32>) -> Self {
        Self { administrators, ..self }
    }

    /// Set the minimum signatures required (fluent).
    pub fn min_signatures(self, min_signatures: u32) -> Self {
        Self { min_signatures, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents account policies for various operations.
///
/// Each field is an `AccountPolicyItem` governing one operation class:
/// transfer, administration, clawback, and freeze.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct AccountPolicy {
    /// Base model data
    pub base_data: BaseModelData,
    pub transferpolicy: AccountPolicyItem,
    pub adminpolicy: AccountPolicyItem,
    pub clawbackpolicy: AccountPolicyItem,
    pub freezepolicy: AccountPolicyItem,
}

impl AccountPolicy {
    /// Create a policy set with the given ID; every item starts empty and
    /// disabled (see `AccountPolicyItem::new`).
    pub fn new(id: u32) -> Self {
        let mut policy = Self {
            base_data: BaseModelData::new(),
            transferpolicy: AccountPolicyItem::new(),
            adminpolicy: AccountPolicyItem::new(),
            clawbackpolicy: AccountPolicyItem::new(),
            freezepolicy: AccountPolicyItem::new(),
        };
        policy.base_data.update_id(id);
        policy
    }

    /// Set the transfer policy (fluent).
    pub fn transferpolicy(self, transferpolicy: AccountPolicyItem) -> Self {
        Self { transferpolicy, ..self }
    }

    /// Set the admin policy (fluent).
    pub fn adminpolicy(self, adminpolicy: AccountPolicyItem) -> Self {
        Self { adminpolicy, ..self }
    }

    /// Set the clawback policy (fluent).
    pub fn clawbackpolicy(self, clawbackpolicy: AccountPolicyItem) -> Self {
        Self { clawbackpolicy, ..self }
    }

    /// Set the freeze policy (fluent).
    pub fn freezepolicy(self, freezepolicy: AccountPolicyItem) -> Self {
        Self { freezepolicy, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents a financial transaction between two accounts (`source` →
/// `destination`) for asset `assetid`, with any collected signatures
/// attached inline.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Transaction {
    /// Base model data
    pub base_data: BaseModelData,
    pub txid: u32,
    pub source: u32,
    pub destination: u32,
    pub assetid: u32,
    pub amount: f64,
    pub timestamp: u64,
    pub status: String,
    pub memo: String,
    pub tx_type: TransactionType,
    pub signatures: Vec<Signature>,
}

impl Transaction {
    /// Create a transaction with the given ID; all other fields start
    /// zeroed/blank and `tx_type` is `Transfer` (the default).
    pub fn new(id: u32) -> Self {
        let mut tx = Self {
            base_data: BaseModelData::new(),
            txid: 0,
            source: 0,
            destination: 0,
            assetid: 0,
            amount: 0.0,
            timestamp: 0,
            status: String::new(),
            memo: String::new(),
            tx_type: TransactionType::default(),
            signatures: Vec::new(),
        };
        tx.base_data.update_id(id);
        tx
    }

    /// Set the transaction ID (fluent).
    pub fn txid(self, txid: u32) -> Self {
        Self { txid, ..self }
    }

    /// Set the source account (fluent).
    pub fn source(self, source: u32) -> Self {
        Self { source, ..self }
    }

    /// Set the destination account (fluent).
    pub fn destination(self, destination: u32) -> Self {
        Self { destination, ..self }
    }

    /// Set the asset ID (fluent).
    pub fn assetid(self, assetid: u32) -> Self {
        Self { assetid, ..self }
    }

    /// Set the amount (fluent).
    pub fn amount(self, amount: f64) -> Self {
        Self { amount, ..self }
    }

    /// Set the timestamp (fluent).
    pub fn timestamp(self, timestamp: u64) -> Self {
        Self { timestamp, ..self }
    }

    /// Set the free-form status string (fluent).
    pub fn status(self, status: impl ToString) -> Self {
        Self { status: status.to_string(), ..self }
    }

    /// Set the memo (fluent).
    pub fn memo(self, memo: impl ToString) -> Self {
        Self { memo: memo.to_string(), ..self }
    }

    /// Set the transaction type (fluent).
    pub fn tx_type(self, tx_type: TransactionType) -> Self {
        Self { tx_type, ..self }
    }

    /// Append one signature (fluent).
    pub fn add_signature(self, signature: Signature) -> Self {
        let mut tx = self;
        tx.signatures.push(signature);
        tx
    }

    /// Replace the whole signature list (fluent).
    pub fn signatures(self, signatures: Vec<Signature>) -> Self {
        Self { signatures, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}

View File

@@ -0,0 +1,308 @@
use ::rhai::plugin::*;
use ::rhai::{Dynamic, Engine, EvalAltResult, Module};
use std::mem;
use crate::models::heroledger::{
dnsrecord::DNSZone,
group::{Group, Visibility},
money::Account,
user::{User, UserStatus},
};
// ============================================================================
// User Module
// ============================================================================
type RhaiUser = User;

#[export_module]
mod rhai_user_module {
    // An inline `mod` does not inherit the enclosing file's `use` items, so
    // every model type referenced in function bodies is imported here
    // explicitly. Previously only `User` was imported; `UserStatus` (used in
    // `set_status`) was not, relying on whatever `#[export_module]` injects.
    use super::RhaiUser;
    use crate::models::heroledger::user::{User, UserStatus};

    /// Create a new, empty user (id 0).
    #[rhai_fn(name = "new_user", return_raw)]
    pub fn new_user() -> Result<RhaiUser, Box<EvalAltResult>> {
        Ok(User::new(0))
    }

    /// Set the username via the fluent builder and return the updated user.
    #[rhai_fn(name = "username", return_raw)]
    pub fn set_username(
        user: &mut RhaiUser,
        username: String,
    ) -> Result<RhaiUser, Box<EvalAltResult>> {
        // Take the value out of the &mut slot (leaving a Default) so the
        // consuming builder API can be applied, then store the result back.
        let owned = std::mem::take(user);
        *user = owned.username(username);
        Ok(user.clone())
    }

    /// Append an email address and return the updated user.
    #[rhai_fn(name = "add_email", return_raw)]
    pub fn add_email(user: &mut RhaiUser, email: String) -> Result<RhaiUser, Box<EvalAltResult>> {
        let owned = std::mem::take(user);
        *user = owned.add_email(email);
        Ok(user.clone())
    }

    /// Set the public key and return the updated user.
    #[rhai_fn(name = "pubkey", return_raw)]
    pub fn set_pubkey(user: &mut RhaiUser, pubkey: String) -> Result<RhaiUser, Box<EvalAltResult>> {
        let owned = std::mem::take(user);
        *user = owned.pubkey(pubkey);
        Ok(user.clone())
    }

    /// Set the user status from its string name; errors on unknown values.
    #[rhai_fn(name = "status", return_raw)]
    pub fn set_status(user: &mut RhaiUser, status: String) -> Result<RhaiUser, Box<EvalAltResult>> {
        let status_enum = match status.as_str() {
            "Active" => UserStatus::Active,
            "Inactive" => UserStatus::Inactive,
            "Suspended" => UserStatus::Suspended,
            "Archived" => UserStatus::Archived,
            _ => return Err(format!("Invalid user status: {}", status).into()),
        };
        let owned = std::mem::take(user);
        *user = owned.status(status_enum);
        Ok(user.clone())
    }

    /// Persist the user.
    #[rhai_fn(name = "save_user", return_raw)]
    pub fn save_user(user: &mut RhaiUser) -> Result<RhaiUser, Box<EvalAltResult>> {
        // This would integrate with the database save functionality
        // For now, just return the user as-is
        Ok(user.clone())
    }

    // --- Getters ---

    #[rhai_fn(name = "get_id")]
    pub fn get_id(user: &mut RhaiUser) -> i64 {
        user.base_data.id as i64
    }

    #[rhai_fn(name = "get_username")]
    pub fn get_username(user: &mut RhaiUser) -> String {
        user.username.clone()
    }

    /// Return the first email address, or an empty string if none is set.
    #[rhai_fn(name = "get_email")]
    pub fn get_email(user: &mut RhaiUser) -> String {
        user.email.first().cloned().unwrap_or_default()
    }

    #[rhai_fn(name = "get_pubkey")]
    pub fn get_pubkey(user: &mut RhaiUser) -> String {
        user.pubkey.clone()
    }
}
// ============================================================================
// Group Module
// ============================================================================
type RhaiGroup = Group;
#[export_module]
mod rhai_group_module {
use super::RhaiGroup;
#[rhai_fn(name = "new_group", return_raw)]
pub fn new_group() -> Result<RhaiGroup, Box<EvalAltResult>> {
Ok(Group::new(0))
}
#[rhai_fn(name = "name", return_raw)]
pub fn set_name(group: &mut RhaiGroup, name: String) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.name(name);
Ok(group.clone())
}
#[rhai_fn(name = "description", return_raw)]
pub fn set_description(
group: &mut RhaiGroup,
description: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.description(description);
Ok(group.clone())
}
#[rhai_fn(name = "visibility", return_raw)]
pub fn set_visibility(
group: &mut RhaiGroup,
visibility: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let visibility_enum = match visibility.as_str() {
"Public" => Visibility::Public,
"Private" => Visibility::Private,
_ => return Err(format!("Invalid visibility: {}", visibility).into()),
};
let owned = std::mem::take(group);
*group = owned.visibility(visibility_enum);
Ok(group.clone())
}
#[rhai_fn(name = "save_group", return_raw)]
pub fn save_group(group: &mut RhaiGroup) -> Result<RhaiGroup, Box<EvalAltResult>> {
Ok(group.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(group: &mut RhaiGroup) -> i64 {
group.base_data.id as i64
}
#[rhai_fn(name = "get_name")]
pub fn get_name(group: &mut RhaiGroup) -> String {
group.name.clone()
}
#[rhai_fn(name = "get_description")]
pub fn get_description(group: &mut RhaiGroup) -> String {
group.description.clone()
}
}
// ============================================================================
// Account Module (from money.rs)
// ============================================================================
type RhaiAccount = Account;
#[export_module]
mod rhai_account_module {
use super::RhaiAccount;
#[rhai_fn(name = "new_account", return_raw)]
pub fn new_account() -> Result<RhaiAccount, Box<EvalAltResult>> {
Ok(Account::new(0))
}
#[rhai_fn(name = "owner_id", return_raw)]
pub fn set_owner_id(
account: &mut RhaiAccount,
owner_id: i64,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.owner_id(owner_id as u32);
Ok(account.clone())
}
#[rhai_fn(name = "address", return_raw)]
pub fn set_address(
account: &mut RhaiAccount,
address: String,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.address(address);
Ok(account.clone())
}
#[rhai_fn(name = "currency", return_raw)]
pub fn set_currency(
account: &mut RhaiAccount,
currency: String,
) -> Result<RhaiAccount, Box<EvalAltResult>> {
let owned = std::mem::take(account);
*account = owned.currency(currency);
Ok(account.clone())
}
#[rhai_fn(name = "save_account", return_raw)]
pub fn save_account(account: &mut RhaiAccount) -> Result<RhaiAccount, Box<EvalAltResult>> {
Ok(account.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(account: &mut RhaiAccount) -> i64 {
account.base_data.id as i64
}
#[rhai_fn(name = "get_address")]
pub fn get_address(account: &mut RhaiAccount) -> String {
account.address.clone()
}
#[rhai_fn(name = "get_currency")]
pub fn get_currency(account: &mut RhaiAccount) -> String {
account.currency.clone()
}
}
// ============================================================================
// DNS Zone Module
// ============================================================================
type RhaiDNSZone = DNSZone;
#[export_module]
mod rhai_dns_zone_module {
use super::RhaiDNSZone;
#[rhai_fn(name = "new_dns_zone", return_raw)]
pub fn new_dns_zone() -> Result<RhaiDNSZone, Box<EvalAltResult>> {
Ok(DNSZone::new(0))
}
#[rhai_fn(name = "domain", return_raw)]
pub fn set_domain(
zone: &mut RhaiDNSZone,
domain: String,
) -> Result<RhaiDNSZone, Box<EvalAltResult>> {
let owned = std::mem::take(zone);
*zone = owned.domain(domain);
Ok(zone.clone())
}
#[rhai_fn(name = "save_dns_zone", return_raw)]
pub fn save_dns_zone(zone: &mut RhaiDNSZone) -> Result<RhaiDNSZone, Box<EvalAltResult>> {
Ok(zone.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(zone: &mut RhaiDNSZone) -> i64 {
zone.base_data.id as i64
}
#[rhai_fn(name = "get_domain")]
pub fn get_domain(zone: &mut RhaiDNSZone) -> String {
zone.domain.clone()
}
}
// ============================================================================
// Registration Functions
// ============================================================================
// Registration functions
/// Register the `user` static module (User builder/getter functions) with
/// the given engine.
pub fn register_user_functions(engine: &mut Engine) {
    let module = exported_module!(rhai_user_module);
    engine.register_static_module("user", module.into());
}
/// Register the `group` static module (Group builder/getter functions) with
/// the given engine.
pub fn register_group_functions(engine: &mut Engine) {
    let module = exported_module!(rhai_group_module);
    engine.register_static_module("group", module.into());
}
/// Register the `account` static module (Account builder/getter functions)
/// with the given engine.
pub fn register_account_functions(engine: &mut Engine) {
    let module = exported_module!(rhai_account_module);
    engine.register_static_module("account", module.into());
}
/// Register the `dnszone` static module (DNSZone builder/getter functions)
/// with the given engine.
pub fn register_dnszone_functions(engine: &mut Engine) {
    let module = exported_module!(rhai_dns_zone_module);
    engine.register_static_module("dnszone", module.into());
}
/// Register all heroledger Rhai modules with the engine.
///
/// Convenience wrapper that installs the `user`, `group`, `account`, and
/// `dnszone` static modules in one call.
pub fn register_heroledger_rhai_modules(engine: &mut Engine) {
    register_user_functions(engine);
    register_group_functions(engine);
    register_account_functions(engine);
    register_dnszone_functions(engine);
}

View File

@@ -0,0 +1,140 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
/// Category of the secret box (currently only `Profile` exists).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum SecretBoxCategory {
    Profile,
}
impl Default for SecretBoxCategory {
    /// `Profile` is the only — and therefore default — category.
    fn default() -> Self {
        SecretBoxCategory::Profile
    }
}
/// Status of a notary (see `Notary::status`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum NotaryStatus {
    Active,
    Inactive,
    Suspended,
    Archived,
    Error,
}
impl Default for NotaryStatus {
    /// Notaries start out `Active`.
    fn default() -> Self {
        NotaryStatus::Active
    }
}
/// Represents an encrypted secret box for storing sensitive data.
///
/// `value` holds the stored payload and `notary_id` identifies the notary
/// associated with it (presumably the one able to decrypt — confirm).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SecretBox {
    pub notary_id: u32,
    pub value: String,
    pub version: u16,
    pub timestamp: u64,
    pub cat: SecretBoxCategory,
}

impl SecretBox {
    /// Create an empty secret box at schema version 1 with the default
    /// (`Profile`) category.
    pub fn new() -> Self {
        Self {
            notary_id: 0,
            value: String::new(),
            version: 1,
            timestamp: 0,
            cat: SecretBoxCategory::default(),
        }
    }

    /// Set the notary ID (fluent).
    pub fn notary_id(self, notary_id: u32) -> Self {
        Self { notary_id, ..self }
    }

    /// Set the stored value (fluent).
    pub fn value(self, value: impl ToString) -> Self {
        Self { value: value.to_string(), ..self }
    }

    /// Set the version (fluent).
    pub fn version(self, version: u16) -> Self {
        Self { version, ..self }
    }

    /// Set the timestamp (fluent).
    pub fn timestamp(self, timestamp: u64) -> Self {
        Self { timestamp, ..self }
    }

    /// Set the category (fluent).
    pub fn cat(self, cat: SecretBoxCategory) -> Self {
        Self { cat, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents a notary who can decrypt secret boxes.
///
/// Indexed by both the backing `userid` and the notary's `pubkey`.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Notary {
    /// Base model data
    pub base_data: BaseModelData,
    #[index]
    pub userid: u32,
    pub status: NotaryStatus,
    pub myceliumaddress: String,
    #[index]
    pub pubkey: String,
}

impl Notary {
    /// Create a notary with the given ID, default (`Active`) status, and
    /// blank identity fields.
    pub fn new(id: u32) -> Self {
        let mut notary = Self {
            base_data: BaseModelData::new(),
            userid: 0,
            status: NotaryStatus::default(),
            myceliumaddress: String::new(),
            pubkey: String::new(),
        };
        notary.base_data.update_id(id);
        notary
    }

    /// Set the backing user ID (fluent).
    pub fn userid(self, userid: u32) -> Self {
        Self { userid, ..self }
    }

    /// Set the notary status (fluent).
    pub fn status(self, status: NotaryStatus) -> Self {
        Self { status, ..self }
    }

    /// Set the mycelium address (fluent).
    pub fn myceliumaddress(self, myceliumaddress: impl ToString) -> Self {
        Self { myceliumaddress: myceliumaddress.to_string(), ..self }
    }

    /// Set the public key (fluent).
    pub fn pubkey(self, pubkey: impl ToString) -> Self {
        Self { pubkey: pubkey.to_string(), ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}

View File

@@ -0,0 +1,118 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
/// Status of a signature (see `Signature::status`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum SignatureStatus {
    Active,
    Inactive,
    Pending,
    Revoked,
}
impl Default for SignatureStatus {
    /// Signatures start out `Pending`.
    fn default() -> Self {
        SignatureStatus::Pending
    }
}
/// Type of object being signed (see `Signature::objecttype`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ObjectType {
    Account,
    DNSRecord,
    Membership,
    User,
    Transaction,
    KYC,
}
impl Default for ObjectType {
    /// `User` is the default object type.
    fn default() -> Self {
        ObjectType::User
    }
}
/// Represents a cryptographic signature for various objects.
///
/// Ties a signing user (`user_id`) to a target object (`objectid` +
/// `objecttype`); `signature_id`, `user_id`, and `objectid` are indexed
/// for lookup.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Signature {
    /// Base model data
    pub base_data: BaseModelData,
    #[index]
    pub signature_id: u32,
    #[index]
    pub user_id: u32,
    pub value: String,
    #[index]
    pub objectid: u32,
    pub objecttype: ObjectType,
    pub status: SignatureStatus,
    pub timestamp: u64,
}

impl Signature {
    /// Create a signature with the given ID; fields start zeroed/blank with
    /// default `objecttype` (`User`) and `status` (`Pending`).
    pub fn new(id: u32) -> Self {
        let mut sig = Self {
            base_data: BaseModelData::new(),
            signature_id: 0,
            user_id: 0,
            value: String::new(),
            objectid: 0,
            objecttype: ObjectType::default(),
            status: SignatureStatus::default(),
            timestamp: 0,
        };
        sig.base_data.update_id(id);
        sig
    }

    /// Set the signature ID (fluent).
    pub fn signature_id(self, signature_id: u32) -> Self {
        Self { signature_id, ..self }
    }

    /// Set the signing user's ID (fluent).
    pub fn user_id(self, user_id: u32) -> Self {
        Self { user_id, ..self }
    }

    /// Set the signature value (fluent).
    pub fn value(self, value: impl ToString) -> Self {
        Self { value: value.to_string(), ..self }
    }

    /// Set the signed object's ID (fluent).
    pub fn objectid(self, objectid: u32) -> Self {
        Self { objectid, ..self }
    }

    /// Set the signed object's type (fluent).
    pub fn objecttype(self, objecttype: ObjectType) -> Self {
        Self { objecttype, ..self }
    }

    /// Set the signature status (fluent).
    pub fn status(self, status: SignatureStatus) -> Self {
        Self { status, ..self }
    }

    /// Set the timestamp (fluent).
    pub fn timestamp(self, timestamp: u64) -> Self {
        Self { timestamp, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}

View File

@@ -0,0 +1,368 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Represents the status of a user in the system (see `User::status`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum UserStatus {
    Active,
    Inactive,
    Suspended,
    Archived,
}
impl Default for UserStatus {
    /// Users start out `Active`.
    fn default() -> Self {
        UserStatus::Active
    }
}
/// Represents the KYC status of a user (see `KYCInfo::kyc_status`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum KYCStatus {
    Pending,
    Approved,
    Rejected,
}
impl Default for KYCStatus {
    /// KYC starts out `Pending` until verified or rejected.
    fn default() -> Self {
        KYCStatus::Pending
    }
}
/// User profile information: display data plus free-form `links` and
/// `metadata` maps.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct UserProfile {
    pub user_id: u32,
    pub full_name: String,
    pub bio: String,
    pub profile_pic: String,
    pub links: HashMap<String, String>,
    pub metadata: HashMap<String, String>,
}

impl UserProfile {
    /// Create an empty profile (all fields zeroed/blank).
    pub fn new() -> Self {
        Self {
            user_id: 0,
            full_name: String::new(),
            bio: String::new(),
            profile_pic: String::new(),
            links: HashMap::new(),
            metadata: HashMap::new(),
        }
    }

    /// Set the owning user's ID (fluent).
    pub fn user_id(self, user_id: u32) -> Self {
        Self { user_id, ..self }
    }

    /// Set the full name (fluent).
    pub fn full_name(self, full_name: impl ToString) -> Self {
        Self { full_name: full_name.to_string(), ..self }
    }

    /// Set the biography text (fluent).
    pub fn bio(self, bio: impl ToString) -> Self {
        Self { bio: bio.to_string(), ..self }
    }

    /// Set the profile picture reference (fluent).
    pub fn profile_pic(self, profile_pic: impl ToString) -> Self {
        Self { profile_pic: profile_pic.to_string(), ..self }
    }

    /// Insert one link key/value pair (fluent); an existing key is
    /// overwritten.
    pub fn add_link(self, key: impl ToString, value: impl ToString) -> Self {
        let mut profile = self;
        profile.links.insert(key.to_string(), value.to_string());
        profile
    }

    /// Replace the whole link map (fluent).
    pub fn links(self, links: HashMap<String, String>) -> Self {
        Self { links, ..self }
    }

    /// Insert one metadata key/value pair (fluent); an existing key is
    /// overwritten.
    pub fn add_metadata(self, key: impl ToString, value: impl ToString) -> Self {
        let mut profile = self;
        profile.metadata.insert(key.to_string(), value.to_string());
        profile
    }

    /// Replace the whole metadata map (fluent).
    pub fn metadata(self, metadata: HashMap<String, String>) -> Self {
        Self { metadata, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// KYC (Know Your Customer) information for a user: identity details,
/// verification state, and free-form `metadata`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct KYCInfo {
    pub user_id: u32,
    pub full_name: String,
    pub date_of_birth: u64,
    pub address: String,
    pub phone_number: String,
    pub id_number: String,
    pub id_type: String,
    pub id_expiry: u64,
    pub kyc_status: KYCStatus,
    pub kyc_verified: bool,
    pub kyc_verified_by: u32,
    pub kyc_verified_at: u64,
    pub kyc_rejected_reason: String,
    pub kyc_signature: u32,
    pub metadata: HashMap<String, String>,
}

impl KYCInfo {
    /// Create an empty KYC record (all fields zeroed/blank, status
    /// `Pending`).
    pub fn new() -> Self {
        Self {
            user_id: 0,
            full_name: String::new(),
            date_of_birth: 0,
            address: String::new(),
            phone_number: String::new(),
            id_number: String::new(),
            id_type: String::new(),
            id_expiry: 0,
            kyc_status: KYCStatus::default(),
            kyc_verified: false,
            kyc_verified_by: 0,
            kyc_verified_at: 0,
            kyc_rejected_reason: String::new(),
            kyc_signature: 0,
            metadata: HashMap::new(),
        }
    }

    /// Set the subject user's ID (fluent).
    pub fn user_id(self, user_id: u32) -> Self {
        Self { user_id, ..self }
    }

    /// Set the full legal name (fluent).
    pub fn full_name(self, full_name: impl ToString) -> Self {
        Self { full_name: full_name.to_string(), ..self }
    }

    /// Set the date of birth (fluent).
    pub fn date_of_birth(self, date_of_birth: u64) -> Self {
        Self { date_of_birth, ..self }
    }

    /// Set the postal address (fluent).
    pub fn address(self, address: impl ToString) -> Self {
        Self { address: address.to_string(), ..self }
    }

    /// Set the phone number (fluent).
    pub fn phone_number(self, phone_number: impl ToString) -> Self {
        Self { phone_number: phone_number.to_string(), ..self }
    }

    /// Set the identity document number (fluent).
    pub fn id_number(self, id_number: impl ToString) -> Self {
        Self { id_number: id_number.to_string(), ..self }
    }

    /// Set the identity document type (fluent).
    pub fn id_type(self, id_type: impl ToString) -> Self {
        Self { id_type: id_type.to_string(), ..self }
    }

    /// Set the identity document expiry (fluent).
    pub fn id_expiry(self, id_expiry: u64) -> Self {
        Self { id_expiry, ..self }
    }

    /// Set the KYC status (fluent).
    pub fn kyc_status(self, kyc_status: KYCStatus) -> Self {
        Self { kyc_status, ..self }
    }

    /// Set the verified flag (fluent).
    pub fn kyc_verified(self, kyc_verified: bool) -> Self {
        Self { kyc_verified, ..self }
    }

    /// Record who performed the verification (fluent).
    pub fn kyc_verified_by(self, kyc_verified_by: u32) -> Self {
        Self { kyc_verified_by, ..self }
    }

    /// Record when verification happened (fluent).
    pub fn kyc_verified_at(self, kyc_verified_at: u64) -> Self {
        Self { kyc_verified_at, ..self }
    }

    /// Set the rejection reason (fluent).
    pub fn kyc_rejected_reason(self, kyc_rejected_reason: impl ToString) -> Self {
        Self { kyc_rejected_reason: kyc_rejected_reason.to_string(), ..self }
    }

    /// Set the associated signature ID (fluent).
    pub fn kyc_signature(self, kyc_signature: u32) -> Self {
        Self { kyc_signature, ..self }
    }

    /// Insert one metadata key/value pair (fluent); an existing key is
    /// overwritten.
    pub fn add_metadata(self, key: impl ToString, value: impl ToString) -> Self {
        let mut info = self;
        info.metadata.insert(key.to_string(), value.to_string());
        info
    }

    /// Replace the whole metadata map (fluent).
    pub fn metadata(self, metadata: HashMap<String, String>) -> Self {
        Self { metadata, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents a secret box for storing encrypted data as raw bytes
/// (`data`) plus a `nonce`.
///
/// NOTE(review): distinct from the string-based `SecretBox` in
/// `secretbox.rs` — confirm which one callers intend.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SecretBox {
    pub data: Vec<u8>,
    pub nonce: Vec<u8>,
}

impl SecretBox {
    /// Create an empty secret box.
    pub fn new() -> Self {
        Self {
            data: Vec::new(),
            nonce: Vec::new(),
        }
    }

    /// Set the payload bytes (fluent).
    pub fn data(self, data: Vec<u8>) -> Self {
        Self { data, ..self }
    }

    /// Set the nonce bytes (fluent).
    pub fn nonce(self, nonce: Vec<u8>) -> Self {
        Self { nonce, ..self }
    }

    /// Terminate the fluent chain; returns `self` unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents a user in the heroledger system.
///
/// `username` and `pubkey` are indexed for lookup. Profile and KYC data
/// are stored as `SecretBox` payloads (presumably encrypted — see
/// `SecretBox`) rather than in the clear.
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct User {
    /// Base model data
    pub base_data: BaseModelData,
    #[index]
    pub username: String,
    #[index]
    pub pubkey: String,
    // A user may have multiple email addresses.
    pub email: Vec<String>,
    pub status: UserStatus,
    pub userprofile: Vec<SecretBox>,
    pub kyc: Vec<SecretBox>,
}
impl Default for User {
fn default() -> Self {
Self {
base_data: BaseModelData::new(),
username: String::new(),
pubkey: String::new(),
email: Vec::new(),
status: UserStatus::default(),
userprofile: Vec::new(),
kyc: Vec::new(),
}
}
}
impl User {
    /// Create a new user with the given id; every other field starts empty.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            ..Self::default()
        }
    }

    /// Get the user ID
    pub fn id(&self) -> u32 {
        self.base_data.id
    }

    /// Set the username (fluent)
    pub fn username(self, username: impl ToString) -> Self {
        Self {
            username: username.to_string(),
            ..self
        }
    }

    /// Set the public key (fluent)
    pub fn pubkey(self, pubkey: impl ToString) -> Self {
        Self {
            pubkey: pubkey.to_string(),
            ..self
        }
    }

    /// Add an email address (fluent)
    pub fn add_email(mut self, email: impl ToString) -> Self {
        self.email.push(email.to_string());
        self
    }

    /// Set all email addresses (fluent)
    pub fn email(self, email: Vec<String>) -> Self {
        Self { email, ..self }
    }

    /// Set the user status (fluent)
    pub fn status(self, status: UserStatus) -> Self {
        Self { status, ..self }
    }

    /// Add a user profile secret box (fluent)
    pub fn add_userprofile(mut self, profile: SecretBox) -> Self {
        self.userprofile.push(profile);
        self
    }

    /// Set all user profile secret boxes (fluent)
    pub fn userprofile(self, userprofile: Vec<SecretBox>) -> Self {
        Self { userprofile, ..self }
    }

    /// Add a KYC secret box (fluent)
    pub fn add_kyc(mut self, kyc: SecretBox) -> Self {
        self.kyc.push(kyc);
        self
    }

    /// Set all KYC secret boxes (fluent)
    pub fn kyc(self, kyc: Vec<SecretBox>) -> Self {
        Self { kyc, ..self }
    }

    /// Finalize the builder; the user is returned unchanged.
    pub fn build(self) -> Self {
        self
    }
}

View File

@@ -0,0 +1,116 @@
use super::secretbox::SecretBox;
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
/// Represents a per-user key-value store
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct UserKVS {
    /// Base model data
    pub base_data: BaseModelData,
    /// Id of the user who owns this store; indexed for per-user lookup
    #[index]
    pub userid: u32,
    /// Human-readable name of the store
    pub name: String,
}
impl UserKVS {
    /// Create a new user KVS with the given id; other fields start empty/zero.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            ..Self::default()
        }
    }

    /// Set the owning user's id (fluent)
    pub fn userid(self, userid: u32) -> Self {
        Self { userid, ..self }
    }

    /// Set the store's name (fluent)
    pub fn name(self, name: impl ToString) -> Self {
        Self {
            name: name.to_string(),
            ..self
        }
    }

    /// Finalize the builder; returns the instance unchanged.
    pub fn build(self) -> Self {
        self
    }
}
/// Represents an item in a user's key-value store
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct UserKVSItem {
    /// Base model data
    pub base_data: BaseModelData,
    /// Id of the UserKVS this item belongs to; indexed for per-store lookup
    #[index]
    pub userkvs_id: u32,
    /// Lookup key within the store
    pub key: String,
    /// Stored value
    pub value: String,
    /// Encrypted payloads attached to this entry
    pub secretbox: Vec<SecretBox>,
    /// Entry timestamp — unit not specified here; presumably epoch seconds (TODO confirm with callers)
    pub timestamp: u64,
}
impl UserKVSItem {
    /// Create a new KVS item with the given id; all other fields start empty/zero.
    pub fn new(id: u32) -> Self {
        let mut base_data = BaseModelData::new();
        base_data.update_id(id);
        Self {
            base_data,
            ..Self::default()
        }
    }

    /// Set the id of the owning UserKVS (fluent)
    pub fn userkvs_id(self, userkvs_id: u32) -> Self {
        Self { userkvs_id, ..self }
    }

    /// Set the key (fluent)
    pub fn key(self, key: impl ToString) -> Self {
        Self {
            key: key.to_string(),
            ..self
        }
    }

    /// Set the value (fluent)
    pub fn value(self, value: impl ToString) -> Self {
        Self {
            value: value.to_string(),
            ..self
        }
    }

    /// Append one secret box (fluent)
    pub fn add_secretbox(mut self, secretbox: SecretBox) -> Self {
        self.secretbox.push(secretbox);
        self
    }

    /// Replace all secret boxes (fluent)
    pub fn secretbox(self, secretbox: Vec<SecretBox>) -> Self {
        Self { secretbox, ..self }
    }

    /// Set the timestamp (fluent)
    pub fn timestamp(self, timestamp: u64) -> Self {
        Self { timestamp, ..self }
    }

    /// Finalize the builder; returns the item unchanged.
    pub fn build(self) -> Self {
        self
    }
}

View File

@@ -46,4 +46,4 @@ pub struct IdenfyVerificationData {
pub doc_issuing_country: Option<String>,
#[serde(rename = "manuallyDataChanged")]
pub manually_data_changed: Option<bool>,
}
}

View File

@@ -2,4 +2,4 @@
pub mod kyc;
pub use kyc::*;
pub use kyc::*;

View File

@@ -0,0 +1,156 @@
use derive::FromVec;
use heromodels::db::Db;
use macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn, register_authorized_list_fn,
};
use rhai::plugin::*;
use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module, Position, TypeBuilder};
use serde::Serialize;
use serde_json;
use std::mem;
use std::sync::Arc;
use heromodels::db::hero::OurDB;
use heromodels::db::Collection as DbCollectionTrait;
use heromodels::models::library::collection::Collection as RhaiCollection;
use heromodels::models::library::items::{
Book as RhaiBook, Image as RhaiImage, Markdown as RhaiMarkdown, Pdf as RhaiPdf,
Slide as RhaiSlide, Slideshow as RhaiSlideshow, TocEntry as RhaiTocEntry,
};
/// Registers a `.json()` method for any type `T` that implements the required traits.
///
/// The registered function pretty-prints the value as JSON, turning any
/// serialization failure into a Rhai error string.
fn register_json_method<T>(engine: &mut Engine)
where
    T: CustomType + Clone + Serialize,
{
    engine.register_fn("json", |obj: &mut T| -> Result<String, Box<EvalAltResult>> {
        serde_json::to_string_pretty(obj)
            .map_err(|e| format!("Failed to serialize to JSON: {}", e).into())
    });
}
// Wrapper types for arrays
//
// Each newtype wraps a Vec of a model type so it can be registered with
// Rhai under its own type name (set via `#[rhai_type(name = ...)]`).
// `FromVec` is a project derive — presumably it generates the
// Vec -> wrapper conversion; confirm in the `derive` crate.
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "CollectionArray")]
pub struct RhaiCollectionArray(pub Vec<RhaiCollection>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "ImageArray")]
pub struct RhaiImageArray(pub Vec<RhaiImage>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "PdfArray")]
pub struct RhaiPdfArray(pub Vec<RhaiPdf>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "MarkdownArray")]
pub struct RhaiMarkdownArray(pub Vec<RhaiMarkdown>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "BookArray")]
pub struct RhaiBookArray(pub Vec<RhaiBook>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "SlideshowArray")]
pub struct RhaiSlideshowArray(pub Vec<RhaiSlideshow>);
#[derive(Debug, Clone, Serialize, CustomType, FromVec)]
#[rhai_type(name = "TocEntryArray")]
pub struct RhaiTocEntryArray(pub Vec<RhaiTocEntry>);
#[export_module]
mod rhai_library_module {
use super::*;
// --- Collection Functions ---
#[rhai_fn(name = "new_collection", return_raw)]
pub fn new_collection() -> Result<RhaiCollection, Box<EvalAltResult>> {
Ok(RhaiCollection::new())
}
#[rhai_fn(name = "collection_title", return_raw)]
pub fn collection_title(
collection: &mut RhaiCollection,
title: String,
) -> Result<RhaiCollection, Box<EvalAltResult>> {
let owned = std::mem::take(collection);
*collection = owned.title(title);
Ok(collection.clone())
}
#[rhai_fn(name = "collection_description", return_raw)]
pub fn collection_description(
collection: &mut RhaiCollection,
description: String,
) -> Result<RhaiCollection, Box<EvalAltResult>> {
let owned = std::mem::take(collection);
*collection = owned.description(description);
Ok(collection.clone())
}
#[rhai_fn(name = "get_collection_id")]
pub fn get_collection_id(collection: &mut RhaiCollection) -> i64 {
collection.id() as i64
}
#[rhai_fn(name = "get_collection_title")]
pub fn get_collection_title(collection: &mut RhaiCollection) -> String {
collection.title().clone()
}
// --- Image Functions ---
#[rhai_fn(name = "new_image", return_raw)]
pub fn new_image() -> Result<RhaiImage, Box<EvalAltResult>> {
Ok(RhaiImage::new())
}
#[rhai_fn(name = "image_title", return_raw)]
pub fn image_title(
image: &mut RhaiImage,
title: String,
) -> Result<RhaiImage, Box<EvalAltResult>> {
let owned = std::mem::take(image);
*image = owned.title(title);
Ok(image.clone())
}
#[rhai_fn(name = "get_image_id")]
pub fn get_image_id(image: &mut RhaiImage) -> i64 {
image.id() as i64
}
// Additional functions would continue here...
}
/// Wires the library DSL into a Rhai engine: the exported module's
/// functions, a `.json()` pretty-printer for every library type, and the
/// authorized save/get wrappers for collections.
pub fn register_library_rhai_module(engine: &mut Engine) {
    // Functions defined in `rhai_library_module` above.
    let mut module = exported_module!(rhai_library_module);
    // `.json()` helper for each registered type (see register_json_method).
    register_json_method::<RhaiCollection>(engine);
    register_json_method::<RhaiImage>(engine);
    register_json_method::<RhaiPdf>(engine);
    register_json_method::<RhaiMarkdown>(engine);
    register_json_method::<RhaiBook>(engine);
    register_json_method::<RhaiSlideshow>(engine);
    register_json_method::<RhaiTocEntry>(engine);
    register_json_method::<RhaiCollectionArray>(engine);
    // Authorized CRUD wrappers — the macros come from the `macros` crate and
    // presumably enforce caller authorization before touching the DB; confirm there.
    register_authorized_create_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "save_collection",
        resource_type_str: "Collection",
        rhai_return_rust_type: heromodels::models::library::collection::Collection
    );
    register_authorized_get_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "get_collection",
        resource_type_str: "Collection",
        rhai_return_rust_type: heromodels::models::library::collection::Collection
    );
    engine.register_global_module(module.into());
}

View File

@@ -0,0 +1,11 @@
use serde::{Deserialize, Serialize};
/// A postal address.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Address {
    /// Street name and number
    pub street: String,
    /// City or locality
    pub city: String,
    /// State/province/region; optional because not all countries use one
    pub state: Option<String>,
    /// Postal or ZIP code
    pub postal_code: String,
    /// Country — format not enforced here (name vs ISO code); confirm with callers
    pub country: String,
    /// Optional company name for business addresses
    pub company: Option<String>,
}

View File

@@ -0,0 +1,2 @@
// Export location models
pub mod address;

View File

@@ -10,12 +10,16 @@ pub mod contact;
pub mod finance;
pub mod flow;
pub mod governance;
pub mod grid4;
pub mod heroledger;
pub mod identity;
pub mod legal;
pub mod library;
pub mod location;
pub mod object;
pub mod projects;
pub mod payment;
pub mod identity;
pub mod projects;
// pub mod tfmarketplace;
// Re-export key types for convenience
pub use core::Comment;
@@ -35,3 +39,4 @@ pub use legal::{Contract, ContractRevision, ContractSigner, ContractStatus, Sign
pub use library::collection::Collection;
pub use library::items::{Image, Markdown, Pdf};
pub use projects::{Project, Status};
pub use heroledger::*;

View File

@@ -1,5 +1,6 @@
// Export contact module
// Export object module
pub mod object;
pub mod object_rhai_dsl;
// Re-export contact, Group from the inner contact module (contact.rs) within src/models/contact/mod.rs
// Re-export Object from the inner object module (object.rs) within src/models/object/mod.rs
pub use self::object::Object;

View File

@@ -0,0 +1,50 @@
use super::Object;
use rhai::plugin::*;
use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module};
type RhaiObject = Object;
#[export_module]
pub mod generated_rhai_module {
    use super::*;

    /// Create a new, empty Object.
    #[rhai_fn(name = "new_object")]
    pub fn new_object() -> RhaiObject {
        Object::new()
    }

    /// Return a copy of the object with its title replaced.
    #[rhai_fn(name = "object_title")]
    pub fn object_title(object: &mut RhaiObject, title: String) -> RhaiObject {
        let mut updated = object.clone();
        updated.title = title;
        updated
    }

    /// Return a copy of the object with its description replaced.
    #[rhai_fn(name = "object_description")]
    pub fn object_description(object: &mut RhaiObject, description: String) -> RhaiObject {
        let mut updated = object.clone();
        updated.description = description;
        updated
    }

    /// Read the object's id (widened to i64 for Rhai).
    #[rhai_fn(name = "get_object_id")]
    pub fn get_object_id(object: &mut RhaiObject) -> i64 {
        object.id() as i64
    }

    /// Read the object's title.
    #[rhai_fn(name = "get_object_title")]
    pub fn get_object_title(object: &mut RhaiObject) -> String {
        object.title.clone()
    }

    /// Read the object's description.
    #[rhai_fn(name = "get_object_description")]
    pub fn get_object_description(object: &mut RhaiObject) -> String {
        object.description.clone()
    }
}

View File

@@ -0,0 +1,27 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::object::Object;
use macros::{register_authorized_create_by_id_fn, register_authorized_get_by_id_fn};
use rhai::{exported_module, Engine, EvalAltResult, FuncRegistration, Module};
use std::sync::Arc;
/// Registers the Object CRUD functions on a Rhai engine.
///
/// The `register_authorized_*` macros come from the `macros` crate and
/// presumably enforce caller authorization before touching the DB —
/// confirm their expansion there.
pub fn register_object_fns(engine: &mut Engine) {
    let mut module = Module::new();
    // Read: `get_object_by_id(id)`
    register_authorized_get_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "get_object_by_id",
        resource_type_str: "Object",
        rhai_return_rust_type: heromodels::models::object::Object
    );
    // Write: `save_object(obj)`
    register_authorized_create_by_id_fn!(
        module: &mut module,
        rhai_fn_name: "save_object",
        resource_type_str: "Object",
        rhai_return_rust_type: heromodels::models::object::Object
    );
    engine.register_global_module(module.into());
    engine.register_type_with_name::<Object>("Object");
}

View File

@@ -2,4 +2,4 @@
pub mod stripe;
pub use stripe::*;
pub use stripe::*;

View File

@@ -0,0 +1,49 @@
use rhai::plugin::*;
use rhai::{Dynamic, Engine, EvalAltResult, Module};
// Simplified payment module - contains the core Stripe integration
// This is a condensed version of the original payment.rs DSL file
#[export_module]
mod rhai_payment_module {
    // Payment configuration and basic functions

    /// Report that Stripe has been configured, echoing only a short key
    /// prefix so the full secret never reaches logs.
    ///
    /// The prefix is taken by characters rather than by byte slicing:
    /// `&api_key[..8]` would panic for keys shorter than 8 bytes, or when
    /// byte index 8 is not a UTF-8 character boundary.
    #[rhai_fn(name = "configure_stripe", return_raw)]
    pub fn configure_stripe(api_key: String) -> Result<String, Box<EvalAltResult>> {
        let prefix: String = api_key.chars().take(8).collect();
        Ok(format!("Stripe configured with key: {}...", prefix))
    }

    // Product functions
    /// Stub: pretend to create a product.
    #[rhai_fn(name = "new_product", return_raw)]
    pub fn new_product() -> Result<Dynamic, Box<EvalAltResult>> {
        Ok(Dynamic::from("product_created"))
    }

    // Price functions
    /// Stub: pretend to create a price.
    #[rhai_fn(name = "new_price", return_raw)]
    pub fn new_price() -> Result<Dynamic, Box<EvalAltResult>> {
        Ok(Dynamic::from("price_created"))
    }

    // Subscription functions
    /// Stub: pretend to create a subscription.
    #[rhai_fn(name = "new_subscription", return_raw)]
    pub fn new_subscription() -> Result<Dynamic, Box<EvalAltResult>> {
        Ok(Dynamic::from("subscription_created"))
    }

    // Payment intent functions
    /// Stub: pretend to create a payment intent.
    #[rhai_fn(name = "new_payment_intent", return_raw)]
    pub fn new_payment_intent() -> Result<Dynamic, Box<EvalAltResult>> {
        Ok(Dynamic::from("payment_intent_created"))
    }

    // Coupon functions
    /// Stub: pretend to create a coupon.
    #[rhai_fn(name = "new_coupon", return_raw)]
    pub fn new_coupon() -> Result<Dynamic, Box<EvalAltResult>> {
        Ok(Dynamic::from("coupon_created"))
    }
}
/// Registers the simplified payment DSL functions on the given engine.
pub fn register_payment_rhai_module(engine: &mut Engine) {
    engine.register_global_module(exported_module!(rhai_payment_module).into());
}

View File

@@ -27,4 +27,4 @@ pub struct StripeEventData {
pub struct StripeEventRequest {
pub id: Option<String>,
pub idempotency_key: Option<String>,
}
}

43
heromodels/test.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Runs the heromodels test suite: a fast OurDB test first, then the
# Postgres-backed tests (which are #[ignore]d and need `-- --ignored`).
set -euo pipefail
# Config matches examples/tests
PGHOST=${PGHOST:-localhost}
PGPORT=${PGPORT:-5432}
PGUSER=${PGUSER:-postgres}
PGPASSWORD=${PGPASSWORD:-test123}
export PGPASSWORD
echo "[test.sh] Checking Postgres at ${PGHOST}:${PGPORT} (user=${PGUSER})..."
# Require pg_isready
if ! command -v pg_isready >/dev/null 2>&1; then
    echo "[test.sh] ERROR: pg_isready not found. Install PostgreSQL client tools (e.g., brew install libpq && brew link --force libpq)." >&2
    exit 1
fi
# Wait for Postgres to be ready (30s timeout)
ATTEMPTS=30
until pg_isready -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" >/dev/null 2>&1; do
    # ((ATTEMPTS--)) evaluates to the PRE-decrement value, so the arithmetic
    # expression is "false" (non-zero status) exactly when the counter hits 0,
    # which triggers the timeout branch below.
    ((ATTEMPTS--)) || {
        echo "[test.sh] ERROR: Postgres not ready after 30s. Ensure it's running with user=$PGUSER password=$PGPASSWORD host=$PGHOST port=$PGPORT." >&2
        exit 1
    }
    sleep 1
    echo "[test.sh] Waiting for Postgres..."
done
echo "[test.sh] Postgres is ready. Running tests..."
# Run fast OurDB test first (no Postgres dependency)
echo "[test.sh] Running OurDB test: grid4_ourdb"
cargo test -p heromodels --test grid4_ourdb
# Run Postgres-backed tests (marked ignored)
echo "[test.sh] Running Postgres test: heroledger_postgres (ignored)"
cargo test -p heromodels --test heroledger_postgres -- --ignored
echo "[test.sh] Running Postgres test: grid4_postgres (ignored)"
cargo test -p heromodels --test grid4_postgres -- --ignored
echo "[test.sh] Done."

View File

@@ -0,0 +1,117 @@
use serde_json;
use heromodels::models::grid4::{
ComputeSlice, DeviceInfo, Node, NodeCapacity, PricingPolicy, Reservation, ReservationStatus,
SLAPolicy, StorageDevice, StorageSlice,
};
#[test]
/// JSON round-trip for ComputeSlice and StorageSlice built via the fluent API;
/// both must deserialize to values equal to the originals.
fn build_and_serde_roundtrip_compute_storage_slices() {
    // Shared pricing and SLA policies attached to both slice kinds.
    let pricing = PricingPolicy::new()
        .marketplace_year_discounts(vec![20, 30, 40])
        .volume_discounts(vec![5, 10, 15])
        .build();
    let sla = SLAPolicy::new()
        .sla_uptime(99)
        .sla_bandwidth_mbit(1000)
        .sla_penalty(150)
        .build();
    // A fully-populated compute slice.
    let cs = ComputeSlice::new()
        .nodeid(42)
        .slice_id(1)
        .mem_gb(16.0)
        .storage_gb(200.0)
        .passmark(5000)
        .vcores(8)
        .cpu_oversubscription(2)
        .storage_oversubscription(1)
        .price_range(vec![0.5, 2.0])
        .gpus(1)
        .price_cc(1.25)
        .pricing_policy(pricing.clone())
        .sla_policy(sla.clone());
    // A minimal storage slice sharing the same policies.
    let ss = StorageSlice::new()
        .nodeid(42)
        .slice_id(2)
        .price_cc(0.15)
        .pricing_policy(pricing)
        .sla_policy(sla);
    // serde roundtrip compute slice
    let s = serde_json::to_string(&cs).expect("serialize compute slice");
    let cs2: ComputeSlice = serde_json::from_str(&s).expect("deserialize compute slice");
    assert_eq!(cs, cs2);
    // serde roundtrip storage slice
    let s2 = serde_json::to_string(&ss).expect("serialize storage slice");
    let ss2: StorageSlice = serde_json::from_str(&s2).expect("deserialize storage slice");
    assert_eq!(ss, ss2);
}
#[test]
/// JSON round-trip for a Node carrying device info, capacity, and one slice
/// of each kind. Node itself is compared field-by-field (not via PartialEq
/// on the whole struct — presumably base_data differs after rebuild; confirm).
fn build_and_serde_roundtrip_node() {
    // Device inventory with a single storage device; other categories empty.
    let dev = DeviceInfo {
        vendor: "AcmeVendor".into(),
        storage: vec![StorageDevice { id: "sda".into(), size_gb: 512.0, description: "NVMe".into() }],
        memory: vec![],
        cpu: vec![],
        gpu: vec![],
        network: vec![],
    };
    let cap = NodeCapacity { storage_gb: 2048.0, mem_gb: 128.0, mem_gb_gpu: 24.0, passmark: 12000, vcores: 32 };
    let cs = ComputeSlice::new().nodeid(1).slice_id(1).mem_gb(8.0).storage_gb(100.0).passmark(2500).vcores(4);
    let ss = StorageSlice::new().nodeid(1).slice_id(2).price_cc(0.2);
    let node = Node::new()
        .nodegroupid(7)
        .uptime(99)
        .add_compute_slice(cs)
        .add_storage_slice(ss)
        .devices(dev)
        .country("NL")
        .capacity(cap)
        .provisiontime(1710000000)
        .pubkey("node_pubkey")
        .signature_node("sig_node")
        .signature_farmer("sig_farmer");
    let s = serde_json::to_string(&node).expect("serialize node");
    let node2: Node = serde_json::from_str(&s).expect("deserialize node");
    // Field-by-field comparison of everything set above.
    assert_eq!(node.nodegroupid, node2.nodegroupid);
    assert_eq!(node.uptime, node2.uptime);
    assert_eq!(node.country, node2.country);
    assert_eq!(node.pubkey, node2.pubkey);
    assert_eq!(node.signature_node, node2.signature_node);
    assert_eq!(node.signature_farmer, node2.signature_farmer);
    assert_eq!(node.computeslices.len(), node2.computeslices.len());
    assert_eq!(node.storageslices.len(), node2.storageslices.len());
}
#[test]
/// JSON round-trip for a Reservation built via the fluent API; every field
/// that was set must survive serialize/deserialize.
fn build_and_serde_roundtrip_reservation() {
    // Build a reservation with one compute and one storage slice reference.
    let original = Reservation::new()
        .customer_id(1234)
        .add_compute_slice(11)
        .add_storage_slice(22)
        .status(ReservationStatus::Confirmed)
        .obligation(true)
        .start_date(1_710_000_000)
        .end_date(1_720_000_000);

    let json = serde_json::to_string(&original).expect("serialize reservation");
    let decoded: Reservation = serde_json::from_str(&json).expect("deserialize reservation");

    assert_eq!(original.customer_id, decoded.customer_id);
    assert_eq!(original.status, decoded.status);
    assert_eq!(original.obligation, decoded.obligation);
    assert_eq!(original.start_date, decoded.start_date);
    assert_eq!(original.end_date, decoded.end_date);
    assert_eq!(original.compute_slices, decoded.compute_slices);
    assert_eq!(original.storage_slices, decoded.storage_slices);
}

View File

@@ -0,0 +1,82 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey};
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use heromodels_core::Model;
use std::sync::Arc;
/// Create a fresh OurDB rooted in a unique temporary directory.
///
/// Uses `std::env::temp_dir()` instead of a hard-coded "/tmp" so the test
/// also runs on platforms without /tmp (e.g. Windows). The nanosecond
/// timestamp keeps concurrent test runs from colliding; any stale directory
/// with the same name is removed first (best-effort).
fn create_test_db() -> Arc<OurDB> {
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .expect("system clock before UNIX epoch")
        .as_nanos();
    let path = std::env::temp_dir()
        .join(format!("grid4_node_test_{}", ts))
        .to_string_lossy()
        .into_owned();
    let _ = std::fs::remove_dir_all(&path);
    Arc::new(OurDB::new(path, true).expect("create OurDB"))
}
#[test]
/// Full CRUD cycle for Node against OurDB: store, fetch by id, query all
/// three top-level indexes (country, nodegroupid, pubkey), update, delete.
fn grid4_node_basic_roundtrip_and_indexes() {
    let db = create_test_db();
    let nodes = db.collection::<Node>().expect("open node collection");
    // Clean any leftover
    if let Ok(existing) = nodes.get_all() {
        for n in existing {
            let _ = nodes.delete_by_id(n.get_id());
        }
    }
    // Build a node with some compute slices and device info
    let cs = ComputeSlice::new()
        .nodeid(1)
        .slice_id(1)
        .mem_gb(32.0)
        .storage_gb(512.0)
        .passmark(5000)
        .vcores(16)
        .gpus(1)
        .price_cc(0.25);
    let dev = DeviceInfo {
        vendor: "ACME".into(),
        ..Default::default()
    };
    let n = Node::new()
        .nodegroupid(42)
        .uptime(99)
        .add_compute_slice(cs)
        .devices(dev)
        .country("BE")
        .pubkey("PUB_NODE_1")
        .build();
    let (id, stored) = nodes.set(&n).expect("store node");
    assert!(id > 0);
    assert_eq!(stored.country, "BE");
    // get by id
    let fetched = nodes.get_by_id(id).expect("get by id").expect("exists");
    assert_eq!(fetched.pubkey, "PUB_NODE_1");
    // query by top-level indexes
    let by_country = nodes.get::<country, _>("BE").expect("query country");
    assert_eq!(by_country.len(), 1);
    assert_eq!(by_country[0].get_id(), id);
    let by_group = nodes.get::<nodegroupid, _>(&42).expect("query group");
    assert_eq!(by_group.len(), 1);
    let by_pubkey = nodes.get::<pubkey, _>("PUB_NODE_1").expect("query pubkey");
    assert_eq!(by_pubkey.len(), 1);
    // update (re-setting an existing record must keep its id)
    let updated = fetched.clone().country("NL");
    let (_, back) = nodes.set(&updated).expect("update node");
    assert_eq!(back.country, "NL");
    // delete
    nodes.delete_by_id(id).expect("delete");
    assert!(nodes.get_by_id(id).expect("get after delete").is_none());
}

View File

@@ -0,0 +1,125 @@
use heromodels::db::postgres::{Config, Postgres};
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey};
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use heromodels_core::Model;
// Requires local Postgres (user=postgres password=test123 host=localhost port=5432)
// Run with: cargo test -p heromodels --test grid4_postgres -- --ignored
/// Full CRUD cycle for Node against a local Postgres, mirroring the repo's
/// example style: store three nodes, query by each top-level index, update,
/// then delete by id and by index.
///
/// Requires Postgres at localhost:5432 (user=postgres password=test123).
/// Run with: `cargo test -p heromodels --test grid4_postgres -- --ignored`
#[test]
#[ignore]
fn grid4_node_postgres_roundtrip_like_example() {
    let db = Postgres::new(
        Config::new()
            .user(Some("postgres".into()))
            .password(Some("test123".into()))
            .host(Some("localhost".into()))
            .port(Some(5432)),
    )
    .expect("can connect to Postgres");
    let nodes = db.collection::<Node>().expect("open node collection");
    // Clean existing rows so cardinality assertions below are reliable.
    if let Ok(existing) = nodes.get_all() {
        for n in existing {
            let _ = nodes.delete_by_id(n.get_id());
        }
    }
    // Build and store multiple nodes via builder and then persist via collection.set(), like examples
    let cs1 = ComputeSlice::new()
        .nodeid(10)
        .slice_id(1)
        .mem_gb(32.0)
        .storage_gb(512.0)
        .passmark(5000)
        .vcores(16)
        .gpus(1)
        .price_cc(0.25);
    let cs2 = ComputeSlice::new()
        .nodeid(10)
        .slice_id(2)
        .mem_gb(64.0)
        .storage_gb(2048.0)
        .passmark(7000)
        .vcores(24)
        .gpus(2)
        .price_cc(0.50);
    let cs3 = ComputeSlice::new()
        .nodeid(11)
        .slice_id(1)
        .mem_gb(16.0)
        .storage_gb(256.0)
        .passmark(3000)
        .vcores(8)
        .gpus(0)
        .price_cc(0.10);
    let dev = DeviceInfo { vendor: "ACME".into(), ..Default::default() };
    let n1 = Node::new()
        .nodegroupid(99)
        .uptime(97)
        .add_compute_slice(cs1)
        .devices(dev.clone())
        .country("BE")
        .pubkey("PG_NODE_1")
        .build();
    let n2 = Node::new()
        .nodegroupid(99)
        .uptime(96)
        .add_compute_slice(cs2)
        .devices(dev.clone())
        .country("NL")
        .pubkey("PG_NODE_2")
        .build();
    let n3 = Node::new()
        .nodegroupid(7)
        .uptime(95)
        .add_compute_slice(cs3)
        .devices(dev)
        .country("BE")
        .pubkey("PG_NODE_3")
        .build();
    // Only the first stored copy is used later; underscore the others to
    // avoid unused-variable warnings.
    let (id1, s1) = nodes.set(&n1).expect("store n1");
    let (id2, _s2) = nodes.set(&n2).expect("store n2");
    let (id3, _s3) = nodes.set(&n3).expect("store n3");
    assert!(id1 > 0 && id2 > 0 && id3 > 0);
    // Query by top-level indexes similar to the example style
    let be_nodes = nodes.get::<country, _>("BE").expect("by country");
    assert_eq!(be_nodes.len(), 2);
    let grp_99 = nodes.get::<nodegroupid, _>(&99).expect("by group");
    assert_eq!(grp_99.len(), 2);
    let by_key = nodes.get::<pubkey, _>("PG_NODE_2").expect("by pubkey");
    assert_eq!(by_key.len(), 1);
    assert_eq!(by_key[0].get_id(), id2);
    // Update: change country of n1
    let updated = s1.clone().country("DE");
    let (_, back) = nodes.set(&updated).expect("update n1");
    assert_eq!(back.country, "DE");
    // Cardinality after update
    let de_nodes = nodes.get::<country, _>("DE").expect("by country DE");
    assert_eq!(de_nodes.len(), 1);
    // Delete by id and by index
    nodes.delete_by_id(id2).expect("delete n2 by id");
    assert!(nodes.get_by_id(id2).unwrap().is_none());
    nodes.delete::<pubkey, _>("PG_NODE_3").expect("delete n3 by pubkey");
    assert!(nodes.get_by_id(id3).unwrap().is_none());
    // Remaining should be updated n1 only; verify via targeted queries
    let de_nodes = nodes.get::<country, _>("DE").expect("country DE after deletes");
    assert_eq!(de_nodes.len(), 1);
    assert_eq!(de_nodes[0].get_id(), id1);
    let by_key = nodes.get::<pubkey, _>("PG_NODE_1").expect("by pubkey PG_NODE_1");
    assert_eq!(by_key.len(), 1);
    assert_eq!(by_key[0].get_id(), id1);
}

View File

@@ -0,0 +1,97 @@
use heromodels::db::postgres::{Config, Postgres};
use heromodels::db::{Collection, Db};
use heromodels::models::heroledger::user::user_index::username;
use heromodels::models::heroledger::user::User;
use heromodels_core::Model;
// NOTE: Requires a local Postgres running with user=postgres password=test123 host=localhost port=5432
// Marked ignored by default. Run with: cargo test -p heromodels --test heroledger_postgres -- --ignored
/// End-to-end CRUD round-trip for heroledger `User` against Postgres:
/// store three users, fetch by id and by the `username` index, update,
/// then delete by id and by index.
///
/// Requires Postgres at localhost:5432 (user=postgres password=test123).
/// Run with: `cargo test -p heromodels --test heroledger_postgres -- --ignored`
#[test]
#[ignore]
fn heroledger_user_postgres_roundtrip() {
    // Connect
    let db = Postgres::new(
        Config::new()
            .user(Some("postgres".into()))
            .password(Some("test123".into()))
            .host(Some("localhost".into()))
            .port(Some(5432)),
    )
    .expect("can connect to Postgres");
    // Open collection (will create table and indexes for top-level fields)
    let users = db.collection::<User>().expect("can open user collection");
    // Clean slate
    if let Ok(existing) = users.get_all() {
        for u in existing {
            let _ = users.delete_by_id(u.get_id());
        }
    }
    // Unique suffix to avoid collisions with any pre-existing rows.
    // (`.to_string()` replaces the redundant `format!("{}", ..)`.)
    let uniq = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .expect("system clock before UNIX epoch")
        .as_nanos()
        .to_string();
    let alice = format!("alice_{}", uniq);
    let bob = format!("bob_{}", uniq);
    let carol = format!("carol_{}", uniq);
    // Build and store multiple users
    let u1 = User::new(0)
        .username(&alice)
        .pubkey("PUBKEY_A")
        .add_email("alice@example.com")
        .build();
    let u2 = User::new(0)
        .username(&bob)
        .pubkey("PUBKEY_B")
        .add_email("bob@example.com")
        .build();
    let u3 = User::new(0)
        .username(&carol)
        .pubkey("PUBKEY_C")
        .add_email("carol@example.com")
        .build();
    // Only alice's stored copy is reused below; underscore the others to
    // avoid unused-variable warnings.
    let (id1, db_u1) = users.set(&u1).expect("store u1");
    let (id2, _db_u2) = users.set(&u2).expect("store u2");
    let (id3, _db_u3) = users.set(&u3).expect("store u3");
    assert!(id1 > 0 && id2 > 0 && id3 > 0);
    // Fetch by id
    assert_eq!(users.get_by_id(id1).unwrap().unwrap().username, alice);
    assert_eq!(users.get_by_id(id2).unwrap().unwrap().username, bob);
    assert_eq!(users.get_by_id(id3).unwrap().unwrap().username, carol);
    // Fetch by index (top-level username)
    let by_username = users.get::<username, _>(&alice).expect("by username");
    assert_eq!(by_username.len(), 1);
    assert_eq!(by_username[0].get_id(), id1);
    // Update one
    let updated = db_u1.clone().add_email("work@alice.example");
    let (id1b, updated_back) = users.set(&updated).expect("update alice");
    assert_eq!(id1b, id1);
    assert!(updated_back.email.len() >= 2);
    // Targeted queries to avoid legacy rows in the same table
    // Verify three users exist via index queries
    assert_eq!(users.get::<username, _>(&alice).unwrap().len(), 1);
    assert_eq!(users.get::<username, _>(&bob).unwrap().len(), 1);
    assert_eq!(users.get::<username, _>(&carol).unwrap().len(), 1);
    // Delete by id
    users.delete_by_id(id2).expect("delete bob by id");
    assert!(users.get_by_id(id2).unwrap().is_none());
    // Delete by index (username)
    users.delete::<username, _>(&carol).expect("delete carol by username");
    assert!(users.get_by_id(id3).unwrap().is_none());
    // Remaining should be just alice; verify via index
    let remain = users.get::<username, _>(&alice).expect("get alice after delete");
    assert_eq!(remain.len(), 1);
    assert_eq!(remain[0].get_id(), id1);
}

View File

@@ -1,4 +1,5 @@
use heromodels::db::Collection;
use heromodels::db::Db;
use heromodels::db::hero::OurDB;
use heromodels::models::biz::{BusinessType, Company, CompanyStatus, Payment, PaymentStatus};
use heromodels_core::Model;
@@ -197,12 +198,18 @@ fn test_payment_database_persistence() {
);
// Save payment
let (payment_id, saved_payment) = db.set(&payment).expect("Failed to save payment");
let (payment_id, saved_payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to save payment");
assert!(payment_id > 0);
assert_eq!(saved_payment.payment_intent_id, "pi_db_test");
// Retrieve payment
let retrieved_payment: Payment = db
.collection::<Payment>()
.expect("open payment collection")
.get_by_id(payment_id)
.expect("Failed to get payment")
.unwrap();
@@ -224,20 +231,34 @@ fn test_payment_status_transitions() {
1360.0,
);
let (payment_id, mut payment) = db.set(&payment).expect("Failed to save payment");
let (payment_id, mut payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to save payment");
// Test pending -> completed
payment = payment.complete_payment(Some("cus_transition_test".to_string()));
let (_, mut payment) = db.set(&payment).expect("Failed to update payment");
let (_, mut payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to update payment");
assert!(payment.is_completed());
// Test completed -> refunded
payment = payment.refund_payment();
let (_, payment) = db.set(&payment).expect("Failed to update payment");
let (_, payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to update payment");
assert!(payment.is_refunded());
// Verify final state in database
let final_payment: Payment = db
.collection::<Payment>()
.expect("open payment collection")
.get_by_id(payment_id)
.expect("Failed to get payment")
.unwrap();
@@ -270,15 +291,18 @@ fn test_company_payment_integration() {
let db = create_test_db();
// Create company with default PendingPayment status
let company = Company::new(
"Integration Test Corp".to_string(),
"ITC-001".to_string(),
chrono::Utc::now().timestamp(),
)
.email("test@integration.com".to_string())
.business_type(BusinessType::Starter);
let company = Company::new()
.name("Integration Test Corp")
.registration_number("ITC-001")
.incorporation_date(chrono::Utc::now().timestamp())
.email("test@integration.com")
.business_type(BusinessType::Starter);
let (company_id, company) = db.set(&company).expect("Failed to save company");
let (company_id, company) = db
.collection::<Company>()
.expect("open company collection")
.set(&company)
.expect("Failed to save company");
assert_eq!(company.status, CompanyStatus::PendingPayment);
// Create payment for the company
@@ -291,18 +315,28 @@ fn test_company_payment_integration() {
305.0,
);
let (_payment_id, payment) = db.set(&payment).expect("Failed to save payment");
let (_payment_id, payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to save payment");
assert_eq!(payment.company_id, company_id);
// Complete payment
let completed_payment = payment.complete_payment(Some("cus_integration_test".to_string()));
let (_, completed_payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&completed_payment)
.expect("Failed to update payment");
// Update company status to Active
let active_company = company.status(CompanyStatus::Active);
let (_, active_company) = db.set(&active_company).expect("Failed to update company");
let (_, active_company) = db
.collection::<Company>()
.expect("open company collection")
.set(&active_company)
.expect("Failed to update company");
// Verify final states
assert!(completed_payment.is_completed());

View File

@@ -1,277 +0,0 @@
# OurDB API Reference
This document provides a comprehensive reference for the OurDB Rust API.
## Table of Contents
1. [Configuration](#configuration)
2. [Database Operations](#database-operations)
- [Creating and Opening](#creating-and-opening)
- [Setting Data](#setting-data)
- [Getting Data](#getting-data)
- [Deleting Data](#deleting-data)
- [History Tracking](#history-tracking)
3. [Error Handling](#error-handling)
4. [Advanced Usage](#advanced-usage)
- [Custom File Size](#custom-file-size)
- [Custom Key Size](#custom-key-size)
5. [Performance Considerations](#performance-considerations)
## Configuration
### OurDBConfig
The `OurDBConfig` struct is used to configure a new OurDB instance.
```rust
pub struct OurDBConfig {
pub path: PathBuf,
pub incremental_mode: bool,
pub file_size: Option<usize>,
pub keysize: Option<u8>,
}
```
| Field | Type | Description |
|-------|------|-------------|
| `path` | `PathBuf` | Path to the database directory |
| `incremental_mode` | `bool` | Whether to use auto-incremented IDs (true) or user-provided IDs (false) |
| `file_size` | `Option<usize>` | Maximum size of each database file in bytes (default: 500MB) |
| `keysize` | `Option<u8>` | Size of keys in bytes (default: 4, valid values: 2, 3, 4, 6) |
Example:
```rust
let config = OurDBConfig {
path: PathBuf::from("/path/to/db"),
incremental_mode: true,
file_size: Some(1024 * 1024 * 100), // 100MB
keysize: Some(4), // 4-byte keys
};
```
## Database Operations
### Creating and Opening
#### `OurDB::new`
Creates a new OurDB instance or opens an existing one.
```rust
pub fn new(config: OurDBConfig) -> Result<OurDB, Error>
```
Example:
```rust
let mut db = OurDB::new(config)?;
```
### Setting Data
#### `OurDB::set`
Sets a value in the database. In incremental mode, if no ID is provided, a new ID is generated.
```rust
pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error>
```
The `OurDBSetArgs` struct has the following fields:
```rust
pub struct OurDBSetArgs<'a> {
pub id: Option<u32>,
pub data: &'a [u8],
}
```
Example with auto-generated ID:
```rust
let id = db.set(OurDBSetArgs {
id: None,
data: b"Hello, World!",
})?;
```
Example with explicit ID:
```rust
db.set(OurDBSetArgs {
id: Some(42),
data: b"Hello, World!",
})?;
```
### Getting Data
#### `OurDB::get`
Retrieves a value from the database by ID.
```rust
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>
```
Example:
```rust
let data = db.get(42)?;
```
### Deleting Data
#### `OurDB::delete`
Deletes a value from the database by ID.
```rust
pub fn delete(&mut self, id: u32) -> Result<(), Error>
```
Example:
```rust
db.delete(42)?;
```
### History Tracking
#### `OurDB::get_history`
Retrieves the history of values for a given ID, up to the specified depth.
```rust
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>
```
Example:
```rust
// Get the last 5 versions of the record
let history = db.get_history(42, 5)?;
// Process each version (most recent first)
for (i, version) in history.iter().enumerate() {
println!("Version {}: {:?}", i, version);
}
```
### Other Operations
#### `OurDB::get_next_id`
Returns the next ID that will be assigned in incremental mode.
```rust
pub fn get_next_id(&self) -> Result<u32, Error>
```
Example:
```rust
let next_id = db.get_next_id()?;
```
#### `OurDB::close`
Closes the database, ensuring all data is flushed to disk.
```rust
pub fn close(&mut self) -> Result<(), Error>
```
Example:
```rust
db.close()?;
```
#### `OurDB::destroy`
Closes the database and deletes all database files.
```rust
pub fn destroy(&mut self) -> Result<(), Error>
```
Example:
```rust
db.destroy()?;
```
## Error Handling
OurDB uses the `thiserror` crate to define error types. The main error type is `ourdb::Error`.
```rust
pub enum Error {
IoError(std::io::Error),
InvalidKeySize,
InvalidId,
RecordNotFound,
InvalidCrc,
NotIncrementalMode,
DatabaseClosed,
// ...
}
```
All OurDB operations that can fail return a `Result<T, Error>` which can be handled using Rust's standard error handling mechanisms.
Example:
```rust
match db.get(42) {
Ok(data) => println!("Found data: {:?}", data),
Err(ourdb::Error::RecordNotFound) => println!("Record not found"),
Err(e) => eprintln!("Error: {}", e),
}
```
## Advanced Usage
### Custom File Size
You can configure the maximum size of each database file:
```rust
let config = OurDBConfig {
path: PathBuf::from("/path/to/db"),
incremental_mode: true,
file_size: Some(1024 * 1024 * 10), // 10MB per file
keysize: None,
};
```
Smaller file sizes can be useful for:
- Limiting memory usage when reading files
- Improving performance on systems with limited memory
- Easier backup and file management
### Custom Key Size
OurDB supports different key sizes (2, 3, 4, or 6 bytes):
```rust
let config = OurDBConfig {
path: PathBuf::from("/path/to/db"),
incremental_mode: true,
file_size: None,
keysize: Some(6), // 6-byte keys
};
```
Key size considerations:
- 2 bytes: Up to 65,536 records
- 3 bytes: Up to 16,777,216 records
- 4 bytes: Up to 4,294,967,296 records (default)
- 6 bytes: Up to 281,474,976,710,656 records
## Performance Considerations
For optimal performance:
1. **Choose appropriate key size**: Use the smallest key size that can accommodate your expected number of records.
2. **Configure file size**: For large databases, consider using smaller file sizes to improve memory usage.
3. **Batch operations**: When inserting or updating many records, consider batching operations to minimize disk I/O.
4. **Close properly**: Always call `close()` when you're done with the database to ensure data is properly flushed to disk.
5. **Reuse OurDB instance**: Creating a new OurDB instance has overhead, so reuse the same instance for multiple operations when possible.
6. **Consider memory usage**: The lookup table is loaded into memory, so very large databases may require significant RAM.

806
ourdb/Cargo.lock generated
View File

@@ -1,806 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstyle"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bitflags"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944"
dependencies = [
"clap_builder",
]
[[package]]
name = "clap_builder"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9"
dependencies = [
"anstyle",
"clap_lex",
]
[[package]]
name = "clap_lex"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "criterion"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"is-terminal",
"itertools",
"num-traits",
"once_cell",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "errno"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "getrandom"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "hermit-abi"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e"
[[package]]
name = "is-terminal"
version = "0.4.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
"hermit-abi",
"libc",
"windows-sys",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
[[package]]
name = "linux-raw-sys"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"criterion",
"log",
"rand",
"tempfile",
"thiserror",
]
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.15",
]
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rustix"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "rustversion"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "syn"
version = "2.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
dependencies = [
"fastrand",
"getrandom 0.3.2",
"once_cell",
"rustix",
"windows-sys",
]
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View File

@@ -1,32 +0,0 @@
[package]
name = "ourdb"
version = "0.1.0"
edition = "2021"
description = "A lightweight, efficient key-value database with history tracking capabilities"
authors = ["OurWorld Team"]
[dependencies]
crc32fast = "1.3.2"
thiserror = "1.0.40"
log = "0.4.17"
rand = "0.8.5"
[dev-dependencies]
criterion = "0.5.1"
tempfile = "3.8.0"
# [[bench]]
# name = "ourdb_benchmarks"
# harness = false
[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"
[[example]]
name = "advanced_usage"
path = "examples/advanced_usage.rs"
[[example]]
name = "benchmark"
path = "examples/benchmark.rs"

View File

@@ -1,135 +0,0 @@
# OurDB
OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This Rust implementation offers a robust and performant solution for applications requiring simple but reliable data storage.
## Features
- Simple key-value storage with history tracking
- Data integrity verification using CRC32
- Support for multiple backend files for large datasets
- Lookup table for fast data retrieval
- Incremental mode for auto-generated IDs
- Memory and disk-based lookup tables
## Limitations
- Maximum data size per entry is 65,535 bytes (~64KB) due to the 2-byte size field in the record header
## Usage
### Basic Example
```rust
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
fn main() -> Result<(), ourdb::Error> {
// Create a new database
let config = OurDBConfig {
path: PathBuf::from("/tmp/ourdb"),
incremental_mode: true,
file_size: None, // Use default (500MB)
keysize: None, // Use default (4 bytes)
};
let mut db = OurDB::new(config)?;
// Store data (with auto-generated ID in incremental mode)
let data = b"Hello, OurDB!";
let id = db.set(OurDBSetArgs { id: None, data })?;
println!("Stored data with ID: {}", id);
// Retrieve data
let retrieved = db.get(id)?;
println!("Retrieved: {}", String::from_utf8_lossy(&retrieved));
// Update data
let updated_data = b"Updated data";
db.set(OurDBSetArgs { id: Some(id), data: updated_data })?;
// Get history (returns most recent first)
let history = db.get_history(id, 2)?;
for (i, entry) in history.iter().enumerate() {
println!("History {}: {}", i, String::from_utf8_lossy(entry));
}
// Delete data
db.delete(id)?;
// Close the database
db.close()?;
Ok(())
}
```
### Key-Value Mode vs Incremental Mode
OurDB supports two operating modes:
1. **Key-Value Mode** (`incremental_mode: false`): You must provide IDs explicitly when storing data.
2. **Incremental Mode** (`incremental_mode: true`): IDs are auto-generated when not provided.
### Configuration Options
- `path`: Directory for database storage
- `incremental_mode`: Whether to use auto-increment mode
- `file_size`: Maximum file size (default: 500MB)
- `keysize`: Size of lookup table entries (2, 3, 4, or 6 bytes)
- 2: For databases with < 65,536 records
- 3: For databases with < 16,777,216 records
- 4: For databases with < 4,294,967,296 records (default)
- 6: For large databases requiring multiple files
## Architecture
OurDB consists of three main components:
1. **Frontend API**: Provides the public interface for database operations
2. **Lookup Table**: Maps keys to physical locations in the backend storage
3. **Backend Storage**: Manages the actual data persistence in files
### Record Format
Each record in the backend storage includes:
- 2 bytes: Data size
- 4 bytes: CRC32 checksum
- 6 bytes: Previous record location (for history)
- N bytes: Actual data
## Documentation
Additional documentation is available in the repository:
- [API Reference](API.md): Detailed API documentation
- [Migration Guide](MIGRATION.md): Guide for migrating from the V implementation
- [Architecture](architecture.md): Design and implementation details
## Examples
The repository includes several examples to demonstrate OurDB usage:
- `basic_usage.rs`: Simple operations with OurDB
- `advanced_usage.rs`: More complex features including both operation modes
- `benchmark.rs`: Performance benchmarking tool
Run an example with:
```bash
cargo run --example basic_usage
cargo run --example advanced_usage
cargo run --example benchmark
```
## Performance
OurDB is designed for efficiency and minimal overhead. The benchmark example can be used to evaluate performance on your specific hardware and workload.
Typical performance metrics on modern hardware:
- **Write**: 10,000+ operations per second
- **Read**: 50,000+ operations per second
## License
This project is licensed under the MIT License.

View File

@@ -1,439 +0,0 @@
# OurDB: Architecture for V to Rust Port
## 1. Overview
OurDB is a lightweight, efficient key-value database implementation that provides data persistence with history tracking capabilities. This document outlines the architecture for porting OurDB from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.
## 2. Current Architecture (V Implementation)
The current V implementation of OurDB consists of three main components in a layered architecture:
```mermaid
graph TD
A[Client Code] --> B[Frontend API]
B --> C[Lookup Table]
B --> D[Backend Storage]
C --> D
```
### 2.1 Frontend (db.v)
The frontend provides the public API for database operations and coordinates between the lookup table and backend storage components.
Key responsibilities:
- Exposing high-level operations (set, get, delete, history)
- Managing incremental ID generation in auto-increment mode
- Coordinating data flow between lookup and backend components
- Handling database lifecycle (open, close, destroy)
### 2.2 Lookup Table (lookup.v)
The lookup table maps keys to physical locations in the backend storage.
Key responsibilities:
- Maintaining key-to-location mapping
- Optimizing key sizes based on database configuration
- Supporting both memory and disk-based lookup tables
- Handling sparse data efficiently
- Providing next ID generation for incremental mode
### 2.3 Backend Storage (backend.v)
The backend storage manages the actual data persistence in files.
Key responsibilities:
- Managing physical data storage in files
- Ensuring data integrity with CRC32 checksums
- Supporting multiple file backends for large datasets
- Implementing low-level read/write operations
- Tracking record history through linked locations
### 2.4 Core Data Structures
#### OurDB
```v
@[heap]
pub struct OurDB {
mut:
lookup &LookupTable
pub:
path string // directory for storage
incremental_mode bool
file_size u32 = 500 * (1 << 20) // 500MB
pub mut:
file os.File
file_nr u16 // the file which is open
last_used_file_nr u16
}
```
#### LookupTable
```v
pub struct LookupTable {
keysize u8
lookuppath string
mut:
data []u8
incremental ?u32 // points to next empty slot if incremental mode is enabled
}
```
#### Location
```v
pub struct Location {
pub mut:
file_nr u16
position u32
}
```
### 2.5 Storage Format
#### Record Format
Each record in the backend storage includes:
- 2 bytes: Data size
- 4 bytes: CRC32 checksum
- 6 bytes: Previous record location (for history)
- N bytes: Actual data
#### Lookup Table Optimization
The lookup table automatically optimizes its key size based on the database configuration:
- 2 bytes: For databases with < 65,536 records
- 3 bytes: For databases with < 16,777,216 records
- 4 bytes: For databases with < 4,294,967,296 records
- 6 bytes: For large databases requiring multiple files
## 3. Proposed Rust Architecture
The Rust implementation will maintain the same layered architecture while leveraging Rust's type system, ownership model, and error handling.
```mermaid
graph TD
A[Client Code] --> B[OurDB API]
B --> C[LookupTable]
B --> D[Backend]
C --> D
E[Error Handling] --> B
E --> C
E --> D
F[Configuration] --> B
```
### 3.1 Core Components
#### 3.1.1 OurDB (API Layer)
```rust
pub struct OurDB {
path: String,
incremental_mode: bool,
file_size: u32,
lookup: LookupTable,
file: Option<std::fs::File>,
file_nr: u16,
last_used_file_nr: u16,
}
impl OurDB {
pub fn new(config: OurDBConfig) -> Result<Self, Error>;
pub fn set(&mut self, id: Option<u32>, data: &[u8]) -> Result<u32, Error>;
pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error>;
pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error>;
pub fn delete(&mut self, id: u32) -> Result<(), Error>;
pub fn get_next_id(&mut self) -> Result<u32, Error>;
pub fn close(&mut self) -> Result<(), Error>;
pub fn destroy(&mut self) -> Result<(), Error>;
}
```
#### 3.1.2 LookupTable
```rust
pub struct LookupTable {
keysize: u8,
lookuppath: String,
data: Vec<u8>,
incremental: Option<u32>,
}
impl LookupTable {
fn new(config: LookupConfig) -> Result<Self, Error>;
fn get(&self, id: u32) -> Result<Location, Error>;
fn set(&mut self, id: u32, location: Location) -> Result<(), Error>;
fn delete(&mut self, id: u32) -> Result<(), Error>;
fn get_next_id(&self) -> Result<u32, Error>;
fn increment_index(&mut self) -> Result<(), Error>;
fn export_data(&self, path: &str) -> Result<(), Error>;
fn import_data(&mut self, path: &str) -> Result<(), Error>;
fn export_sparse(&self, path: &str) -> Result<(), Error>;
fn import_sparse(&mut self, path: &str) -> Result<(), Error>;
}
```
#### 3.1.3 Location
```rust
pub struct Location {
file_nr: u16,
position: u32,
}
impl Location {
fn new(bytes: &[u8], keysize: u8) -> Result<Self, Error>;
fn to_bytes(&self) -> Result<Vec<u8>, Error>;
fn to_u64(&self) -> u64;
}
```
#### 3.1.4 Backend
The backend functionality will be implemented as methods on the OurDB struct:
```rust
impl OurDB {
fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error>;
fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error>;
fn get_file_nr(&mut self) -> Result<u16, Error>;
fn set_(&mut self, id: u32, old_location: Location, data: &[u8]) -> Result<(), Error>;
fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error>;
fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error>;
fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error>;
fn close_(&mut self);
}
```
#### 3.1.5 Configuration
```rust
pub struct OurDBConfig {
pub record_nr_max: u32,
pub record_size_max: u32,
pub file_size: u32,
pub path: String,
pub incremental_mode: bool,
pub reset: bool,
}
struct LookupConfig {
size: u32,
keysize: u8,
lookuppath: String,
incremental_mode: bool,
}
```
#### 3.1.6 Error Handling
```rust
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
#[error("Invalid key size: {0}")]
InvalidKeySize(u8),
#[error("Record not found: {0}")]
RecordNotFound(u32),
#[error("Data corruption: CRC mismatch")]
DataCorruption,
#[error("Index out of bounds: {0}")]
IndexOutOfBounds(u32),
#[error("Incremental mode not enabled")]
IncrementalNotEnabled,
#[error("Lookup table is full")]
LookupTableFull,
#[error("Invalid file number: {0}")]
InvalidFileNumber(u16),
#[error("Invalid operation: {0}")]
InvalidOperation(String),
}
```
## 4. Implementation Strategy
### 4.1 Phase 1: Core Data Structures
1. Implement the `Location` struct with serialization/deserialization
2. Implement the `Error` enum for error handling
3. Implement the configuration structures
### 4.2 Phase 2: Lookup Table
1. Implement the `LookupTable` struct with memory-based storage
2. Add disk-based storage support
3. Implement key size optimization
4. Add incremental ID support
5. Implement import/export functionality
### 4.3 Phase 3: Backend Storage
1. Implement file management functions
2. Implement record serialization/deserialization with CRC32
3. Implement history tracking through linked locations
4. Add support for multiple backend files
### 4.4 Phase 4: Frontend API
1. Implement the `OurDB` struct with core operations
2. Add high-level API methods (set, get, delete, history)
3. Implement database lifecycle management
### 4.5 Phase 5: Testing and Optimization
1. Port existing tests from V to Rust
2. Add new tests for Rust-specific functionality
3. Benchmark and optimize performance
4. Ensure compatibility with existing OurDB files
## 5. Implementation Considerations
### 5.1 Memory Management
Leverage Rust's ownership model for safe and efficient memory management:
- Use `Vec<u8>` for data buffers instead of raw pointers
- Implement proper RAII for file handles
- Use references and borrows to avoid unnecessary copying
- Consider using `Bytes` from the `bytes` crate for zero-copy operations
### 5.2 Error Handling
Use Rust's `Result` type for comprehensive error handling:
- Define custom error types for OurDB-specific errors
- Propagate errors using the `?` operator
- Provide detailed error messages
- Implement proper error conversion using the `From` trait
### 5.3 File I/O
Optimize file operations for performance:
- Use `BufReader` and `BufWriter` for buffered I/O
- Implement proper file locking for concurrent access
- Consider memory-mapped files for lookup tables
- Use `seek` and `read_exact` for precise positioning
### 5.4 Concurrency
Consider thread safety for concurrent database access:
- Use interior mutability patterns where appropriate
- Implement `Send` and `Sync` traits for thread safety
- Consider using `RwLock` for shared read access
- Provide clear documentation on thread safety guarantees
### 5.5 Performance Optimizations
Identify opportunities for performance improvements:
- Use memory-mapped files for lookup tables
- Implement caching for frequently accessed records
- Use zero-copy operations where possible
- Consider async I/O for non-blocking operations
## 6. Testing Strategy
### 6.1 Unit Tests
Write comprehensive unit tests for each component:
- Test `Location` serialization/deserialization
- Test `LookupTable` operations
- Test backend storage functions
- Test error handling
### 6.2 Integration Tests
Write integration tests for the complete system:
- Test database creation and configuration
- Test basic CRUD operations
- Test history tracking
- Test incremental ID generation
- Test file management
### 6.3 Compatibility Tests
Ensure compatibility with existing OurDB files:
- Test reading existing V-created OurDB files
- Test writing files that can be read by the V implementation
- Test migration scenarios
### 6.4 Performance Tests
Benchmark performance against the V implementation:
- Measure throughput for set/get operations
- Measure latency for different operations
- Test with different database sizes
- Test with different record sizes
## 7. Project Structure
```
ourdb/
├── Cargo.toml
├── src/
│ ├── lib.rs # Public API and re-exports
│ ├── ourdb.rs # OurDB implementation (frontend)
│ ├── lookup.rs # Lookup table implementation
│ ├── location.rs # Location struct implementation
│ ├── backend.rs # Backend storage implementation
│ ├── error.rs # Error types
│ ├── config.rs # Configuration structures
│ └── utils.rs # Utility functions
├── tests/
│ ├── unit/ # Unit tests
│ ├── integration/ # Integration tests
│ └── compatibility/ # Compatibility tests
└── examples/
├── basic.rs # Basic usage example
├── history.rs # History tracking example
└── client_server.rs # Client-server example
```
## 8. Dependencies
The Rust implementation will use the following dependencies:
- `thiserror` for error handling
- `crc32fast` for CRC32 calculation
- `bytes` for efficient byte manipulation
- `memmap2` for memory-mapped files (optional)
- `serde` for serialization (optional, for future extensions)
- `log` for logging
- `criterion` for benchmarking
## 9. Compatibility Considerations
To ensure compatibility with the V implementation:
1. Maintain the same file format for data storage
2. Preserve the lookup table format
3. Keep the same CRC32 calculation method
4. Ensure identical behavior for incremental ID generation
5. Maintain the same history tracking mechanism
## 10. Future Extensions
Potential future extensions to consider:
1. Async API for non-blocking operations
2. Transactions support
3. Better concurrency control
4. Compression support
5. Encryption support
6. Streaming API for large values
7. Iterators for scanning records
8. Secondary indexes
## 11. Conclusion
This architecture provides a roadmap for porting OurDB from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.
The Rust implementation aims to be:
- **Safe**: Leveraging Rust's ownership model for memory safety
- **Fast**: Maintaining or improving performance compared to V
- **Compatible**: Working with existing OurDB files
- **Extensible**: Providing a foundation for future enhancements
- **Well-tested**: Including comprehensive test coverage

View File

@@ -1,231 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
use std::time::Instant;
/// Entry point: runs the key-value, incremental, and benchmark demos
/// against a throwaway database directory under the system temp dir.
///
/// Set the `KEEP_DB` environment variable to keep the database files
/// around after the run instead of deleting them.
fn main() -> Result<(), ourdb::Error> {
    // Work inside a scratch directory under the OS temp location.
    let root = std::env::temp_dir().join("ourdb_advanced_example");
    std::fs::create_dir_all(&root)?;
    println!("Creating database at: {}", root.display());

    // Run each demonstration in turn; any error aborts the run.
    key_value_mode_example(&root)?;
    incremental_mode_example(&root)?;
    performance_benchmark(&root)?;

    // Remove the scratch directory unless the user asked to keep it.
    match std::env::var("KEEP_DB") {
        Err(_) => {
            std::fs::remove_dir_all(&root)?;
            println!("Cleaned up database directory");
        }
        Ok(_) => println!("Database kept at: {}", root.display()),
    }
    Ok(())
}
/// Demonstrates key-value (non-incremental) mode, where every record
/// must be stored under an explicitly chosen ID, including repeated
/// overwrites of one ID to build up retrievable history.
fn key_value_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
    println!("\n=== Key-Value Mode Example ===");
    let db_dir = base_path.join("key_value");
    std::fs::create_dir_all(&db_dir)?;

    // Non-incremental mode: the caller supplies every ID.
    let mut db = OurDB::new(OurDBConfig {
        path: db_dir,
        incremental_mode: false,
        file_size: Some(1024 * 1024), // 1MB for testing
        keysize: Some(2),             // Small key size for demonstration
        reset: None,                  // Don't reset existing database
    })?;

    let custom_ids = [100, 200, 300, 400, 500];

    // Write one record under each custom ID.
    for (idx, &id) in custom_ids.iter().enumerate() {
        let payload = format!("Record with custom ID {}", id);
        db.set(OurDBSetArgs {
            id: Some(id),
            data: payload.as_bytes(),
        })?;
        println!("Stored record {} with custom ID: {}", idx + 1, id);
    }

    // Read every record back by its ID.
    for &id in custom_ids.iter() {
        let bytes = db.get(id)?;
        println!(
            "Retrieved ID {}: {}",
            id,
            String::from_utf8_lossy(&bytes)
        );
    }

    // Overwrite one record several times so it accumulates history.
    let target = custom_ids[2]; // ID 300
    for version in 1..=3 {
        let payload = format!("Updated record {} (version {})", target, version);
        db.set(OurDBSetArgs {
            id: Some(target),
            data: payload.as_bytes(),
        })?;
        println!("Updated ID {} (version {})", target, version);
    }

    // Walk the history chain, newest version first.
    let history = db.get_history(target, 5)?;
    println!("History for ID {} (most recent first):", target);
    for (version, entry) in history.iter().enumerate() {
        println!(" Version {}: {}", version, String::from_utf8_lossy(entry));
    }

    db.close()?;
    println!("Key-value mode example completed");
    Ok(())
}
/// Demonstrates incremental mode: IDs are assigned automatically by the
/// database on each `set` with `id: None`.
fn incremental_mode_example(base_path: &PathBuf) -> Result<(), ourdb::Error> {
    println!("\n=== Incremental Mode Example ===");
    let db_dir = base_path.join("incremental");
    std::fs::create_dir_all(&db_dir)?;

    // Incremental mode: the database hands out sequential IDs.
    let mut db = OurDB::new(OurDBConfig {
        path: db_dir,
        incremental_mode: true,
        file_size: Some(1024 * 1024), // 1MB for testing
        keysize: Some(3),             // 3-byte keys
        reset: None,                  // Don't reset existing database
    })?;

    // Insert five records, remembering the ID assigned to each.
    let mut ids = Vec::new();
    for n in 1..=5 {
        let payload = format!("Auto-increment record {}", n);
        let assigned = db.set(OurDBSetArgs {
            id: None,
            data: payload.as_bytes(),
        })?;
        println!("Stored record {} with auto-assigned ID: {}", n, assigned);
        ids.push(assigned);
    }

    // Peek at the ID the database would assign next.
    println!("Next ID to be assigned: {}", db.get_next_id()?);

    // Read every record back.
    for &id in ids.iter() {
        let bytes = db.get(id)?;
        println!(
            "Retrieved ID {}: {}",
            id,
            String::from_utf8_lossy(&bytes)
        );
    }

    db.close()?;
    println!("Incremental mode example completed");
    Ok(())
}
/// Benchmarks write, read, and update throughput against a scratch
/// database under `base_path`, printing ops/sec and ms/op per phase.
///
/// Fixes: the file-size comment previously claimed "10MB" while the
/// value is 1024 * 1024 (1MB); the three identical throughput reports
/// are factored into `report`.
fn performance_benchmark(base_path: &PathBuf) -> Result<(), ourdb::Error> {
    println!("\n=== Performance Benchmark ===");
    let db_path = base_path.join("benchmark");
    std::fs::create_dir_all(&db_path)?;

    // Incremental mode so writes do not need explicit IDs.
    let config = OurDBConfig {
        path: db_path,
        incremental_mode: true,
        file_size: Some(1024 * 1024), // 1MB
        keysize: Some(4),             // 4-byte keys
        reset: None,                  // Don't reset existing database
    };
    let mut db = OurDB::new(config)?;

    // Workload parameters.
    let num_operations = 1000;
    let data_size = 100; // bytes per record
    let test_data = vec![b'A'; data_size];

    // Phase 1: writes with auto-assigned IDs; remember the IDs for the
    // read and update phases below.
    println!("Benchmarking {} write operations...", num_operations);
    let start = Instant::now();
    let mut ids = Vec::with_capacity(num_operations);
    for _ in 0..num_operations {
        ids.push(db.set(OurDBSetArgs {
            id: None,
            data: &test_data,
        })?);
    }
    report("Write", num_operations, start.elapsed());

    // Phase 2: read back every record just written.
    println!("Benchmarking {} read operations...", num_operations);
    let start = Instant::now();
    for &id in &ids {
        let _ = db.get(id)?;
    }
    report("Read", num_operations, start.elapsed());

    // Phase 3: overwrite every record in place (same IDs, same payload).
    println!("Benchmarking {} update operations...", num_operations);
    let start = Instant::now();
    for &id in &ids {
        db.set(OurDBSetArgs {
            id: Some(id),
            data: &test_data,
        })?;
    }
    report("Update", num_operations, start.elapsed());

    db.close()?;
    println!("Performance benchmark completed");
    Ok(())
}

/// Prints one throughput line: "<label> performance: X ops/sec (Y ms/op)"
/// for `ops` operations completed in `elapsed`.
fn report(label: &str, ops: usize, elapsed: std::time::Duration) {
    let secs = elapsed.as_secs_f64();
    println!(
        "{} performance: {:.2} ops/sec ({:.2} ms/op)",
        label,
        ops as f64 / secs,
        secs * 1000.0 / ops as f64
    );
}

View File

@@ -1,89 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
/// Basic OurDB walkthrough: open a database in incremental mode, store
/// two records, read them back, update one (demonstrating history),
/// delete the other, then close and optionally clean up.
///
/// Set the `KEEP_DB` environment variable to keep the database files.
fn main() -> Result<(), ourdb::Error> {
    // Scratch directory under the OS temp location.
    let db_path = std::env::temp_dir().join("ourdb_example");
    std::fs::create_dir_all(&db_path)?;
    println!("Creating database at: {}", db_path.display());

    // Incremental mode with all defaults: 500MB files, 4-byte keys.
    let mut db = OurDB::new(OurDBConfig {
        path: db_path.clone(),
        incremental_mode: true,
        file_size: None, // Use default (500MB)
        keysize: None,   // Use default (4 bytes)
        reset: None,     // Don't reset existing database
    })?;

    // Two inserts with auto-assigned IDs.
    let first = db.set(OurDBSetArgs {
        id: None,
        data: b"First record",
    })?;
    println!("Stored first record with ID: {}", first);

    let second = db.set(OurDBSetArgs {
        id: None,
        data: b"Second record",
    })?;
    println!("Stored second record with ID: {}", second);

    // Read both records back.
    for id in [first, second] {
        let bytes = db.get(id)?;
        println!(
            "Retrieved ID {}: {}",
            id,
            String::from_utf8_lossy(&bytes)
        );
    }

    // Overwrite the first record; the old version stays in history.
    db.set(OurDBSetArgs {
        id: Some(first),
        data: b"Updated first record",
    })?;
    println!("Updated record with ID: {}", first);

    // Show the two most recent versions, newest first.
    let history = db.get_history(first, 2)?;
    println!("History for ID {}:", first);
    for (version, entry) in history.iter().enumerate() {
        println!(" Version {}: {}", version, String::from_utf8_lossy(entry));
    }

    // Delete the second record and confirm it is gone.
    db.delete(second)?;
    println!("Deleted record with ID: {}", second);
    match db.get(second) {
        Ok(_) => println!("Record still exists (unexpected)"),
        Err(e) => println!("Verified deletion: {}", e),
    }

    db.close()?;
    println!("Database closed successfully");

    // Remove the scratch directory unless the user asked to keep it.
    if std::env::var("KEEP_DB").is_ok() {
        println!("Database kept at: {}", db_path.display());
    } else {
        std::fs::remove_dir_all(&db_path)?;
        println!("Cleaned up database directory");
    }
    Ok(())
}

View File

@@ -1,124 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::time::Instant;
/// Benchmark CLI: measures write/read/update throughput.
///
/// Flags:
///   --no-incremental   use explicit IDs instead of auto-increment
///   --keysize N        lookup key size in bytes (default 4)
///   --ops N            operations per phase (default 10000)
fn main() -> Result<(), ourdb::Error> {
    let args: Vec<String> = std::env::args().collect();

    // Defaults, possibly overridden below.
    let mut incremental_mode = true;
    let mut keysize: u8 = 4;
    let mut num_operations = 10000;

    // Single pass over the argument list; malformed numbers fall back
    // to the defaults.
    let mut idx = 1;
    while idx < args.len() {
        match args[idx].as_str() {
            "--no-incremental" => incremental_mode = false,
            "--keysize" if idx + 1 < args.len() => {
                keysize = args[idx + 1].parse().unwrap_or(4);
            }
            "--ops" if idx + 1 < args.len() => {
                num_operations = args[idx + 1].parse().unwrap_or(10000);
            }
            _ => {}
        }
        idx += 1;
    }

    // Scratch directory under the OS temp location.
    let db_path = std::env::temp_dir().join("ourdb_benchmark");
    std::fs::create_dir_all(&db_path)?;
    println!("Database path: {}", db_path.display());

    // Fresh database per run so results are comparable.
    let mut db = OurDB::new(OurDBConfig {
        path: db_path.clone(),
        incremental_mode,
        file_size: Some(1024 * 1024),
        keysize: Some(keysize),
        reset: Some(true), // Reset the database for benchmarking
    })?;

    // Fixed 100-byte payload for every record.
    let test_data = vec![b'A'; 100];

    // Phase 1: writes. In non-incremental mode IDs are 1..=N.
    println!(
        "Benchmarking {} write operations (incremental: {}, keysize: {})...",
        num_operations, incremental_mode, keysize
    );
    let start = Instant::now();
    let mut ids = Vec::with_capacity(num_operations);
    for _ in 0..num_operations {
        let assigned = if incremental_mode {
            db.set(OurDBSetArgs {
                id: None,
                data: &test_data,
            })?
        } else {
            let next = ids.len() as u32 + 1;
            db.set(OurDBSetArgs {
                id: Some(next),
                data: &test_data,
            })?;
            next
        };
        ids.push(assigned);
    }
    let write_duration = start.elapsed();
    println!(
        "Write performance: {:.2} ops/sec ({:.2} ms/op)",
        num_operations as f64 / write_duration.as_secs_f64(),
        write_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Phase 2: read back every record.
    println!("Benchmarking {} read operations...", num_operations);
    let start = Instant::now();
    for &id in &ids {
        let _ = db.get(id)?;
    }
    let read_duration = start.elapsed();
    println!(
        "Read performance: {:.2} ops/sec ({:.2} ms/op)",
        num_operations as f64 / read_duration.as_secs_f64(),
        read_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Phase 3: overwrite every record in place.
    println!("Benchmarking {} update operations...", num_operations);
    let start = Instant::now();
    for &id in &ids {
        db.set(OurDBSetArgs {
            id: Some(id),
            data: &test_data,
        })?;
    }
    let update_duration = start.elapsed();
    println!(
        "Update performance: {:.2} ops/sec ({:.2} ms/op)",
        num_operations as f64 / update_duration.as_secs_f64(),
        update_duration.as_secs_f64() * 1000.0 / num_operations as f64
    );

    // Clean up the scratch database.
    db.close()?;
    std::fs::remove_dir_all(&db_path)?;
    Ok(())
}

View File

@@ -1,83 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};
/// Standalone end-to-end demo: create a uniquely-named scratch
/// database, then exercise set, get, update, history, and delete
/// before cleaning everything up.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Standalone OurDB Example");
    println!("=======================\n");

    // Unique directory name via the current Unix timestamp so repeated
    // runs never collide.
    let stamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let db_path = temp_dir().join(format!("ourdb_example_{}", stamp));
    std::fs::create_dir_all(&db_path)?;
    println!("Creating database at: {}", db_path.display());

    // Incremental mode with library defaults for sizing.
    let mut db = OurDB::new(OurDBConfig {
        path: db_path.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: Some(false),
    })?;
    println!("Database created successfully");

    // Insert one record with an auto-assigned ID.
    let id = db.set(OurDBSetArgs {
        id: None,
        data: b"Hello, OurDB!",
    })?;
    println!("\nStored data with ID: {}", id);

    // Read it back.
    let bytes = db.get(id)?;
    println!("Retrieved data: {}", String::from_utf8_lossy(&bytes));

    // Overwrite the record; the previous version stays in history.
    db.set(OurDBSetArgs {
        id: Some(id),
        data: b"Updated data in OurDB!",
    })?;
    println!("\nUpdated data with ID: {}", id);

    let bytes = db.get(id)?;
    println!(
        "Retrieved updated data: {}",
        String::from_utf8_lossy(&bytes)
    );

    // Show the two most recent versions.
    let history = db.get_history(id, 2)?;
    println!("\nHistory for ID {}:", id);
    for (version, entry) in history.iter().enumerate() {
        println!(" Version {}: {}", version + 1, String::from_utf8_lossy(entry));
    }

    // Delete the record and confirm the lookup now fails.
    db.delete(id)?;
    println!("\nDeleted data with ID: {}", id);
    match db.get(id) {
        Ok(_) => println!("Data still exists (unexpected)"),
        Err(e) => println!("Verified deletion: {}", e),
    }

    println!("\nExample completed successfully!");

    // Tear down the scratch database.
    db.close()?;
    std::fs::remove_dir_all(&db_path)?;
    println!("Cleaned up database directory");
    Ok(())
}

View File

@@ -1,83 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::env::temp_dir;
use std::time::{SystemTime, UNIX_EPOCH};
/// Standalone end-to-end demo (duplicate of the other standalone
/// example): create a uniquely-named scratch database and exercise
/// set, get, update, history, and delete, then clean up.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Standalone OurDB Example");
    println!("=======================\n");

    // Timestamp-suffixed directory so repeated runs never collide.
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let db_path = temp_dir().join(format!("ourdb_example_{}", now));
    std::fs::create_dir_all(&db_path)?;
    println!("Creating database at: {}", db_path.display());

    // Incremental mode with library defaults for sizing.
    let config = OurDBConfig {
        path: db_path.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: Some(false),
    };
    let mut db = OurDB::new(config)?;
    println!("Database created successfully");

    // Store, then read back, one record.
    let record_id = db.set(OurDBSetArgs {
        id: None,
        data: b"Hello, OurDB!",
    })?;
    println!("\nStored data with ID: {}", record_id);

    let fetched = db.get(record_id)?;
    println!("Retrieved data: {}", String::from_utf8_lossy(&fetched));

    // Overwrite it; the old version remains reachable via history.
    db.set(OurDBSetArgs {
        id: Some(record_id),
        data: b"Updated data in OurDB!",
    })?;
    println!("\nUpdated data with ID: {}", record_id);

    let fetched = db.get(record_id)?;
    println!(
        "Retrieved updated data: {}",
        String::from_utf8_lossy(&fetched)
    );

    // Print the two most recent versions.
    println!("\nHistory for ID {}:", record_id);
    for (n, entry) in db.get_history(record_id, 2)?.iter().enumerate() {
        println!(" Version {}: {}", n + 1, String::from_utf8_lossy(entry));
    }

    // Delete and verify the record is gone.
    db.delete(record_id)?;
    println!("\nDeleted data with ID: {}", record_id);
    match db.get(record_id) {
        Ok(_) => println!("Data still exists (unexpected)"),
        Err(e) => println!("Verified deletion: {}", e),
    }

    println!("\nExample completed successfully!");

    // Tear down the scratch database.
    db.close()?;
    std::fs::remove_dir_all(&db_path)?;
    println!("Cleaned up database directory");
    Ok(())
}

View File

@@ -1,366 +0,0 @@
use std::fs::{self, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use crc32fast::Hasher;
use crate::error::Error;
use crate::location::Location;
use crate::OurDB;
// Header size: 2 bytes (size) + 4 bytes (CRC32) + 6 bytes (previous location)
pub const HEADER_SIZE: usize = 12;
impl OurDB {
/// Selects and opens a database file for read/write operations.
///
/// Any currently open file handle is dropped first; the target file is
/// created on demand before being reopened fresh.
pub(crate) fn db_file_select(&mut self, file_nr: u16) -> Result<(), Error> {
    // u16 already bounds file_nr to 65535, so no range check is needed.
    let db_path = self.path.join(format!("{}.db", file_nr));

    // Drop whatever handle we currently hold before switching files.
    self.file = None;

    // Lazily create the backing file on first access.
    if !db_path.exists() {
        self.create_new_db_file(file_nr)?;
    }

    // Reopen with both read and write access and record which file is
    // now active.
    self.file = Some(OpenOptions::new().read(true).write(true).open(&db_path)?);
    self.file_nr = file_nr;
    Ok(())
}
/// Creates a new, empty database file for the given file number.
pub(crate) fn create_new_db_file(&mut self, file_nr: u16) -> Result<(), Error> {
    let path = self.path.join(format!("{}.db", file_nr));
    // Seed the file with a single placeholder byte so that every valid
    // record position is >= 1 (position 0 is the "not found" sentinel).
    File::create(&path)?.write_all(&[0u8])?;
    Ok(())
}
/// Gets the file number to use for the next write operation.
///
/// With key sizes of 2, 3, or 4 bytes all data lives in file 0; with
/// 6-byte keys the database spans multiple files, rolling over to a new
/// file once the current one reaches the configured size limit.
pub(crate) fn get_file_nr(&mut self) -> Result<u16, Error> {
    // Small key sizes cannot address more than one file.
    if self.lookup.keysize() <= 4 {
        if !self.path.join("0.db").exists() {
            self.create_new_db_file(0)?;
        }
        return Ok(0);
    }

    // 6-byte keys: keep appending to the most recently used file.
    let current = self.path.join(format!("{}.db", self.last_used_file_nr));
    if !current.exists() {
        self.create_new_db_file(self.last_used_file_nr)?;
        return Ok(self.last_used_file_nr);
    }

    // Roll over to a fresh file once the size limit is reached.
    if fs::metadata(&current)?.len() >= self.file_size as u64 {
        self.last_used_file_nr += 1;
        self.create_new_db_file(self.last_used_file_nr)?;
    }
    Ok(self.last_used_file_nr)
}
/// Stores data at the specified ID with history tracking.
///
/// Appends a new record (12-byte header + payload) at the end of the
/// active backend file and repoints the lookup entry for `id` at it.
/// The header embeds `old_location`, so prior versions of the record
/// remain reachable as a backwards-linked history chain.
///
/// # Errors
/// Returns `Error::InvalidOperation` if `data` is larger than
/// `u16::MAX` bytes (the header's size field is only 2 bytes), or any
/// underlying I/O error.
pub(crate) fn set_(
    &mut self,
    id: u32,
    old_location: Location,
    data: &[u8],
) -> Result<(), Error> {
    // Validate data size - maximum is u16::MAX (65535 bytes or ~64KB)
    if data.len() > u16::MAX as usize {
        return Err(Error::InvalidOperation(format!(
            "Data size exceeds maximum allowed size of {} bytes",
            u16::MAX
        )));
    }
    // Get file number to use
    let file_nr = self.get_file_nr()?;
    // Select the file
    self.db_file_select(file_nr)?;
    // Get current file position for lookup: records are append-only, so
    // the new record always starts at the current end of file.
    let file = self
        .file
        .as_mut()
        .ok_or_else(|| Error::Other("No file open".to_string()))?;
    file.seek(SeekFrom::End(0))?;
    let position = file.stream_position()? as u32;
    // Create new location
    let new_location = Location { file_nr, position };
    // Calculate CRC of data
    let crc = calculate_crc(data);
    // Create header
    let mut header = vec![0u8; HEADER_SIZE];
    // Write size (2 bytes, little-endian)
    let size = data.len() as u16; // Safe now because we've validated the size
    header[0] = (size & 0xFF) as u8;
    header[1] = ((size >> 8) & 0xFF) as u8;
    // Write CRC (4 bytes, little-endian)
    header[2] = (crc & 0xFF) as u8;
    header[3] = ((crc >> 8) & 0xFF) as u8;
    header[4] = ((crc >> 16) & 0xFF) as u8;
    header[5] = ((crc >> 24) & 0xFF) as u8;
    // Write previous location (6 bytes) — this is the history link.
    let prev_bytes = old_location.to_bytes();
    for (i, &byte) in prev_bytes.iter().enumerate().take(6) {
        header[6 + i] = byte;
    }
    // Write header
    file.write_all(&header)?;
    // Write actual data
    file.write_all(data)?;
    file.flush()?;
    // Update lookup table with new position
    self.lookup.set(id, new_location)?;
    Ok(())
}
/// Retrieves data at the specified location.
///
/// Reads the 12-byte record header (size, CRC32, previous-location
/// link), then the payload, and verifies the payload against the
/// stored CRC32 before returning it.
///
/// # Errors
/// Returns `Error::NotFound` if `location.position` is 0 (the
/// "no record" sentinel), `Error::DataCorruption` on a CRC mismatch,
/// or any underlying I/O error.
pub(crate) fn get_(&mut self, location: Location) -> Result<Vec<u8>, Error> {
    // Position 0 is never a valid record start (files begin with a
    // placeholder byte), so it signals a missing/deleted record.
    if location.position == 0 {
        return Err(Error::NotFound(format!(
            "Record not found, location: {:?}",
            location
        )));
    }
    // Select the file
    self.db_file_select(location.file_nr)?;
    let file = self
        .file
        .as_mut()
        .ok_or_else(|| Error::Other("No file open".to_string()))?;
    // Read header
    file.seek(SeekFrom::Start(location.position as u64))?;
    let mut header = vec![0u8; HEADER_SIZE];
    file.read_exact(&mut header)?;
    // Parse size (2 bytes, little-endian)
    let size = u16::from(header[0]) | (u16::from(header[1]) << 8);
    // Parse CRC (4 bytes, little-endian)
    let stored_crc = u32::from(header[2])
        | (u32::from(header[3]) << 8)
        | (u32::from(header[4]) << 16)
        | (u32::from(header[5]) << 24);
    // Read data
    let mut data = vec![0u8; size as usize];
    file.read_exact(&mut data)?;
    // Verify CRC to detect on-disk corruption before handing data back.
    let calculated_crc = calculate_crc(&data);
    if calculated_crc != stored_crc {
        return Err(Error::DataCorruption(
            "CRC mismatch: data corruption detected".to_string(),
        ));
    }
    Ok(data)
}
/// Retrieves the previous position for a record (for history tracking).
///
/// Each record header stores, at byte offset 6, the 6-byte location of
/// the previous version of the same record; this follows that link one
/// step backwards.
///
/// # Errors
/// Returns `Error::NotFound` if `location.position` is 0 (the
/// "no record" sentinel), or any underlying I/O error.
pub(crate) fn get_prev_pos_(&mut self, location: Location) -> Result<Location, Error> {
    if location.position == 0 {
        return Err(Error::NotFound("Record not found".to_string()));
    }
    // Select the file
    self.db_file_select(location.file_nr)?;
    let file = self
        .file
        .as_mut()
        .ok_or_else(|| Error::Other("No file open".to_string()))?;
    // Skip size and CRC (2 + 4 = 6 bytes) to land on the previous-
    // location field of the header.
    file.seek(SeekFrom::Start(location.position as u64 + 6))?;
    // Read previous location (6 bytes)
    let mut prev_bytes = vec![0u8; 6];
    file.read_exact(&mut prev_bytes)?;
    // Decode with keysize 6, the widest on-disk location encoding.
    Location::from_bytes(&prev_bytes, 6)
}
/// Marks the record at `location` as deleted by overwriting its header and
/// payload with zeros, then removes the lookup entry for `id`.
pub(crate) fn delete_(&mut self, id: u32, location: Location) -> Result<(), Error> {
    if location.position == 0 {
        return Err(Error::NotFound("Record not found".to_string()));
    }
    // Make sure the right backend file is open.
    self.db_file_select(location.file_nr)?;
    let file = self
        .file
        .as_mut()
        .ok_or_else(|| Error::Other("No file open".to_string()))?;
    // Read the payload size (first 2 header bytes, little endian) so we know
    // how much to blank out.
    file.seek(SeekFrom::Start(location.position as u64))?;
    let mut size_bytes = [0u8; 2];
    file.read_exact(&mut size_bytes)?;
    let size = u16::from_le_bytes(size_bytes);
    // Zero the whole record (header + payload) so the slot reads as deleted.
    let zeros = vec![0u8; HEADER_SIZE + size as usize];
    file.seek(SeekFrom::Start(location.position as u64))?;
    file.write_all(&zeros)?;
    // Remove the key from the lookup table.
    self.lookup.delete(id)?;
    Ok(())
}
/// Condenses the database by removing empty records and updating positions
///
/// NOTE(review): this is currently a skeleton. It discovers the data files
/// and streams through them, but the copy/update steps are unimplemented
/// (see the TODOs below), and the temp directory is removed before any
/// replacement happens — so calling this today is effectively a no-op.
pub fn condense(&mut self) -> Result<(), Error> {
    // Create a temporary directory
    let temp_path = self.path.join("temp");
    fs::create_dir_all(&temp_path)?;
    // Get all file numbers
    let mut file_numbers = Vec::new();
    for entry in fs::read_dir(&self.path)? {
        let entry = entry?;
        let path = entry.path();
        // Data files are named "<file_nr>.db"; anything else is skipped.
        if path.is_file() && path.extension().map_or(false, |ext| ext == "db") {
            if let Some(stem) = path.file_stem() {
                if let Ok(file_nr) = stem.to_string_lossy().parse::<u16>() {
                    file_numbers.push(file_nr);
                }
            }
        }
    }
    // Process each file
    for file_nr in file_numbers {
        let src_path = self.path.join(format!("{}.db", file_nr));
        let temp_file_path = temp_path.join(format!("{}.db", file_nr));
        // Create new file
        let mut temp_file = File::create(&temp_file_path)?;
        temp_file.write_all(&[0u8])?; // Initialize with a byte
        // Open source file
        let mut src_file = File::open(&src_path)?;
        // Read and process records
        let mut buffer = vec![0u8; 1024]; // Read in chunks
        let mut _position = 0;
        while let Ok(bytes_read) = src_file.read(&mut buffer) {
            if bytes_read == 0 {
                break;
            }
            // Process the chunk
            // This is a simplified version - in a real implementation,
            // you would need to handle records that span chunk boundaries
            _position += bytes_read;
        }
        // TODO: Implement proper record copying and position updating
        // This would involve:
        // 1. Reading each record from the source file
        // 2. If not deleted (all zeros), copy to temp file
        // 3. Update lookup table with new positions
    }
    // TODO: Replace original files with temp files
    // Clean up
    fs::remove_dir_all(&temp_path)?;
    Ok(())
}
}
/// Computes the CRC32 checksum of `data` via the crc32fast `Hasher`.
/// Used to detect on-disk payload corruption at read time.
fn calculate_crc(data: &[u8]) -> u32 {
    let mut crc = Hasher::new();
    crc.update(data);
    crc.finalize()
}
#[cfg(test)]
mod tests {
    use std::path::PathBuf;
    use crate::{OurDB, OurDBConfig, OurDBSetArgs};
    use std::env::temp_dir;
    use std::time::{SystemTime, UNIX_EPOCH};
    // Builds a unique temp-dir path from the current Unix timestamp.
    // NOTE(review): second resolution can collide when two tests start in
    // the same second — confirm whether that matters in CI.
    fn get_temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_backend_test_{}", timestamp))
    }
    // Round-trips a single record through set/get in key-value mode, then
    // destroys the database.
    #[test]
    fn test_backend_operations() {
        let temp_dir = get_temp_dir();
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: false,
            file_size: None,
            keysize: None,
            reset: None, // Don't reset existing database
        };
        let mut db = OurDB::new(config).unwrap();
        // Test set and get
        let test_data = b"Test data for backend operations";
        let id = 1;
        db.set(OurDBSetArgs {
            id: Some(id),
            data: test_data,
        })
        .unwrap();
        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, test_data);
        // Clean up
        db.destroy().unwrap();
    }
}

View File

@@ -1,41 +0,0 @@
use thiserror::Error;
/// Error types for OurDB operations
///
/// Derives `thiserror::Error`, so each variant formats through its
/// `#[error]` attribute and `Io` converts automatically from
/// `std::io::Error` via `#[from]`.
#[derive(Error, Debug)]
pub enum Error {
    /// IO errors from file operations
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// Data corruption errors (e.g. a CRC mismatch on read)
    #[error("Data corruption: {0}")]
    DataCorruption(String),
    /// Invalid operation errors (bad arguments or mode misuse)
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),
    /// Lookup table errors (out-of-range reads, full table, …)
    #[error("Lookup error: {0}")]
    LookupError(String),
    /// Record not found errors
    #[error("Record not found: {0}")]
    NotFound(String),
    /// Other errors
    #[error("Error: {0}")]
    Other(String),
}
impl From<String> for Error {
fn from(msg: String) -> Self {
Error::Other(msg)
}
}
impl From<&str> for Error {
fn from(msg: &str) -> Self {
Error::Other(msg.to_string())
}
}

View File

@@ -1,293 +0,0 @@
mod backend;
mod error;
mod location;
mod lookup;
pub use error::Error;
pub use location::Location;
pub use lookup::LookupTable;
use std::fs::File;
use std::path::PathBuf;
/// OurDB is a lightweight, efficient key-value database implementation that provides
/// data persistence with history tracking capabilities.
pub struct OurDB {
    /// Directory path for storage
    path: PathBuf,
    /// Whether to use auto-increment mode
    incremental_mode: bool,
    /// Maximum file size (default: 500MB); when exceeded a new data file is started
    file_size: u32,
    /// Lookup table for mapping keys to locations
    lookup: LookupTable,
    /// Currently open data file handle (None when closed)
    file: Option<File>,
    /// Current file number
    file_nr: u16,
    /// Last used file number
    last_used_file_nr: u16,
}
/// Configuration for creating a new OurDB instance
pub struct OurDBConfig {
    /// Directory path for storage
    pub path: PathBuf,
    /// Whether to use auto-increment mode
    pub incremental_mode: bool,
    /// Maximum file size (default: 500MB)
    pub file_size: Option<u32>,
    /// Lookup table key size (default: 4)
    /// - 2: For databases with < 65,536 records (single file)
    /// - 3: For databases with < 16,777,216 records (single file)
    /// - 4: For databases with < 4,294,967,296 records (single file)
    /// - 6: For large databases requiring multiple files (default)
    pub keysize: Option<u8>,
    /// Whether to reset the database if it exists (default: false)
    pub reset: Option<bool>,
}
/// Arguments for setting a value in OurDB
pub struct OurDBSetArgs<'a> {
    /// ID for the record (optional in incremental mode; required in key-value mode)
    pub id: Option<u32>,
    /// Data to store (borrowed; the database copies it to disk)
    pub data: &'a [u8],
}
impl OurDB {
    /// Creates a new OurDB instance with the given configuration
    ///
    /// Optionally wipes an existing database (`config.reset`), creates the
    /// storage directory and lookup sub-directory, builds the lookup table,
    /// and restores previously exported lookup metadata if present.
    pub fn new(config: OurDBConfig) -> Result<Self, Error> {
        // If reset is true and the path exists, remove it first
        if config.reset.unwrap_or(false) && config.path.exists() {
            std::fs::remove_dir_all(&config.path)?;
        }
        // Create directory if it doesn't exist
        std::fs::create_dir_all(&config.path)?;
        // Create lookup table
        let lookup_path = config.path.join("lookup");
        std::fs::create_dir_all(&lookup_path)?;
        let lookup_config = lookup::LookupConfig {
            size: 1000000, // Default size
            keysize: config.keysize.unwrap_or(4),
            lookuppath: lookup_path.to_string_lossy().to_string(),
            incremental_mode: config.incremental_mode,
        };
        let lookup = LookupTable::new(lookup_config)?;
        let mut db = OurDB {
            path: config.path,
            incremental_mode: config.incremental_mode,
            file_size: config.file_size.unwrap_or(500 * (1 << 20)), // 500MB default
            lookup,
            file: None,
            file_nr: 0,
            last_used_file_nr: 0,
        };
        // Load existing metadata if available
        db.load()?;
        Ok(db)
    }
    /// Sets a value in the database
    ///
    /// In incremental mode:
    /// - If ID is provided, it updates an existing record
    /// - If ID is not provided, it creates a new record with auto-generated ID
    ///
    /// In key-value mode:
    /// - ID must be provided
    ///
    /// # Errors
    /// Returns `Error::InvalidOperation` when an explicit ID is used for a
    /// non-existent record in incremental mode, or when no ID is supplied in
    /// key-value mode.
    pub fn set(&mut self, args: OurDBSetArgs) -> Result<u32, Error> {
        if self.incremental_mode {
            if let Some(id) = args.id {
                // This is an update: the record must already exist, because
                // explicit IDs cannot be used for inserts in incremental mode.
                let location = self.lookup.get(id)?;
                if location.position == 0 {
                    return Err(Error::InvalidOperation(
                        "Cannot set ID for insertions when incremental mode is enabled".to_string(),
                    ));
                }
                self.set_(id, location, args.data)?;
                Ok(id)
            } else {
                // This is an insert with an auto-generated ID
                let id = self.lookup.get_next_id()?;
                self.set_(id, Location::default(), args.data)?;
                Ok(id)
            }
        } else {
            // Key-value mode: the caller must always supply the ID
            let id = args.id.ok_or_else(|| {
                Error::InvalidOperation(
                    "ID must be provided when incremental is disabled".to_string(),
                )
            })?;
            let location = self.lookup.get(id)?;
            self.set_(id, location, args.data)?;
            Ok(id)
        }
    }
    /// Retrieves data stored at the specified key position
    pub fn get(&mut self, id: u32) -> Result<Vec<u8>, Error> {
        let location = self.lookup.get(id)?;
        self.get_(location)
    }
    /// Retrieves a list of previous values for the specified key
    ///
    /// The depth parameter controls how many historical values to retrieve (maximum).
    /// The newest value comes first in the returned vector.
    pub fn get_history(&mut self, id: u32, depth: u8) -> Result<Vec<Vec<u8>>, Error> {
        let mut result = Vec::new();
        let mut current_location = self.lookup.get(id)?;
        // Traverse the history chain up to specified depth
        for _ in 0..depth {
            // Get current value
            let data = self.get_(current_location)?;
            result.push(data);
            // Try to get previous location; stop at the end of the chain
            match self.get_prev_pos_(current_location) {
                Ok(location) => {
                    if location.position == 0 {
                        break;
                    }
                    current_location = location;
                }
                Err(_) => break,
            }
        }
        Ok(result)
    }
    /// Deletes the data at the specified key position
    pub fn delete(&mut self, id: u32) -> Result<(), Error> {
        let location = self.lookup.get(id)?;
        // delete_ zeroes the record on disk and already clears the lookup
        // entry itself, so the extra lookup.delete(id) the old code did here
        // was a redundant second write.
        self.delete_(id, location)?;
        Ok(())
    }
    /// Returns the next ID which will be used when storing in incremental mode
    ///
    /// # Errors
    /// Returns `Error::InvalidOperation` when incremental mode is disabled.
    pub fn get_next_id(&mut self) -> Result<u32, Error> {
        if !self.incremental_mode {
            return Err(Error::InvalidOperation(
                "Incremental mode is not enabled".to_string(),
            ));
        }
        self.lookup.get_next_id()
    }
    /// Closes the database, ensuring all data is saved
    pub fn close(&mut self) -> Result<(), Error> {
        self.save()?;
        self.close_();
        Ok(())
    }
    /// Destroys the database, removing all files
    ///
    /// Close errors are deliberately ignored — the directory is removed anyway.
    pub fn destroy(&mut self) -> Result<(), Error> {
        let _ = self.close();
        std::fs::remove_dir_all(&self.path)?;
        Ok(())
    }
    // Helper methods
    /// Path of the sparse lookup-table export used for persistence.
    fn lookup_dump_path(&self) -> PathBuf {
        self.path.join("lookup_dump.db")
    }
    /// Restores the lookup table from a previous sparse export, if one exists.
    fn load(&mut self) -> Result<(), Error> {
        let dump_path = self.lookup_dump_path();
        if dump_path.exists() {
            self.lookup.import_sparse(&dump_path.to_string_lossy())?;
        }
        Ok(())
    }
    /// Persists the lookup table as a sparse export.
    fn save(&mut self) -> Result<(), Error> {
        self.lookup
            .export_sparse(&self.lookup_dump_path().to_string_lossy())?;
        Ok(())
    }
    /// Drops the currently open data file handle.
    fn close_(&mut self) {
        self.file = None;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::env::temp_dir;
    use std::time::{SystemTime, UNIX_EPOCH};
    // Unique-per-second temp directory for an isolated database instance.
    fn get_temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_test_{}", timestamp))
    }
    // Exercises insert, read-back, update, history traversal and delete in
    // incremental mode.
    #[test]
    fn test_basic_operations() {
        let temp_dir = get_temp_dir();
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None, // Don't reset existing database
        };
        let mut db = OurDB::new(config).unwrap();
        // Test set and get
        let test_data = b"Hello, OurDB!";
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: test_data,
            })
            .unwrap();
        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, test_data);
        // Test update
        let updated_data = b"Updated data";
        db.set(OurDBSetArgs {
            id: Some(id),
            data: updated_data,
        })
        .unwrap();
        let retrieved = db.get(id).unwrap();
        assert_eq!(retrieved, updated_data);
        // Test history: newest value first
        let history = db.get_history(id, 2).unwrap();
        assert_eq!(history.len(), 2);
        assert_eq!(history[0], updated_data);
        assert_eq!(history[1], test_data);
        // Test delete
        db.delete(id).unwrap();
        assert!(db.get(id).is_err());
        // Clean up
        db.destroy().unwrap();
    }
}

View File

@@ -1,178 +0,0 @@
use crate::error::Error;
/// Location represents a physical position in a database file
///
/// It consists of a file number and a position within that file.
/// This allows OurDB to span multiple files for large datasets.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct Location {
    /// File number (0-65535)
    pub file_nr: u16,
    /// Position within the file (byte offset; 0 means "no record")
    pub position: u32,
}
impl Location {
/// Creates a new Location from bytes based on keysize
///
/// - keysize = 2: Only position (2 bytes), file_nr = 0
/// - keysize = 3: Only position (3 bytes), file_nr = 0
/// - keysize = 4: Only position (4 bytes), file_nr = 0
/// - keysize = 6: file_nr (2 bytes) + position (4 bytes)
pub fn from_bytes(bytes: &[u8], keysize: u8) -> Result<Self, Error> {
// Validate keysize
if ![2, 3, 4, 6].contains(&keysize) {
return Err(Error::InvalidOperation(format!(
"Invalid keysize: {}",
keysize
)));
}
// Create padded bytes
let mut padded = vec![0u8; keysize as usize];
if bytes.len() > keysize as usize {
return Err(Error::InvalidOperation(
"Input bytes exceed keysize".to_string(),
));
}
let start_idx = keysize as usize - bytes.len();
for (i, &b) in bytes.iter().enumerate() {
if i + start_idx < padded.len() {
padded[start_idx + i] = b;
}
}
let mut location = Location::default();
match keysize {
2 => {
// Only position, 2 bytes big endian
location.position = u32::from(padded[0]) << 8 | u32::from(padded[1]);
location.file_nr = 0;
// Verify limits
if location.position > 0xFFFF {
return Err(Error::InvalidOperation(
"Position exceeds max value for keysize=2 (max 65535)".to_string(),
));
}
}
3 => {
// Only position, 3 bytes big endian
location.position =
u32::from(padded[0]) << 16 | u32::from(padded[1]) << 8 | u32::from(padded[2]);
location.file_nr = 0;
// Verify limits
if location.position > 0xFFFFFF {
return Err(Error::InvalidOperation(
"Position exceeds max value for keysize=3 (max 16777215)".to_string(),
));
}
}
4 => {
// Only position, 4 bytes big endian
location.position = u32::from(padded[0]) << 24
| u32::from(padded[1]) << 16
| u32::from(padded[2]) << 8
| u32::from(padded[3]);
location.file_nr = 0;
}
6 => {
// 2 bytes file_nr + 4 bytes position, all big endian
location.file_nr = u16::from(padded[0]) << 8 | u16::from(padded[1]);
location.position = u32::from(padded[2]) << 24
| u32::from(padded[3]) << 16
| u32::from(padded[4]) << 8
| u32::from(padded[5]);
}
_ => unreachable!(),
}
Ok(location)
}
/// Converts the location to bytes (always 6 bytes)
///
/// Format: [file_nr (2 bytes)][position (4 bytes)]
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = Vec::with_capacity(6);
// Put file_nr first (2 bytes)
bytes.push((self.file_nr >> 8) as u8);
bytes.push(self.file_nr as u8);
// Put position next (4 bytes)
bytes.push((self.position >> 24) as u8);
bytes.push((self.position >> 16) as u8);
bytes.push((self.position >> 8) as u8);
bytes.push(self.position as u8);
bytes
}
/// Converts the location to a u64 value
///
/// The file_nr is stored in the most significant bits
pub fn to_u64(&self) -> u64 {
(u64::from(self.file_nr) << 32) | u64::from(self.position)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Each keysize decodes big-endian: the shortest forms carry only the
    // position; keysize 6 additionally carries the file number up front.
    #[test]
    fn test_location_from_bytes_keysize_2() {
        let bytes = vec![0x12, 0x34];
        let location = Location::from_bytes(&bytes, 2).unwrap();
        assert_eq!(location.file_nr, 0);
        assert_eq!(location.position, 0x1234);
    }
    #[test]
    fn test_location_from_bytes_keysize_3() {
        let bytes = vec![0x12, 0x34, 0x56];
        let location = Location::from_bytes(&bytes, 3).unwrap();
        assert_eq!(location.file_nr, 0);
        assert_eq!(location.position, 0x123456);
    }
    #[test]
    fn test_location_from_bytes_keysize_4() {
        let bytes = vec![0x12, 0x34, 0x56, 0x78];
        let location = Location::from_bytes(&bytes, 4).unwrap();
        assert_eq!(location.file_nr, 0);
        assert_eq!(location.position, 0x12345678);
    }
    #[test]
    fn test_location_from_bytes_keysize_6() {
        let bytes = vec![0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78];
        let location = Location::from_bytes(&bytes, 6).unwrap();
        assert_eq!(location.file_nr, 0xABCD);
        assert_eq!(location.position, 0x12345678);
    }
    // to_bytes always emits the full 6-byte form regardless of keysize.
    #[test]
    fn test_location_to_bytes() {
        let location = Location {
            file_nr: 0xABCD,
            position: 0x12345678,
        };
        let bytes = location.to_bytes();
        assert_eq!(bytes, vec![0xAB, 0xCD, 0x12, 0x34, 0x56, 0x78]);
    }
    // to_u64 packs file_nr into the high 32 bits, position into the low 32.
    #[test]
    fn test_location_to_u64() {
        let location = Location {
            file_nr: 0xABCD,
            position: 0x12345678,
        };
        let value = location.to_u64();
        assert_eq!(value, 0xABCD_0000_0000 | 0x12345678);
    }
}

View File

@@ -1,540 +0,0 @@
use std::fs::{self, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;
use crate::error::Error;
use crate::location::Location;
// Name of the file inside `lookuppath` that stores the lookup entries.
const DATA_FILE_NAME: &str = "data";
// Name of the sidecar file that persists the incremental-mode counter.
const INCREMENTAL_FILE_NAME: &str = ".inc";
/// Configuration for creating a new lookup table
pub struct LookupConfig {
    /// Size of the lookup table (number of entries)
    pub size: u32,
    /// Size of each entry in bytes (2-6)
    /// - 2: For databases with < 65,536 records (single file)
    /// - 3: For databases with < 16,777,216 records (single file)
    /// - 4: For databases with < 4,294,967,296 records (single file)
    /// - 6: For large databases requiring multiple files
    pub keysize: u8,
    /// Path for disk-based lookup (empty string selects in-memory mode)
    pub lookuppath: String,
    /// Whether to use incremental mode
    pub incremental_mode: bool,
}
/// Lookup table maps keys to physical locations in the backend storage
pub struct LookupTable {
    /// Size of each entry in bytes (2-6)
    keysize: u8,
    /// Path for disk-based lookup (empty string means the table is in memory)
    lookuppath: String,
    /// In-memory data for memory-based lookup (unused in disk mode)
    data: Vec<u8>,
    /// Next empty slot if incremental mode is enabled (None otherwise)
    incremental: Option<u32>,
}
impl LookupTable {
    /// Returns the keysize of this lookup table
    pub fn keysize(&self) -> u8 {
        self.keysize
    }
    /// Creates a new lookup table with the given configuration
    ///
    /// A non-empty `lookuppath` selects a disk-backed table (the data file is
    /// created zero-filled if missing); an empty path selects an in-memory
    /// table of `size * keysize` bytes.
    pub fn new(config: LookupConfig) -> Result<Self, Error> {
        // Verify keysize is valid
        if ![2, 3, 4, 6].contains(&config.keysize) {
            return Err(Error::InvalidOperation(format!(
                "Invalid keysize: {}",
                config.keysize
            )));
        }
        // Incremental mode tracks the next free slot; None otherwise.
        let incremental = if config.incremental_mode {
            Some(get_incremental_info(&config)?)
        } else {
            None
        };
        if !config.lookuppath.is_empty() {
            // Create directory if it doesn't exist
            fs::create_dir_all(&config.lookuppath)?;
            // For disk-based lookup, create empty file if it doesn't exist
            let data_path = Path::new(&config.lookuppath).join(DATA_FILE_NAME);
            if !data_path.exists() {
                let data = vec![0u8; config.size as usize * config.keysize as usize];
                fs::write(&data_path, &data)?;
            }
            Ok(LookupTable {
                data: Vec::new(),
                keysize: config.keysize,
                lookuppath: config.lookuppath,
                incremental,
            })
        } else {
            // For memory-based lookup
            Ok(LookupTable {
                data: vec![0u8; config.size as usize * config.keysize as usize],
                keysize: config.keysize,
                lookuppath: String::new(),
                incremental,
            })
        }
    }
    /// Gets a location for the given ID
    ///
    /// # Errors
    /// `Error::LookupError` when the entry is out of range or a read is
    /// incomplete.
    pub fn get(&self, id: u32) -> Result<Location, Error> {
        let entry_size = self.keysize as usize;
        if !self.lookuppath.is_empty() {
            // Disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            // Check file size first
            let file_size = fs::metadata(&data_path)?.len();
            let start_pos = id as u64 * entry_size as u64;
            if start_pos + entry_size as u64 > file_size {
                return Err(Error::LookupError(format!(
                    "Invalid read for get in lut: {}: {} would exceed file size {}",
                    self.lookuppath,
                    start_pos + entry_size as u64,
                    file_size
                )));
            }
            // Read directly from file
            let mut file = File::open(&data_path)?;
            file.seek(SeekFrom::Start(start_pos))?;
            let mut data = vec![0u8; entry_size];
            let bytes_read = file.read(&mut data)?;
            if bytes_read < entry_size {
                return Err(Error::LookupError(format!(
                    "Incomplete read: expected {} bytes but got {}",
                    entry_size, bytes_read
                )));
            }
            return Location::from_bytes(&data, self.keysize);
        }
        // Memory-based lookup
        // NOTE(review): `id * keysize` is a u32 multiply and could overflow
        // for very large ids — confirm the intended maximum table size.
        if (id * self.keysize as u32) as usize >= self.data.len() {
            return Err(Error::LookupError("Index out of bounds".to_string()));
        }
        let start = (id * self.keysize as u32) as usize;
        let end = start + entry_size;
        Location::from_bytes(&self.data[start..end], self.keysize)
    }
    /// Sets a location for the given ID
    ///
    /// In incremental mode, writing at the current counter advances it, and
    /// writing beyond the counter is rejected. The location is encoded to
    /// `keysize` bytes (big endian), with range checks for small keysizes.
    pub fn set(&mut self, id: u32, location: Location) -> Result<(), Error> {
        let entry_size = self.keysize as usize;
        // Handle incremental mode
        if let Some(incremental) = self.incremental {
            if id == incremental {
                self.increment_index()?;
            }
            if id > incremental {
                return Err(Error::InvalidOperation(
                    "Cannot set ID for insertions when incremental mode is enabled".to_string(),
                ));
            }
        }
        // Convert location to bytes based on keysize
        let location_bytes = match self.keysize {
            2 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=2".to_string(),
                    ));
                }
                if location.position > 0xFFFF {
                    return Err(Error::InvalidOperation(
                        "position exceeds max value for keysize=2 (max 65535)".to_string(),
                    ));
                }
                vec![(location.position >> 8) as u8, location.position as u8]
            }
            3 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=3".to_string(),
                    ));
                }
                if location.position > 0xFFFFFF {
                    return Err(Error::InvalidOperation(
                        "position exceeds max value for keysize=3 (max 16777215)".to_string(),
                    ));
                }
                vec![
                    (location.position >> 16) as u8,
                    (location.position >> 8) as u8,
                    location.position as u8,
                ]
            }
            4 => {
                if location.file_nr != 0 {
                    return Err(Error::InvalidOperation(
                        "file_nr must be 0 for keysize=4".to_string(),
                    ));
                }
                vec![
                    (location.position >> 24) as u8,
                    (location.position >> 16) as u8,
                    (location.position >> 8) as u8,
                    location.position as u8,
                ]
            }
            6 => {
                // Full location with file_nr and position
                location.to_bytes()
            }
            _ => {
                return Err(Error::InvalidOperation(format!(
                    "Invalid keysize: {}",
                    self.keysize
                )))
            }
        };
        if !self.lookuppath.is_empty() {
            // Disk-based lookup: write the entry in place
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = OpenOptions::new().write(true).open(data_path)?;
            let start_pos = id as u64 * entry_size as u64;
            file.seek(SeekFrom::Start(start_pos))?;
            file.write_all(&location_bytes)?;
        } else {
            // Memory-based lookup
            let start = (id * self.keysize as u32) as usize;
            if start + entry_size > self.data.len() {
                return Err(Error::LookupError("Index out of bounds".to_string()));
            }
            for (i, &byte) in location_bytes.iter().enumerate() {
                self.data[start + i] = byte;
            }
        }
        Ok(())
    }
    /// Deletes an entry for the given ID
    ///
    /// A deleted entry is simply an all-zero location.
    pub fn delete(&mut self, id: u32) -> Result<(), Error> {
        // Set location to all zeros
        self.set(id, Location::default())
    }
    /// Gets the next available ID in incremental mode
    ///
    /// # Errors
    /// `Error::InvalidOperation` outside incremental mode; `Error::LookupError`
    /// when the table has no free slots left.
    pub fn get_next_id(&self) -> Result<u32, Error> {
        let incremental = self.incremental.ok_or_else(|| {
            Error::InvalidOperation("Lookup table not in incremental mode".to_string())
        })?;
        let table_size = if !self.lookuppath.is_empty() {
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::metadata(data_path)?.len() as u32
        } else {
            self.data.len() as u32
        };
        if incremental * self.keysize as u32 >= table_size {
            return Err(Error::LookupError("Lookup table is full".to_string()));
        }
        Ok(incremental)
    }
    /// Increments the index in incremental mode
    ///
    /// In disk mode the new counter is also persisted to the `.inc` sidecar
    /// file so it survives restarts.
    pub fn increment_index(&mut self) -> Result<(), Error> {
        let mut incremental = self.incremental.ok_or_else(|| {
            Error::InvalidOperation("Lookup table not in incremental mode".to_string())
        })?;
        incremental += 1;
        self.incremental = Some(incremental);
        if !self.lookuppath.is_empty() {
            let inc_path = Path::new(&self.lookuppath).join(INCREMENTAL_FILE_NAME);
            fs::write(inc_path, incremental.to_string())?;
        }
        Ok(())
    }
    /// Exports the lookup table to a file
    pub fn export_data(&self, path: &str) -> Result<(), Error> {
        if !self.lookuppath.is_empty() {
            // For disk-based lookup, just copy the file
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::copy(data_path, path)?;
        } else {
            // For memory-based lookup, write the data to file
            fs::write(path, &self.data)?;
        }
        Ok(())
    }
    /// Imports the lookup table from a file
    pub fn import_data(&mut self, path: &str) -> Result<(), Error> {
        if !self.lookuppath.is_empty() {
            // For disk-based lookup, copy the file
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            fs::copy(path, data_path)?;
        } else {
            // For memory-based lookup, read the data from file
            self.data = fs::read(path)?;
        }
        Ok(())
    }
    /// Exports only non-zero entries to save space
    ///
    /// Output format per record: 4-byte big-endian ID followed by the raw
    /// `keysize`-byte entry.
    pub fn export_sparse(&self, path: &str) -> Result<(), Error> {
        let mut output = Vec::new();
        let entry_size = self.keysize as usize;
        if !self.lookuppath.is_empty() {
            // For disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = File::open(&data_path)?;
            let file_size = fs::metadata(&data_path)?.len();
            let max_entries = file_size / entry_size as u64;
            for id in 0..max_entries {
                file.seek(SeekFrom::Start(id * entry_size as u64))?;
                let mut buffer = vec![0u8; entry_size];
                let bytes_read = file.read(&mut buffer)?;
                if bytes_read < entry_size {
                    break;
                }
                // Check if entry is non-zero
                if buffer.iter().any(|&b| b != 0) {
                    // Write ID (4 bytes) + entry
                    output.extend_from_slice(&(id as u32).to_be_bytes());
                    output.extend_from_slice(&buffer);
                }
            }
        } else {
            // For memory-based lookup
            let max_entries = self.data.len() / entry_size;
            for id in 0..max_entries {
                let start = id * entry_size;
                let entry = &self.data[start..start + entry_size];
                // Check if entry is non-zero
                if entry.iter().any(|&b| b != 0) {
                    // Write ID (4 bytes) + entry
                    output.extend_from_slice(&(id as u32).to_be_bytes());
                    output.extend_from_slice(entry);
                }
            }
        }
        // Write the output to file
        fs::write(path, &output)?;
        Ok(())
    }
    /// Imports sparse data (only non-zero entries)
    ///
    /// Expects the format written by `export_sparse`; entries are replayed
    /// through `set`, so incremental-mode constraints apply.
    pub fn import_sparse(&mut self, path: &str) -> Result<(), Error> {
        let data = fs::read(path)?;
        let entry_size = self.keysize as usize;
        let record_size = 4 + entry_size; // ID (4 bytes) + entry
        if data.len() % record_size != 0 {
            return Err(Error::DataCorruption(
                "Invalid sparse data format: size mismatch".to_string(),
            ));
        }
        for chunk_start in (0..data.len()).step_by(record_size) {
            if chunk_start + record_size > data.len() {
                break;
            }
            // Extract ID (4 bytes)
            let id_bytes = &data[chunk_start..chunk_start + 4];
            let id = u32::from_be_bytes([id_bytes[0], id_bytes[1], id_bytes[2], id_bytes[3]]);
            // Extract entry
            let entry = &data[chunk_start + 4..chunk_start + record_size];
            // Create location from entry
            let location = Location::from_bytes(entry, self.keysize)?;
            // Set the entry
            self.set(id, location)?;
        }
        Ok(())
    }
    /// Finds the highest ID with a non-zero entry
    ///
    /// Scans the whole table linearly; returns 0 when every entry is empty.
    pub fn find_last_entry(&mut self) -> Result<u32, Error> {
        let mut last_id = 0u32;
        let entry_size = self.keysize as usize;
        if !self.lookuppath.is_empty() {
            // For disk-based lookup
            let data_path = Path::new(&self.lookuppath).join(DATA_FILE_NAME);
            let mut file = File::open(&data_path)?;
            let file_size = fs::metadata(&data_path)?.len();
            let mut buffer = vec![0u8; entry_size];
            let mut pos = 0u32;
            while (pos as u64 * entry_size as u64) < file_size {
                file.seek(SeekFrom::Start(pos as u64 * entry_size as u64))?;
                let bytes_read = file.read(&mut buffer)?;
                if bytes_read == 0 || bytes_read < entry_size {
                    break;
                }
                let location = Location::from_bytes(&buffer, self.keysize)?;
                if location.position != 0 || location.file_nr != 0 {
                    last_id = pos;
                }
                pos += 1;
            }
        } else {
            // For memory-based lookup
            for i in 0..(self.data.len() / entry_size) as u32 {
                if let Ok(location) = self.get(i) {
                    if location.position != 0 || location.file_nr != 0 {
                        last_id = i;
                    }
                }
            }
        }
        Ok(last_id)
    }
}
/// Helper function to get the incremental value
///
/// Returns the persisted next-id counter for incremental mode, creating or
/// repairing the `.inc` sidecar file as needed. Returns 0 when incremental
/// mode is off, and 1 for a fresh in-memory table.
fn get_incremental_info(config: &LookupConfig) -> Result<u32, Error> {
    if !config.incremental_mode {
        return Ok(0);
    }
    if config.lookuppath.is_empty() {
        // For memory-based lookup, start with 1
        return Ok(1);
    }
    let inc_path = Path::new(&config.lookuppath).join(INCREMENTAL_FILE_NAME);
    if !inc_path.exists() {
        // Create a separate file for storing the incremental value
        fs::write(&inc_path, "1")?;
    }
    let contents = fs::read_to_string(&inc_path)?;
    match contents.trim().parse::<u32>() {
        Ok(value) => Ok(value),
        Err(_) => {
            // If the stored value is invalid, reset it to 1
            fs::write(&inc_path, "1")?;
            Ok(1)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::env::temp_dir;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};
    // Unique-per-second temp directory for disk-based lookup tests.
    fn get_temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        temp_dir().join(format!("ourdb_lookup_test_{}", timestamp))
    }
    // In-memory table: set/get round trip plus incremental-counter behavior.
    #[test]
    fn test_memory_lookup() {
        let config = LookupConfig {
            size: 1000,
            keysize: 4,
            lookuppath: String::new(),
            incremental_mode: true,
        };
        let mut lookup = LookupTable::new(config).unwrap();
        // Test set and get
        let location = Location {
            file_nr: 0,
            position: 12345,
        };
        lookup.set(1, location).unwrap();
        let retrieved = lookup.get(1).unwrap();
        assert_eq!(retrieved.file_nr, location.file_nr);
        assert_eq!(retrieved.position, location.position);
        // Test incremental mode: setting at the counter advanced it to 2
        let next_id = lookup.get_next_id().unwrap();
        assert_eq!(next_id, 2);
        lookup.increment_index().unwrap();
        let next_id = lookup.get_next_id().unwrap();
        assert_eq!(next_id, 3);
    }
    // Disk-backed table: set/get round trip against the on-disk data file.
    #[test]
    fn test_disk_lookup() {
        let temp_dir = get_temp_dir();
        fs::create_dir_all(&temp_dir).unwrap();
        let config = LookupConfig {
            size: 1000,
            keysize: 4,
            lookuppath: temp_dir.to_string_lossy().to_string(),
            incremental_mode: true,
        };
        let mut lookup = LookupTable::new(config).unwrap();
        // Test set and get
        let location = Location {
            file_nr: 0,
            position: 12345,
        };
        lookup.set(1, location).unwrap();
        let retrieved = lookup.get(1).unwrap();
        assert_eq!(retrieved.file_nr, location.file_nr);
        assert_eq!(retrieved.position, location.position);
        // Clean up
        fs::remove_dir_all(temp_dir).unwrap();
    }
}

View File

@@ -1,369 +0,0 @@
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use rand;
use std::env::temp_dir;
use std::fs;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
// Helper function to create a unique temporary directory for tests.
// Uniqueness comes from nanosecond timestamp + a random u32 suffix; the
// directory is wiped and recreated so every test starts empty.
fn get_temp_dir() -> PathBuf {
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let suffix = rand::random::<u32>();
    let dir = temp_dir().join(format!("ourdb_test_{}_{}", nanos, suffix));
    // Ensure the directory exists and is empty
    if dir.exists() {
        std::fs::remove_dir_all(&dir).unwrap();
    }
    std::fs::create_dir_all(&dir).unwrap();
    dir
}
#[test]
fn test_basic_operations() {
let temp_dir = get_temp_dir();
// Create a new database with incremental mode
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Test set and get
let test_data = b"Hello, OurDB!";
let id = db
.set(OurDBSetArgs {
id: None,
data: test_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, test_data);
// Test update
let updated_data = b"Updated data";
db.set(OurDBSetArgs {
id: Some(id),
data: updated_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, updated_data);
// Test history
let history = db.get_history(id, 2).unwrap();
assert_eq!(history.len(), 2);
assert_eq!(history[0], updated_data);
assert_eq!(history[1], test_data);
// Test delete
db.delete(id).unwrap();
assert!(db.get(id).is_err());
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_key_value_mode() {
let temp_dir = get_temp_dir();
// Create a new database with key-value mode
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: false,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Test set with explicit ID
let test_data = b"Key-value data";
let id = 42;
db.set(OurDBSetArgs {
id: Some(id),
data: test_data,
})
.unwrap();
let retrieved = db.get(id).unwrap();
assert_eq!(retrieved, test_data);
// Verify next_id fails in key-value mode
assert!(db.get_next_id().is_err());
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_incremental_mode() {
let temp_dir = get_temp_dir();
// Create a new database with incremental mode
let config = OurDBConfig {
path: temp_dir.clone(),
incremental_mode: true,
file_size: None,
keysize: None,
reset: None,
};
let mut db = OurDB::new(config).unwrap();
// Test auto-increment IDs
let data1 = b"First record";
let id1 = db
.set(OurDBSetArgs {
id: None,
data: data1,
})
.unwrap();
let data2 = b"Second record";
let id2 = db
.set(OurDBSetArgs {
id: None,
data: data2,
})
.unwrap();
// IDs should be sequential
assert_eq!(id2, id1 + 1);
// Verify get_next_id works
let next_id = db.get_next_id().unwrap();
assert_eq!(next_id, id2 + 1);
// Clean up
db.destroy().unwrap();
}
#[test]
fn test_persistence() {
    let temp_dir = get_temp_dir();

    // First session: write one record, then close the database cleanly.
    {
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None,
        };
        let mut db = OurDB::new(config).unwrap();
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: b"Persistent data",
            })
            .unwrap();
        // Close explicitly so all state is flushed to disk.
        db.close().unwrap();
        // A fresh incremental database hands out 1 as its first ID.
        assert_eq!(id, 1);
    }

    // Second session: reopen the same path and confirm both the record
    // and the auto-increment counter survived the restart.
    {
        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: None,
            reset: None,
        };
        let mut db = OurDB::new(config).unwrap();
        assert_eq!(db.get(1).unwrap(), b"Persistent data");
        assert_eq!(db.get_next_id().unwrap(), 2);
        // Remove the on-disk state created by this test.
        db.destroy().unwrap();
    }
}
#[test]
fn test_different_keysizes() {
    // The database supports several index key widths; exercise the basic
    // set/get round-trip for each supported size.
    // Iterate by value via `.copied()` instead of `.iter()` + manual `*`
    // deref (the clippy-preferred form for Copy elements).
    for keysize in [2, 3, 4, 6].iter().copied() {
        let temp_dir = get_temp_dir();
        // Ensure the target directory exists before opening the database.
        std::fs::create_dir_all(&temp_dir).unwrap();

        let config = OurDBConfig {
            path: temp_dir.clone(),
            incremental_mode: true,
            file_size: None,
            keysize: Some(keysize),
            reset: None,
        };
        let mut db = OurDB::new(config).unwrap();

        // A record must round-trip regardless of the configured keysize.
        let test_data = b"Keysize test data";
        let id = db
            .set(OurDBSetArgs {
                id: None,
                data: test_data,
            })
            .unwrap();
        assert_eq!(db.get(id).unwrap(), test_data);

        // Remove the on-disk state created for this keysize.
        db.destroy().unwrap();
    }
}
#[test]
fn test_large_data() {
    let temp_dir = get_temp_dir();

    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // 60 KB payload: large, but still under the 64 KB per-record cap.
    let payload = vec![b'X'; 60 * 1024];
    let id = db
        .set(OurDBSetArgs {
            id: None,
            data: &payload,
        })
        .unwrap();

    // The stored bytes must come back complete and unchanged.
    let fetched = db.get(id).unwrap();
    assert_eq!(fetched.len(), payload.len());
    assert_eq!(fetched, payload);

    // Remove the on-disk state created by this test.
    db.destroy().unwrap();
}
#[test]
fn test_exceed_size_limit() {
    let temp_dir = get_temp_dir();

    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: None,
        keysize: None,
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // 70 KB exceeds the 64 KB per-record limit, so the write must fail.
    let oversized = vec![b'X'; 70 * 1024];
    let outcome = db.set(OurDBSetArgs {
        id: None,
        data: &oversized,
    });
    assert!(
        outcome.is_err(),
        "Expected an error when storing data larger than 64KB"
    );

    // Remove the on-disk state created by this test.
    db.destroy().unwrap();
}
#[test]
fn test_multiple_files() {
    let temp_dir = get_temp_dir();

    // A tiny file_size forces the backend to roll over to new .db files
    // after only a couple of records.
    let config = OurDBConfig {
        path: temp_dir.clone(),
        incremental_mode: true,
        file_size: Some(1024), // Very small file size (1KB)
        keysize: Some(6),      // 6-byte keysize for multiple files
        reset: None,
    };
    let mut db = OurDB::new(config).unwrap();

    // Write ten 500-byte records — far more than fits in one 1 KB file.
    let record_len = 500;
    let payload = vec![b'A'; record_len];
    let ids: Vec<_> = (0..10)
        .map(|_| {
            db.set(OurDBSetArgs {
                id: None,
                data: &payload,
            })
            .unwrap()
        })
        .collect();

    // Every record must still be retrievable in full.
    for id in ids {
        assert_eq!(db.get(id).unwrap().len(), record_len);
    }

    // Count the .db files on disk; rollover should have produced several.
    let db_file_count = fs::read_dir(&temp_dir)
        .unwrap()
        .filter_map(Result::ok)
        .filter(|entry| {
            let path = entry.path();
            path.is_file() && path.extension().map_or(false, |ext| ext == "db")
        })
        .count();
    assert!(
        db_file_count > 1,
        "Expected multiple database files, found {}",
        db_file_count
    );

    // Remove the on-disk state created by this test.
    db.destroy().unwrap();
}

View File

@@ -1,787 +0,0 @@
# RadixTree: Architecture for V to Rust Port
## 1. Overview
RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This document outlines the architecture for porting the RadixTree module from its original V implementation to Rust, maintaining all existing functionality while leveraging Rust's memory safety, performance, and ecosystem.
The Rust implementation will integrate with the existing OurDB Rust implementation for persistent storage.
```mermaid
graph TD
A[Client Code] --> B[RadixTree API]
B --> C[Node Management]
B --> D[Serialization]
B --> E[Tree Operations]
C --> F[OurDB]
D --> F
E --> C
```
## 2. Current Architecture (V Implementation)
The current V implementation of RadixTree consists of the following components:
### 2.1 Core Data Structures
#### Node
```v
struct Node {
mut:
key_segment string // The segment of the key stored at this node
value []u8 // Value stored at this node (empty if not a leaf)
children []NodeRef // References to child nodes
is_leaf bool // Whether this node is a leaf node
}
```
#### NodeRef
```v
struct NodeRef {
mut:
key_part string // The key segment for this child
node_id u32 // Database ID of the node
}
```
#### RadixTree
```v
@[heap]
pub struct RadixTree {
mut:
db &ourdb.OurDB // Database for persistent storage
root_id u32 // Database ID of the root node
}
```
### 2.2 Key Operations
1. **new()**: Creates a new radix tree with a specified database path
2. **set(key, value)**: Sets a key-value pair in the tree
3. **get(key)**: Retrieves a value by key
4. **update(prefix, new_value)**: Updates the value at a given key prefix
5. **delete(key)**: Removes a key from the tree
6. **list(prefix)**: Lists all keys with a given prefix
7. **getall(prefix)**: Gets all values for keys with a given prefix
### 2.3 Serialization
The V implementation uses a custom binary serialization format for nodes:
- Version byte (1 byte)
- Key segment (string)
- Value length (2 bytes) followed by value bytes
- Children count (2 bytes) followed by children
- Is leaf flag (1 byte)
Each child is serialized as:
- Key part (string)
- Node ID (4 bytes)
### 2.4 Integration with OurDB
The RadixTree uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal
## 3. Proposed Rust Architecture
The Rust implementation will maintain the same overall architecture while leveraging Rust's type system, ownership model, and error handling.
### 3.1 Core Data Structures
#### Node
```rust
pub struct Node {
key_segment: String,
value: Vec<u8>,
children: Vec<NodeRef>,
is_leaf: bool,
}
```
#### NodeRef
```rust
pub struct NodeRef {
key_part: String,
node_id: u32,
}
```
#### RadixTree
```rust
pub struct RadixTree {
db: ourdb::OurDB,
root_id: u32,
}
```
### 3.2 Public API
```rust
impl RadixTree {
/// Creates a new radix tree with the specified database path
pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
// Implementation
}
/// Sets a key-value pair in the tree
pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
// Implementation
}
/// Gets a value by key from the tree
pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
// Implementation
}
/// Updates the value at a given key prefix
pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
// Implementation
}
/// Deletes a key from the tree
pub fn delete(&mut self, key: &str) -> Result<(), Error> {
// Implementation
}
/// Lists all keys with a given prefix
pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
// Implementation
}
/// Gets all values for keys with a given prefix
pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
// Implementation
}
}
```
### 3.3 Error Handling
```rust
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("OurDB error: {0}")]
OurDB(#[from] ourdb::Error),
#[error("Key not found: {0}")]
KeyNotFound(String),
#[error("Prefix not found: {0}")]
PrefixNotFound(String),
#[error("Serialization error: {0}")]
Serialization(String),
#[error("Deserialization error: {0}")]
Deserialization(String),
#[error("Invalid operation: {0}")]
InvalidOperation(String),
}
```
### 3.4 Serialization
The Rust implementation will maintain the same binary serialization format for compatibility:
```rust
const VERSION: u8 = 1;
impl Node {
/// Serializes a node to bytes for storage
fn serialize(&self) -> Vec<u8> {
// Implementation
}
/// Deserializes bytes to a node
fn deserialize(data: &[u8]) -> Result<Self, Error> {
// Implementation
}
}
```
### 3.5 Integration with OurDB
The Rust implementation will use the existing OurDB Rust implementation:
```rust
impl RadixTree {
fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
let data = self.db.get(node_id)?;
Node::deserialize(&data)
}
fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
let data = node.serialize();
let args = ourdb::OurDBSetArgs {
id: node_id,
data: &data,
};
Ok(self.db.set(args)?)
}
}
```
## 4. Implementation Strategy
### 4.1 Phase 1: Core Data Structures and Serialization
1. Implement the `Node` and `NodeRef` structs
2. Implement serialization and deserialization functions
3. Implement the `Error` enum for error handling
### 4.2 Phase 2: Basic Tree Operations
1. Implement the `RadixTree` struct with OurDB integration
2. Implement the `new()` function for creating a new tree
3. Implement the `get()` and `set()` functions for basic operations
### 4.3 Phase 3: Advanced Tree Operations
1. Implement the `delete()` function for removing keys
2. Implement the `update()` function for updating values
3. Implement the `list()` and `getall()` functions for prefix operations
### 4.4 Phase 4: Testing and Optimization
1. Port existing tests from V to Rust
2. Add new tests for Rust-specific functionality
3. Benchmark and optimize performance
4. Ensure compatibility with existing RadixTree data
## 5. Implementation Considerations
### 5.1 Memory Management
Leverage Rust's ownership model for safe and efficient memory management:
- Use `String` and `Vec<u8>` for data buffers instead of raw pointers
- Use references and borrows to avoid unnecessary copying
- Implement proper RAII for resource management
### 5.2 Error Handling
Use Rust's `Result` type for comprehensive error handling:
- Define custom error types for RadixTree-specific errors
- Propagate errors using the `?` operator
- Provide detailed error messages
- Implement proper error conversion using the `From` trait
### 5.3 Performance Optimizations
Identify opportunities for performance improvements:
- Use efficient string operations for prefix matching
- Minimize database operations by caching nodes when appropriate
- Use iterators for efficient traversal
- Consider using `Cow<str>` for string operations to avoid unnecessary cloning
### 5.4 Compatibility
Ensure compatibility with the V implementation:
- Maintain the same serialization format
- Ensure identical behavior for all operations
- Support reading existing RadixTree data
## 6. Testing Strategy
### 6.1 Unit Tests
Write comprehensive unit tests for each component:
- Test `Node` serialization/deserialization
- Test string operations (common prefix, etc.)
- Test error handling
### 6.2 Integration Tests
Write integration tests for the complete system:
- Test basic CRUD operations
- Test prefix operations
- Test edge cases (empty keys, very long keys, etc.)
- Test with large datasets
### 6.3 Compatibility Tests
Ensure compatibility with existing RadixTree data:
- Test reading existing V-created RadixTree data
- Test writing data that can be read by the V implementation
### 6.4 Performance Tests
Benchmark performance against the V implementation:
- Measure throughput for set/get operations
- Measure latency for different operations
- Test with different tree sizes and key distributions
## 7. Project Structure
```
radixtree/
├── Cargo.toml
├── src/
│ ├── lib.rs # Public API and re-exports
│ ├── node.rs # Node and NodeRef implementations
│ ├── serialize.rs # Serialization and deserialization
│ ├── error.rs # Error types
│ └── operations.rs # Tree operations implementation
├── tests/
│ ├── basic_test.rs # Basic operations tests
│ ├── prefix_test.rs # Prefix operations tests
│ └── edge_cases.rs # Edge case tests
└── examples/
├── basic.rs # Basic usage example
├── prefix.rs # Prefix operations example
└── performance.rs # Performance benchmark
```
## 8. Dependencies
The Rust implementation will use the following dependencies:
- `ourdb` for persistent storage
- `thiserror` for error handling
- `log` for logging
- `criterion` for benchmarking (dev dependency)
## 9. Compatibility Considerations
To ensure compatibility with the V implementation:
1. Maintain the same serialization format for nodes
2. Ensure identical behavior for all operations
3. Support reading existing RadixTree data
4. Maintain the same performance characteristics
## 10. Future Extensions
Potential future extensions to consider:
1. Async API for non-blocking operations
2. Iterator interface for efficient traversal
3. Batch operations for improved performance
4. Custom serialization formats for specific use cases
5. Compression support for values
6. Concurrency support for parallel operations
## 11. Conclusion
This architecture provides a roadmap for porting RadixTree from V to Rust while maintaining compatibility and leveraging Rust's strengths. The implementation will follow a phased approach, starting with core data structures and gradually building up to the complete system.
The Rust implementation aims to be:
- **Safe**: Leveraging Rust's ownership model for memory safety
- **Fast**: Maintaining or improving performance compared to V
- **Compatible**: Working with existing RadixTree data
- **Extensible**: Providing a foundation for future enhancements
- **Well-tested**: Including comprehensive test coverage
## 12. Implementation Files
### 12.1 Cargo.toml
```toml
[package]
name = "radixtree"
version = "0.1.0"
edition = "2021"
description = "A persistent radix tree implementation using OurDB for storage"
authors = ["OurWorld Team"]
[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"
log = "0.4.17"
[dev-dependencies]
criterion = "0.5.1"
[[bench]]
name = "radixtree_benchmarks"
harness = false
[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"
[[example]]
name = "prefix_operations"
path = "examples/prefix_operations.rs"
```
### 12.2 src/lib.rs
```rust
//! RadixTree is a space-optimized tree data structure that enables efficient string key operations
//! with persistent storage using OurDB as a backend.
//!
//! This implementation provides a persistent radix tree that can be used for efficient
//! prefix-based key operations, such as auto-complete, routing tables, and more.
mod error;
mod node;
mod operations;
mod serialize;
pub use error::Error;
pub use node::{Node, NodeRef};
use ourdb::{OurDB, OurDBConfig, OurDBSetArgs};
use std::path::PathBuf;
/// RadixTree represents a radix tree data structure with persistent storage.
pub struct RadixTree {
db: OurDB,
root_id: u32,
}
impl RadixTree {
/// Creates a new radix tree with the specified database path.
///
/// # Arguments
///
/// * `path` - The path to the database directory
/// * `reset` - Whether to reset the database if it exists
///
/// # Returns
///
/// A new `RadixTree` instance
///
/// # Errors
///
/// Returns an error if the database cannot be created or opened
pub fn new(path: &str, reset: bool) -> Result<Self, Error> {
// Implementation will go here
unimplemented!()
}
/// Sets a key-value pair in the tree.
///
/// # Arguments
///
/// * `key` - The key to set
/// * `value` - The value to set
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn set(&mut self, key: &str, value: Vec<u8>) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Gets a value by key from the tree.
///
/// # Arguments
///
/// * `key` - The key to get
///
/// # Returns
///
/// The value associated with the key
///
/// # Errors
///
/// Returns an error if the key is not found or the operation fails
pub fn get(&mut self, key: &str) -> Result<Vec<u8>, Error> {
// Implementation will go here
unimplemented!()
}
/// Updates the value at a given key prefix.
///
/// # Arguments
///
/// * `prefix` - The key prefix to update
/// * `new_value` - The new value to set
///
/// # Errors
///
/// Returns an error if the prefix is not found or the operation fails
pub fn update(&mut self, prefix: &str, new_value: Vec<u8>) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Deletes a key from the tree.
///
/// # Arguments
///
/// * `key` - The key to delete
///
/// # Errors
///
/// Returns an error if the key is not found or the operation fails
pub fn delete(&mut self, key: &str) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Lists all keys with a given prefix.
///
/// # Arguments
///
/// * `prefix` - The prefix to search for
///
/// # Returns
///
/// A list of keys that start with the given prefix
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn list(&mut self, prefix: &str) -> Result<Vec<String>, Error> {
// Implementation will go here
unimplemented!()
}
/// Gets all values for keys with a given prefix.
///
/// # Arguments
///
/// * `prefix` - The prefix to search for
///
/// # Returns
///
/// A list of values for keys that start with the given prefix
///
/// # Errors
///
/// Returns an error if the operation fails
pub fn getall(&mut self, prefix: &str) -> Result<Vec<Vec<u8>>, Error> {
// Implementation will go here
unimplemented!()
}
}
```
### 12.3 src/error.rs
```rust
//! Error types for the RadixTree module.
use thiserror::Error;
/// Error type for RadixTree operations.
#[derive(Debug, Error)]
pub enum Error {
/// Error from OurDB operations.
#[error("OurDB error: {0}")]
OurDB(#[from] ourdb::Error),
/// Error when a key is not found.
#[error("Key not found: {0}")]
KeyNotFound(String),
/// Error when a prefix is not found.
#[error("Prefix not found: {0}")]
PrefixNotFound(String),
/// Error during serialization.
#[error("Serialization error: {0}")]
Serialization(String),
/// Error during deserialization.
#[error("Deserialization error: {0}")]
Deserialization(String),
/// Error for invalid operations.
#[error("Invalid operation: {0}")]
InvalidOperation(String),
}
```
### 12.4 src/node.rs
```rust
//! Node types for the RadixTree module.
/// Represents a node in the radix tree.
pub struct Node {
/// The segment of the key stored at this node.
pub key_segment: String,
/// Value stored at this node (empty if not a leaf).
pub value: Vec<u8>,
/// References to child nodes.
pub children: Vec<NodeRef>,
/// Whether this node is a leaf node.
pub is_leaf: bool,
}
/// Reference to a node in the database.
pub struct NodeRef {
/// The key segment for this child.
pub key_part: String,
/// Database ID of the node.
pub node_id: u32,
}
impl Node {
/// Creates a new node.
pub fn new(key_segment: String, value: Vec<u8>, is_leaf: bool) -> Self {
Self {
key_segment,
value,
children: Vec::new(),
is_leaf,
}
}
/// Creates a new root node.
pub fn new_root() -> Self {
Self {
key_segment: String::new(),
value: Vec::new(),
children: Vec::new(),
is_leaf: false,
}
}
}
impl NodeRef {
/// Creates a new node reference.
pub fn new(key_part: String, node_id: u32) -> Self {
Self {
key_part,
node_id,
}
}
}
```
### 12.5 src/serialize.rs
```rust
//! Serialization and deserialization for RadixTree nodes.
use crate::error::Error;
use crate::node::{Node, NodeRef};
/// Current binary format version.
const VERSION: u8 = 1;
impl Node {
/// Serializes a node to bytes for storage.
pub fn serialize(&self) -> Vec<u8> {
// Implementation will go here
unimplemented!()
}
/// Deserializes bytes to a node.
pub fn deserialize(data: &[u8]) -> Result<Self, Error> {
// Implementation will go here
unimplemented!()
}
}
```
### 12.6 src/operations.rs
```rust
//! Implementation of RadixTree operations.
use crate::error::Error;
use crate::node::{Node, NodeRef};
use crate::RadixTree;
impl RadixTree {
/// Helper function to get a node from the database.
pub(crate) fn get_node(&mut self, node_id: u32) -> Result<Node, Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to save a node to the database.
pub(crate) fn save_node(&mut self, node_id: Option<u32>, node: &Node) -> Result<u32, Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to find all keys with a given prefix.
fn find_keys_with_prefix(
&mut self,
node_id: u32,
current_path: &str,
prefix: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to recursively collect all keys under a node.
fn collect_all_keys(
&mut self,
node_id: u32,
current_path: &str,
result: &mut Vec<String>,
) -> Result<(), Error> {
// Implementation will go here
unimplemented!()
}
/// Helper function to get the common prefix of two strings.
fn get_common_prefix(a: &str, b: &str) -> String {
// Implementation will go here
unimplemented!()
}
}
```
### 12.7 examples/basic_usage.rs
```rust
//! Basic usage example for RadixTree.
use radixtree::RadixTree;
fn main() -> Result<(), radixtree::Error> {
// Create a temporary directory for the database
let db_path = std::env::temp_dir().join("radixtree_example");
std::fs::create_dir_all(&db_path).map_err(|e| radixtree::Error::InvalidOperation(e.to_string()))?;
println!("Creating radix tree at: {}", db_path.display());
// Create a new radix tree
let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;
// Store some data
tree.set("hello", b"world".to_vec())?;
tree.set("help", b"me".to_vec())?;
tree.set("helicopter", b"flying".to_vec())?;
// Retrieve and print the data
let value = tree.get("hello")?;
println!("hello: {}", String::from_utf8_lossy(&value));
// List keys with prefix
let keys = tree.list("hel")?;
println!("Keys with prefix 'hel': {:?}", keys);
// Get all values with prefix
let values = tree.getall("hel")?;
println!("Values with prefix 'hel':");
for (i, value) in values.iter().enumerate() {
println!(" {}: {}", i, String::from_utf8_lossy(value));
}
// Delete a key
tree.delete("help")?;
println!("Deleted 'help'");
// Verify deletion
let keys_after = tree.list("hel")?;
println!("Keys with prefix 'hel' after deletion: {:?}", keys_after);
// Clean up (optional)
if std::env::var("KEEP_DB").is_err() {
std::fs::remove_dir_all(&db_path).map_err(|e| radixtree::Error::InvalidOperation(e.to_string()))?;
println!("Cleaned up database directory");
} else {
println!("Database kept at: {}", db_path.display());
}
Ok(())
}
```

815
radixtree/Cargo.lock generated
View File

@@ -1,815 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstyle"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bitflags"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944"
dependencies = [
"clap_builder",
]
[[package]]
name = "clap_builder"
version = "4.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9"
dependencies = [
"anstyle",
"clap_lex",
]
[[package]]
name = "clap_lex"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "criterion"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"is-terminal",
"itertools",
"num-traits",
"once_cell",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "errno"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "getrandom"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "hermit-abi"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e"
[[package]]
name = "is-terminal"
version = "0.4.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
"hermit-abi",
"libc",
"windows-sys",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
[[package]]
name = "linux-raw-sys"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"log",
"rand",
"thiserror",
]
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "radixtree"
version = "0.1.0"
dependencies = [
"criterion",
"log",
"ourdb",
"tempfile",
"thiserror",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.15",
]
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rustix"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "rustversion"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "syn"
version = "2.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
dependencies = [
"fastrand",
"getrandom 0.3.2",
"once_cell",
"rustix",
"windows-sys",
]
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View File

@@ -1,27 +0,0 @@
[package]
name = "radixtree"
version = "0.1.0"
edition = "2021"
description = "A persistent radix tree implementation using OurDB for storage"
authors = ["OurWorld Team"]
[dependencies]
ourdb = { path = "../ourdb" }
thiserror = "1.0.40"
log = "0.4.17"
[dev-dependencies]
criterion = "0.5.1"
tempfile = "3.8.0"
[[bench]]
name = "radixtree_benchmarks"
harness = false
[[example]]
name = "basic_usage"
path = "examples/basic_usage.rs"
[[example]]
name = "prefix_operations"
path = "examples/prefix_operations.rs"

View File

@@ -1,265 +0,0 @@
# Migration Guide: V to Rust RadixTree
This document provides guidance for migrating from the V implementation of RadixTree to the Rust implementation.
## API Changes
The Rust implementation maintains API compatibility with the V implementation, but with some idiomatic Rust changes:
### V API
```v
// Create a new radix tree
mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true)!
// Set a key-value pair
rt.set('test', 'value1'.bytes())!
// Get a value by key
value := rt.get('test')!
// Update a value at a prefix
rt.update('prefix', 'new_value'.bytes())!
// Delete a key
rt.delete('test')!
// List keys with a prefix
keys := rt.list('prefix')!
// Get all values with a prefix
values := rt.getall('prefix')!
```
### Rust API
```rust
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radixtree_test", true)?;
// Set a key-value pair
tree.set("test", b"value1".to_vec())?;
// Get a value by key
let value = tree.get("test")?;
// Update a value at a prefix
tree.update("prefix", b"new_value".to_vec())?;
// Delete a key
tree.delete("test")?;
// List keys with a prefix
let keys = tree.list("prefix")?;
// Get all values with a prefix
let values = tree.getall("prefix")?;
```
## Key Differences
1. **Error Handling**: The Rust implementation uses Rust's `Result` type for error handling, while the V implementation uses V's `!` operator.
2. **String Handling**: The Rust implementation uses Rust's `&str` for string parameters and `String` for string return values, while the V implementation uses V's `string` type.
3. **Binary Data**: The Rust implementation uses Rust's `Vec<u8>` for binary data, while the V implementation uses V's `[]u8` type.
4. **Constructor**: The Rust implementation uses a constructor function with separate parameters, while the V implementation uses a struct with named parameters.
5. **Ownership**: The Rust implementation follows Rust's ownership model, requiring mutable references for methods that modify the tree.
## Data Compatibility
The Rust implementation maintains data compatibility with the V implementation:
- The same serialization format is used for nodes
- The same OurDB storage format is used
- Existing RadixTree data created with the V implementation can be read by the Rust implementation
## Migration Steps
1. **Update Dependencies**: Replace the V RadixTree dependency with the Rust RadixTree dependency in your project.
2. **Update Import Statements**: Replace V import statements with Rust use statements.
```v
// V
import freeflowuniverse.herolib.data.radixtree
```
```rust
// Rust
use radixtree::RadixTree;
```
3. **Update Constructor Calls**: Replace V constructor calls with Rust constructor calls.
```v
// V
mut rt := radixtree.new(path: '/path/to/db', reset: false)!
```
```rust
// Rust
let mut tree = RadixTree::new("/path/to/db", false)?;
```
4. **Update Method Calls**: Replace V method calls with Rust method calls.
```v
// V
rt.set('key', 'value'.bytes())!
```
```rust
// Rust
tree.set("key", b"value".to_vec())?;
```
5. **Update Error Handling**: Replace V error handling with Rust error handling.
```v
// V
if value := rt.get('key') {
println('Found: ${value.bytestr()}')
} else {
println('Error: ${err}')
}
```
```rust
// Rust
match tree.get("key") {
Ok(value) => println!("Found: {}", String::from_utf8_lossy(&value)),
Err(e) => println!("Error: {}", e),
}
```
6. **Update String Conversions**: Replace V string conversions with Rust string conversions.
```v
// V
value.bytestr() // Convert []u8 to string
```
```rust
// Rust
String::from_utf8_lossy(&value) // Convert Vec<u8> to string
```
## Example Migration
### V Code
```v
module main
import freeflowuniverse.herolib.data.radixtree
fn main() {
mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true) or {
println('Error creating RadixTree: ${err}')
return
}
rt.set('hello', 'world'.bytes()) or {
println('Error setting key: ${err}')
return
}
rt.set('help', 'me'.bytes()) or {
println('Error setting key: ${err}')
return
}
if value := rt.get('hello') {
println('hello: ${value.bytestr()}')
} else {
println('Error getting key: ${err}')
return
}
keys := rt.list('hel') or {
println('Error listing keys: ${err}')
return
}
println('Keys with prefix "hel": ${keys}')
values := rt.getall('hel') or {
println('Error getting all values: ${err}')
return
}
println('Values with prefix "hel":')
for i, value in values {
println(' ${i}: ${value.bytestr()}')
}
rt.delete('help') or {
println('Error deleting key: ${err}')
return
}
println('Deleted "help"')
}
```
### Rust Code
```rust
use radixtree::RadixTree;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut tree = RadixTree::new("/tmp/radixtree_test", true)
.map_err(|e| format!("Error creating RadixTree: {}", e))?;
tree.set("hello", b"world".to_vec())
.map_err(|e| format!("Error setting key: {}", e))?;
tree.set("help", b"me".to_vec())
.map_err(|e| format!("Error setting key: {}", e))?;
let value = tree.get("hello")
.map_err(|e| format!("Error getting key: {}", e))?;
println!("hello: {}", String::from_utf8_lossy(&value));
let keys = tree.list("hel")
.map_err(|e| format!("Error listing keys: {}", e))?;
println!("Keys with prefix \"hel\": {:?}", keys);
let values = tree.getall("hel")
.map_err(|e| format!("Error getting all values: {}", e))?;
println!("Values with prefix \"hel\":");
for (i, value) in values.iter().enumerate() {
println!(" {}: {}", i, String::from_utf8_lossy(value));
}
tree.delete("help")
.map_err(|e| format!("Error deleting key: {}", e))?;
println!("Deleted \"help\"");
Ok(())
}
```
## Performance Considerations
The Rust implementation should provide similar or better performance compared to the V implementation. However, there are some considerations:
1. **Memory Usage**: The Rust implementation may have different memory usage patterns due to Rust's ownership model.
2. **Error Handling**: The Rust implementation uses Rust's `Result` type, which may have different performance characteristics compared to V's error handling.
3. **String Handling**: The Rust implementation uses Rust's string types, which may have different performance characteristics compared to V's string types.
## Troubleshooting
If you encounter issues during migration, check the following:
1. **Data Compatibility**: Ensure that the data format is compatible between the V and Rust implementations.
2. **API Usage**: Ensure that you're using the correct API for the Rust implementation.
3. **Error Handling**: Ensure that you're handling errors correctly in the Rust implementation.
4. **String Encoding**: Ensure that string encoding is consistent between the V and Rust implementations.
If you encounter any issues that are not covered in this guide, please report them to the project maintainers.

View File

@@ -1,189 +0,0 @@
# RadixTree
A persistent radix tree implementation in Rust using OurDB for storage.
## Overview
RadixTree is a space-optimized tree data structure that enables efficient string key operations with persistent storage. This implementation provides a persistent radix tree that can be used for efficient prefix-based key operations, such as auto-complete, routing tables, and more.
A radix tree (also known as a Patricia trie or radix trie) is a space-optimized tree data structure that enables efficient string key operations. Unlike a standard trie where each node represents a single character, a radix tree compresses paths by allowing nodes to represent multiple characters (key segments).
Key characteristics:
- Each node stores a segment of a key (not just a single character)
- Nodes can have multiple children, each representing a different branch
- Leaf nodes contain the actual values
- Optimizes storage by compressing common prefixes
## Features
- Efficient prefix-based key operations
- Persistent storage using OurDB backend
- Memory-efficient storage of strings with common prefixes
- Support for binary values
- Thread-safe operations through OurDB
## Usage
Add the dependency to your `Cargo.toml`:
```toml
[dependencies]
radixtree = { path = "../radixtree" }
```
### Basic Example
```rust
use radixtree::RadixTree;
fn main() -> Result<(), radixtree::Error> {
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radix", false)?;
// Set key-value pairs
tree.set("hello", b"world".to_vec())?;
tree.set("help", b"me".to_vec())?;
// Get values by key
let value = tree.get("hello")?;
println!("hello: {}", String::from_utf8_lossy(&value)); // Prints: world
// List keys by prefix
let keys = tree.list("hel")?; // Returns ["hello", "help"]
println!("Keys with prefix 'hel': {:?}", keys);
// Get all values by prefix
let values = tree.getall("hel")?; // Returns [b"world", b"me"]
// Delete keys
tree.delete("help")?;
Ok(())
}
```
## API
### Creating a RadixTree
```rust
// Create a new radix tree
let mut tree = RadixTree::new("/tmp/radix", false)?;
// Create a new radix tree and reset if it exists
let mut tree = RadixTree::new("/tmp/radix", true)?;
```
### Setting Values
```rust
// Set a key-value pair
tree.set("key", b"value".to_vec())?;
```
### Getting Values
```rust
// Get a value by key
let value = tree.get("key")?;
```
### Updating Values
```rust
// Update a value at a given prefix
tree.update("prefix", b"new_value".to_vec())?;
```
### Deleting Keys
```rust
// Delete a key
tree.delete("key")?;
```
### Listing Keys by Prefix
```rust
// List all keys with a given prefix
let keys = tree.list("prefix")?;
```
### Getting All Values by Prefix
```rust
// Get all values for keys with a given prefix
let values = tree.getall("prefix")?;
```
## Performance Characteristics
- Search: O(k) where k is the key length
- Insert: O(k) for new keys, may require node splitting
- Delete: O(k) plus potential node cleanup
- Space: O(n) where n is the total length of all keys
## Use Cases
RadixTree is particularly useful for:
- Prefix-based searching
- IP routing tables
- Dictionary implementations
- Auto-complete systems
- File system paths
- Any application requiring efficient string key operations with persistence
## Implementation Details
The RadixTree implementation uses OurDB for persistent storage:
- Each node is serialized and stored as a record in OurDB
- Node references use OurDB record IDs
- The tree maintains a root node ID for traversal
- Node serialization includes version tracking for format evolution
For more detailed information about the implementation, see the [ARCHITECTURE.md](./ARCHITECTURE.md) file.
## Running Tests
The project includes a comprehensive test suite that verifies all functionality:
```bash
# Run all tests
cargo test
# Run specific test file
cargo test --test basic_test
cargo test --test prefix_test
cargo test --test getall_test
cargo test --test serialize_test
```
## Running Examples
The project includes example applications that demonstrate how to use the RadixTree:
```bash
# Run the basic usage example
cargo run --example basic_usage
# Run the prefix operations example
cargo run --example prefix_operations
```
## Benchmarking
The project includes benchmarks to measure performance:
```bash
# Run all benchmarks
cargo bench
# Run specific benchmark
cargo bench -- set
cargo bench -- get
cargo bench -- prefix_operations
```
## License
This project is licensed under the same license as the HeroCode project.

View File

@@ -1,141 +0,0 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use radixtree::RadixTree;
use std::path::PathBuf;
use tempfile::tempdir;
/// Criterion benchmark suite for `RadixTree`.
///
/// Measures the core operations (`set`, `get`, `list`, `getall`, `update`,
/// `delete`) against an on-disk database in a temp directory, then measures
/// the prefix operations (`list`, `getall`) at tree sizes 100 / 1,000 / 10,000.
///
/// NOTE(review): ordering here is load-bearing. The "set" benchmark and the
/// `setup_tree` loop populate the same `db_path` that the later get/list/
/// getall/update benchmarks reopen with `reset = false` — do not reorder
/// these sections independently.
fn criterion_benchmark(c: &mut Criterion) {
    // Create a temporary directory for benchmarks; it is removed when
    // `temp_dir` is dropped at the end of this function.
    let temp_dir = tempdir().expect("Failed to create temp directory");
    let db_path = temp_dir.path().to_str().unwrap();

    // Benchmark set operation. `i` increases monotonically across iterations
    // so every insert uses a fresh key (never measures an overwrite).
    c.bench_function("set", |b| {
        let mut tree = RadixTree::new(db_path, true).unwrap();
        let mut i = 0;
        b.iter(|| {
            let key = format!("benchmark_key_{}", i);
            let value = format!("benchmark_value_{}", i).into_bytes();
            tree.set(&key, value).unwrap();
            i += 1;
        });
    });

    // Setup tree with data for get/list/delete benchmarks. Opening with
    // `reset = true` wipes whatever the "set" benchmark left behind and
    // seeds exactly keys benchmark_key_0 .. benchmark_key_999.
    let mut setup_tree = RadixTree::new(db_path, true).unwrap();
    for i in 0..1000 {
        let key = format!("benchmark_key_{}", i);
        let value = format!("benchmark_value_{}", i).into_bytes();
        setup_tree.set(&key, value).unwrap();
    }

    // Benchmark get operation: cycles through the 1000 seeded keys.
    c.bench_function("get", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        let mut i = 0;
        b.iter(|| {
            let key = format!("benchmark_key_{}", i % 1000);
            let _value = tree.get(&key).unwrap();
            i += 1;
        });
    });

    // Benchmark list operation. Prefix "benchmark_key_1" matches key 1 plus
    // every key in 10..19 and 100..199, 111 keys total.
    c.bench_function("list", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        b.iter(|| {
            let _keys = tree.list("benchmark_key_1").unwrap();
        });
    });

    // Benchmark getall operation over the same prefix as "list" above.
    c.bench_function("getall", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        b.iter(|| {
            let _values = tree.getall("benchmark_key_1").unwrap();
        });
    });

    // Benchmark update operation: overwrites the seeded keys in rotation.
    c.bench_function("update", |b| {
        let mut tree = RadixTree::new(db_path, false).unwrap();
        let mut i = 0;
        b.iter(|| {
            let key = format!("benchmark_key_{}", i % 1000);
            let new_value = format!("updated_value_{}", i).into_bytes();
            tree.update(&key, new_value).unwrap();
            i += 1;
        });
    });

    // Benchmark delete operation.
    c.bench_function("delete", |b| {
        // Create a fresh tree for deletion benchmarks so the shared db_path
        // data used by the other benchmarks is untouched.
        let delete_dir = tempdir().expect("Failed to create temp directory");
        let delete_path = delete_dir.path().to_str().unwrap();
        let mut tree = RadixTree::new(delete_path, true).unwrap();
        // Setup keys to delete.
        for i in 0..1000 {
            let key = format!("delete_key_{}", i);
            let value = format!("delete_value_{}", i).into_bytes();
            tree.set(&key, value).unwrap();
        }
        let mut i = 0;
        b.iter(|| {
            let key = format!("delete_key_{}", i % 1000);
            // Only try to delete if it exists — after 1000 iterations the
            // keys are exhausted, so later iterations measure the get miss
            // plus the skip, not a delete.
            if tree.get(&key).is_ok() {
                tree.delete(&key).unwrap();
            }
            i += 1;
        });
    });

    // Benchmark prefix operations with varying tree sizes.
    let mut group = c.benchmark_group("prefix_operations");
    for &size in &[100, 1000, 10000] {
        // Create a fresh tree for each size so measurements are independent.
        let size_dir = tempdir().expect("Failed to create temp directory");
        let size_path = size_dir.path().to_str().unwrap();
        let mut tree = RadixTree::new(size_path, true).unwrap();
        // Insert data with five common prefixes, round-robin, so each prefix
        // owns roughly size/5 keys.
        for i in 0..size {
            let prefix = match i % 5 {
                0 => "user",
                1 => "post",
                2 => "comment",
                3 => "product",
                _ => "category",
            };
            let key = format!("{}_{}", prefix, i);
            let value = format!("value_{}", i).into_bytes();
            tree.set(&key, value).unwrap();
        }
        // Benchmark list operation for this size (one pass over all prefixes).
        group.bench_function(format!("list_size_{}", size), |b| {
            b.iter(|| {
                for prefix in &["user", "post", "comment", "product", "category"] {
                    let _keys = tree.list(prefix).unwrap();
                }
            });
        });
        // Benchmark getall operation for this size (one pass over all prefixes).
        group.bench_function(format!("getall_size_{}", size), |b| {
            b.iter(|| {
                for prefix in &["user", "post", "comment", "product", "category"] {
                    let _values = tree.getall(prefix).unwrap();
                }
            });
        });
    }
    group.finish();
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

View File

@@ -1,51 +0,0 @@
use radixtree::RadixTree;
use std::path::PathBuf;
/// Walks through the basic `RadixTree` lifecycle: create, set, get, update,
/// delete, and (optionally) clean up the backing database directory.
///
/// Set the `KEEP_DB` environment variable to preserve the database on disk
/// after the example finishes.
fn main() -> Result<(), radixtree::Error> {
    // Back the tree with a directory under the system temp dir.
    let db_dir = std::env::temp_dir().join("radixtree_example");
    std::fs::create_dir_all(&db_dir)?;
    println!("Creating radix tree at: {}", db_dir.display());

    // Open a fresh tree (reset = true wipes any previous contents).
    let mut rt = RadixTree::new(db_dir.to_str().unwrap(), true)?;

    // Insert a few keys sharing the "hel" prefix.
    println!("Storing data...");
    rt.set("hello", b"world".to_vec())?;
    rt.set("help", b"me".to_vec())?;
    rt.set("helicopter", b"flying".to_vec())?;

    // Read one back and print it.
    let fetched = rt.get("hello")?;
    println!("hello: {}", String::from_utf8_lossy(&fetched));

    // Overwrite the value at an existing key.
    println!("Updating value...");
    rt.update("hello", b"updated world".to_vec())?;
    let after_update = rt.get("hello")?;
    println!("hello (updated): {}", String::from_utf8_lossy(&after_update));

    // Remove a key, then demonstrate that reading it now errors.
    println!("Deleting 'help'...");
    rt.delete("help")?;
    match rt.get("help") {
        Ok(leftover) => println!("Unexpected: help still exists with value: {}", String::from_utf8_lossy(&leftover)),
        Err(err) => println!("As expected, help was deleted: {}", err),
    }

    // Keep the database only when the user asked for it via KEEP_DB.
    if std::env::var("KEEP_DB").is_ok() {
        println!("Database kept at: {}", db_dir.display());
    } else {
        std::fs::remove_dir_all(&db_dir)?;
        println!("Cleaned up database directory");
    }
    Ok(())
}

View File

@@ -1,121 +0,0 @@
use radixtree::RadixTree;
use std::time::{Duration, Instant};
use std::io::{self, Write};
// Use much smaller batches to avoid hitting OurDB's size limit
const BATCH_SIZE: usize = 1_000;
const NUM_BATCHES: usize = 1_000; // Total records: 1,000,000
const PROGRESS_INTERVAL: usize = 100;
/// Batched bulk-insert stress test for `RadixTree`.
///
/// Inserts `BATCH_SIZE * NUM_BATCHES` records, one fresh database per batch
/// (to stay under OurDB's size limit, per the constants' comment above),
/// printing per-batch throughput, sampled read latency, one prefix-search
/// timing, and a final summary/trend.
fn main() -> Result<(), radixtree::Error> {
    // Overall metrics: wall-clock start, running insert count, and the
    // per-batch durations used for the trend report at the end.
    let total_start_time = Instant::now();
    let mut total_records_inserted = 0;
    let mut batch_times = Vec::with_capacity(NUM_BATCHES);
    println!("Will insert up to {} records in batches of {}",
        BATCH_SIZE * NUM_BATCHES, BATCH_SIZE);
    // Process in batches to avoid OurDB size limits.
    for batch in 0..NUM_BATCHES {
        // Create a new database directory for each batch.
        let batch_path = std::env::temp_dir().join(format!("radixtree_batch_{}", batch));
        // Clean up any existing database left over from a previous run.
        if batch_path.exists() {
            std::fs::remove_dir_all(&batch_path)?;
        }
        std::fs::create_dir_all(&batch_path)?;
        println!("\nBatch {}/{}: Creating new radix tree...", batch + 1, NUM_BATCHES);
        let mut tree = RadixTree::new(batch_path.to_str().unwrap(), true)?;
        let batch_start_time = Instant::now();
        // Progress bookkeeping: time and count at the last progress print,
        // used to compute the instantaneous records/sec for each interval.
        let mut last_progress_time = Instant::now();
        let mut last_progress_count = 0;
        // Insert records for this batch. Keys are globally unique across
        // batches: "key:" + zero-padded 8-digit global index.
        for i in 0..BATCH_SIZE {
            let global_index = batch * BATCH_SIZE + i;
            let key = format!("key:{:08}", global_index);
            let value = format!("val{}", global_index).into_bytes();
            tree.set(&key, value)?;
            // Show progress at intervals (and always on the final record).
            if (i + 1) % PROGRESS_INTERVAL == 0 || i == BATCH_SIZE - 1 {
                let records_since_last = i + 1 - last_progress_count;
                let time_since_last = last_progress_time.elapsed();
                let records_per_second = records_since_last as f64 / time_since_last.as_secs_f64();
                // "\r" rewrites the same terminal line in place.
                print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
                    i + 1, BATCH_SIZE,
                    (i + 1) as f64 / BATCH_SIZE as f64 * 100.0,
                    records_per_second);
                io::stdout().flush().unwrap();
                last_progress_time = Instant::now();
                last_progress_count = i + 1;
            }
        }
        let batch_duration = batch_start_time.elapsed();
        batch_times.push(batch_duration);
        total_records_inserted += BATCH_SIZE;
        println!("\nBatch {}/{} completed in {:?} ({:.2} records/sec)",
            batch + 1, NUM_BATCHES,
            batch_duration,
            BATCH_SIZE as f64 / batch_duration.as_secs_f64());
        // Test random access performance for this batch: time `num_samples`
        // point reads spread evenly across the batch's key range.
        println!("Testing access performance for batch {}...", batch + 1);
        let mut total_get_time = Duration::new(0, 0);
        let num_samples = 100;
        // Use a simple distribution pattern (every BATCH_SIZE/num_samples-th key).
        for i in 0..num_samples {
            // Distribute samples across the batch.
            let sample_id = batch * BATCH_SIZE + (i * (BATCH_SIZE / num_samples));
            let key = format!("key:{:08}", sample_id);
            let get_start = Instant::now();
            let _ = tree.get(&key)?;
            total_get_time += get_start.elapsed();
        }
        println!("Average time to retrieve a record: {:?}",
            total_get_time / num_samples as u32);
        // Test prefix search performance.
        // NOTE(review): keys are zero-padded to 8 digits ("key:00012000"),
        // so a prefix like "key:12" only matches for batch 0 ("key:00");
        // for later batches this likely finds 0 keys — confirm intent.
        println!("Testing prefix search performance...");
        let prefix = format!("key:{:02}", batch % 100);
        let list_start = Instant::now();
        let keys = tree.list(&prefix)?;
        let list_duration = list_start.elapsed();
        println!("Found {} keys with prefix '{}' in {:?}",
            keys.len(), prefix, list_duration);
    }
    // Overall performance summary.
    let total_duration = total_start_time.elapsed();
    println!("\n\nPerformance Summary:");
    println!("Total time to insert {} records: {:?}", total_records_inserted, total_duration);
    println!("Average insertion rate: {:.2} records/second",
        total_records_inserted as f64 / total_duration.as_secs_f64());
    // Show performance trend across batches (sampled every 10th batch plus
    // the last) to surface any slowdown as the run progresses.
    println!("\nPerformance Trend (batch number vs. time):");
    for (i, duration) in batch_times.iter().enumerate() {
        if i % 10 == 0 || i == batch_times.len() - 1 { // Only show every 10th point
            println!(" Batch {}: {:?} ({:.2} records/sec)",
                i + 1,
                duration,
                BATCH_SIZE as f64 / duration.as_secs_f64());
        }
    }
    Ok(())
}

View File

@@ -1,134 +0,0 @@
use radixtree::RadixTree;
use std::time::{Duration, Instant};
use std::io::{self, Write};
// Total number of records the benchmark inserts into the tree.
const TOTAL_RECORDS: usize = 1_000_000;
// Progress is reported (and a batch timing recorded) every this many inserts.
const PROGRESS_INTERVAL: usize = 10_000;
// Every Nth insertion is individually timed for latency sampling;
// every (N * 10)th sample is printed to avoid flooding the output.
const PERFORMANCE_SAMPLE_SIZE: usize = 1000;
/// RadixTree bulk-insert benchmark.
///
/// Inserts `TOTAL_RECORDS` sequentially-keyed records into a fresh tree under
/// a temp directory, printing throughput every `PROGRESS_INTERVAL` inserts,
/// then measures point-lookup latency over evenly distributed sample keys and
/// prefix-search latency over a few fixed prefixes. The database directory is
/// removed afterwards unless the `KEEP_DB` environment variable is set.
fn main() -> Result<(), radixtree::Error> {
    // Use a throwaway directory and recreate it so every run starts clean.
    let db_path = std::env::temp_dir().join("radixtree_performance_test");
    if db_path.exists() {
        std::fs::remove_dir_all(&db_path)?;
    }
    std::fs::create_dir_all(&db_path)?;

    println!("Creating radix tree at: {}", db_path.display());
    println!("Will insert {} records and show progress...", TOTAL_RECORDS);

    let mut tree = RadixTree::new(db_path.to_str().unwrap(), true)?;

    // Wall-clock for the whole insert phase.
    let start_time = Instant::now();

    // One entry per reported batch: (records inserted so far, records in this
    // batch, batch duration). Recording the actual batch size (instead of
    // assuming PROGRESS_INTERVAL) keeps the trend report correct when the
    // final batch is partial. `+ 1` covers that possible extra partial batch.
    let mut insertion_times: Vec<(usize, usize, Duration)> =
        Vec::with_capacity(TOTAL_RECORDS / PROGRESS_INTERVAL + 1);
    let mut last_batch_time = Instant::now();
    let mut last_batch_records = 0;

    for i in 0..TOTAL_RECORDS {
        let key = format!("key:{:08}", i);
        // Small values avoid exceeding OurDB's value-size limit.
        let value = format!("val{}", i).into_bytes();

        // Decide whether to time this insertion *before* performing it, so
        // `tree.set` is written exactly once instead of being duplicated in
        // both branches.
        let insert_start = if i % PERFORMANCE_SAMPLE_SIZE == 0 {
            Some(Instant::now())
        } else {
            None
        };
        tree.set(&key, value)?;
        if let Some(started) = insert_start {
            let insert_duration = started.elapsed();
            // Only print a subset of samples to avoid flooding the output.
            if i % (PERFORMANCE_SAMPLE_SIZE * 10) == 0 {
                println!("Record {}: Insertion took {:?}", i, insert_duration);
            }
        }

        // Report progress at interval boundaries and after the final record.
        if (i + 1) % PROGRESS_INTERVAL == 0 || i == TOTAL_RECORDS - 1 {
            let done = i + 1;
            let records_in_batch = done - last_batch_records;
            let batch_duration = last_batch_time.elapsed();
            let records_per_second = records_in_batch as f64 / batch_duration.as_secs_f64();
            insertion_times.push((done, records_in_batch, batch_duration));
            print!("\rProgress: {}/{} records ({:.2}%) - {:.2} records/sec",
                done, TOTAL_RECORDS,
                done as f64 / TOTAL_RECORDS as f64 * 100.0,
                records_per_second);
            io::stdout().flush().unwrap();
            last_batch_time = Instant::now();
            last_batch_records = done;
        }
    }

    let total_duration = start_time.elapsed();
    println!("\n\nPerformance Summary:");
    println!("Total time to insert {} records: {:?}", TOTAL_RECORDS, total_duration);
    println!("Average insertion rate: {:.2} records/second",
        TOTAL_RECORDS as f64 / total_duration.as_secs_f64());

    // Show performance trend, thinned to every 10th batch plus the last one.
    println!("\nPerformance Trend (records inserted vs. time per batch):");
    for (idx, (record_count, batch_records, duration)) in insertion_times.iter().enumerate() {
        if idx % 10 == 0 || idx == insertion_times.len() - 1 {
            println!("  After {} records: {:?} for {} records ({:.2} records/sec)",
                record_count,
                duration,
                batch_records,
                // Use the real batch size so a partial final batch reports an
                // accurate rate.
                *batch_records as f64 / duration.as_secs_f64());
        }
    }

    // Point-lookup latency over deterministic, evenly spaced sample keys
    // (a simple distribution pattern instead of random access).
    println!("\nTesting access performance with distributed samples...");
    let mut total_get_time = Duration::new(0, 0);
    let num_samples = 1000;
    for i in 0..num_samples {
        let sample_id = (i * (TOTAL_RECORDS / num_samples)) % TOTAL_RECORDS;
        let key = format!("key:{:08}", sample_id);
        let get_start = Instant::now();
        let _ = tree.get(&key)?;
        total_get_time += get_start.elapsed();
    }
    println!("Average time to retrieve a record: {:?}",
        total_get_time / num_samples as u32);

    // Prefix-search latency over a few representative prefixes.
    println!("\nTesting prefix search performance...");
    let prefixes = ["key:0", "key:1", "key:5", "key:9"];
    for prefix in &prefixes {
        let list_start = Instant::now();
        let keys = tree.list(prefix)?;
        let list_duration = list_start.elapsed();
        println!("Found {} keys with prefix '{}' in {:?}",
            keys.len(), prefix, list_duration);
    }

    // Clean up unless the caller asked to keep the database for inspection.
    if std::env::var("KEEP_DB").is_err() {
        std::fs::remove_dir_all(&db_path)?;
        println!("\nCleaned up database directory");
    } else {
        println!("\nDatabase kept at: {}", db_path.display());
    }
    Ok(())
}

Some files were not shown because too many files have changed in this diff Show More