1 Commit

Author: Timur Gordon
SHA1: 6569e819ae
Message: marketplace models wip
Date: 2025-08-21 14:07:14 +02:00
171 changed files with 10175 additions and 4922 deletions

Cargo.lock (generated)

@@ -65,13 +65,13 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "async-trait"
version = "0.1.89"
version = "0.1.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -123,9 +123,9 @@ dependencies = [
[[package]]
name = "bitflags"
version = "2.9.2"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "bitvec"
@@ -168,7 +168,7 @@ dependencies = [
"proc-macro-crate",
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -213,18 +213,18 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
[[package]]
name = "cc"
version = "1.2.33"
version = "1.2.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f"
checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2"
dependencies = [
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.3"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
[[package]]
name = "cfg_aliases"
@@ -372,7 +372,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -452,9 +452,9 @@ dependencies = [
[[package]]
name = "hashbrown"
version = "0.15.5"
version = "0.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5"
[[package]]
name = "heck"
@@ -491,12 +491,10 @@ dependencies = [
name = "heromodels-derive"
version = "0.1.0"
dependencies = [
"heromodels_core",
"proc-macro2",
"quote",
"serde",
"serde_json",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -547,7 +545,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
dependencies = [
"equivalent",
"hashbrown 0.15.5",
"hashbrown 0.15.4",
]
[[package]]
@@ -599,7 +597,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -629,9 +627,9 @@ dependencies = [
[[package]]
name = "jsonb"
version = "0.5.4"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a452366d21e8d3cbca680c41388e01d6a88739afef7877961946a6da409f9ccd"
checksum = "96cbb4fba292867a2d86ed83dbe5f9d036f423bf6a491b7d884058b2fde42fcd"
dependencies = [
"byteorder",
"ethnum",
@@ -649,20 +647,9 @@ dependencies = [
[[package]]
name = "libc"
version = "0.2.175"
version = "0.2.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
[[package]]
name = "libredox"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3"
dependencies = [
"bitflags",
"libc",
"redox_syscall",
]
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
[[package]]
name = "lock_api"
@@ -773,7 +760,6 @@ dependencies = [
[[package]]
name = "ourdb"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16"
dependencies = [
"crc32fast",
"log",
@@ -806,9 +792,9 @@ dependencies = [
[[package]]
name = "percent-encoding"
version = "2.3.2"
version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "phf"
@@ -920,9 +906,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.101"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
"unicode-ident",
]
@@ -1093,13 +1079,12 @@ checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
name = "rhailib-macros"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16"
dependencies = [
"rhai",
"serde",
@@ -1158,9 +1143,9 @@ checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
[[package]]
name = "rustversion"
version = "1.0.22"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
[[package]]
name = "ryu"
@@ -1206,14 +1191,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
name = "serde_json"
version = "1.0.143"
version = "1.0.142"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
dependencies = [
"indexmap",
"itoa",
@@ -1262,9 +1247,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
[[package]]
name = "slab"
version = "0.4.11"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d"
[[package]]
name = "smallvec"
@@ -1342,7 +1327,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -1364,9 +1349,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.106"
version = "2.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
dependencies = [
"proc-macro2",
"quote",
@@ -1402,7 +1387,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -1416,9 +1401,9 @@ dependencies = [
[[package]]
name = "tinyvec"
version = "1.10.0"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa"
checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71"
dependencies = [
"tinyvec_macros",
]
@@ -1457,7 +1442,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -1519,7 +1504,6 @@ dependencies = [
[[package]]
name = "tst"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16"
dependencies = [
"ourdb",
"thiserror",
@@ -1566,9 +1550,9 @@ checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae"
[[package]]
name = "uuid"
version = "1.18.0"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [
"getrandom 0.3.3",
"js-sys",
@@ -1630,7 +1614,7 @@ dependencies = [
"log",
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
"wasm-bindgen-shared",
]
@@ -1652,7 +1636,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -1678,11 +1662,11 @@ dependencies = [
[[package]]
name = "whoami"
version = "1.6.1"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d"
checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7"
dependencies = [
"libredox",
"redox_syscall",
"wasite",
"web-sys",
]
@@ -1708,7 +1692,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -1719,7 +1703,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]
[[package]]
@@ -1872,5 +1856,5 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"syn 2.0.104",
]


@@ -14,6 +14,4 @@ quote = "1.0"
proc-macro2 = "1.0"
[dev-dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
heromodels_core = { path = "../heromodels_core" }
serde = { version = "1.0", features = ["derive"] }


@@ -1,6 +1,6 @@
use proc_macro::TokenStream;
use quote::{format_ident, quote};
use syn::{parse_macro_input, Data, DeriveInput, Fields, Lit, Meta, MetaList, MetaNameValue};
use syn::{Data, DeriveInput, Fields, parse_macro_input};
/// Convert a string to snake_case
fn to_snake_case(s: &str) -> String {
@@ -47,165 +47,86 @@ pub fn model(_attr: TokenStream, item: TokenStream) -> TokenStream {
let db_prefix = to_snake_case(&name_str);
// Extract fields with #[index] attribute
// Supports both top-level (no args) and nested path-based indexes declared on a field
#[derive(Clone)]
enum IndexDecl {
TopLevel {
field_ident: syn::Ident,
field_ty: syn::Type,
},
NestedPath {
on_field_ident: syn::Ident,
path: String, // dotted path relative to the field
},
}
let mut index_decls: Vec<IndexDecl> = Vec::new();
let mut indexed_fields = Vec::new();
let mut custom_index_names = std::collections::HashMap::new();
if let Data::Struct(ref mut data_struct) = input.data {
if let Fields::Named(ref mut fields_named) = data_struct.fields {
for field in &mut fields_named.named {
let mut to_remove: Vec<usize> = Vec::new();
let mut attr_idx = None;
for (i, attr) in field.attrs.iter().enumerate() {
if !attr.path().is_ident("index") {
continue;
}
to_remove.push(i);
if attr.path().is_ident("index") {
attr_idx = Some(i);
if let Some(ref field_name) = field.ident {
// Check if the attribute has parameters
let mut custom_name = None;
if let Some(ref field_name) = field.ident {
match &attr.meta {
Meta::Path(_) => {
// Simple top-level index on this field
index_decls.push(IndexDecl::TopLevel {
field_ident: field_name.clone(),
field_ty: field.ty.clone(),
});
}
Meta::List(MetaList { .. }) => {
// Parse for path = "..."; name is assumed equal to path
// We support syntax: #[index(path = "a.b.c")]
if let Ok(nested) = attr.parse_args_with(
syn::punctuated::Punctuated::<Meta, syn::Token![,]>::parse_terminated,
) {
for meta in nested {
if let Meta::NameValue(MetaNameValue { path, value, .. }) = meta {
if path.is_ident("path") {
if let syn::Expr::Lit(syn::ExprLit { lit: Lit::Str(lit_str), .. }) = value {
let p = lit_str.value();
index_decls.push(IndexDecl::NestedPath {
on_field_ident: field_name.clone(),
path: p,
});
// Parse attribute arguments if any
let meta = attr.meta.clone();
if let syn::Meta::List(list) = meta {
if let Ok(nested) = list.parse_args_with(syn::punctuated::Punctuated::<syn::Meta, syn::Token![,]>::parse_terminated) {
for meta in nested {
if let syn::Meta::NameValue(name_value) = meta {
if name_value.path.is_ident("name") {
if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Str(lit_str), .. }) = name_value.value {
custom_name = Some(lit_str.value());
}
}
}
}
}
}
}
_ => {}
indexed_fields.push((field_name.clone(), field.ty.clone()));
if let Some(name) = custom_name {
custom_index_names.insert(field_name.to_string(), name);
}
}
}
}
// remove all #[index] attributes we processed
// remove from the back to keep indices valid
to_remove.sort_unstable();
to_remove.drain(..).rev().for_each(|idx| {
if let Some(idx) = attr_idx {
field.attrs.remove(idx);
});
}
}
}
}
// Generate Model trait implementation
let db_keys_impl = if index_decls.is_empty() {
let db_keys_impl = if indexed_fields.is_empty() {
quote! {
fn db_keys(&self) -> Vec<heromodels_core::IndexKey> {
Vec::new()
}
}
} else {
// Build code for keys from each index declaration
let mut key_snippets: Vec<proc_macro2::TokenStream> = Vec::new();
for decl in &index_decls {
match decl.clone() {
IndexDecl::TopLevel { field_ident, .. } => {
let name_str = field_ident.to_string();
key_snippets.push(quote! {
keys.push(heromodels_core::IndexKey {
name: #name_str,
value: self.#field_ident.to_string(),
});
});
}
IndexDecl::NestedPath { on_field_ident, path } => {
// Name is equal to provided path
let name_str = path.clone();
// Generate traversal code using serde_json to support arrays and objects generically
// Split the path into static segs for iteration
let segs: Vec<String> = path.split('.').map(|s| s.to_string()).collect();
let segs_iter = segs.iter().map(|s| s.as_str());
let segs_array = quote! { [ #( #segs_iter ),* ] };
key_snippets.push(quote! {
// Serialize the target field to JSON for generic traversal
let __hm_json_val = ::serde_json::to_value(&self.#on_field_ident).unwrap_or(::serde_json::Value::Null);
let mut __hm_stack: Vec<&::serde_json::Value> = vec![&__hm_json_val];
for __hm_seg in #segs_array.iter() {
let mut __hm_next: Vec<&::serde_json::Value> = Vec::new();
for __hm_v in &__hm_stack {
match __hm_v {
::serde_json::Value::Array(arr) => {
for __hm_e in arr {
if let ::serde_json::Value::Object(map) = __hm_e {
if let Some(x) = map.get(*__hm_seg) { __hm_next.push(x); }
}
}
}
::serde_json::Value::Object(map) => {
if let Some(x) = map.get(*__hm_seg) { __hm_next.push(x); }
}
_ => {}
}
}
__hm_stack = __hm_next;
if __hm_stack.is_empty() { break; }
}
for __hm_leaf in __hm_stack {
match __hm_leaf {
::serde_json::Value::Null => {},
::serde_json::Value::Array(_) => {},
::serde_json::Value::Object(_) => {},
other => {
// Convert primitives to string without surrounding quotes for strings
let mut s = other.to_string();
if let ::serde_json::Value::String(_) = other { s = s.trim_matches('"').to_string(); }
keys.push(heromodels_core::IndexKey { name: #name_str, value: s });
}
}
}
});
let field_keys = indexed_fields.iter().map(|(field_name, _)| {
let name_str = custom_index_names
.get(&field_name.to_string())
.cloned()
.unwrap_or(field_name.to_string());
quote! {
heromodels_core::IndexKey {
name: #name_str,
value: self.#field_name.to_string(),
}
}
}
});
quote! {
fn db_keys(&self) -> Vec<heromodels_core::IndexKey> {
let mut keys: Vec<heromodels_core::IndexKey> = Vec::new();
#(#key_snippets)*
keys
vec![
#(#field_keys),*
]
}
}
};
let indexed_field_names: Vec<String> = index_decls
let indexed_field_names = indexed_fields
.iter()
.map(|d| match d {
IndexDecl::TopLevel { field_ident, .. } => field_ident.to_string(),
IndexDecl::NestedPath { path, .. } => path.clone(),
})
.collect();
.map(|f| f.0.to_string())
.collect::<Vec<_>>();
let model_impl = quote! {
impl heromodels_core::Model for #struct_name {
@@ -231,33 +152,51 @@ pub fn model(_attr: TokenStream, item: TokenStream) -> TokenStream {
}
};
// Generate Index trait implementations only for top-level fields, keep existing behavior
// Generate Index trait implementations
let mut index_impls = proc_macro2::TokenStream::new();
for decl in &index_decls {
if let IndexDecl::TopLevel { field_ident, field_ty } = decl {
let name_str = field_ident.to_string();
let index_struct_name = format_ident!("{}", &name_str);
let field_type = field_ty.clone();
let index_impl = quote! {
pub struct #index_struct_name;
for (field_name, field_type) in &indexed_fields {
let name_str = field_name.to_string();
impl heromodels_core::Index for #index_struct_name {
type Model = super::#struct_name;
type Key = #field_type;
// Get custom index name if specified, otherwise use field name
let index_key = match custom_index_names.get(&name_str) {
Some(custom_name) => custom_name.clone(),
None => name_str.clone(),
};
fn key() -> &'static str { #name_str }
// Convert field name to PascalCase for struct name
// let struct_name_str = to_pascal_case(&name_str);
// let index_struct_name = format_ident!("{}", struct_name_str);
let index_struct_name = format_ident!("{}", &name_str);
fn field_name() -> &'static str { #name_str }
// Default to str for key type
let index_impl = quote! {
pub struct #index_struct_name;
impl heromodels_core::Index for #index_struct_name {
type Model = super::#struct_name;
type Key = #field_type;
fn key() -> &'static str {
#index_key
}
};
index_impls.extend(index_impl);
}
fn field_name() -> &'static str {
#name_str
}
}
};
index_impls.extend(index_impl);
}
if !index_impls.is_empty() {
let index_mod_name = format_ident!("{}_index", db_prefix);
index_impls = quote! { pub mod #index_mod_name { #index_impls } };
index_impls = quote! {
pub mod #index_mod_name {
#index_impls
}
}
}
// Combine the original struct with the generated implementations
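To make the attribute handling above easier to follow, here is a standalone, hand-written sketch of the shape of code the revised `#[model]` macro is expected to emit for a plain `#[index]` field and an `#[index(name = "...")]` field. It mirrors the `quote!` blocks in the diff (snake_case `db_prefix`, `IndexKey` entries named after the custom name or the field name); it is an illustration, not the macro's literal output, and the mock `IndexKey` type follows the test harness in this commit.

```rust
// Minimal mock of the IndexKey type used by the generated code (as in the tests).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IndexKey {
    pub name: &'static str,
    pub value: String,
}

#[derive(Debug, Clone)]
pub struct TestUserWithCustomIndex {
    pub username: String, // would carry #[index(name = "custom_username")]
    pub is_active: bool,  // would carry #[index]
}

impl TestUserWithCustomIndex {
    // Hand-written equivalent of the generated Model::db_prefix
    // (snake_case of the struct name).
    pub fn db_prefix() -> &'static str {
        "test_user_with_custom_index"
    }

    // Hand-written equivalent of the generated Model::db_keys: the custom index
    // name replaces the field name for `username`, while `is_active` falls back
    // to its field name; values are the fields' to_string() output.
    pub fn db_keys(&self) -> Vec<IndexKey> {
        vec![
            IndexKey { name: "custom_username", value: self.username.to_string() },
            IndexKey { name: "is_active", value: self.is_active.to_string() },
        ]
    }
}

fn main() {
    let user = TestUserWithCustomIndex { username: "test".into(), is_active: true };
    assert_eq!(TestUserWithCustomIndex::db_prefix(), "test_user_with_custom_index");
    let keys = user.db_keys();
    assert_eq!(keys[0].name, "custom_username");
    assert_eq!(keys[1].value, "true");
    println!("{keys:?}");
}
```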


@@ -1,38 +1,7 @@
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
// Make the current crate visible as an extern crate named `heromodels_core`
extern crate self as heromodels_core;
extern crate serde_json; // ensure ::serde_json path resolves
// Mock the heromodels_core API at crate root (visible via the alias above)
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IndexKey {
pub name: &'static str,
pub value: String,
}
pub trait Model: std::fmt::Debug + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static {
fn db_prefix() -> &'static str
where
Self: Sized;
fn get_id(&self) -> u32;
fn base_data_mut(&mut self) -> &mut BaseModelData;
fn db_keys(&self) -> Vec<IndexKey> {
Vec::new()
}
fn indexed_fields() -> Vec<&'static str> {
Vec::new()
}
}
pub trait Index {
type Model: Model;
type Key: ToString + ?Sized;
fn key() -> &'static str;
fn field_name() -> &'static str;
}
// Define the necessary structs and traits for testing
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BaseModelData {
pub id: u32,
@@ -42,18 +11,41 @@ pub struct BaseModelData {
}
impl BaseModelData {
pub fn new() -> Self {
let now = 1000;
Self { id: 0, created_at: now, modified_at: now, comments: Vec::new() }
pub fn new(id: u32) -> Self {
let now = 1000; // Mock timestamp
Self {
id,
created_at: now,
modified_at: now,
comments: Vec::new(),
}
}
pub fn update_modified(&mut self) { self.modified_at += 1; }
}
// Top-level field index tests
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IndexKey {
pub name: &'static str,
pub value: String,
}
pub trait Model: std::fmt::Debug + Clone {
fn db_prefix() -> &'static str;
fn get_id(&self) -> u32;
fn base_data_mut(&mut self) -> &mut BaseModelData;
fn db_keys(&self) -> Vec<IndexKey>;
}
pub trait Index {
type Model: Model;
type Key: ?Sized;
fn key() -> &'static str;
}
// Test struct using the model macro
#[derive(Debug, Clone, Serialize, Deserialize)]
#[model]
pub struct TestUser {
base_data: heromodels_core::BaseModelData,
struct TestUser {
base_data: BaseModelData,
#[index]
username: String,
@@ -62,12 +54,25 @@ pub struct TestUser {
is_active: bool,
}
// Test struct with custom index name
#[derive(Debug, Clone, Serialize, Deserialize)]
#[model]
struct TestUserWithCustomIndex {
base_data: BaseModelData,
#[index(name = "custom_username")]
username: String,
#[index]
is_active: bool,
}
#[test]
fn test_basic_model() {
assert_eq!(TestUser::db_prefix(), "test_user");
let user = TestUser {
base_data: heromodels_core::BaseModelData::new(),
base_data: BaseModelData::new(1),
username: "test".to_string(),
is_active: true,
};
@@ -80,47 +85,22 @@ fn test_basic_model() {
assert_eq!(keys[1].value, "true");
}
// Nested path index tests (including vector traversal)
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct GPU { gpu_brand: String }
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct CPU { cpu_brand: String }
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct DeviceInfo { vendor: String, cpu: Vec<CPU>, gpu: Vec<GPU> }
#[derive(Debug, Clone, Serialize, Deserialize)]
#[model]
pub struct NodeLike {
base_data: heromodels_core::BaseModelData,
#[index(path = "vendor")]
#[index(path = "cpu.cpu_brand")]
#[index(path = "gpu.gpu_brand")]
devices: DeviceInfo,
}
#[test]
fn test_nested_indexes() {
let n = NodeLike {
base_data: heromodels_core::BaseModelData::new(),
devices: DeviceInfo {
vendor: "SuperVendor".to_string(),
cpu: vec![CPU { cpu_brand: "Intel".into() }, CPU { cpu_brand: "AMD".into() }],
gpu: vec![GPU { gpu_brand: "NVIDIA".into() }, GPU { gpu_brand: "AMD".into() }],
},
fn test_custom_index_name() {
let user = TestUserWithCustomIndex {
base_data: BaseModelData::new(1),
username: "test".to_string(),
is_active: true,
};
let mut keys = n.db_keys();
// Sort for deterministic assertions
keys.sort_by(|a,b| a.name.cmp(b.name).then(a.value.cmp(&b.value)));
// Check that the Username struct uses the custom index name
assert_eq!(Username::key(), "custom_username");
// Expect 1 (vendor) + 2 (cpu brands) + 2 (gpu brands) = 5 keys
assert_eq!(keys.len(), 5);
assert!(keys.iter().any(|k| k.name == "vendor" && k.value == "SuperVendor"));
assert!(keys.iter().any(|k| k.name == "cpu.cpu_brand" && k.value == "Intel"));
assert!(keys.iter().any(|k| k.name == "cpu.cpu_brand" && k.value == "AMD"));
assert!(keys.iter().any(|k| k.name == "gpu.gpu_brand" && k.value == "NVIDIA"));
assert!(keys.iter().any(|k| k.name == "gpu.gpu_brand" && k.value == "AMD"));
// Check that the db_keys method returns the correct keys
let keys = user.db_keys();
assert_eq!(keys.len(), 2);
assert_eq!(keys[0].name, "custom_username");
assert_eq!(keys[0].value, "test");
assert_eq!(keys[1].name, "is_active");
assert_eq!(keys[1].value, "true");
}

heromodels/Cargo.lock (generated, new file)

File diff suppressed because it is too large.


@@ -10,11 +10,11 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bincode = { version = "2", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
ourdb = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "ourdb" }
tst = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "tst" }
ourdb = { path = "../../herolib_rust/packages/data/ourdb" }
tst = { path = "../../herolib_rust/packages/data/tst" }
heromodels-derive = { path = "../heromodels-derive" }
heromodels_core = { path = "../heromodels_core" }
rhailib-macros = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "rhailib-macros" }
rhailib-macros = { path = "../../herolib_rust/rhailib/src/macros" }
rhai = { version = "1.21.0", features = [
"std",
"sync",
@@ -53,19 +53,11 @@ path = "examples/finance_example/main.rs"
name = "flow_example"
path = "examples/flow_example.rs"
# [[example]]
# name = "biz_rhai"
# path = "examples/biz_rhai/example.rs"
# required-features = ["rhai"]
[[example]]
name = "biz_rhai"
path = "examples/biz_rhai/example.rs"
required-features = ["rhai"]
[[example]]
name = "postgres_model_example"
path = "examples/postgres_example/example.rs"
[[example]]
name = "heroledger_example"
path = "examples/heroledger_example/example.rs"
[[example]]
name = "grid4_example"
path = "examples/grid4_example/example.rs"


@@ -1,25 +1,10 @@
use chrono::{Duration, Utc, NaiveDateTime};
use chrono::{Duration, Utc};
use heromodels::db::{Collection, Db};
use heromodels::models::User;
use heromodels::models::calendar::{AttendanceStatus, Attendee, Calendar, Event, EventStatus};
use heromodels_core::Model;
fn main() {
// Helper to format i64 timestamps
let fmt_time = |ts: i64| -> String {
let ndt = NaiveDateTime::from_timestamp_opt(ts, 0)
.unwrap_or(NaiveDateTime::from_timestamp_opt(0, 0).unwrap());
chrono::DateTime::<Utc>::from_utc(ndt, Utc)
.format("%Y-%m-%d %H:%M")
.to_string()
};
let fmt_date = |ts: i64| -> String {
let ndt = NaiveDateTime::from_timestamp_opt(ts, 0)
.unwrap_or(NaiveDateTime::from_timestamp_opt(0, 0).unwrap());
chrono::DateTime::<Utc>::from_utc(ndt, Utc)
.format("%Y-%m-%d")
.to_string()
};
// Create a new DB instance, reset before every run
let db_path = "/tmp/ourdb_calendar_example";
let db = heromodels::db::hero::OurDB::new(db_path, true).expect("Can create DB");
@@ -62,21 +47,50 @@ fn main() {
println!("- User 2 (ID: {}): {}", user2_id, stored_user2.full_name);
println!("- User 3 (ID: {}): {}", user3_id, stored_user3.full_name);
// --- Create Attendees (embedded in events, not stored separately) ---
// --- Create Attendees ---
println!("\n--- Creating Attendees ---");
let attendee1 = Attendee::new(user1_id).status(AttendanceStatus::Accepted);
let attendee2 = Attendee::new(user2_id).status(AttendanceStatus::Tentative);
let attendee3 = Attendee::new(user3_id); // Default NoResponse
// Store attendees in database and get their IDs
let attendee_collection = db
.collection::<Attendee>()
.expect("can open attendee collection");
let (attendee1_id, stored_attendee1) = attendee_collection
.set(&attendee1)
.expect("can set attendee1");
let (attendee2_id, stored_attendee2) = attendee_collection
.set(&attendee2)
.expect("can set attendee2");
let (attendee3_id, stored_attendee3) = attendee_collection
.set(&attendee3)
.expect("can set attendee3");
println!("Created attendees:");
println!(
"- Attendee 1 (ID: {}): Contact ID {}, Status: {:?}",
attendee1_id, stored_attendee1.contact_id, stored_attendee1.status
);
println!(
"- Attendee 2 (ID: {}): Contact ID {}, Status: {:?}",
attendee2_id, stored_attendee2.contact_id, stored_attendee2.status
);
println!(
"- Attendee 3 (ID: {}): Contact ID {}, Status: {:?}",
attendee3_id, stored_attendee3.contact_id, stored_attendee3.status
);
// --- Create Events with Attendees ---
println!("\n--- Creating Events with Enhanced Features ---");
let now = Utc::now();
let event1_start = (now + Duration::hours(1)).timestamp();
let event1_end = (now + Duration::hours(2)).timestamp();
let event1 = Event::new()
.title("Team Meeting")
.reschedule(event1_start, event1_end)
let event1 = Event::new(
"Team Meeting",
now + Duration::hours(1),
now + Duration::hours(2),
)
.description("Weekly sync-up meeting to discuss project progress.")
.location("Conference Room A")
.color("#FF5722") // Red-orange color
@@ -85,14 +99,14 @@ fn main() {
.category("Work")
.reminder_minutes(15)
.timezone("UTC")
.add_attendee(attendee1.clone())
.add_attendee(attendee2.clone());
.add_attendee(attendee1_id)
.add_attendee(attendee2_id);
let event2_start = (now + Duration::days(1)).timestamp();
let event2_end = (now + Duration::days(1) + Duration::minutes(90)).timestamp();
let event2 = Event::new()
.title("Project Brainstorm")
.reschedule(event2_start, event2_end)
let event2 = Event::new(
"Project Brainstorm",
now + Duration::days(1),
now + Duration::days(1) + Duration::minutes(90),
)
.description("Brainstorming session for new project features.")
.location("Innovation Lab")
.color("#4CAF50") // Green color
@@ -101,28 +115,28 @@ fn main() {
.category("Planning")
.reminder_minutes(30)
.is_recurring(true)
.add_attendee(attendee1.clone())
.add_attendee(attendee3.clone());
.add_attendee(attendee1_id)
.add_attendee(attendee3_id);
let event3_start = (now + Duration::days(2)).timestamp();
let event3_end = (now + Duration::days(2) + Duration::hours(1)).timestamp();
let event3 = Event::new()
.title("Client Call")
.reschedule(event3_start, event3_end)
let event3 = Event::new(
"Client Call",
now + Duration::days(2),
now + Duration::days(2) + Duration::hours(1),
)
.description("Quarterly review with key client.")
.color("#9C27B0") // Purple color
.created_by(user3_id)
.status(EventStatus::Published)
.category("Client")
.reminder_minutes(60)
.add_attendee(attendee2.clone());
.add_attendee(attendee2_id);
// Create an all-day event
let event4_start = (now + Duration::days(7)).timestamp();
let event4_end = (now + Duration::days(7) + Duration::hours(24)).timestamp();
let event4 = Event::new()
.title("Company Holiday")
.reschedule(event4_start, event4_end)
let event4 = Event::new(
"Company Holiday",
now + Duration::days(7),
now + Duration::days(7) + Duration::hours(24),
)
.description("National holiday - office closed.")
.color("#FFC107") // Amber color
.all_day(true)
@@ -134,7 +148,7 @@ fn main() {
println!(
"- Event 1: '{}' at {} with {} attendees",
event1.title,
fmt_time(event1.start_time),
event1.start_time.format("%Y-%m-%d %H:%M"),
event1.attendees.len()
);
println!(
@@ -160,19 +174,12 @@ fn main() {
);
println!(" All-day: {}", event1.all_day);
println!(" Recurring: {}", event1.is_recurring);
println!(
" Attendee IDs: {:?}",
event1
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
println!(" Attendee IDs: {:?}", event1.attendees);
println!(
"- Event 2: '{}' at {} with {} attendees",
event2.title,
fmt_time(event2.start_time),
event2.start_time.format("%Y-%m-%d %H:%M"),
event2.attendees.len()
);
println!(
@@ -198,19 +205,12 @@ fn main() {
);
println!(" All-day: {}", event2.all_day);
println!(" Recurring: {}", event2.is_recurring);
println!(
" Attendee IDs: {:?}",
event2
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
println!(" Attendee IDs: {:?}", event2.attendees);
println!(
"- Event 3: '{}' at {} with {} attendees",
event3.title,
fmt_time(event3.start_time),
event3.start_time.format("%Y-%m-%d %H:%M"),
event3.attendees.len()
);
println!(
@@ -236,19 +236,12 @@ fn main() {
);
println!(" All-day: {}", event3.all_day);
println!(" Recurring: {}", event3.is_recurring);
println!(
" Attendee IDs: {:?}",
event3
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
);
println!(" Attendee IDs: {:?}", event3.attendees);
println!(
"- Event 4: '{}' at {} (All-day: {})",
event4.title,
fmt_date(event4.start_time),
event4.start_time.format("%Y-%m-%d"),
event4.all_day
);
println!(
@@ -269,37 +262,25 @@ fn main() {
let new_start = now + Duration::hours(2);
let new_end = now + Duration::hours(3);
let mut updated_event1 = event1.clone();
updated_event1 = updated_event1.reschedule(new_start.timestamp(), new_end.timestamp());
updated_event1 = updated_event1.reschedule(new_start, new_end);
println!(
"Rescheduled '{}' to {}",
updated_event1.title,
fmt_time(new_start.timestamp())
new_start.format("%Y-%m-%d %H:%M")
);
// Remove an attendee
updated_event1 = updated_event1.remove_attendee(user1_id);
updated_event1 = updated_event1.remove_attendee(attendee1_id);
println!(
"Removed attendee {} from '{}'. Remaining attendee IDs: {:?}",
user1_id,
updated_event1.title,
updated_event1
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
attendee1_id, updated_event1.title, updated_event1.attendees
);
// Add a new attendee
updated_event1 = updated_event1.add_attendee(attendee3.clone());
updated_event1 = updated_event1.add_attendee(attendee3_id);
println!(
"Added attendee {} to '{}'. Current attendee IDs: {:?}",
user3_id,
updated_event1.title,
updated_event1
.attendees
.iter()
.map(|a| a.contact_id)
.collect::<Vec<u32>>()
attendee3_id, updated_event1.title, updated_event1.attendees
);
// --- Demonstrate Event Status Changes ---
@@ -319,11 +300,11 @@ fn main() {
println!("Cancelled event: '{}'", cancelled_event.title);
// Update event with new features
let enhanced_start = (now + Duration::days(5)).timestamp();
let enhanced_end = (now + Duration::days(5) + Duration::hours(2)).timestamp();
let enhanced_event = Event::new()
.title("Enhanced Meeting")
.reschedule(enhanced_start, enhanced_end)
let enhanced_event = Event::new(
"Enhanced Meeting",
now + Duration::days(5),
now + Duration::days(5) + Duration::hours(2),
)
.description("Meeting with all new features demonstrated.")
.location("Virtual - Zoom")
.color("#673AB7") // Deep purple
@@ -333,9 +314,9 @@ fn main() {
.reminder_minutes(45)
.timezone("America/New_York")
.is_recurring(true)
.add_attendee(attendee1)
.add_attendee(attendee2)
.add_attendee(attendee3);
.add_attendee(attendee1_id)
.add_attendee(attendee2_id)
.add_attendee(attendee3_id);
println!("Created enhanced event with all features:");
println!(" Title: {}", enhanced_event.title);
@@ -504,13 +485,13 @@ fn main() {
println!("\n--- Modifying Calendar ---");
// Create and store a new event
let ne_start = (now + Duration::days(3)).timestamp();
let ne_end = (now + Duration::days(3) + Duration::minutes(30)).timestamp();
let new_event = Event::new()
.title("1-on-1 Meeting")
.reschedule(ne_start, ne_end)
.description("One-on-one meeting with team member.")
.location("Office");
let new_event = Event::new(
"1-on-1 Meeting",
now + Duration::days(3),
now + Duration::days(3) + Duration::minutes(30),
)
.description("One-on-one meeting with team member.")
.location("Office");
let (new_event_id, _stored_new_event) =
event_collection.set(&new_event).expect("can set new event");
@@ -584,7 +565,7 @@ fn main() {
"- Event ID: {}, Title: '{}', Start: {}, Attendees: {}",
event.get_id(),
event.title,
fmt_time(event.start_time),
event.start_time.format("%Y-%m-%d %H:%M"),
event.attendees.len()
);
}
@@ -602,16 +583,22 @@ fn main() {
retrieved_event1.attendees.len()
);
// Look up attendee details directly from embedded attendees
for attendee in &retrieved_event1.attendees {
if let Some(user) = user_collection
.get_by_id(attendee.contact_id)
.expect("can try to get user")
// Look up attendee details for each attendee ID
for &attendee_id in &retrieved_event1.attendees {
if let Some(attendee) = attendee_collection
.get_by_id(attendee_id)
.expect("can try to get attendee")
{
println!(
" - User {}: {} (Status: {:?})",
attendee.contact_id, user.full_name, attendee.status
);
// Look up user details for the attendee's contact_id
if let Some(user) = user_collection
.get_by_id(attendee.contact_id)
.expect("can try to get user")
{
println!(
" - Attendee ID {}: {} (User: {}, Status: {:?})",
attendee_id, user.full_name, attendee.contact_id, attendee.status
);
}
}
}
}
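As a quick summary of the attendee refactor in this example, here is a condensed sketch of the revised flow, assuming the same heromodels builder APIs exercised above: attendees are persisted in their own collection, events reference them by ID, and `Event::new` now takes the title and start/end times directly.

```rust
use chrono::{Duration, Utc};
use heromodels::db::{Collection, Db};
use heromodels::models::calendar::{AttendanceStatus, Attendee, Event};

// Condensed sketch: store the attendee first, keep only its ID, and attach
// that ID to the event. Assumes the APIs shown in the example above.
fn add_meeting(db: &heromodels::db::hero::OurDB, user_id: u32) -> Event {
    let now = Utc::now();

    // Attendees now live in their own collection.
    let attendee = Attendee::new(user_id).status(AttendanceStatus::Accepted);
    let (attendee_id, _stored) = db
        .collection::<Attendee>()
        .expect("can open attendee collection")
        .set(&attendee)
        .expect("can set attendee");

    // Event::new now takes title, start and end; attendees are stored as IDs.
    Event::new(
        "Team Meeting",
        now + Duration::hours(1),
        now + Duration::hours(2),
    )
    .created_by(user_id)
    .add_attendee(attendee_id)
}
```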


@@ -1,26 +1,26 @@
use circles_launcher::new_launcher;
use heromodels::models::circle::circle::new_circle;
use circles_launcher::{new_launcher};
use heromodels::models::circle::circle::{new_circle};
use secp256k1::{Secp256k1, SecretKey, PublicKey};
use rand::rngs::OsRng;
use secp256k1::{PublicKey, Secp256k1, SecretKey};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Generate valid secp256k1 keypairs for testing
let secp = Secp256k1::new();
let mut rng = OsRng;
let secret_key1 = SecretKey::new(&mut rng);
let public_key1 = PublicKey::from_secret_key(&secp, &secret_key1);
let pk1_hex = hex::encode(public_key1.serialize());
let secret_key2 = SecretKey::new(&mut rng);
let public_key2 = PublicKey::from_secret_key(&secp, &secret_key2);
let pk2_hex = hex::encode(public_key2.serialize());
let secret_key3 = SecretKey::new(&mut rng);
let public_key3 = PublicKey::from_secret_key(&secp, &secret_key3);
let pk3_hex = hex::encode(public_key3.serialize());
println!("Generated test public keys:");
println!(" PK1: {}", pk1_hex);
println!(" PK2: {}", pk2_hex);
@@ -36,4 +36,4 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
.save();
Ok(())
}
}


@@ -1,199 +0,0 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{Bid, BidStatus, BillingPeriod};
use heromodels::models::grid4::bid::bid_index::customer_id;
use heromodels_core::Model;
// Helper function to print bid details
fn print_bid_details(bid: &Bid) {
println!("\n--- Bid Details ---");
println!("ID: {}", bid.get_id());
println!("Customer ID: {}", bid.customer_id);
println!("Compute Slices: {}", bid.compute_slices_nr);
println!("Compute Slice Price: ${:.2}", bid.compute_slice_price);
println!("Storage Slices: {}", bid.storage_slices_nr);
println!("Storage Slice Price: ${:.2}", bid.storage_slice_price);
println!("Status: {:?}", bid.status);
println!("Obligation: {}", bid.obligation);
println!("Start Date: {}", bid.start_date);
println!("End Date: {}", bid.end_date);
println!("Billing Period: {:?}", bid.billing_period);
println!("Signature User: {}", bid.signature_user);
println!("Created At: {}", bid.base_data.created_at);
println!("Modified At: {}", bid.base_data.modified_at);
}
fn main() {
// Create a new DB instance in /tmp/grid4_db, and reset before every run
let db = heromodels::db::hero::OurDB::new("/tmp/grid4_db", true).expect("Can create DB");
println!("Grid4 Bid Models - Basic Usage Example");
println!("=====================================");
// Create bids with different configurations
// Bid 1 - Small compute request
let bid1 = Bid::new()
.customer_id(101)
.compute_slices_nr(4)
.compute_slice_price(0.05)
.storage_slices_nr(10)
.storage_slice_price(0.02)
.status(BidStatus::Pending)
.obligation(false)
.start_date(1640995200) // 2022-01-01
.end_date(1672531200) // 2023-01-01
.billing_period(BillingPeriod::Monthly)
.signature_user("sig_user_101_abc123".to_string());
// Bid 2 - Large compute request with obligation
let bid2 = Bid::new()
.customer_id(102)
.compute_slices_nr(16)
.compute_slice_price(0.04)
.storage_slices_nr(50)
.storage_slice_price(0.015)
.status(BidStatus::Confirmed)
.obligation(true)
.start_date(1640995200)
.end_date(1704067200) // 2024-01-01
.billing_period(BillingPeriod::Yearly)
.signature_user("sig_user_102_def456".to_string());
// Bid 3 - Storage-heavy request
let bid3 = Bid::new()
.customer_id(103)
.compute_slices_nr(2)
.compute_slice_price(0.06)
.storage_slices_nr(100)
.storage_slice_price(0.01)
.status(BidStatus::Assigned)
.obligation(true)
.start_date(1640995200)
.end_date(1672531200)
.billing_period(BillingPeriod::Hourly)
.signature_user("sig_user_103_ghi789".to_string());
// Bid 4 - Cancelled bid
let bid4 = Bid::new()
.customer_id(104)
.compute_slices_nr(8)
.compute_slice_price(0.055)
.storage_slices_nr(25)
.storage_slice_price(0.018)
.status(BidStatus::Cancelled)
.obligation(false)
.start_date(1640995200)
.end_date(1672531200)
.billing_period(BillingPeriod::Monthly)
.signature_user("sig_user_104_jkl012".to_string());
// Save all bids to database and get their assigned IDs and updated models
let (bid1_id, db_bid1) = db
.collection()
.expect("can open bid collection")
.set(&bid1)
.expect("can set bid");
let (bid2_id, db_bid2) = db
.collection()
.expect("can open bid collection")
.set(&bid2)
.expect("can set bid");
let (bid3_id, db_bid3) = db
.collection()
.expect("can open bid collection")
.set(&bid3)
.expect("can set bid");
let (bid4_id, db_bid4) = db
.collection()
.expect("can open bid collection")
.set(&bid4)
.expect("can set bid");
println!("Bid 1 assigned ID: {}", bid1_id);
println!("Bid 2 assigned ID: {}", bid2_id);
println!("Bid 3 assigned ID: {}", bid3_id);
println!("Bid 4 assigned ID: {}", bid4_id);
// Print all bids retrieved from database
println!("\n--- Bids Retrieved from Database ---");
println!("\n1. Small compute bid:");
print_bid_details(&db_bid1);
println!("\n2. Large compute bid with obligation:");
print_bid_details(&db_bid2);
println!("\n3. Storage-heavy bid:");
print_bid_details(&db_bid3);
println!("\n4. Cancelled bid:");
print_bid_details(&db_bid4);
// Demonstrate different ways to retrieve bids from the database
println!("\n--- Retrieving Bids by Different Methods ---");
println!("\n1. By Customer ID Index (Customer 102):");
let customer_bids = db
.collection::<Bid>()
.expect("can open bid collection")
.get::<customer_id, _>(&102u32)
.expect("can load bids by customer");
assert_eq!(customer_bids.len(), 1);
print_bid_details(&customer_bids[0]);
println!("\n2. Updating Bid Status:");
let mut updated_bid = db_bid1.clone();
updated_bid.status = BidStatus::Confirmed;
let (_, confirmed_bid) = db
.collection::<Bid>()
.expect("can open bid collection")
.set(&updated_bid)
.expect("can update bid");
println!("Updated bid status to Confirmed:");
print_bid_details(&confirmed_bid);
// 3. Delete a bid and show the updated results
println!("\n3. After Deleting a Bid:");
println!("Deleting bid with ID: {}", bid4_id);
db.collection::<Bid>()
.expect("can open bid collection")
.delete_by_id(bid4_id)
.expect("can delete existing bid");
// Show remaining bids
let all_bids = db
.collection::<Bid>()
.expect("can open bid collection")
.get_all()
.expect("can load all bids");
println!("Remaining bids count: {}", all_bids.len());
assert_eq!(all_bids.len(), 3);
// Calculate total compute and storage requested
println!("\n--- Bid Analytics ---");
let total_compute_slices: i32 = all_bids.iter().map(|b| b.compute_slices_nr).sum();
let total_storage_slices: i32 = all_bids.iter().map(|b| b.storage_slices_nr).sum();
let avg_compute_price: f64 = all_bids.iter().map(|b| b.compute_slice_price).sum::<f64>() / all_bids.len() as f64;
let avg_storage_price: f64 = all_bids.iter().map(|b| b.storage_slice_price).sum::<f64>() / all_bids.len() as f64;
println!("Total Compute Slices Requested: {}", total_compute_slices);
println!("Total Storage Slices Requested: {}", total_storage_slices);
println!("Average Compute Price: ${:.3}", avg_compute_price);
println!("Average Storage Price: ${:.3}", avg_storage_price);
// Count bids by status
let confirmed_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Confirmed)).count();
let assigned_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Assigned)).count();
let pending_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Pending)).count();
println!("\nBids by Status:");
println!(" Confirmed: {}", confirmed_count);
println!(" Assigned: {}", assigned_count);
println!(" Pending: {}", pending_count);
println!("\n--- Model Information ---");
println!("Bid DB Prefix: {}", Bid::db_prefix());
}


@@ -1,301 +0,0 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{Contract, ContractStatus};
use heromodels::models::grid4::contract::contract_index::customer_id;
use heromodels_core::Model;
// Helper function to print contract details
fn print_contract_details(contract: &Contract) {
println!("\n--- Contract Details ---");
println!("ID: {}", contract.get_id());
println!("Customer ID: {}", contract.customer_id);
println!("Compute Slices: {}", contract.compute_slices.len());
println!("Storage Slices: {}", contract.storage_slices.len());
println!("Compute Slice Price: ${:.2}", contract.compute_slice_price);
println!("Storage Slice Price: ${:.2}", contract.storage_slice_price);
println!("Network Slice Price: ${:.2}", contract.network_slice_price);
println!("Status: {:?}", contract.status);
println!("Start Date: {}", contract.start_date);
println!("End Date: {}", contract.end_date);
println!("Billing Period: {:?}", contract.billing_period);
println!("Signature User: {}", contract.signature_user);
println!("Signature Hoster: {}", contract.signature_hoster);
println!("Created At: {}", contract.base_data.created_at);
println!("Modified At: {}", contract.base_data.modified_at);
// Print compute slices details
if !contract.compute_slices.is_empty() {
println!(" Compute Slices:");
for (i, slice) in contract.compute_slices.iter().enumerate() {
println!(" {}. Node: {}, ID: {}, Memory: {:.1}GB, Storage: {:.1}GB, Passmark: {}, vCores: {}",
i + 1, slice.node_id, slice.id, slice.mem_gb, slice.storage_gb, slice.passmark, slice.vcores);
}
}
// Print storage slices details
if !contract.storage_slices.is_empty() {
println!(" Storage Slices:");
for (i, slice) in contract.storage_slices.iter().enumerate() {
println!(" {}. Node: {}, ID: {}, Size: {}GB",
i + 1, slice.node_id, slice.id, slice.storage_size_gb);
}
}
}
fn main() {
// Create a new DB instance in /tmp/grid4_contracts_db, and reset before every run
let db = heromodels::db::hero::OurDB::new("/tmp/grid4_contracts_db", true).expect("Can create DB");
println!("Grid4 Contract Models - Basic Usage Example");
println!("==========================================");
// Create compute slices for contracts
let compute_slice1 = ComputeSliceProvisioned::new()
.node_id(1001)
.id(1)
.mem_gb(2.0)
.storage_gb(20.0)
.passmark(2500)
.vcores(2)
.cpu_oversubscription(150)
.tags("web-server,production".to_string());
let compute_slice2 = ComputeSliceProvisioned::new()
.node_id(1002)
.id(2)
.mem_gb(4.0)
.storage_gb(40.0)
.passmark(5000)
.vcores(4)
.cpu_oversubscription(120)
.tags("database,high-performance".to_string());
let compute_slice3 = ComputeSliceProvisioned::new()
.node_id(1003)
.id(1)
.mem_gb(8.0)
.storage_gb(80.0)
.passmark(10000)
.vcores(8)
.cpu_oversubscription(100)
.tags("ml-training,gpu-enabled".to_string());
// Create storage slices for contracts
let storage_slice1 = StorageSliceProvisioned::new()
.node_id(2001)
.id(1)
.storage_size_gb(100)
.tags("backup,cold-storage".to_string());
let storage_slice2 = StorageSliceProvisioned::new()
.node_id(2002)
.id(2)
.storage_size_gb(500)
.tags("data-lake,analytics".to_string());
let storage_slice3 = StorageSliceProvisioned::new()
.node_id(2003)
.id(1)
.storage_size_gb(1000)
.tags("archive,long-term".to_string());
// Create contracts with different configurations
// Contract 1 - Small web hosting contract
let contract1 = Contract::new()
.customer_id(201)
.add_compute_slice(compute_slice1.clone())
.add_storage_slice(storage_slice1.clone())
.compute_slice_price(0.05)
.storage_slice_price(0.02)
.network_slice_price(0.01)
.status(ContractStatus::Active)
.start_date(1640995200) // 2022-01-01
.end_date(1672531200) // 2023-01-01
.billing_period(BillingPeriod::Monthly)
.signature_user("contract_user_201_abc123".to_string())
.signature_hoster("hoster_node1001_xyz789".to_string());
// Contract 2 - Database hosting contract
let contract2 = Contract::new()
.customer_id(202)
.add_compute_slice(compute_slice2.clone())
.add_storage_slice(storage_slice2.clone())
.compute_slice_price(0.04)
.storage_slice_price(0.015)
.network_slice_price(0.008)
.status(ContractStatus::Active)
.start_date(1640995200)
.end_date(1704067200) // 2024-01-01
.billing_period(BillingPeriod::Yearly)
.signature_user("contract_user_202_def456".to_string())
.signature_hoster("hoster_node1002_uvw123".to_string());
// Contract 3 - ML training contract (paused)
let contract3 = Contract::new()
.customer_id(203)
.add_compute_slice(compute_slice3.clone())
.add_storage_slice(storage_slice3.clone())
.compute_slice_price(0.08)
.storage_slice_price(0.01)
.network_slice_price(0.015)
.status(ContractStatus::Paused)
.start_date(1640995200)
.end_date(1672531200)
.billing_period(BillingPeriod::Hourly)
.signature_user("contract_user_203_ghi789".to_string())
.signature_hoster("hoster_node1003_rst456".to_string());
// Contract 4 - Multi-slice enterprise contract
let contract4 = Contract::new()
.customer_id(204)
.add_compute_slice(compute_slice1.clone())
.add_compute_slice(compute_slice2.clone())
.add_storage_slice(storage_slice1.clone())
.add_storage_slice(storage_slice2.clone())
.compute_slice_price(0.045)
.storage_slice_price(0.018)
.network_slice_price(0.012)
.status(ContractStatus::Active)
.start_date(1640995200)
.end_date(1735689600) // 2025-01-01
.billing_period(BillingPeriod::Monthly)
.signature_user("contract_user_204_jkl012".to_string())
.signature_hoster("hoster_enterprise_mno345".to_string());
// Save all contracts to database and get their assigned IDs and updated models
let (contract1_id, db_contract1) = db
.collection()
.expect("can open contract collection")
.set(&contract1)
.expect("can set contract");
let (contract2_id, db_contract2) = db
.collection()
.expect("can open contract collection")
.set(&contract2)
.expect("can set contract");
let (contract3_id, db_contract3) = db
.collection()
.expect("can open contract collection")
.set(&contract3)
.expect("can set contract");
let (contract4_id, db_contract4) = db
.collection()
.expect("can open contract collection")
.set(&contract4)
.expect("can set contract");
println!("Contract 1 assigned ID: {}", contract1_id);
println!("Contract 2 assigned ID: {}", contract2_id);
println!("Contract 3 assigned ID: {}", contract3_id);
println!("Contract 4 assigned ID: {}", contract4_id);
// Print all contracts retrieved from database
println!("\n--- Contracts Retrieved from Database ---");
println!("\n1. Web hosting contract:");
print_contract_details(&db_contract1);
println!("\n2. Database hosting contract:");
print_contract_details(&db_contract2);
println!("\n3. ML training contract (paused):");
print_contract_details(&db_contract3);
println!("\n4. Enterprise multi-slice contract:");
print_contract_details(&db_contract4);
// Demonstrate different ways to retrieve contracts from the database
// 1. Retrieve by customer ID index
println!("\n--- Retrieving Contracts by Different Methods ---");
println!("\n1. By Customer ID Index (Customer 202):");
let customer_contracts = db
.collection::<Contract>()
.expect("can open contract collection")
.get::<customer_id, _>(&202u32)
.expect("can load contracts by customer");
assert_eq!(customer_contracts.len(), 1);
print_contract_details(&customer_contracts[0]);
// 2. Update contract status
println!("\n2. Resuming Paused Contract:");
let mut updated_contract = db_contract3.clone();
updated_contract.status = ContractStatus::Active;
let (_, resumed_contract) = db
.collection::<Contract>()
.expect("can open contract collection")
.set(&updated_contract)
.expect("can update contract");
println!("Updated contract status to Active:");
print_contract_details(&resumed_contract);
// 3. Cancel a contract
println!("\n3. Cancelling a Contract:");
let mut cancelled_contract = db_contract1.clone();
cancelled_contract.status = ContractStatus::Cancelled;
let (_, final_contract) = db
.collection::<Contract>()
.expect("can open contract collection")
.set(&cancelled_contract)
.expect("can update contract");
println!("Cancelled contract:");
print_contract_details(&final_contract);
// Show remaining active contracts
let all_contracts = db
.collection::<Contract>()
.expect("can open contract collection")
.get_all()
.expect("can load all contracts");
println!("\n--- Contract Analytics ---");
let active_contracts: Vec<_> = all_contracts.iter()
.filter(|c| matches!(c.status, ContractStatus::Active))
.collect();
let paused_contracts: Vec<_> = all_contracts.iter()
.filter(|c| matches!(c.status, ContractStatus::Paused))
.collect();
let cancelled_contracts: Vec<_> = all_contracts.iter()
.filter(|c| matches!(c.status, ContractStatus::Cancelled))
.collect();
println!("Total Contracts: {}", all_contracts.len());
println!("Active Contracts: {}", active_contracts.len());
println!("Paused Contracts: {}", paused_contracts.len());
println!("Cancelled Contracts: {}", cancelled_contracts.len());
// Calculate total provisioned resources
let total_compute_slices: usize = all_contracts.iter().map(|c| c.compute_slices.len()).sum();
let total_storage_slices: usize = all_contracts.iter().map(|c| c.storage_slices.len()).sum();
let total_memory_gb: f64 = all_contracts.iter()
.flat_map(|c| &c.compute_slices)
.map(|s| s.mem_gb)
.sum();
let total_storage_gb: i32 = all_contracts.iter()
.flat_map(|c| &c.storage_slices)
.map(|s| s.storage_size_gb)
.sum();
println!("\nProvisioned Resources:");
println!(" Total Compute Slices: {}", total_compute_slices);
println!(" Total Storage Slices: {}", total_storage_slices);
println!(" Total Memory: {:.1} GB", total_memory_gb);
println!(" Total Storage: {} GB", total_storage_gb);
// Calculate average pricing
let avg_compute_price: f64 = all_contracts.iter().map(|c| c.compute_slice_price).sum::<f64>() / all_contracts.len() as f64;
let avg_storage_price: f64 = all_contracts.iter().map(|c| c.storage_slice_price).sum::<f64>() / all_contracts.len() as f64;
let avg_network_price: f64 = all_contracts.iter().map(|c| c.network_slice_price).sum::<f64>() / all_contracts.len() as f64;
println!("\nAverage Pricing:");
println!(" Compute: ${:.3} per slice", avg_compute_price);
println!(" Storage: ${:.3} per slice", avg_storage_price);
println!(" Network: ${:.3} per slice", avg_network_price);
println!("\n--- Model Information ---");
println!("Contract DB Prefix: {}", Contract::db_prefix());
}


@@ -1,12 +0,0 @@
# Grid4 Node Example (OurDB)
This example demonstrates how to use the Grid4 `Node` model against the embedded OurDB backend.
- Creates an in-memory/on-disk OurDB under `/tmp`.
- Demonstrates CRUD and simple index lookups on `country`, `nodegroupid`, and `pubkey`.
Run it:
```bash
cargo run -p heromodels --example grid4_example
```


@@ -1,66 +0,0 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey};
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use std::sync::Arc;
fn main() {
// Create a temp OurDB
let ts = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos();
let path = format!("/tmp/grid4_example_{}", ts);
let _ = std::fs::remove_dir_all(&path);
let db = Arc::new(OurDB::new(&path, true).expect("create OurDB"));
let nodes = db.collection::<Node>().expect("open node collection");
// Build a node
let cs = ComputeSlice::new()
.nodeid(1)
.slice_id(1)
.mem_gb(64.0)
.storage_gb(1024.0)
.passmark(8000)
.vcores(24)
.gpus(2)
.price_cc(0.5);
let dev = DeviceInfo {
vendor: "ACME".into(),
..Default::default()
};
let n = Node::new()
.nodegroupid(7)
.uptime(98)
.add_compute_slice(cs)
.devices(dev)
.country("BE")
.pubkey("PUB_NODE_X")
.build();
// Store
let (id, stored) = nodes.set(&n).expect("store node");
println!("Stored node id={id} pubkey={} country={}", stored.pubkey, stored.country);
// Query by indexes
let by_country = nodes.get::<country, _>("BE").expect("query country");
println!("Found {} nodes in country=BE", by_country.len());
let by_group = nodes.get::<nodegroupid, _>(&7).expect("query group");
println!("Found {} nodes in group=7", by_group.len());
let by_key = nodes.get::<pubkey, _>("PUB_NODE_X").expect("query pubkey");
println!("Found {} with pubkey PUB_NODE_X", by_key.len());
// Update
let updated = stored.clone().country("NL");
let (_, back) = nodes.set(&updated).expect("update node");
println!("Updated node country={}", back.country);
// Delete
nodes.delete_by_id(id).expect("delete node");
println!("Deleted node id={id}");
}


@@ -1,390 +0,0 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{Node, NodeDevice, ComputeSlice, StorageSlice};
use heromodels::models::grid4::node::node_index::{nodegroupid, country};
use heromodels_core::Model;
// Helper function to print node details
fn print_node_details(node: &Node) {
println!("\n--- Node Details ---");
println!("ID: {}", node.get_id());
println!("NodeGroup ID: {}", node.nodegroupid);
println!("Uptime: {}%", node.uptime);
println!("Country: {}", node.country);
println!("Birth Time: {}", node.birthtime);
println!("Public Key: {}", node.pubkey);
println!("Compute Slices: {}", node.computeslices.len());
println!("Storage Slices: {}", node.storageslices.len());
println!("Created At: {}", node.base_data.created_at);
println!("Modified At: {}", node.base_data.modified_at);
// Print capacity details
println!(" Capacity:");
println!(" Storage: {:.1} GB", node.capacity.storage_gb);
println!(" Memory: {:.1} GB", node.capacity.mem_gb);
println!(" GPU Memory: {:.1} GB", node.capacity.mem_gb_gpu);
println!(" Passmark: {}", node.capacity.passmark);
println!(" vCores: {}", node.capacity.vcores);
// Print device info
println!(" Devices:");
println!(" Vendor: {}", node.devices.vendor);
println!(" CPUs: {}", node.devices.cpu.len());
println!(" GPUs: {}", node.devices.gpu.len());
println!(" Memory: {}", node.devices.memory.len());
println!(" Storage: {}", node.devices.storage.len());
println!(" Network: {}", node.devices.network.len());
// Print compute slices
if !node.computeslices.is_empty() {
println!(" Compute Slices:");
for (i, slice) in node.computeslices.iter().enumerate() {
println!(" {}. ID: {}, Memory: {:.1}GB, Storage: {:.1}GB, vCores: {}, GPUs: {}",
i + 1, slice.id, slice.mem_gb, slice.storage_gb, slice.vcores, slice.gpus);
}
}
// Print storage slices
if !node.storageslices.is_empty() {
println!(" Storage Slices:");
for (i, slice) in node.storageslices.iter().enumerate() {
println!(" {}. ID: {}", i + 1, slice.id);
}
}
}
fn main() {
// Create a new DB instance in /tmp/grid4_nodes_db, and reset before every run
let db = heromodels::db::hero::OurDB::new("/tmp/grid4_nodes_db", true).expect("Can create DB");
println!("Grid4 Node Models - Basic Usage Example");
println!("======================================");
// Create device components for nodes
// CPU devices
let cpu1 = CPUDevice {
id: "cpu_intel_i7_12700k".to_string(),
cores: 12,
passmark: 28500,
description: "Intel Core i7-12700K".to_string(),
cpu_brand: "Intel".to_string(),
cpu_version: "12th Gen".to_string(),
};
let cpu2 = CPUDevice {
id: "cpu_amd_ryzen_9_5900x".to_string(),
cores: 12,
passmark: 32000,
description: "AMD Ryzen 9 5900X".to_string(),
cpu_brand: "AMD".to_string(),
cpu_version: "Zen 3".to_string(),
};
// GPU devices
let gpu1 = GPUDevice {
id: "gpu_rtx_3080".to_string(),
cores: 8704,
memory_gb: 10.0,
description: "NVIDIA GeForce RTX 3080".to_string(),
gpu_brand: "NVIDIA".to_string(),
gpu_version: "RTX 30 Series".to_string(),
};
let gpu2 = GPUDevice {
id: "gpu_rtx_4090".to_string(),
cores: 16384,
memory_gb: 24.0,
description: "NVIDIA GeForce RTX 4090".to_string(),
gpu_brand: "NVIDIA".to_string(),
gpu_version: "RTX 40 Series".to_string(),
};
// Memory devices
let memory1 = MemoryDevice {
id: "mem_ddr4_32gb".to_string(),
size_gb: 32.0,
description: "DDR4-3200 32GB Kit".to_string(),
};
let memory2 = MemoryDevice {
id: "mem_ddr5_64gb".to_string(),
size_gb: 64.0,
description: "DDR5-5600 64GB Kit".to_string(),
};
// Storage devices
let storage1 = StorageDevice {
id: "ssd_nvme_1tb".to_string(),
size_gb: 1000.0,
description: "NVMe SSD 1TB".to_string(),
};
let storage2 = StorageDevice {
id: "hdd_sata_4tb".to_string(),
size_gb: 4000.0,
description: "SATA HDD 4TB".to_string(),
};
// Network devices
let network1 = NetworkDevice {
id: "eth_1gbit".to_string(),
speed_mbps: 1000,
description: "Gigabit Ethernet".to_string(),
};
let network2 = NetworkDevice {
id: "eth_10gbit".to_string(),
speed_mbps: 10000,
description: "10 Gigabit Ethernet".to_string(),
};
// Create device info configurations
let devices1 = DeviceInfo {
vendor: "Dell".to_string(),
cpu: vec![cpu1.clone()],
gpu: vec![gpu1.clone()],
memory: vec![memory1.clone()],
storage: vec![storage1.clone(), storage2.clone()],
network: vec![network1.clone()],
};
let devices2 = DeviceInfo {
vendor: "HP".to_string(),
cpu: vec![cpu2.clone()],
gpu: vec![gpu2.clone()],
memory: vec![memory2.clone()],
storage: vec![storage1.clone()],
network: vec![network2.clone()],
};
// Create node capacities
let capacity1 = NodeCapacity {
storage_gb: 5000.0,
mem_gb: 32.0,
mem_gb_gpu: 10.0,
passmark: 28500,
vcores: 24,
};
let capacity2 = NodeCapacity {
storage_gb: 1000.0,
mem_gb: 64.0,
mem_gb_gpu: 24.0,
passmark: 32000,
vcores: 24,
};
// Create compute slices
let compute_slice1 = ComputeSlice::new()
.id(1)
.mem_gb(4.0)
.storage_gb(100.0)
.passmark(3000)
.vcores(2)
.cpu_oversubscription(150)
.storage_oversubscription(120)
.gpus(0);
let compute_slice2 = ComputeSlice::new()
.id(2)
.mem_gb(8.0)
.storage_gb(200.0)
.passmark(6000)
.vcores(4)
.cpu_oversubscription(130)
.storage_oversubscription(110)
.gpus(1);
let compute_slice3 = ComputeSlice::new()
.id(1)
.mem_gb(16.0)
.storage_gb(400.0)
.passmark(12000)
.vcores(8)
.cpu_oversubscription(110)
.storage_oversubscription(100)
.gpus(1);
// Create storage slices
let storage_slice1 = StorageSlice::new().id(1);
let storage_slice2 = StorageSlice::new().id(2);
let storage_slice3 = StorageSlice::new().id(3);
// Create nodes with different configurations
// Node 1 - Web hosting node
let node1 = Node::new()
.nodegroupid(1001)
.uptime(98)
.add_compute_slice(compute_slice1.clone())
.add_compute_slice(compute_slice2.clone())
.add_storage_slice(storage_slice1.clone())
.add_storage_slice(storage_slice2.clone())
.devices(devices1.clone())
.country("US".to_string())
.capacity(capacity1.clone())
.birthtime(1640995200) // 2022-01-01
.pubkey("node1_pubkey_abc123xyz789".to_string())
.signature_node("node1_signature_def456".to_string())
.signature_farmer("farmer1_signature_ghi789".to_string());
// Node 2 - High-performance computing node
let node2 = Node::new()
.nodegroupid(1002)
.uptime(99)
.add_compute_slice(compute_slice3.clone())
.add_storage_slice(storage_slice3.clone())
.devices(devices2.clone())
.country("DE".to_string())
.capacity(capacity2.clone())
.birthtime(1672531200) // 2023-01-01
.pubkey("node2_pubkey_jkl012mno345".to_string())
.signature_node("node2_signature_pqr678".to_string())
.signature_farmer("farmer2_signature_stu901".to_string());
// Node 3 - Storage-focused node
let node3 = Node::new()
.nodegroupid(1001)
.uptime(95)
.add_storage_slice(storage_slice1.clone())
.add_storage_slice(storage_slice2.clone())
.add_storage_slice(storage_slice3.clone())
.devices(devices1.clone())
.country("NL".to_string())
.capacity(capacity1.clone())
.birthtime(1704067200) // 2024-01-01
.pubkey("node3_pubkey_vwx234yzab567".to_string())
.signature_node("node3_signature_cde890".to_string())
.signature_farmer("farmer1_signature_fgh123".to_string());
// Save all nodes to database and get their assigned IDs and updated models
let (node1_id, db_node1) = db
.collection()
.expect("can open node collection")
.set(&node1)
.expect("can set node");
let (node2_id, db_node2) = db
.collection()
.expect("can open node collection")
.set(&node2)
.expect("can set node");
let (node3_id, db_node3) = db
.collection()
.expect("can open node collection")
.set(&node3)
.expect("can set node");
println!("Node 1 assigned ID: {}", node1_id);
println!("Node 2 assigned ID: {}", node2_id);
println!("Node 3 assigned ID: {}", node3_id);
// Print all nodes retrieved from database
println!("\n--- Nodes Retrieved from Database ---");
println!("\n1. Web hosting node:");
print_node_details(&db_node1);
println!("\n2. High-performance computing node:");
print_node_details(&db_node2);
println!("\n3. Storage-focused node:");
print_node_details(&db_node3);
// Demonstrate different ways to retrieve nodes from the database
// 1. Retrieve by nodegroup ID index
println!("\n--- Retrieving Nodes by Different Methods ---");
println!("\n1. By NodeGroup ID Index (NodeGroup 1001):");
let nodegroup_nodes = db
.collection::<Node>()
.expect("can open node collection")
.get::<nodegroupid, _>(&1001i32)
.expect("can load nodes by nodegroup");
assert_eq!(nodegroup_nodes.len(), 2);
for (i, node) in nodegroup_nodes.iter().enumerate() {
println!(" Node {}: ID {}, Country: {}, Uptime: {}%",
i + 1, node.get_id(), node.country, node.uptime);
}
// 2. Retrieve by country index
println!("\n2. By Country Index (Germany - DE):");
let country_nodes = db
.collection::<Node>()
.expect("can open node collection")
.get::<country, _>("DE")
.expect("can load nodes by country");
assert_eq!(country_nodes.len(), 1);
print_node_details(&country_nodes[0]);
// 3. Update node uptime
println!("\n3. Updating Node Uptime:");
let mut updated_node = db_node1.clone();
updated_node.uptime = 99;
let (_, uptime_updated_node) = db
.collection::<Node>()
.expect("can open node collection")
.set(&updated_node)
.expect("can update node");
println!("Updated node uptime to 99%:");
println!(" Node ID: {}, New Uptime: {}%", uptime_updated_node.get_id(), uptime_updated_node.uptime);
// Show all nodes and calculate analytics
let all_nodes = db
.collection::<Node>()
.expect("can open node collection")
.get_all()
.expect("can load all nodes");
println!("\n--- Node Analytics ---");
println!("Total Nodes: {}", all_nodes.len());
// Calculate total capacity
let total_storage_gb: f64 = all_nodes.iter().map(|n| n.capacity.storage_gb).sum();
let total_memory_gb: f64 = all_nodes.iter().map(|n| n.capacity.mem_gb).sum();
let total_gpu_memory_gb: f64 = all_nodes.iter().map(|n| n.capacity.mem_gb_gpu).sum();
let total_vcores: i32 = all_nodes.iter().map(|n| n.capacity.vcores).sum();
let avg_uptime: f64 = all_nodes.iter().map(|n| n.uptime as f64).sum::<f64>() / all_nodes.len() as f64;
println!("Total Capacity:");
println!(" Storage: {:.1} GB", total_storage_gb);
println!(" Memory: {:.1} GB", total_memory_gb);
println!(" GPU Memory: {:.1} GB", total_gpu_memory_gb);
println!(" vCores: {}", total_vcores);
println!(" Average Uptime: {:.1}%", avg_uptime);
// Count nodes by country
let mut country_counts = std::collections::HashMap::new();
for node in &all_nodes {
*country_counts.entry(&node.country).or_insert(0) += 1;
}
println!("\nNodes by Country:");
for (country, count) in country_counts {
println!(" {}: {}", country, count);
}
// Count total slices
let total_compute_slices: usize = all_nodes.iter().map(|n| n.computeslices.len()).sum();
let total_storage_slices: usize = all_nodes.iter().map(|n| n.storageslices.len()).sum();
println!("\nTotal Slices:");
println!(" Compute Slices: {}", total_compute_slices);
println!(" Storage Slices: {}", total_storage_slices);
// Vendor distribution
let mut vendor_counts = std::collections::HashMap::new();
for node in &all_nodes {
*vendor_counts.entry(&node.devices.vendor).or_insert(0) += 1;
}
println!("\nNodes by Vendor:");
for (vendor, count) in vendor_counts {
println!(" {}: {}", vendor, count);
}
println!("\n--- Model Information ---");
println!("Node DB Prefix: {}", Node::db_prefix());
}

View File

@@ -1,284 +0,0 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{NodeGroup, PricingPolicy, SLAPolicy};
use heromodels_core::Model;
// Helper function to print nodegroup details
fn print_nodegroup_details(nodegroup: &NodeGroup) {
println!("\n--- NodeGroup Details ---");
println!("ID: {}", nodegroup.get_id());
println!("Farmer ID: {}", nodegroup.farmerid);
println!("Description: {}", nodegroup.description);
println!("Secret: {}", nodegroup.secret);
println!("Compute Slice Pricing (CC): {:.4}", nodegroup.compute_slice_normalized_pricing_cc);
println!("Storage Slice Pricing (CC): {:.4}", nodegroup.storage_slice_normalized_pricing_cc);
println!("Signature Farmer: {}", nodegroup.signature_farmer);
println!("Created At: {}", nodegroup.base_data.created_at);
println!("Modified At: {}", nodegroup.base_data.modified_at);
// Print SLA Policy details
println!(" SLA Policy:");
println!(" Uptime: {}%", nodegroup.slapolicy.sla_uptime);
println!(" Bandwidth: {} Mbit/s", nodegroup.slapolicy.sla_bandwidth_mbit);
println!(" Penalty: {}%", nodegroup.slapolicy.sla_penalty);
// Print Pricing Policy details
println!(" Pricing Policy:");
println!(" Marketplace Year Discounts: {:?}%", nodegroup.pricingpolicy.marketplace_year_discounts);
}
fn main() {
// Create a new DB instance in /tmp/grid4_nodegroups_db, and reset before every run
let db = heromodels::db::hero::OurDB::new("/tmp/grid4_nodegroups_db", true).expect("Can create DB");
println!("Grid4 NodeGroup Models - Basic Usage Example");
println!("===========================================");
// Create SLA policies
let sla_policy_premium = SLAPolicy {
sla_uptime: 99,
sla_bandwidth_mbit: 1000,
sla_penalty: 200,
};
let sla_policy_standard = SLAPolicy {
sla_uptime: 95,
sla_bandwidth_mbit: 100,
sla_penalty: 100,
};
let sla_policy_basic = SLAPolicy {
sla_uptime: 90,
sla_bandwidth_mbit: 50,
sla_penalty: 50,
};
// Create pricing policies
let pricing_policy_aggressive = PricingPolicy {
marketplace_year_discounts: vec![40, 50, 60],
};
let pricing_policy_standard = PricingPolicy {
marketplace_year_discounts: vec![30, 40, 50],
};
let pricing_policy_conservative = PricingPolicy {
marketplace_year_discounts: vec![20, 30, 40],
};
// Create nodegroups with different configurations
// NodeGroup 1 - Premium hosting provider
let nodegroup1 = NodeGroup::new()
.farmerid(501)
.secret("encrypted_boot_secret_premium_abc123".to_string())
.description("Premium hosting with 99% uptime SLA and high-speed connectivity".to_string())
.slapolicy(sla_policy_premium.clone())
.pricingpolicy(pricing_policy_aggressive.clone())
.compute_slice_normalized_pricing_cc(0.0450)
.storage_slice_normalized_pricing_cc(0.0180)
.signature_farmer("farmer_501_premium_signature_xyz789".to_string());
// NodeGroup 2 - Standard business provider
let nodegroup2 = NodeGroup::new()
.farmerid(502)
.secret("encrypted_boot_secret_standard_def456".to_string())
.description("Standard business hosting with reliable performance".to_string())
.slapolicy(sla_policy_standard.clone())
.pricingpolicy(pricing_policy_standard.clone())
.compute_slice_normalized_pricing_cc(0.0350)
.storage_slice_normalized_pricing_cc(0.0150)
.signature_farmer("farmer_502_standard_signature_uvw012".to_string());
// NodeGroup 3 - Budget-friendly provider
let nodegroup3 = NodeGroup::new()
.farmerid(503)
.secret("encrypted_boot_secret_budget_ghi789".to_string())
.description("Cost-effective hosting for development and testing".to_string())
.slapolicy(sla_policy_basic.clone())
.pricingpolicy(pricing_policy_conservative.clone())
.compute_slice_normalized_pricing_cc(0.0250)
.storage_slice_normalized_pricing_cc(0.0120)
.signature_farmer("farmer_503_budget_signature_rst345".to_string());
// NodeGroup 4 - Enterprise provider
let nodegroup4 = NodeGroup::new()
.farmerid(504)
.secret("encrypted_boot_secret_enterprise_jkl012".to_string())
.description("Enterprise-grade infrastructure with maximum reliability".to_string())
.slapolicy(sla_policy_premium.clone())
.pricingpolicy(pricing_policy_standard.clone())
.compute_slice_normalized_pricing_cc(0.0500)
.storage_slice_normalized_pricing_cc(0.0200)
.signature_farmer("farmer_504_enterprise_signature_mno678".to_string());
// Save all nodegroups to database and get their assigned IDs and updated models
let (nodegroup1_id, db_nodegroup1) = db
.collection()
.expect("can open nodegroup collection")
.set(&nodegroup1)
.expect("can set nodegroup");
let (nodegroup2_id, db_nodegroup2) = db
.collection()
.expect("can open nodegroup collection")
.set(&nodegroup2)
.expect("can set nodegroup");
let (nodegroup3_id, db_nodegroup3) = db
.collection()
.expect("can open nodegroup collection")
.set(&nodegroup3)
.expect("can set nodegroup");
let (nodegroup4_id, db_nodegroup4) = db
.collection()
.expect("can open nodegroup collection")
.set(&nodegroup4)
.expect("can set nodegroup");
println!("NodeGroup 1 assigned ID: {}", nodegroup1_id);
println!("NodeGroup 2 assigned ID: {}", nodegroup2_id);
println!("NodeGroup 3 assigned ID: {}", nodegroup3_id);
println!("NodeGroup 4 assigned ID: {}", nodegroup4_id);
// Print all nodegroups retrieved from database
println!("\n--- NodeGroups Retrieved from Database ---");
println!("\n1. Premium hosting provider:");
print_nodegroup_details(&db_nodegroup1);
println!("\n2. Standard business provider:");
print_nodegroup_details(&db_nodegroup2);
println!("\n3. Budget-friendly provider:");
print_nodegroup_details(&db_nodegroup3);
println!("\n4. Enterprise provider:");
print_nodegroup_details(&db_nodegroup4);
// Demonstrate different ways to retrieve nodegroups from the database
// 1. Retrieve by farmer ID index
println!("\n--- Retrieving NodeGroups by Different Methods ---");
println!("\n1. By Farmer ID Index (Farmer 502):");
let farmer_nodegroups = db
.collection::<NodeGroup>()
.expect("can open nodegroup collection")
.get_by_index("farmerid", &502u32)
.expect("can load nodegroups by farmer");
assert_eq!(farmer_nodegroups.len(), 1);
print_nodegroup_details(&farmer_nodegroups[0]);
// 2. Update nodegroup pricing
println!("\n2. Updating NodeGroup Pricing:");
let mut updated_nodegroup = db_nodegroup3.clone();
updated_nodegroup.compute_slice_normalized_pricing_cc = 0.0280;
updated_nodegroup.storage_slice_normalized_pricing_cc = 0.0130;
let (_, price_updated_nodegroup) = db
.collection::<NodeGroup>()
.expect("can open nodegroup collection")
.set(&updated_nodegroup)
.expect("can update nodegroup");
println!("Updated pricing for budget provider:");
println!(" Compute: {:.4} CC", price_updated_nodegroup.compute_slice_normalized_pricing_cc);
println!(" Storage: {:.4} CC", price_updated_nodegroup.storage_slice_normalized_pricing_cc);
// 3. Update SLA policy
println!("\n3. Updating SLA Policy:");
let mut sla_updated_nodegroup = db_nodegroup2.clone();
sla_updated_nodegroup.slapolicy.sla_uptime = 98;
sla_updated_nodegroup.slapolicy.sla_bandwidth_mbit = 500;
let (_, sla_updated_nodegroup) = db
.collection::<NodeGroup>()
.expect("can open nodegroup collection")
.set(&sla_updated_nodegroup)
.expect("can update nodegroup");
println!("Updated SLA policy for standard provider:");
println!(" Uptime: {}%", sla_updated_nodegroup.slapolicy.sla_uptime);
println!(" Bandwidth: {} Mbit/s", sla_updated_nodegroup.slapolicy.sla_bandwidth_mbit);
// Show all nodegroups and calculate analytics
let all_nodegroups = db
.collection::<NodeGroup>()
.expect("can open nodegroup collection")
.get_all()
.expect("can load all nodegroups");
println!("\n--- NodeGroup Analytics ---");
println!("Total NodeGroups: {}", all_nodegroups.len());
// Calculate pricing statistics
let avg_compute_price: f64 = all_nodegroups.iter()
.map(|ng| ng.compute_slice_normalized_pricing_cc)
.sum::<f64>() / all_nodegroups.len() as f64;
let avg_storage_price: f64 = all_nodegroups.iter()
.map(|ng| ng.storage_slice_normalized_pricing_cc)
.sum::<f64>() / all_nodegroups.len() as f64;
let min_compute_price = all_nodegroups.iter()
.map(|ng| ng.compute_slice_normalized_pricing_cc)
.fold(f64::INFINITY, f64::min);
let max_compute_price = all_nodegroups.iter()
.map(|ng| ng.compute_slice_normalized_pricing_cc)
.fold(f64::NEG_INFINITY, f64::max);
println!("Pricing Statistics:");
println!(" Average Compute Price: {:.4} CC", avg_compute_price);
println!(" Average Storage Price: {:.4} CC", avg_storage_price);
println!(" Compute Price Range: {:.4} - {:.4} CC", min_compute_price, max_compute_price);
// Calculate SLA statistics
let avg_uptime: f64 = all_nodegroups.iter()
.map(|ng| ng.slapolicy.sla_uptime as f64)
.sum::<f64>() / all_nodegroups.len() as f64;
let avg_bandwidth: f64 = all_nodegroups.iter()
.map(|ng| ng.slapolicy.sla_bandwidth_mbit as f64)
.sum::<f64>() / all_nodegroups.len() as f64;
let avg_penalty: f64 = all_nodegroups.iter()
.map(|ng| ng.slapolicy.sla_penalty as f64)
.sum::<f64>() / all_nodegroups.len() as f64;
println!("\nSLA Statistics:");
println!(" Average Uptime Guarantee: {:.1}%", avg_uptime);
println!(" Average Bandwidth Guarantee: {:.0} Mbit/s", avg_bandwidth);
println!(" Average Penalty Rate: {:.0}%", avg_penalty);
// Count farmers
let unique_farmers: std::collections::HashSet<_> = all_nodegroups.iter()
.map(|ng| ng.farmerid)
.collect();
println!("\nFarmer Statistics:");
println!(" Unique Farmers: {}", unique_farmers.len());
println!(" NodeGroups per Farmer: {:.1}", all_nodegroups.len() as f64 / unique_farmers.len() as f64);
// Analyze discount policies
let total_discount_tiers: usize = all_nodegroups.iter()
.map(|ng| ng.pricingpolicy.marketplace_year_discounts.len())
.sum();
let avg_discount_tiers: f64 = total_discount_tiers as f64 / all_nodegroups.len() as f64;
println!("\nDiscount Policy Statistics:");
println!(" Average Discount Tiers: {:.1}", avg_discount_tiers);
// Find best value providers (high SLA, low price)
println!("\n--- Provider Rankings ---");
let mut providers_with_scores: Vec<_> = all_nodegroups.iter()
.map(|ng| {
let value_score = (ng.slapolicy.sla_uptime as f64) / ng.compute_slice_normalized_pricing_cc;
(ng, value_score)
})
.collect();
providers_with_scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
println!("Best Value Providers (Uptime/Price ratio):");
for (i, (ng, score)) in providers_with_scores.iter().enumerate() {
println!(" {}. Farmer {}: {:.0} ({}% uptime, {:.4} CC)",
i + 1, ng.farmerid, score, ng.slapolicy.sla_uptime, ng.compute_slice_normalized_pricing_cc);
}
println!("\n--- Model Information ---");
println!("NodeGroup DB Prefix: {}", NodeGroup::db_prefix());
}

View File

@@ -1,311 +0,0 @@
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::{NodeGroupReputation, NodeReputation};
use heromodels_core::Model;
// Helper function to print nodegroup reputation details
fn print_nodegroup_reputation_details(reputation: &NodeGroupReputation) {
println!("\n--- NodeGroup Reputation Details ---");
println!("ID: {}", reputation.get_id());
println!("NodeGroup ID: {}", reputation.nodegroup_id);
println!("Reputation Score: {}/100", reputation.reputation);
println!("Uptime: {}%", reputation.uptime);
println!("Node Count: {}", reputation.nodes.len());
println!("Created At: {}", reputation.base_data.created_at);
println!("Modified At: {}", reputation.base_data.modified_at);
// Print individual node reputations
if !reputation.nodes.is_empty() {
println!(" Individual Node Reputations:");
for (i, node_rep) in reputation.nodes.iter().enumerate() {
println!(" {}. Node {}: Reputation {}/100, Uptime {}%",
i + 1, node_rep.node_id, node_rep.reputation, node_rep.uptime);
}
// Calculate average node reputation and uptime
let avg_node_reputation: f64 = reputation.nodes.iter()
.map(|n| n.reputation as f64)
.sum::<f64>() / reputation.nodes.len() as f64;
let avg_node_uptime: f64 = reputation.nodes.iter()
.map(|n| n.uptime as f64)
.sum::<f64>() / reputation.nodes.len() as f64;
println!(" Average Node Reputation: {:.1}/100", avg_node_reputation);
println!(" Average Node Uptime: {:.1}%", avg_node_uptime);
}
}
fn main() {
// Create a new DB instance in /tmp/grid4_reputation_db, and reset before every run
let db = heromodels::db::hero::OurDB::new("/tmp/grid4_reputation_db", true).expect("Can create DB");
println!("Grid4 Reputation Models - Basic Usage Example");
println!("============================================");
// Create individual node reputations
// High-performing nodes
let node_rep1 = NodeReputation::new()
.node_id(1001)
.reputation(85)
.uptime(99);
let node_rep2 = NodeReputation::new()
.node_id(1002)
.reputation(92)
.uptime(98);
let node_rep3 = NodeReputation::new()
.node_id(1003)
.reputation(78)
.uptime(97);
// Medium-performing nodes
let node_rep4 = NodeReputation::new()
.node_id(2001)
.reputation(65)
.uptime(94);
let node_rep5 = NodeReputation::new()
.node_id(2002)
.reputation(72)
.uptime(96);
// Lower-performing nodes
let node_rep6 = NodeReputation::new()
.node_id(3001)
.reputation(45)
.uptime(88);
let node_rep7 = NodeReputation::new()
.node_id(3002)
.reputation(38)
.uptime(85);
// New nodes with default reputation
let node_rep8 = NodeReputation::new()
.node_id(4001)
.reputation(50) // default
.uptime(0); // just started
let node_rep9 = NodeReputation::new()
.node_id(4002)
.reputation(50) // default
.uptime(0); // just started
// Create nodegroup reputations with different performance profiles
// NodeGroup 1 - High-performance provider
let nodegroup_rep1 = NodeGroupReputation::new()
.nodegroup_id(1001)
.reputation(85) // high reputation earned over time
.uptime(98) // excellent uptime
.add_node_reputation(node_rep1.clone())
.add_node_reputation(node_rep2.clone())
.add_node_reputation(node_rep3.clone());
// NodeGroup 2 - Medium-performance provider
let nodegroup_rep2 = NodeGroupReputation::new()
.nodegroup_id(1002)
.reputation(68) // decent reputation
.uptime(95) // good uptime
.add_node_reputation(node_rep4.clone())
.add_node_reputation(node_rep5.clone());
// NodeGroup 3 - Struggling provider
let nodegroup_rep3 = NodeGroupReputation::new()
.nodegroup_id(1003)
.reputation(42) // below average reputation
.uptime(87) // poor uptime
.add_node_reputation(node_rep6.clone())
.add_node_reputation(node_rep7.clone());
// NodeGroup 4 - New provider (default reputation)
let nodegroup_rep4 = NodeGroupReputation::new()
.nodegroup_id(1004)
.reputation(50) // default starting reputation
.uptime(0) // no history yet
.add_node_reputation(node_rep8.clone())
.add_node_reputation(node_rep9.clone());
// Save all nodegroup reputations to database and get their assigned IDs and updated models
let (rep1_id, db_rep1) = db
.collection()
.expect("can open reputation collection")
.set(&nodegroup_rep1)
.expect("can set reputation");
let (rep2_id, db_rep2) = db
.collection()
.expect("can open reputation collection")
.set(&nodegroup_rep2)
.expect("can set reputation");
let (rep3_id, db_rep3) = db
.collection()
.expect("can open reputation collection")
.set(&nodegroup_rep3)
.expect("can set reputation");
let (rep4_id, db_rep4) = db
.collection()
.expect("can open reputation collection")
.set(&nodegroup_rep4)
.expect("can set reputation");
println!("NodeGroup Reputation 1 assigned ID: {}", rep1_id);
println!("NodeGroup Reputation 2 assigned ID: {}", rep2_id);
println!("NodeGroup Reputation 3 assigned ID: {}", rep3_id);
println!("NodeGroup Reputation 4 assigned ID: {}", rep4_id);
// Print all reputation records retrieved from database
println!("\n--- Reputation Records Retrieved from Database ---");
println!("\n1. High-performance provider:");
print_nodegroup_reputation_details(&db_rep1);
println!("\n2. Medium-performance provider:");
print_nodegroup_reputation_details(&db_rep2);
println!("\n3. Struggling provider:");
print_nodegroup_reputation_details(&db_rep3);
println!("\n4. New provider:");
print_nodegroup_reputation_details(&db_rep4);
// Demonstrate different ways to retrieve reputation records from the database
// 1. Retrieve by nodegroup ID index
println!("\n--- Retrieving Reputation by Different Methods ---");
println!("\n1. By NodeGroup ID Index (NodeGroup 1002):");
let nodegroup_reps = db
.collection::<NodeGroupReputation>()
.expect("can open reputation collection")
.get_by_index("nodegroup_id", &1002u32)
.expect("can load reputation by nodegroup");
assert_eq!(nodegroup_reps.len(), 1);
print_nodegroup_reputation_details(&nodegroup_reps[0]);
// 2. Update reputation scores (simulate performance improvement)
println!("\n2. Updating Reputation Scores (Performance Improvement):");
let mut improved_rep = db_rep3.clone();
improved_rep.reputation = 55; // improved from 42
improved_rep.uptime = 92; // improved from 87
// Also improve individual node reputations
for node_rep in &mut improved_rep.nodes {
node_rep.reputation += 10; // boost each node's reputation
node_rep.uptime += 5; // improve uptime
}
let (_, updated_rep) = db
.collection::<NodeGroupReputation>()
.expect("can open reputation collection")
.set(&improved_rep)
.expect("can update reputation");
println!("Updated reputation for struggling provider:");
print_nodegroup_reputation_details(&updated_rep);
// 3. Add new node to existing nodegroup reputation
println!("\n3. Adding New Node to Existing NodeGroup:");
let new_node_rep = NodeReputation::new()
.node_id(1004)
.reputation(88)
.uptime(99);
let mut expanded_rep = db_rep1.clone();
expanded_rep.add_node_reputation(new_node_rep);
// Recalculate nodegroup reputation based on node average
let total_node_rep: i32 = expanded_rep.nodes.iter().map(|n| n.reputation).sum();
expanded_rep.reputation = total_node_rep / expanded_rep.nodes.len() as i32;
let (_, expanded_rep) = db
.collection::<NodeGroupReputation>()
.expect("can open reputation collection")
.set(&expanded_rep)
.expect("can update reputation");
println!("Added new high-performing node to top provider:");
print_nodegroup_reputation_details(&expanded_rep);
// Show all reputation records and calculate analytics
let all_reps = db
.collection::<NodeGroupReputation>()
.expect("can open reputation collection")
.get_all()
.expect("can load all reputations");
println!("\n--- Reputation Analytics ---");
println!("Total NodeGroup Reputations: {}", all_reps.len());
// Calculate overall statistics
let avg_nodegroup_reputation: f64 = all_reps.iter()
.map(|r| r.reputation as f64)
.sum::<f64>() / all_reps.len() as f64;
let avg_nodegroup_uptime: f64 = all_reps.iter()
.filter(|r| r.uptime > 0) // exclude new providers with 0 uptime
.map(|r| r.uptime as f64)
.sum::<f64>() / all_reps.iter().filter(|r| r.uptime > 0).count() as f64;
println!("Overall Statistics:");
println!(" Average NodeGroup Reputation: {:.1}/100", avg_nodegroup_reputation);
println!(" Average NodeGroup Uptime: {:.1}%", avg_nodegroup_uptime);
// Count reputation tiers
let excellent_reps = all_reps.iter().filter(|r| r.reputation >= 80).count();
let good_reps = all_reps.iter().filter(|r| r.reputation >= 60 && r.reputation < 80).count();
let average_reps = all_reps.iter().filter(|r| r.reputation >= 40 && r.reputation < 60).count();
let poor_reps = all_reps.iter().filter(|r| r.reputation < 40).count();
println!("\nReputation Distribution:");
println!(" Excellent (80-100): {}", excellent_reps);
println!(" Good (60-79): {}", good_reps);
println!(" Average (40-59): {}", average_reps);
println!(" Poor (0-39): {}", poor_reps);
// Calculate total nodes and their statistics
let total_nodes: usize = all_reps.iter().map(|r| r.nodes.len()).sum();
let all_node_reps: Vec<i32> = all_reps.iter()
.flat_map(|r| &r.nodes)
.map(|n| n.reputation)
.collect();
let all_node_uptimes: Vec<i32> = all_reps.iter()
.flat_map(|r| &r.nodes)
.filter(|n| n.uptime > 0)
.map(|n| n.uptime)
.collect();
let avg_node_reputation: f64 = all_node_reps.iter().sum::<i32>() as f64 / all_node_reps.len() as f64;
let avg_node_uptime: f64 = all_node_uptimes.iter().sum::<i32>() as f64 / all_node_uptimes.len() as f64;
println!("\nNode-Level Statistics:");
println!(" Total Nodes: {}", total_nodes);
println!(" Average Node Reputation: {:.1}/100", avg_node_reputation);
println!(" Average Node Uptime: {:.1}%", avg_node_uptime);
// Find best and worst performing nodegroups
let best_nodegroup = all_reps.iter().max_by_key(|r| r.reputation).unwrap();
let worst_nodegroup = all_reps.iter().min_by_key(|r| r.reputation).unwrap();
println!("\nPerformance Leaders:");
println!(" Best NodeGroup: {} (Reputation: {}, Uptime: {}%)",
best_nodegroup.nodegroup_id, best_nodegroup.reputation, best_nodegroup.uptime);
println!(" Worst NodeGroup: {} (Reputation: {}, Uptime: {}%)",
worst_nodegroup.nodegroup_id, worst_nodegroup.reputation, worst_nodegroup.uptime);
// Rank nodegroups by reputation
let mut ranked_nodegroups: Vec<_> = all_reps.iter().collect();
ranked_nodegroups.sort_by(|a, b| b.reputation.cmp(&a.reputation));
println!("\nNodeGroup Rankings (by Reputation):");
for (i, rep) in ranked_nodegroups.iter().enumerate() {
let status = match rep.reputation {
80..=100 => "Excellent",
60..=79 => "Good",
40..=59 => "Average",
_ => "Poor",
};
println!(" {}. NodeGroup {}: {} ({}/100, {}% uptime)",
i + 1, rep.nodegroup_id, status, rep.reputation, rep.uptime);
}
println!("\n--- Model Information ---");
println!("NodeGroupReputation DB Prefix: {}", NodeGroupReputation::db_prefix());
}

View File

@@ -1,6 +1,6 @@
use heromodels::models::heroledger::rhai::register_heroledger_rhai_modules;
use heromodels_core::db::hero::OurDB;
use rhai::{Dynamic, Engine};
use heromodels::models::heroledger::rhai::register_heroledger_rhai_modules;
use std::sync::Arc;
use std::{fs, path::Path};

View File

@@ -1,15 +0,0 @@
# Heroledger Postgres Example
This example demonstrates how to use the Heroledger `User` model against Postgres using the `heromodels::db::postgres` backend.
- Connects to Postgres with user `postgres` and password `test123` on `localhost:5432`.
- Creates the table and indexes automatically on first use.
- Shows basic CRUD and an index lookup on `username`.
Run it:
```bash
cargo run -p heromodels --example heroledger_example
```
Make sure Postgres is running locally and accessible with the credentials above.
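For reference, a minimal sketch of the connection setup, assuming the `Config` builder from `heromodels::db::postgres` that the full example in the next file uses with exactly these credentials:
```rust
use heromodels::db::postgres::{Config, Postgres};

fn main() {
    // Connect with the credentials listed above: postgres / test123 on localhost:5432.
    let db = Postgres::new(
        Config::new()
            .user(Some("postgres".into()))
            .password(Some("test123".into()))
            .host(Some("localhost".into()))
            .port(Some(5432)),
    )
    .expect("can connect to Postgres");
    let _ = db; // open collections via db.collection::<User>() as in the example below
}
```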

View File

@@ -1,54 +0,0 @@
use heromodels::db::postgres::{Config, Postgres};
use heromodels::db::{Collection, Db};
use heromodels::models::heroledger::user::user_index::username;
use heromodels::models::heroledger::user::{SecretBox, User};
fn main() {
let db = Postgres::new(
Config::new()
.user(Some("postgres".into()))
.password(Some("test123".into()))
.host(Some("localhost".into()))
.port(Some(5432)),
)
.expect("Can connect to Postgres");
println!("Heroledger User - Postgres Example");
println!("==================================");
let users = db.collection::<User>().expect("open user collection");
// Clean
if let Ok(existing) = users.get_all() {
for u in existing {
let _ = users.delete_by_id(u.get_id());
}
}
let sb = SecretBox::new().data(vec![1, 2, 3]).nonce(vec![9, 9, 9]).build();
let u = User::new(0)
.username("alice")
.pubkey("PUBKEY_A")
.add_email("alice@example.com")
.add_userprofile(sb)
.build();
let (id, stored) = users.set(&u).expect("store user");
println!("Stored user id={id} username={} pubkey={}", stored.username, stored.pubkey);
let by_idx = users.get::<username, _>("alice").expect("by username");
println!("Found {} user(s) with username=alice", by_idx.len());
let fetched = users.get_by_id(id).expect("get by id").expect("exists");
println!("Fetched by id={} username={} emails={:?}", id, fetched.username, fetched.email);
// Update
let updated = fetched.clone().add_email("work@alice.example");
let (_, back) = users.set(&updated).expect("update user");
println!("Updated emails = {:?}", back.email);
// Delete
users.delete_by_id(id).expect("delete user");
println!("Deleted user id={id}");
}

View File

@@ -1,11 +1,8 @@
use heromodels::db::postgres::Config;
use heromodels::db::{Collection, Db};
use heromodels::models::userexample::user::user_index::{email, username};
use heromodels::models::userexample::user::user_index::{is_active, username};
use heromodels::models::{Comment, User};
use heromodels_core::Model;
// For demonstrating embedded/nested indexes
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use heromodels::models::grid4::node::node_index::{country as node_country, pubkey as node_pubkey};
// Helper function to print user details
fn print_user_details(user: &User) {
@@ -40,31 +37,14 @@ fn main() {
)
.expect("Can connect to postgress");
// Unique suffix to avoid collisions with legacy rows from prior runs
use std::time::{SystemTime, UNIX_EPOCH};
let ts = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
let user1_name = format!("johndoe_{}", ts);
let user2_name = format!("janesmith_{}", ts);
let user3_name = format!("willism_{}", ts);
let user4_name = format!("carrols_{}", ts);
let user1_email = format!("john.doe+{}@example.com", ts);
let user2_email = format!("jane.smith+{}@example.com", ts);
let user3_email = format!("willis.masters+{}@example.com", ts);
let user4_email = format!("carrol.smith+{}@example.com", ts);
println!("Hero Models - Basic Usage Example");
println!("================================");
// Clean up any existing data to ensure consistent results
println!("Cleaning up existing data...");
let user_collection = db.collection::<User>().expect("can open user collection");
let comment_collection = db
.collection::<Comment>()
.expect("can open comment collection");
let comment_collection = db.collection::<Comment>().expect("can open comment collection");
// Clear all existing users and comments
if let Ok(existing_users) = user_collection.get_all() {
for user in existing_users {
@@ -82,32 +62,32 @@ fn main() {
// User 1
let user1 = User::new()
.username(&user1_name)
.email(&user1_email)
.username("johndoe")
.email("john.doe@example.com")
.full_name("John Doe")
.is_active(false)
.build();
// User 2
let user2 = User::new()
.username(&user2_name)
.email(&user2_email)
.username("janesmith")
.email("jane.smith@example.com")
.full_name("Jane Smith")
.is_active(true)
.build();
// User 3
let user3 = User::new()
.username(&user3_name)
.email(&user3_email)
.username("willism")
.email("willis.masters@example.com")
.full_name("Willis Masters")
.is_active(true)
.build();
// User 4
let user4 = User::new()
.username(&user4_name)
.email(&user4_email)
.username("carrols")
.email("carrol.smith@example.com")
.full_name("Carrol Smith")
.is_active(false)
.build();
@@ -163,95 +143,66 @@ fn main() {
let stored_users = db
.collection::<User>()
.expect("can open user collection")
.get::<username, _>(&user1_name)
.get::<username, _>("johndoe")
.expect("can load stored user");
assert_eq!(stored_users.len(), 1);
print_user_details(&stored_users[0]);
// 2. Retrieve by email index
println!("\n2. By Email Index:");
let by_email = db
// 2. Retrieve by active status
println!("\n2. By Active Status (Active = true):");
let active_users = db
.collection::<User>()
.expect("can open user collection")
.get::<email, _>(&user2_email)
.expect("can load stored user by email");
assert_eq!(by_email.len(), 1);
print_user_details(&by_email[0]);
.get::<is_active, _>(&true)
.expect("can load stored users");
assert_eq!(active_users.len(), 2);
for active_user in active_users.iter() {
print_user_details(active_user);
}
// 3. Delete a user and show the updated results
println!("\n3. After Deleting a User:");
let user_to_delete_id = stored_users[0].get_id();
let user_to_delete_id = active_users[0].get_id();
println!("Deleting user with ID: {user_to_delete_id}");
db.collection::<User>()
.expect("can open user collection")
.delete_by_id(user_to_delete_id)
.expect("can delete existing user");
// Verify deletion by querying the same username again
let should_be_empty = db
// Show remaining active users
let active_users = db
.collection::<User>()
.expect("can open user collection")
.get::<username, _>(&user1_name)
.expect("can query by username after delete");
println!(" a. Query by username '{}' after delete -> {} results", user1_name, should_be_empty.len());
assert_eq!(should_be_empty.len(), 0);
.get::<is_active, _>(&true)
.expect("can load stored users");
println!(" a. Remaining Active Users:");
assert_eq!(active_users.len(), 1);
for active_user in active_users.iter() {
print_user_details(active_user);
}
// Show inactive users
let inactive_users = db
.collection::<User>()
.expect("can open user collection")
.get::<is_active, _>(&false)
.expect("can load stored users");
println!(" b. Inactive Users:");
assert_eq!(inactive_users.len(), 2);
for inactive_user in inactive_users.iter() {
print_user_details(inactive_user);
}
// Delete a user based on an index for good measure
db.collection::<User>()
.expect("can open user collection")
.delete::<username, _>(&user4_name)
.delete::<username, _>("janesmith")
.expect("can delete existing user");
// Demonstrate embedded/nested indexes with Grid4 Node
println!("\n--- Demonstrating Embedded/Nested Indexes (Grid4::Node) ---");
println!("Node indexed fields: {:?}", Node::indexed_fields());
// Build a minimal node with nested data and persist it
let cs = ComputeSlice::new()
.nodeid(42)
.slice_id(1)
.mem_gb(32.0)
.storage_gb(512.0)
.passmark(6000)
.vcores(16)
.gpus(1)
.price_cc(0.33);
let dev = DeviceInfo { vendor: "ACME".into(), ..Default::default() };
let node = Node::new()
.nodegroupid(101)
.uptime(99)
.add_compute_slice(cs)
.devices(dev)
.country("BE")
.pubkey("EX_NODE_PK_1")
.build();
let (node_id, _stored_node) = db
.collection::<Node>()
.expect("can open node collection")
.set(&node)
.expect("can set node");
println!("Stored node id: {}", node_id);
// Query by top-level indexes
let be_nodes = db
.collection::<Node>()
.expect("can open node collection")
.get::<node_country, _>("BE")
.expect("can query nodes by country");
println!("Nodes in BE (count may include legacy rows): {}", be_nodes.len());
let by_pk = db
.collection::<Node>()
.expect("can open node collection")
.get::<node_pubkey, _>("EX_NODE_PK_1")
.expect("can query node by pubkey");
assert!(by_pk.iter().any(|n| n.get_id() == node_id));
// Note: Nested path indexes (e.g., devices.vendor, computeslices.passmark) are created and used
// for DB-side indexing, but are not yet exposed as typed Index keys in the API. They appear in
// Node::indexed_fields() and contribute to Model::db_keys(), enabling performant JSONB GIN indexes.
println!("\n--- User Model Information ---");
println!("User DB Prefix: {}", User::db_prefix());
@@ -261,7 +212,7 @@ fn main() {
// 1. Create and save a comment
println!("\n1. Creating a Comment:");
let comment = Comment::new()
.user_id(db_user2.get_id()) // commenter's user ID (use an existing user)
.user_id(db_user1.get_id()) // commenter's user ID
.content("This is a comment on the user")
.build();
@@ -279,7 +230,7 @@ fn main() {
// 3. Associate the comment with a user
println!("\n2. Associating Comment with User:");
let mut updated_user = db_user2.clone();
let mut updated_user = db_user1.clone();
updated_user.base_data.add_comment(db_comment.get_id());
// Save the updated user and get the new version

View File

@@ -8,8 +8,8 @@ use std::{
collections::HashSet,
path::PathBuf,
sync::{
Arc, Mutex,
atomic::{AtomicU32, Ordering},
Arc, Mutex,
},
};

View File

@@ -119,4 +119,4 @@ impl Circle {
/// Creates a new circle builder
pub fn new_circle() -> Circle {
Circle::new()
}
}

View File

@@ -1,17 +1,16 @@
use crate::db::Db;
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn, register_authorized_get_by_id_fn,
};
use rhai::plugin::*;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module};
use rhailib_macros::{
register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn,
register_authorized_get_by_id_fn,
};
use std::collections::HashMap;
use std::sync::Arc;
use crate::models::circle::Circle;
type RhaiCircle = Circle;
use crate::db::Collection;
use crate::db::hero::OurDB;
use crate::db::Collection;
use crate::models::circle::ThemeData;
#[export_module]

View File

@@ -1,128 +0,0 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// Bid status enumeration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum BidStatus {
#[default]
Pending,
Confirmed,
Assigned,
Cancelled,
Done,
}
/// Billing period enumeration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum BillingPeriod {
#[default]
Hourly,
Monthly,
Yearly,
Biannually,
Triannually,
}
/// I can bid for infra, and optionally get accepted
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Bid {
pub base_data: BaseModelData,
/// links back to customer for this capacity (user on ledger)
#[index]
pub customer_id: u32,
/// nr of slices I need in 1 machine
pub compute_slices_nr: i32,
/// price per 1 GB slice I want to accept
pub compute_slice_price: f64,
/// nr of storage slices needed
pub storage_slices_nr: i32,
/// price per 1 GB storage slice I want to accept
pub storage_slice_price: f64,
pub status: BidStatus,
/// if obligation, the bid will be charged and money needs to be in escrow; otherwise it's an intent
pub obligation: bool,
/// epoch timestamp
pub start_date: u32,
/// epoch timestamp
pub end_date: u32,
/// signature as done by a user/consumer to validate their identity and intent
pub signature_user: String,
pub billing_period: BillingPeriod,
}
impl Bid {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
customer_id: 0,
compute_slices_nr: 0,
compute_slice_price: 0.0,
storage_slices_nr: 0,
storage_slice_price: 0.0,
status: BidStatus::default(),
obligation: false,
start_date: 0,
end_date: 0,
signature_user: String::new(),
billing_period: BillingPeriod::default(),
}
}
pub fn customer_id(mut self, v: u32) -> Self {
self.customer_id = v;
self
}
pub fn compute_slices_nr(mut self, v: i32) -> Self {
self.compute_slices_nr = v;
self
}
pub fn compute_slice_price(mut self, v: f64) -> Self {
self.compute_slice_price = v;
self
}
pub fn storage_slices_nr(mut self, v: i32) -> Self {
self.storage_slices_nr = v;
self
}
pub fn storage_slice_price(mut self, v: f64) -> Self {
self.storage_slice_price = v;
self
}
pub fn status(mut self, v: BidStatus) -> Self {
self.status = v;
self
}
pub fn obligation(mut self, v: bool) -> Self {
self.obligation = v;
self
}
pub fn start_date(mut self, v: u32) -> Self {
self.start_date = v;
self
}
pub fn end_date(mut self, v: u32) -> Self {
self.end_date = v;
self
}
pub fn signature_user(mut self, v: impl ToString) -> Self {
self.signature_user = v.to_string();
self
}
pub fn billing_period(mut self, v: BillingPeriod) -> Self {
self.billing_period = v;
self
}
}
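As a quick illustration of the builder API above, a minimal usage sketch; all values are made up, and persistence through the `#[model]` collection API is not shown:
```rust
// Illustrative values only (hypothetical customer, prices, and timestamps).
let bid = Bid::new()
    .customer_id(42)
    .compute_slices_nr(4)
    .compute_slice_price(0.04)
    .storage_slices_nr(100)
    .storage_slice_price(0.01)
    .status(BidStatus::Pending)
    .obligation(true)
    .start_date(1_700_000_000)
    .end_date(1_731_536_000)
    .signature_user("user_signature_example")
    .billing_period(BillingPeriod::Monthly);
```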

View File

@@ -1,39 +0,0 @@
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// SLA policy matching the V spec `SLAPolicy`
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct SLAPolicy {
/// should be 90 or higher
pub sla_uptime: i32,
/// minimal Mbit/s we can expect on average over 1h per node; 0 means no guarantee
pub sla_bandwidth_mbit: i32,
/// percent of one month's revenue returned if the SLA is breached,
/// e.g. 200 means we return two months' worth of revenue if the SLA is missed
pub sla_penalty: i32,
}
impl SLAPolicy {
pub fn new() -> Self { Self::default() }
pub fn sla_uptime(mut self, v: i32) -> Self { self.sla_uptime = v; self }
pub fn sla_bandwidth_mbit(mut self, v: i32) -> Self { self.sla_bandwidth_mbit = v; self }
pub fn sla_penalty(mut self, v: i32) -> Self { self.sla_penalty = v; self }
pub fn build(self) -> Self { self }
}
/// Pricing policy matching the V spec `PricingPolicy`
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct PricingPolicy {
/// e.g. 30,40,50 means: if the user holds more CC in their wallet than 1 year of
/// utilization, this provider gives a 30% discount; 2 years 40%, and so on
pub marketplace_year_discounts: Vec<i32>,
/// e.g. 10,20,30
pub volume_discounts: Vec<i32>,
}
impl PricingPolicy {
pub fn new() -> Self { Self { marketplace_year_discounts: vec![30, 40, 50], volume_discounts: vec![10, 20, 30] } }
pub fn marketplace_year_discounts(mut self, v: Vec<i32>) -> Self { self.marketplace_year_discounts = v; self }
pub fn volume_discounts(mut self, v: Vec<i32>) -> Self { self.volume_discounts = v; self }
pub fn build(self) -> Self { self }
}
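A minimal usage sketch of the two shared policy builders above; the values are illustrative:
```rust
// Illustrative values only.
let sla = SLAPolicy::new()
    .sla_uptime(99)
    .sla_bandwidth_mbit(1000)
    .sla_penalty(200)
    .build();

// PricingPolicy::new() already defaults to 30/40/50 and 10/20/30;
// the setters below just make that choice explicit.
let pricing = PricingPolicy::new()
    .marketplace_year_discounts(vec![30, 40, 50])
    .volume_discounts(vec![10, 20, 30])
    .build();
```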

View File

@@ -1,219 +0,0 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
use super::bid::BillingPeriod;
/// Contract status enumeration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum ContractStatus {
#[default]
Active,
Cancelled,
Error,
Paused,
}
/// Compute slice provisioned for a contract
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct ComputeSliceProvisioned {
pub node_id: u32,
/// the id of the slice in the node
pub id: u16,
pub mem_gb: f64,
pub storage_gb: f64,
pub passmark: i32,
pub vcores: i32,
pub cpu_oversubscription: i32,
pub tags: String,
}
/// Storage slice provisioned for a contract
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct StorageSliceProvisioned {
pub node_id: u32,
/// the id of the slice in the node; slices are tracked in the node itself
pub id: u16,
pub storage_size_gb: i32,
pub tags: String,
}
/// Contract for provisioned infrastructure
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Contract {
pub base_data: BaseModelData,
/// links back to customer for this capacity (user on ledger)
#[index]
pub customer_id: u32,
pub compute_slices: Vec<ComputeSliceProvisioned>,
pub storage_slices: Vec<StorageSliceProvisioned>,
/// price per 1 GB agreed upon
pub compute_slice_price: f64,
/// price per 1 GB agreed upon
pub storage_slice_price: f64,
/// price per 1 GB agreed upon (transfer)
pub network_slice_price: f64,
pub status: ContractStatus,
/// epoch timestamp
pub start_date: u32,
/// epoch timestamp
pub end_date: u32,
/// signature as done by a user/consumer to validate their identity and intent
pub signature_user: String,
/// signature as done by the hoster
pub signature_hoster: String,
pub billing_period: BillingPeriod,
}
impl Contract {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
customer_id: 0,
compute_slices: Vec::new(),
storage_slices: Vec::new(),
compute_slice_price: 0.0,
storage_slice_price: 0.0,
network_slice_price: 0.0,
status: ContractStatus::default(),
start_date: 0,
end_date: 0,
signature_user: String::new(),
signature_hoster: String::new(),
billing_period: BillingPeriod::default(),
}
}
pub fn customer_id(mut self, v: u32) -> Self {
self.customer_id = v;
self
}
pub fn add_compute_slice(mut self, slice: ComputeSliceProvisioned) -> Self {
self.compute_slices.push(slice);
self
}
pub fn add_storage_slice(mut self, slice: StorageSliceProvisioned) -> Self {
self.storage_slices.push(slice);
self
}
pub fn compute_slice_price(mut self, v: f64) -> Self {
self.compute_slice_price = v;
self
}
pub fn storage_slice_price(mut self, v: f64) -> Self {
self.storage_slice_price = v;
self
}
pub fn network_slice_price(mut self, v: f64) -> Self {
self.network_slice_price = v;
self
}
pub fn status(mut self, v: ContractStatus) -> Self {
self.status = v;
self
}
pub fn start_date(mut self, v: u32) -> Self {
self.start_date = v;
self
}
pub fn end_date(mut self, v: u32) -> Self {
self.end_date = v;
self
}
pub fn signature_user(mut self, v: impl ToString) -> Self {
self.signature_user = v.to_string();
self
}
pub fn signature_hoster(mut self, v: impl ToString) -> Self {
self.signature_hoster = v.to_string();
self
}
pub fn billing_period(mut self, v: BillingPeriod) -> Self {
self.billing_period = v;
self
}
}
impl ComputeSliceProvisioned {
pub fn new() -> Self {
Self::default()
}
pub fn node_id(mut self, v: u32) -> Self {
self.node_id = v;
self
}
pub fn id(mut self, v: u16) -> Self {
self.id = v;
self
}
pub fn mem_gb(mut self, v: f64) -> Self {
self.mem_gb = v;
self
}
pub fn storage_gb(mut self, v: f64) -> Self {
self.storage_gb = v;
self
}
pub fn passmark(mut self, v: i32) -> Self {
self.passmark = v;
self
}
pub fn vcores(mut self, v: i32) -> Self {
self.vcores = v;
self
}
pub fn cpu_oversubscription(mut self, v: i32) -> Self {
self.cpu_oversubscription = v;
self
}
pub fn tags(mut self, v: impl ToString) -> Self {
self.tags = v.to_string();
self
}
}
impl StorageSliceProvisioned {
pub fn new() -> Self {
Self::default()
}
pub fn node_id(mut self, v: u32) -> Self {
self.node_id = v;
self
}
pub fn id(mut self, v: u16) -> Self {
self.id = v;
self
}
pub fn storage_size_gb(mut self, v: i32) -> Self {
self.storage_size_gb = v;
self
}
pub fn tags(mut self, v: impl ToString) -> Self {
self.tags = v.to_string();
self
}
}
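A minimal sketch of assembling a `Contract` from the provisioned-slice builders defined above; all values are illustrative:
```rust
// Illustrative values only (hypothetical node, prices, and timestamps).
let cs = ComputeSliceProvisioned::new()
    .node_id(1)
    .id(1)
    .mem_gb(4.0)
    .storage_gb(100.0)
    .passmark(3000)
    .vcores(2)
    .cpu_oversubscription(150)
    .tags("web");

let ss = StorageSliceProvisioned::new()
    .node_id(1)
    .id(2)
    .storage_size_gb(500)
    .tags("backup");

let contract = Contract::new()
    .customer_id(42)
    .add_compute_slice(cs)
    .add_storage_slice(ss)
    .compute_slice_price(0.04)
    .storage_slice_price(0.01)
    .network_slice_price(0.002)
    .status(ContractStatus::Active)
    .start_date(1_700_000_000)
    .end_date(1_731_536_000)
    .signature_user("user_sig_example")
    .signature_hoster("hoster_sig_example")
    .billing_period(BillingPeriod::Monthly);
```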

View File

@@ -1,18 +1,16 @@
pub mod bid;
pub mod common;
pub mod contract;
pub mod node;
pub mod nodegroup;
pub mod reputation;
pub mod reservation;
pub use bid::{Bid, BidStatus, BillingPeriod};
pub use common::{PricingPolicy, SLAPolicy};
pub use contract::{Contract, ContractStatus, ComputeSliceProvisioned, StorageSliceProvisioned};
pub use node::{
CPUDevice, ComputeSlice, DeviceInfo, GPUDevice, MemoryDevice, NetworkDevice, Node,
NodeCapacity, StorageDevice, StorageSlice,
};
pub use nodegroup::NodeGroup;
pub use reputation::{NodeGroupReputation, NodeReputation};
pub use reservation::{Reservation, ReservationStatus};
Node,
DeviceInfo,
StorageDevice,
MemoryDevice,
CPUDevice,
GPUDevice,
NetworkDevice,
NodeCapacity,
ComputeSlice,
StorageSlice,
PricingPolicy,
SLAPolicy,
};

View File

@@ -1,8 +1,7 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use rhai::CustomType;
use serde::{Deserialize, Serialize};
use super::common::{PricingPolicy, SLAPolicy};
/// Storage device information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
@@ -95,26 +94,57 @@ pub struct NodeCapacity {
pub vcores: i32,
}
// PricingPolicy and SLAPolicy moved to `common.rs` to be shared across models.
/// Pricing policy for slices (minimal version until full spec available)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct PricingPolicy {
/// Human friendly policy name (e.g. "fixed", "market")
pub name: String,
/// Optional free-form details as JSON-encoded string
pub details: Option<String>,
}
/// SLA policy for slices (minimal version until full spec available)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct SLAPolicy {
/// Uptime in percentage (0..100)
pub uptime: f32,
/// Max response time in ms
pub max_response_time_ms: u32,
}
/// Compute slice (typically represents a base unit of compute)
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct ComputeSlice {
pub base_data: BaseModelData,
/// the node in the grid; there is an object describing the node
#[index]
pub nodeid: u32,
/// the id of the slice in the node
pub id: u16,
#[index]
pub id: i32,
pub mem_gb: f64,
pub storage_gb: f64,
pub passmark: i32,
pub vcores: i32,
pub cpu_oversubscription: i32,
pub storage_oversubscription: i32,
/// Min/max allowed price range for validation
#[serde(default)]
pub price_range: Vec<f64>,
/// nr of GPUs; see the node to know which GPUs these are
pub gpus: u8,
/// price per slice (even if the grouped one)
pub price_cc: f64,
pub pricing_policy: PricingPolicy,
pub sla_policy: SLAPolicy,
}
impl ComputeSlice {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
nodeid: 0,
id: 0,
mem_gb: 0.0,
storage_gb: 0.0,
@@ -122,62 +152,63 @@ impl ComputeSlice {
vcores: 0,
cpu_oversubscription: 0,
storage_oversubscription: 0,
price_range: vec![0.0, 0.0],
gpus: 0,
price_cc: 0.0,
pricing_policy: PricingPolicy::default(),
sla_policy: SLAPolicy::default(),
}
}
pub fn id(mut self, id: u16) -> Self {
self.id = id;
self
}
pub fn mem_gb(mut self, v: f64) -> Self {
self.mem_gb = v;
self
}
pub fn storage_gb(mut self, v: f64) -> Self {
self.storage_gb = v;
self
}
pub fn passmark(mut self, v: i32) -> Self {
self.passmark = v;
self
}
pub fn vcores(mut self, v: i32) -> Self {
self.vcores = v;
self
}
pub fn cpu_oversubscription(mut self, v: i32) -> Self {
self.cpu_oversubscription = v;
self
}
pub fn storage_oversubscription(mut self, v: i32) -> Self {
self.storage_oversubscription = v;
self
}
pub fn gpus(mut self, v: u8) -> Self {
self.gpus = v;
self
}
pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self }
pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self }
pub fn mem_gb(mut self, v: f64) -> Self { self.mem_gb = v; self }
pub fn storage_gb(mut self, v: f64) -> Self { self.storage_gb = v; self }
pub fn passmark(mut self, v: i32) -> Self { self.passmark = v; self }
pub fn vcores(mut self, v: i32) -> Self { self.vcores = v; self }
pub fn cpu_oversubscription(mut self, v: i32) -> Self { self.cpu_oversubscription = v; self }
pub fn storage_oversubscription(mut self, v: i32) -> Self { self.storage_oversubscription = v; self }
pub fn price_range(mut self, min_max: Vec<f64>) -> Self { self.price_range = min_max; self }
pub fn gpus(mut self, v: u8) -> Self { self.gpus = v; self }
pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self }
pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self }
pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self }
}
/// Storage slice (typically 1GB of storage)
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct StorageSlice {
pub base_data: BaseModelData,
/// the node in the grid
#[index]
pub nodeid: u32,
/// the id of the slice in the node; slices are tracked in the node itself
pub id: u16,
#[index]
pub id: i32,
/// price per slice (even if the grouped one)
pub price_cc: f64,
pub pricing_policy: PricingPolicy,
pub sla_policy: SLAPolicy,
}
impl StorageSlice {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
nodeid: 0,
id: 0,
price_cc: 0.0,
pricing_policy: PricingPolicy::default(),
sla_policy: SLAPolicy::default(),
}
}
pub fn id(mut self, id: u16) -> Self {
self.id = id;
self
}
pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self }
pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self }
pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self }
pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self }
pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self }
}
/// Grid4 Node model
@@ -193,20 +224,13 @@ pub struct Node {
pub computeslices: Vec<ComputeSlice>,
pub storageslices: Vec<StorageSlice>,
pub devices: DeviceInfo,
/// 2 letter code as specified in lib/data/countries/data/countryInfo.txt
/// 2 letter code
#[index]
pub country: String,
/// Hardware capacity details
pub capacity: NodeCapacity,
/// first time node was active
pub birthtime: u32,
/// node public key
#[index]
pub pubkey: String,
/// signature done on node to validate pubkey with privkey
pub signature_node: String,
/// signature as done by farmers to validate their identity
pub signature_farmer: String,
/// let's keep it simple and compatible
pub provisiontime: u32,
}
impl Node {
@@ -220,61 +244,21 @@ impl Node {
devices: DeviceInfo::default(),
country: String::new(),
capacity: NodeCapacity::default(),
birthtime: 0,
pubkey: String::new(),
signature_node: String::new(),
signature_farmer: String::new(),
provisiontime: 0,
}
}
pub fn nodegroupid(mut self, v: i32) -> Self {
self.nodegroupid = v;
self
}
pub fn uptime(mut self, v: i32) -> Self {
self.uptime = v;
self
}
pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self {
self.computeslices.push(s);
self
}
pub fn add_storage_slice(mut self, s: StorageSlice) -> Self {
self.storageslices.push(s);
self
}
pub fn devices(mut self, d: DeviceInfo) -> Self {
self.devices = d;
self
}
pub fn country(mut self, c: impl ToString) -> Self {
self.country = c.to_string();
self
}
pub fn capacity(mut self, c: NodeCapacity) -> Self {
self.capacity = c;
self
}
pub fn birthtime(mut self, t: u32) -> Self {
self.birthtime = t;
self
}
pub fn pubkey(mut self, v: impl ToString) -> Self {
self.pubkey = v.to_string();
self
}
pub fn signature_node(mut self, v: impl ToString) -> Self {
self.signature_node = v.to_string();
self
}
pub fn signature_farmer(mut self, v: impl ToString) -> Self {
self.signature_farmer = v.to_string();
self
}
pub fn nodegroupid(mut self, v: i32) -> Self { self.nodegroupid = v; self }
pub fn uptime(mut self, v: i32) -> Self { self.uptime = v; self }
pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self { self.computeslices.push(s); self }
pub fn add_storage_slice(mut self, s: StorageSlice) -> Self { self.storageslices.push(s); self }
pub fn devices(mut self, d: DeviceInfo) -> Self { self.devices = d; self }
pub fn country(mut self, c: impl ToString) -> Self { self.country = c.to_string(); self }
pub fn capacity(mut self, c: NodeCapacity) -> Self { self.capacity = c; self }
pub fn provisiontime(mut self, t: u32) -> Self { self.provisiontime = t; self }
/// Placeholder for capacity recalculation out of the devices on the Node
pub fn check(self) -> Self {
pub fn recalc_capacity(mut self) -> Self {
// TODO: calculate NodeCapacity out of the devices on the Node
self
}
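For illustration only, a minimal sketch of what the recalc_capacity body could do, assuming the Rust DeviceInfo and NodeCapacity types mirror the fields documented in the Grid4 README further below (storage size_gb, memory size_gb, CPU cores/passmark, GPU memory_gb); not part of the diff itself:
pub fn recalc_capacity(mut self) -> Self {
    // Aggregate NodeCapacity from the devices attached to this node.
    let mut cap = NodeCapacity::default();
    for s in &self.devices.storage {
        cap.storage_gb += s.size_gb;
    }
    for m in &self.devices.memory {
        cap.mem_gb += m.size_gb;
    }
    for c in &self.devices.cpu {
        cap.vcores += c.cores;
        cap.passmark += c.passmark;
    }
    for g in &self.devices.gpu {
        cap.mem_gb_gpu += g.memory_gb;
    }
    self.capacity = cap;
    self
}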

View File

@@ -1,52 +0,0 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
use super::common::{PricingPolicy, SLAPolicy};
/// Grid4 NodeGroup model (root object for farmer configuration)
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeGroup {
pub base_data: BaseModelData,
/// link back to farmer who owns the nodegroup, is a user?
#[index]
pub farmerid: u32,
/// only visible by farmer, in future encrypted, used to boot a node
pub secret: String,
pub description: String,
pub slapolicy: SLAPolicy,
pub pricingpolicy: PricingPolicy,
/// pricing in CC - cloud credit, per 2GB node slice
pub compute_slice_normalized_pricing_cc: f64,
/// pricing in CC - cloud credit, per 1GB storage slice
pub storage_slice_normalized_pricing_cc: f64,
/// signature as done by farmers to validate that they created this group
pub signature_farmer: String,
}
impl NodeGroup {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
farmerid: 0,
secret: String::new(),
description: String::new(),
slapolicy: SLAPolicy::default(),
pricingpolicy: PricingPolicy::new(),
compute_slice_normalized_pricing_cc: 0.0,
storage_slice_normalized_pricing_cc: 0.0,
signature_farmer: String::new(),
}
}
pub fn farmerid(mut self, v: u32) -> Self { self.farmerid = v; self }
pub fn secret(mut self, v: impl ToString) -> Self { self.secret = v.to_string(); self }
pub fn description(mut self, v: impl ToString) -> Self { self.description = v.to_string(); self }
pub fn slapolicy(mut self, v: SLAPolicy) -> Self { self.slapolicy = v; self }
pub fn pricingpolicy(mut self, v: PricingPolicy) -> Self { self.pricingpolicy = v; self }
pub fn compute_slice_normalized_pricing_cc(mut self, v: f64) -> Self { self.compute_slice_normalized_pricing_cc = v; self }
pub fn storage_slice_normalized_pricing_cc(mut self, v: f64) -> Self { self.storage_slice_normalized_pricing_cc = v; self }
pub fn signature_farmer(mut self, v: impl ToString) -> Self { self.signature_farmer = v.to_string(); self }
}

View File

@@ -1,85 +0,0 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// Node reputation information
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeReputation {
pub node_id: u32,
/// between 0 and 100, earned over time
pub reputation: i32,
/// between 0 and 100, set by system, farmer has no ability to set this
pub uptime: i32,
}
/// NodeGroup reputation model
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct NodeGroupReputation {
pub base_data: BaseModelData,
#[index]
pub nodegroup_id: u32,
/// between 0 and 100, earned over time
pub reputation: i32,
/// between 0 and 100, set by system, farmer has no ability to set this
pub uptime: i32,
pub nodes: Vec<NodeReputation>,
}
impl NodeGroupReputation {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
nodegroup_id: 0,
reputation: 50, // default as per spec
uptime: 0,
nodes: Vec::new(),
}
}
pub fn nodegroup_id(mut self, v: u32) -> Self {
self.nodegroup_id = v;
self
}
pub fn reputation(mut self, v: i32) -> Self {
self.reputation = v;
self
}
pub fn uptime(mut self, v: i32) -> Self {
self.uptime = v;
self
}
pub fn add_node_reputation(mut self, node_rep: NodeReputation) -> Self {
self.nodes.push(node_rep);
self
}
}
impl NodeReputation {
pub fn new() -> Self {
Self {
node_id: 0,
reputation: 50, // default as per spec
uptime: 0,
}
}
pub fn node_id(mut self, v: u32) -> Self {
self.node_id = v;
self
}
pub fn reputation(mut self, v: i32) -> Self {
self.reputation = v;
self
}
pub fn uptime(mut self, v: i32) -> Self {
self.uptime = v;
self
}
}

View File

@@ -1,58 +0,0 @@
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use serde::{Deserialize, Serialize};
/// Reservation status as per V spec
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub enum ReservationStatus {
#[default]
Pending,
Confirmed,
Assigned,
Cancelled,
Done,
}
/// Grid4 Reservation model
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct Reservation {
pub base_data: BaseModelData,
/// links back to customer for this capacity
#[index]
pub customer_id: u32,
pub compute_slices: Vec<u32>,
pub storage_slices: Vec<u32>,
pub status: ReservationStatus,
/// if obligation, then it will be charged and money needs to be in escrow; otherwise it's an intent
pub obligation: bool,
/// epoch
pub start_date: u32,
pub end_date: u32,
}
impl Reservation {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
customer_id: 0,
compute_slices: Vec::new(),
storage_slices: Vec::new(),
status: ReservationStatus::Pending,
obligation: false,
start_date: 0,
end_date: 0,
}
}
pub fn customer_id(mut self, v: u32) -> Self { self.customer_id = v; self }
pub fn add_compute_slice(mut self, id: u32) -> Self { self.compute_slices.push(id); self }
pub fn compute_slices(mut self, v: Vec<u32>) -> Self { self.compute_slices = v; self }
pub fn add_storage_slice(mut self, id: u32) -> Self { self.storage_slices.push(id); self }
pub fn storage_slices(mut self, v: Vec<u32>) -> Self { self.storage_slices = v; self }
pub fn status(mut self, v: ReservationStatus) -> Self { self.status = v; self }
pub fn obligation(mut self, v: bool) -> Self { self.obligation = v; self }
pub fn start_date(mut self, v: u32) -> Self { self.start_date = v; self }
pub fn end_date(mut self, v: u32) -> Self { self.end_date = v; self }
}

View File

@@ -1,194 +0,0 @@
# Grid4 Data Model
This module defines data models for nodes, groups, and slices in a cloud/grid infrastructure. Each root object is marked with `@[heap]` and can be indexed for efficient querying.
## Root Objects Overview
| Object | Description | Index Fields |
| ----------- | --------------------------------------------- | ------------------------------ |
| `Node` | Represents a single node in the grid | `id`, `nodegroupid`, `country` |
| `NodeGroup` | Represents a group of nodes owned by a farmer | `id`, `farmerid` |
---
## Node
Represents a single node in the grid with slices, devices, and capacity.
| Field | Type | Description | Indexed |
| --------------- | ---------------- | -------------------------------------------- | ------- |
| `id` | `int` | Unique node ID | ✅ |
| `nodegroupid` | `int` | ID of the owning node group | ✅ |
| `uptime` | `int` | Uptime percentage (0-100) | ✅ |
| `computeslices` | `[]ComputeSlice` | List of compute slices | ❌ |
| `storageslices` | `[]StorageSlice` | List of storage slices | ❌ |
| `devices` | `DeviceInfo` | Hardware device info (storage, memory, etc.) | ❌ |
| `country` | `string` | 2-letter country code | ✅ |
| `capacity` | `NodeCapacity` | Aggregated hardware capacity | ❌ |
| `provisiontime` | `u32` | Provisioning time (simple/compatible format) | ✅ |
---
## NodeGroup
Represents a group of nodes owned by a farmer, with policies.
| Field | Type | Description | Indexed |
| ------------------------------------- | --------------- | ---------------------------------------------- | ------- |
| `id` | `u32` | Unique group ID | ✅ |
| `farmerid` | `u32` | Farmer/user ID | ✅ |
| `secret` | `string` | Encrypted secret for booting nodes | ❌ |
| `description` | `string` | Group description | ❌ |
| `slapolicy` | `SLAPolicy` | SLA policy details | ❌ |
| `pricingpolicy` | `PricingPolicy` | Pricing policy details | ❌ |
| `compute_slice_normalized_pricing_cc` | `f64` | Pricing per 2GB compute slice in cloud credits | ❌ |
| `storage_slice_normalized_pricing_cc` | `f64` | Pricing per 1GB storage slice in cloud credits | ❌ |
| `reputation` | `int` | Reputation (0-100) | ✅ |
| `uptime` | `int` | Uptime (0-100) | ✅ |
---
## ComputeSlice
Represents a compute slice (e.g., 1GB memory unit).
| Field | Type | Description |
| -------------------------- | --------------- | -------------------------------- |
| `nodeid` | `u32` | Owning node ID |
| `id` | `int` | Slice ID in node |
| `mem_gb` | `f64` | Memory in GB |
| `storage_gb` | `f64` | Storage in GB |
| `passmark` | `int` | Passmark score |
| `vcores` | `int` | Virtual cores |
| `cpu_oversubscription` | `int` | CPU oversubscription ratio |
| `storage_oversubscription` | `int` | Storage oversubscription ratio |
| `price_range` | `[]f64` | Price range [min, max] |
| `gpus` | `u8` | Number of GPUs |
| `price_cc` | `f64` | Price per slice in cloud credits |
| `pricing_policy` | `PricingPolicy` | Pricing policy |
| `sla_policy` | `SLAPolicy` | SLA policy |
---
## StorageSlice
Represents a 1GB storage slice.
| Field | Type | Description |
| ---------------- | --------------- | -------------------------------- |
| `nodeid` | `u32` | Owning node ID |
| `id` | `int` | Slice ID in node |
| `price_cc` | `f64` | Price per slice in cloud credits |
| `pricing_policy` | `PricingPolicy` | Pricing policy |
| `sla_policy` | `SLAPolicy` | SLA policy |
---
## DeviceInfo
Hardware device information for a node.
| Field | Type | Description |
| --------- | ----------------- | ----------------------- |
| `vendor` | `string` | Vendor of the node |
| `storage` | `[]StorageDevice` | List of storage devices |
| `memory` | `[]MemoryDevice` | List of memory devices |
| `cpu` | `[]CPUDevice` | List of CPU devices |
| `gpu` | `[]GPUDevice` | List of GPU devices |
| `network` | `[]NetworkDevice` | List of network devices |
---
## StorageDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `size_gb` | `f64` | Size in GB |
| `description` | `string` | Description of device |
---
## MemoryDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `size_gb` | `f64` | Size in GB |
| `description` | `string` | Description of device |
---
## CPUDevice
| Field | Type | Description |
| ------------- | -------- | ------------------------ |
| `id` | `string` | Unique ID for device |
| `cores` | `int` | Number of CPU cores |
| `passmark` | `int` | Passmark benchmark score |
| `description` | `string` | Description of device |
| `cpu_brand` | `string` | Brand of the CPU |
| `cpu_version` | `string` | Version of the CPU |
---
## GPUDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `cores` | `int` | Number of GPU cores |
| `memory_gb` | `f64` | GPU memory in GB |
| `description` | `string` | Description of device |
| `gpu_brand` | `string` | Brand of the GPU |
| `gpu_version` | `string` | Version of the GPU |
---
## NetworkDevice
| Field | Type | Description |
| ------------- | -------- | --------------------- |
| `id` | `string` | Unique ID for device |
| `speed_mbps` | `int` | Network speed in Mbps |
| `description` | `string` | Description of device |
---
## NodeCapacity
Aggregated hardware capacity for a node.
| Field | Type | Description |
| ------------ | ----- | ---------------------- |
| `storage_gb` | `f64` | Total storage in GB |
| `mem_gb` | `f64` | Total memory in GB |
| `mem_gb_gpu` | `f64` | Total GPU memory in GB |
| `passmark` | `int` | Total passmark score |
| `vcores` | `int` | Total virtual cores |
---
## SLAPolicy
Service Level Agreement policy for slices or node groups.
| Field | Type | Description |
| -------------------- | ----- | --------------------------------------- |
| `sla_uptime` | `int` | Required uptime % (e.g., 90) |
| `sla_bandwidth_mbit` | `int` | Guaranteed bandwidth in Mbps (0 = none) |
| `sla_penalty` | `int` | Penalty % if SLA is breached (0-100) |
---
## PricingPolicy
Pricing policy for slices or node groups.
| Field | Type | Description |
| ---------------------------- | ------- | --------------------------------------------------------- |
| `marketplace_year_discounts` | `[]int` | Discounts for 1Y, 2Y, 3Y prepaid usage (e.g. [30,40,50]) |
| `volume_discounts` | `[]int` | Volume discounts based on purchase size (e.g. [10,20,30]) |
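## Example (Rust sketch)

For orientation, a hypothetical construction of a node with one compute slice using the builder-style setters from the Rust models, assuming `ComputeSlice::new()` and `Node::new()` constructors analogous to the `StorageSlice::new()` shown above:

```rust
let slice = ComputeSlice::new()
    .nodeid(1)
    .slice_id(0)
    .mem_gb(2.0)
    .storage_gb(20.0)
    .passmark(2500)
    .vcores(2)
    .cpu_oversubscription(2)
    .price_cc(0.5);

let node = Node::new()
    .nodegroupid(7)
    .uptime(99)
    .country("BE")
    .add_compute_slice(slice)
    .provisiontime(1_700_000_000)
    .recalc_capacity();
```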

View File

@@ -1,37 +0,0 @@
module datamodel
// I can bid for infra, and optionally get accepted
@[heap]
pub struct Bid {
pub mut:
id u32
customer_id u32 // links back to customer for this capacity (user on ledger)
compute_slices_nr int // nr of slices I need in 1 machine
compute_slice_price f64 // price per 1 GB slice I want to accept
storage_slices_nr int
storage_slice_price f64 // price per 1 GB storage slice I want to accept
status BidStatus
obligation bool // if obligation, then it will be charged and money needs to be in escrow; otherwise it's an intent
start_date u32 // epoch
end_date u32
signature_user string // signature as done by a user/consumer to validate their identity and intent
billing_period BillingPeriod
}
pub enum BidStatus {
pending
confirmed
assigned
cancelled
done
}
pub enum BillingPeriod {
hourly
monthly
yearly
biannually
triannually
}

View File

@@ -1,52 +0,0 @@
module datamodel
// I can bid for infra, and optionally get accepted
@[heap]
pub struct Contract {
pub mut:
id u32
customer_id u32 // links back to customer for this capacity (user on ledger)
compute_slices []ComputeSliceProvisioned
storage_slices []StorageSliceProvisioned
compute_slice_price f64 // price per 1 GB agreed upon
storage_slice_price f64 // price per 1 GB agreed upon
network_slice_price f64 // price per 1 GB agreed upon (transfer)
status ContractStatus
start_date u32 // epoch
end_date u32
signature_user string // signature as done by a user/consumer to validate their identity and intent
signature_hoster string // signature as done by the hoster
billing_period BillingPeriod
}
pub enum ContractStatus {
active
cancelled
error
paused
}
// typically 1GB of memory, but can be adjusted based on the size of the machine
pub struct ComputeSliceProvisioned {
pub mut:
node_id u32
id u16 // the id of the slice in the node
mem_gb f64
storage_gb f64
passmark int
vcores int
cpu_oversubscription int
tags string
}
// 1GB of storage
pub struct StorageSliceProvisioned {
pub mut:
node_id u32
id u16 // the id of the slice in the node; slices are tracked in the node itself
storage_size_gb int
tags string
}

View File

@@ -1,104 +0,0 @@
module datamodel
//ACCESS ONLY TF
@[heap]
pub struct Node {
pub mut:
id int
nodegroupid int
uptime int // 0..100
computeslices []ComputeSlice
storageslices []StorageSlice
devices DeviceInfo
country string // 2 letter code as specified in lib/data/countries/data/countryInfo.txt, use that library for validation
capacity NodeCapacity // Hardware capacity details
birthtime u32 // first time node was active
pubkey string
signature_node string // signature done on node to validate pubkey with privkey
signature_farmer string // signature as done by farmers to validate their identity
}
pub struct DeviceInfo {
pub mut:
vendor string
storage []StorageDevice
memory []MemoryDevice
cpu []CPUDevice
gpu []GPUDevice
network []NetworkDevice
}
pub struct StorageDevice {
pub mut:
id string // can be used in node
size_gb f64 // Size of the storage device in gigabytes
description string // Description of the storage device
}
pub struct MemoryDevice {
pub mut:
id string // can be used in node
size_gb f64 // Size of the memory device in gigabytes
description string // Description of the memory device
}
pub struct CPUDevice {
pub mut:
id string // can be used in node
cores int // Number of CPU cores
passmark int
description string // Description of the CPU
cpu_brand string // Brand of the CPU
cpu_version string // Version of the CPU
}
pub struct GPUDevice {
pub mut:
id string // can be used in node
cores int // Number of GPU cores
memory_gb f64 // Size of the GPU memory in gigabytes
description string // Description of the GPU
gpu_brand string
gpu_version string
}
pub struct NetworkDevice {
pub mut:
id string // can be used in node
speed_mbps int // Network speed in Mbps
description string // Description of the network device
}
// NodeCapacity represents the hardware capacity details of a node.
pub struct NodeCapacity {
pub mut:
storage_gb f64 // Total storage in gigabytes
mem_gb f64 // Total memory in gigabytes
mem_gb_gpu f64 // Total GPU memory in gigabytes
passmark int // Passmark score for the node
vcores int // Total virtual cores
}
// typically 1GB of memory, but can be adjusted based on the size of the machine
pub struct ComputeSlice {
pub mut:
id u16 // the id of the slice in the node
mem_gb f64
storage_gb f64
passmark int
vcores int
cpu_oversubscription int
storage_oversubscription int
gpus u8 // nr of GPU's see node to know what GPU's are
}
// 1GB of storage
pub struct StorageSlice {
pub mut:
id u16 // the id of the slice in the node; slices are tracked in the node itself
}
fn (mut n Node) check() ! {
// todo calculate NodeCapacity out of the devices on the Node
}

View File

@@ -1,33 +0,0 @@
module datamodel
// root object; the only object a farmer needs to configure in the UI, it defines how slices will be created
@[heap]
pub struct NodeGroup {
pub mut:
id u32
farmerid u32 // link back to farmer who owns the nodegroup, is a user?
secret string // only visible by farmer, in future encrypted, used to boot a node
description string
slapolicy SLAPolicy
pricingpolicy PricingPolicy
compute_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 2GB node slice
storage_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 1GB storage slice
signature_farmer string // signature as done by farmers to validate that they created this group
}
pub struct SLAPolicy {
pub mut:
sla_uptime int // should +90
sla_bandwidth_mbit int // minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee
sla_penalty int // 0-100, percent of money given back in relation to month if sla breached, e.g. 200 means we return 2 months worth of rev if sla missed
}
pub struct PricingPolicy {
pub mut:
marketplace_year_discounts []int = [30, 40, 50] // e.g. 30,40,50 means if the user has more CC in their wallet than 1 year of utilization across all their purchases, this provider gives 30%; 2Y 40%, ...
// volume_discounts []int = [10, 20, 30] // e.g. 10,20,30
}

View File

@@ -1,19 +0,0 @@
@[heap]
pub struct NodeGroupReputation {
pub mut:
nodegroup_id u32
reputation int = 50 // between 0 and 100, earned over time
uptime int // between 0 and 100, set by system, farmer has no ability to set this
nodes []NodeReputation
}
pub struct NodeReputation {
pub mut:
node_id u32
reputation int = 50 // between 0 and 100, earned over time
uptime int // between 0 and 100, set by system, farmer has no ability to set this
}

View File

@@ -1,4 +1,4 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -209,13 +209,10 @@ pub struct DNSZone {
pub base_data: BaseModelData,
#[index]
pub domain: String,
#[index(path = "subdomain")]
#[index(path = "record_type")]
pub dnsrecords: Vec<DNSRecord>,
pub administrators: Vec<u32>,
pub status: DNSZoneStatus,
pub metadata: HashMap<String, String>,
#[index(path = "primary_ns")]
pub soarecord: Vec<SOARecord>,
}
@@ -300,3 +297,5 @@ impl DNSZone {
self
}
}

View File

@@ -1,4 +1,4 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
@@ -184,6 +184,8 @@ impl Group {
}
}
/// Represents the membership relationship between users and groups
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
@@ -230,3 +232,5 @@ impl UserGroupMembership {
self
}
}

View File

@@ -1,4 +1,4 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
@@ -111,3 +111,5 @@ impl Member {
self
}
}

View File

@@ -1,10 +1,20 @@
// Export all heroledger model modules
pub mod dnsrecord;
pub mod user;
pub mod group;
pub mod membership;
pub mod money;
pub mod rhai;
pub mod membership;
pub mod dnsrecord;
pub mod secretbox;
pub mod signature;
pub mod user;
pub mod user_kvs;
pub mod rhai;
// Re-export key types for convenience
pub use user::{User, UserStatus, UserProfile, KYCInfo, KYCStatus, SecretBox};
pub use group::{Group, UserGroupMembership, GroupStatus, Visibility, GroupConfig};
pub use money::{Account, Asset, AccountPolicy, AccountPolicyItem, Transaction, AccountStatus, TransactionType, Signature as TransactionSignature};
pub use membership::{Member, MemberRole, MemberStatus};
pub use dnsrecord::{DNSZone, DNSRecord, SOARecord, NameType, NameCat, DNSZoneStatus};
pub use secretbox::{Notary, NotaryStatus, SecretBoxCategory};
pub use signature::{Signature, SignatureStatus, ObjectType};
pub use user_kvs::{UserKVS, UserKVSItem};

View File

@@ -1,4 +1,4 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -223,6 +223,8 @@ impl Account {
}
}
/// Represents an asset in the financial system
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
@@ -340,6 +342,8 @@ impl Asset {
}
}
/// Represents account policies for various operations
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
@@ -396,6 +400,8 @@ impl AccountPolicy {
}
}
/// Represents a financial transaction
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
@@ -505,3 +511,5 @@ impl Transaction {
self
}
}

View File

@@ -1,13 +1,8 @@
use ::rhai::plugin::*;
use ::rhai::{Dynamic, Engine, EvalAltResult, Module};
use ::rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module};
use std::mem;
use crate::models::heroledger::{
dnsrecord::DNSZone,
group::{Group, Visibility},
money::Account,
user::{User, UserStatus},
};
use crate::models::heroledger::*;
// ============================================================================
// User Module
@@ -17,8 +12,6 @@ type RhaiUser = User;
#[export_module]
mod rhai_user_module {
use crate::models::heroledger::user::User;
use super::RhaiUser;
#[rhai_fn(name = "new_user", return_raw)]
@@ -37,21 +30,30 @@ mod rhai_user_module {
}
#[rhai_fn(name = "add_email", return_raw)]
pub fn add_email(user: &mut RhaiUser, email: String) -> Result<RhaiUser, Box<EvalAltResult>> {
pub fn add_email(
user: &mut RhaiUser,
email: String,
) -> Result<RhaiUser, Box<EvalAltResult>> {
let owned = std::mem::take(user);
*user = owned.add_email(email);
Ok(user.clone())
}
#[rhai_fn(name = "pubkey", return_raw)]
pub fn set_pubkey(user: &mut RhaiUser, pubkey: String) -> Result<RhaiUser, Box<EvalAltResult>> {
pub fn set_pubkey(
user: &mut RhaiUser,
pubkey: String,
) -> Result<RhaiUser, Box<EvalAltResult>> {
let owned = std::mem::take(user);
*user = owned.pubkey(pubkey);
Ok(user.clone())
}
#[rhai_fn(name = "status", return_raw)]
pub fn set_status(user: &mut RhaiUser, status: String) -> Result<RhaiUser, Box<EvalAltResult>> {
pub fn set_status(
user: &mut RhaiUser,
status: String,
) -> Result<RhaiUser, Box<EvalAltResult>> {
let status_enum = match status.as_str() {
"Active" => UserStatus::Active,
"Inactive" => UserStatus::Inactive,
@@ -113,7 +115,10 @@ mod rhai_group_module {
}
#[rhai_fn(name = "name", return_raw)]
pub fn set_name(group: &mut RhaiGroup, name: String) -> Result<RhaiGroup, Box<EvalAltResult>> {
pub fn set_name(
group: &mut RhaiGroup,
name: String,
) -> Result<RhaiGroup, Box<EvalAltResult>> {
let owned = std::mem::take(group);
*group = owned.name(name);
Ok(group.clone())
@@ -258,11 +263,15 @@ mod rhai_dns_zone_module {
Ok(zone.clone())
}
#[rhai_fn(name = "save_dns_zone", return_raw)]
pub fn save_dns_zone(zone: &mut RhaiDNSZone) -> Result<RhaiDNSZone, Box<EvalAltResult>> {
Ok(zone.clone())
}
// Getters
#[rhai_fn(name = "get_id")]
pub fn get_id(zone: &mut RhaiDNSZone) -> i64 {

View File

@@ -1,4 +1,4 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
@@ -138,3 +138,5 @@ impl Notary {
self
}
}

View File

@@ -1,4 +1,4 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
@@ -116,3 +116,5 @@ impl Signature {
self
}
}

View File

@@ -1,4 +1,4 @@
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -366,3 +366,5 @@ impl User {
self
}
}

View File

@@ -1,7 +1,7 @@
use super::secretbox::SecretBox;
use heromodels_core::{BaseModelData, IndexKey, Model};
use heromodels_core::{Model, BaseModelData, IndexKey};
use heromodels_derive::model;
use serde::{Deserialize, Serialize};
use super::secretbox::SecretBox;
/// Represents a per-user key-value store
#[model]
@@ -44,6 +44,8 @@ impl UserKVS {
}
}
/// Represents an item in a user's key-value store
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
@@ -114,3 +116,5 @@ impl UserKVSItem {
self
}
}

View File

@@ -46,4 +46,4 @@ pub struct IdenfyVerificationData {
pub doc_issuing_country: Option<String>,
#[serde(rename = "manuallyDataChanged")]
pub manually_data_changed: Option<bool>,
}
}

View File

@@ -2,4 +2,4 @@
pub mod kyc;
pub use kyc::*;
pub use kyc::*;

View File

@@ -8,4 +8,4 @@ pub struct Address {
pub postal_code: String,
pub country: String,
pub company: Option<String>,
}
}

View File

@@ -10,16 +10,16 @@ pub mod contact;
pub mod finance;
pub mod flow;
pub mod governance;
pub mod grid4;
pub mod heroledger;
pub mod identity;
pub mod legal;
pub mod library;
pub mod location;
pub mod object;
pub mod payment;
pub mod projects;
// pub mod tfmarketplace;
pub mod payment;
pub mod identity;
pub mod tfmarketplace;
pub mod grid4;
// Re-export key types for convenience
pub use core::Comment;
@@ -39,4 +39,3 @@ pub use legal::{Contract, ContractRevision, ContractSigner, ContractStatus, Sign
pub use library::collection::Collection;
pub use library::items::{Image, Markdown, Pdf};
pub use projects::{Project, Status};
pub use heroledger::*;

View File

@@ -1,6 +1,6 @@
use super::Object;
use rhai::plugin::*;
use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module};
use super::Object;
type RhaiObject = Object;
@@ -16,7 +16,10 @@ pub mod generated_rhai_module {
/// Set the title of an Object
#[rhai_fn(name = "object_title")]
pub fn object_title(object: &mut RhaiObject, title: String) -> RhaiObject {
pub fn object_title(
object: &mut RhaiObject,
title: String,
) -> RhaiObject {
let mut result = object.clone();
result.title = title;
result
@@ -24,7 +27,10 @@ pub mod generated_rhai_module {
/// Set the description of an Object
#[rhai_fn(name = "object_description")]
pub fn object_description(object: &mut RhaiObject, description: String) -> RhaiObject {
pub fn object_description(
object: &mut RhaiObject,
description: String,
) -> RhaiObject {
let mut result = object.clone();
result.description = description;
result

View File

@@ -2,4 +2,4 @@
pub mod stripe;
pub use stripe::*;
pub use stripe::*;

View File

@@ -27,4 +27,4 @@ pub struct StripeEventData {
pub struct StripeEventRequest {
pub id: Option<String>,
pub idempotency_key: Option<String>,
}
}

View File

@@ -0,0 +1,115 @@
use heromodels_core::BaseModelData;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use crate::models::tfmarketplace::user::ResourceUtilization;
#[derive(Default)]
pub struct UserActivityBuilder {
    base_data: BaseModelData,
activity_type: Option<crate::models::user::ActivityType>,
description: Option<String>,
timestamp: Option<chrono::DateTime<chrono::Utc>>,
metadata: Option<std::collections::HashMap<String, serde_json::Value>>,
category: Option<String>,
importance: Option<crate::models::user::ActivityImportance>,
}
impl UserActivityBuilder {
pub fn new() -> Self {
Self::default()
}
    pub fn id(mut self, id: u32) -> Self {
        self.base_data.id = id;
        self
    }
pub fn activity_type(mut self, activity_type: crate::models::user::ActivityType) -> Self {
self.activity_type = Some(activity_type);
self
}
pub fn description(mut self, description: impl Into<String>) -> Self {
self.description = Some(description.into());
self
}
pub fn timestamp(mut self, timestamp: chrono::DateTime<chrono::Utc>) -> Self {
self.timestamp = Some(timestamp);
self
}
pub fn metadata(mut self, metadata: std::collections::HashMap<String, serde_json::Value>) -> Self {
self.metadata = Some(metadata);
self
}
pub fn category(mut self, category: impl Into<String>) -> Self {
self.category = Some(category.into());
self
}
pub fn importance(mut self, importance: crate::models::user::ActivityImportance) -> Self {
self.importance = Some(importance);
self
}
pub fn build(self) -> Result<crate::models::user::UserActivity, String> {
Ok(crate::models::user::UserActivity {
            base_data: self.base_data,
activity_type: self.activity_type.ok_or("activity_type is required")?,
description: self.description.unwrap_or_else(|| "No description".to_string()),
timestamp: self.timestamp.unwrap_or_else(|| chrono::Utc::now()),
metadata: self.metadata.unwrap_or_default(),
category: self.category.unwrap_or_else(|| "General".to_string()),
importance: self.importance.unwrap_or(crate::models::user::ActivityImportance::Medium),
})
}
}
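A hedged usage sketch of the builder above (module paths are taken from the code as written; values are illustrative):
let activity = UserActivityBuilder::new()
    .activity_type(crate::models::user::ActivityType::Login)
    .description("Signed in from the web UI")
    .category("Auth")
    .importance(crate::models::user::ActivityImportance::Low)
    .build()
    .expect("activity_type is set");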
/// User Activity Tracking
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserActivity {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub activity_type: ActivityType,
pub description: String,
#[serde(deserialize_with = "deserialize_datetime")]
pub timestamp: DateTime<Utc>,
pub metadata: std::collections::HashMap<String, serde_json::Value>,
pub category: String,
pub importance: ActivityImportance,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ActivityType {
Login,
Purchase,
Deployment,
ServiceCreated,
AppPublished,
NodeAdded,
NodeUpdated,
WalletTransaction,
ProfileUpdate,
SettingsChange,
MarketplaceView,
SliceCreated,
SliceAllocated,
SliceReleased,
SliceRentalStarted,
SliceRentalStopped,
SliceRentalRestarted,
SliceRentalCancelled,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ActivityImportance {
Low,
Medium,
High,
Critical,
}

View File

@@ -0,0 +1,361 @@
use heromodels_core::BaseModelData;
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
/// Unified App struct that can represent published apps, deployments, and deployment stats
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct App {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
// Core app information
pub name: String,
pub category: Option<String>,
pub version: Option<String>,
pub status: String,
// Deployment information
pub customer_name: Option<String>,
pub customer_email: Option<String>,
pub deployed_date: Option<String>,
pub health_score: Option<f32>,
pub region: Option<String>,
pub instances: Option<i32>,
pub resource_usage: Option<ResourceUtilization>,
// Business metrics
pub deployments: Option<i32>,
pub rating: Option<f32>,
pub monthly_revenue_usd: Option<i32>,
pub cost_per_month: Option<Decimal>,
// Metadata
pub last_updated: Option<String>,
pub auto_healing: Option<bool>,
pub provider: Option<String>,
pub deployed_at: Option<DateTime<Utc>>,
}
impl App {
/// Convenience method to get the app ID
pub fn id(&self) -> &u32 {
&self.base_data.id
}
/// Get category with default
pub fn category_or_default(&self) -> String {
self.category.clone().unwrap_or_else(|| "Application".to_string())
}
/// Get version with default
pub fn version_or_default(&self) -> String {
self.version.clone().unwrap_or_else(|| "1.0.0".to_string())
}
/// Get deployments count with default
pub fn deployments_or_default(&self) -> i32 {
self.deployments.unwrap_or(0)
}
/// Get rating with default
pub fn rating_or_default(&self) -> f32 {
self.rating.unwrap_or(4.0)
}
/// Get monthly revenue with default
pub fn monthly_revenue_usd_or_default(&self) -> i32 {
self.monthly_revenue_usd.unwrap_or(0)
}
/// Get last updated with default
pub fn last_updated_or_default(&self) -> String {
self.last_updated.clone().unwrap_or_else(|| "Unknown".to_string())
}
/// Get auto healing with default
pub fn auto_healing_or_default(&self) -> bool {
self.auto_healing.unwrap_or(false)
}
}
pub struct Deployment {
pub base_data: BaseModelData,
pub app_id: String,
pub instance_id: String,
pub status: String,
pub region: String,
pub health_score: Option<f32>,
pub resource_usage: Option<ResourceUtilization>,
pub deployed_at: Option<DateTime<Utc>>,
}
/// Resource utilization information
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ResourceUtilization {
pub cpu: i32,
pub memory: i32,
pub storage: i32,
pub network: i32,
}
/// Deployment status enumeration
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub enum DeploymentStatus {
#[default]
Running,
Stopped,
Failed,
Pending,
Maintenance,
}
/// Unified App builder
#[derive(Default)]
pub struct AppBuilder {
base_data: BaseModelData,
name: Option<String>,
category: Option<String>,
version: Option<String>,
status: Option<String>,
customer_name: Option<String>,
customer_email: Option<String>,
deployed_date: Option<String>,
health_score: Option<f32>,
region: Option<String>,
instances: Option<i32>,
resource_usage: Option<ResourceUtilization>,
deployments: Option<i32>,
rating: Option<f32>,
monthly_revenue_usd: Option<i32>,
cost_per_month: Option<Decimal>,
last_updated: Option<String>,
auto_healing: Option<bool>,
provider: Option<String>,
deployed_at: Option<DateTime<Utc>>,
}
impl AppBuilder {
pub fn new() -> Self {
Self {
base_data: BaseModelData::new(),
..Default::default()
}
}
pub fn name(mut self, name: impl Into<String>) -> Self {
self.name = Some(name.into());
self
}
pub fn category(mut self, category: impl Into<String>) -> Self {
self.category = Some(category.into());
self
}
pub fn version(mut self, version: impl Into<String>) -> Self {
self.version = Some(version.into());
self
}
pub fn status(mut self, status: impl Into<String>) -> Self {
self.status = Some(status.into());
self
}
pub fn customer_name(mut self, name: impl Into<String>) -> Self {
self.customer_name = Some(name.into());
self
}
pub fn customer_email(mut self, email: impl Into<String>) -> Self {
self.customer_email = Some(email.into());
self
}
pub fn deployed_date(mut self, date: impl Into<String>) -> Self {
self.deployed_date = Some(date.into());
self
}
pub fn health_score(mut self, score: f32) -> Self {
self.health_score = Some(score);
self
}
pub fn region(mut self, region: impl Into<String>) -> Self {
self.region = Some(region.into());
self
}
pub fn instances(mut self, instances: i32) -> Self {
self.instances = Some(instances);
self
}
pub fn resource_usage(mut self, usage: ResourceUtilization) -> Self {
self.resource_usage = Some(usage);
self
}
pub fn deployments(mut self, deployments: i32) -> Self {
self.deployments = Some(deployments);
self
}
pub fn rating(mut self, rating: f32) -> Self {
self.rating = Some(rating);
self
}
pub fn monthly_revenue_usd(mut self, revenue: i32) -> Self {
self.monthly_revenue_usd = Some(revenue);
self
}
pub fn cost_per_month(mut self, cost: Decimal) -> Self {
self.cost_per_month = Some(cost);
self
}
pub fn last_updated(mut self, updated: impl Into<String>) -> Self {
self.last_updated = Some(updated.into());
self
}
pub fn auto_healing(mut self, enabled: bool) -> Self {
self.auto_healing = Some(enabled);
self
}
pub fn provider(mut self, provider: impl Into<String>) -> Self {
self.provider = Some(provider.into());
self
}
pub fn deployed_at(mut self, date: DateTime<Utc>) -> Self {
self.deployed_at = Some(date);
self
}
pub fn build(self) -> Result<App, String> {
Ok(App {
base_data: self.base_data,
name: self.name.ok_or("name is required")?,
category: self.category,
version: self.version,
status: self.status.unwrap_or_else(|| "Active".to_string()),
customer_name: self.customer_name,
customer_email: self.customer_email,
deployed_date: self.deployed_date,
health_score: self.health_score,
region: self.region,
instances: self.instances,
resource_usage: self.resource_usage,
deployments: self.deployments,
rating: self.rating,
monthly_revenue_usd: self.monthly_revenue_usd,
cost_per_month: self.cost_per_month,
last_updated: self.last_updated,
auto_healing: self.auto_healing,
provider: self.provider,
deployed_at: self.deployed_at,
})
}
}
impl App {
pub fn builder() -> AppBuilder {
AppBuilder::new()
}
// Template methods for common app types
pub fn analytics_template(name: &str) -> Self {
Self::builder()
.name(name)
.category("Analytics")
.version("1.0.0")
.status("Active")
.rating(4.5)
.auto_healing(true)
.build()
.unwrap()
}
pub fn database_template(name: &str) -> Self {
Self::builder()
.name(name)
.category("Database")
.version("1.0.0")
.status("Active")
.rating(4.2)
.auto_healing(false) // Databases need manual intervention
.build()
.unwrap()
}
pub fn web_template(name: &str) -> Self {
Self::builder()
.name(name)
.category("Web")
.version("1.0.0")
.status("Active")
.rating(4.0)
.auto_healing(true)
.build()
.unwrap()
}
// Fluent methods for chaining
pub fn with_stats(mut self, deployments: i32, rating: f32, monthly_revenue_usd: i32) -> Self {
self.deployments = Some(deployments);
self.rating = Some(rating);
self.monthly_revenue_usd = Some(monthly_revenue_usd);
self
}
pub fn with_auto_healing(mut self, enabled: bool) -> Self {
self.auto_healing = Some(enabled);
self
}
pub fn with_version(mut self, version: impl Into<String>) -> Self {
self.version = Some(version.into());
self
}
pub fn with_last_updated(mut self, updated: impl Into<String>) -> Self {
self.last_updated = Some(updated.into());
self
}
pub fn with_deployment_info(mut self, customer_name: &str, customer_email: &str, region: &str) -> Self {
self.customer_name = Some(customer_name.to_string());
self.customer_email = Some(customer_email.to_string());
self.region = Some(region.to_string());
self.deployed_at = Some(Utc::now());
self
}
pub fn with_resource_usage(mut self, cpu: i32, memory: i32, storage: i32, network: i32) -> Self {
self.resource_usage = Some(ResourceUtilization {
cpu,
memory,
storage,
network,
});
self
}
}
// Type aliases for backward compatibility
pub type PublishedApp = App;
pub type AppDeployment = App;
pub type DeploymentStat = App;
pub type UserDeployment = App;
pub type PublishedAppBuilder = AppBuilder;
pub type AppDeploymentBuilder = AppBuilder;
pub type DeploymentStatBuilder = AppBuilder;
pub type UserDeploymentBuilder = AppBuilder;
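A brief usage sketch of the unified App builder above (values are illustrative):
let app = App::builder()
    .name("metrics-dashboard")
    .category("Analytics")
    .version("2.1.0")
    .rating(4.7)
    .auto_healing(true)
    .build()
    .expect("name is provided");
assert_eq!(app.category_or_default(), "Analytics");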

View File

@@ -0,0 +1,351 @@
//! Builder patterns for all marketplace models
//! This module provides a centralized, maintainable way to construct complex structs
//! with sensible defaults and validation.
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use serde_json::Value;
use std::collections::HashMap;
use super::{
user::{PublishedApp, DeploymentStat, ResourceUtilization, User, UserRole, MockUserData, ServiceBooking},
product::{Product, ProductAttribute, ProductAvailability, ProductMetadata},
order::{Order, OrderItem, OrderStatus, PaymentDetails, Address, PurchaseType},
};
use crate::services::user_persistence::AppDeployment;
use heromodels_core::BaseModelData;
// =============================================================================
// USER MODEL BUILDERS
// =============================================================================
#[derive(Default)]
pub struct MockDataBuilder {
user_type: Option<String>,
include_farmer_data: Option<bool>,
include_service_data: Option<bool>,
include_app_data: Option<bool>,
}
impl MockDataBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn user_type(mut self, user_type: impl Into<String>) -> Self {
self.user_type = Some(user_type.into());
self
}
pub fn include_farmer_data(mut self, include: bool) -> Self {
self.include_farmer_data = Some(include);
self
}
pub fn include_service_data(mut self, include: bool) -> Self {
self.include_service_data = Some(include);
self
}
pub fn include_app_data(mut self, include: bool) -> Self {
self.include_app_data = Some(include);
self
}
pub fn build(self) -> crate::models::user::MockUserData {
// This would create appropriate mock data based on configuration
// For now, return a default instance
crate::models::user::MockUserData::new_user()
}
}
// =============================================================================
// FARMER DATA BUILDER
// =============================================================================
#[derive(Default)]
pub struct FarmerDataBuilder {
total_nodes: Option<i32>,
online_nodes: Option<i32>,
total_capacity: Option<crate::models::user::NodeCapacity>,
used_capacity: Option<crate::models::user::NodeCapacity>,
monthly_earnings: Option<i32>,
total_earnings: Option<i32>,
uptime_percentage: Option<f32>,
nodes: Option<Vec<crate::models::user::FarmNode>>,
earnings_history: Option<Vec<crate::models::user::EarningsRecord>>,
active_slices: Option<i32>,
}
impl FarmerDataBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn total_nodes(mut self, total_nodes: i32) -> Self {
self.total_nodes = Some(total_nodes);
self
}
pub fn online_nodes(mut self, online_nodes: i32) -> Self {
self.online_nodes = Some(online_nodes);
self
}
pub fn total_capacity(mut self, capacity: crate::models::user::NodeCapacity) -> Self {
self.total_capacity = Some(capacity);
self
}
pub fn used_capacity(mut self, capacity: crate::models::user::NodeCapacity) -> Self {
self.used_capacity = Some(capacity);
self
}
pub fn monthly_earnings_usd(mut self, earnings: i32) -> Self {
self.monthly_earnings = Some(earnings);
self
}
pub fn total_earnings_usd(mut self, earnings: i32) -> Self {
self.total_earnings = Some(earnings);
self
}
pub fn uptime_percentage(mut self, uptime: f32) -> Self {
self.uptime_percentage = Some(uptime);
self
}
pub fn nodes(mut self, nodes: Vec<crate::models::user::FarmNode>) -> Self {
self.nodes = Some(nodes);
self
}
pub fn earnings_history(mut self, history: Vec<crate::models::user::EarningsRecord>) -> Self {
self.earnings_history = Some(history);
self
}
pub fn earnings(mut self, earnings: Vec<crate::models::user::EarningsRecord>) -> Self {
self.earnings_history = Some(earnings);
self
}
pub fn active_slices(mut self, active_slices: i32) -> Self {
self.active_slices = Some(active_slices);
self
}
pub fn calculate_totals(mut self) -> Self {
// Calculate totals from existing data
if let Some(ref nodes) = self.nodes {
self.total_nodes = Some(nodes.len() as i32);
self.online_nodes = Some(nodes.iter().filter(|n| matches!(n.status, crate::models::user::NodeStatus::Online)).count() as i32);
// Calculate total and used capacity from all nodes
let mut total_capacity = crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
};
let mut used_capacity = crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
};
for node in nodes {
total_capacity.cpu_cores += node.capacity.cpu_cores;
total_capacity.memory_gb += node.capacity.memory_gb;
total_capacity.storage_gb += node.capacity.storage_gb;
total_capacity.bandwidth_mbps += node.capacity.bandwidth_mbps;
total_capacity.ssd_storage_gb += node.capacity.ssd_storage_gb;
total_capacity.hdd_storage_gb += node.capacity.hdd_storage_gb;
used_capacity.cpu_cores += node.used_capacity.cpu_cores;
used_capacity.memory_gb += node.used_capacity.memory_gb;
used_capacity.storage_gb += node.used_capacity.storage_gb;
used_capacity.bandwidth_mbps += node.used_capacity.bandwidth_mbps;
used_capacity.ssd_storage_gb += node.used_capacity.ssd_storage_gb;
used_capacity.hdd_storage_gb += node.used_capacity.hdd_storage_gb;
}
self.total_capacity = Some(total_capacity);
self.used_capacity = Some(used_capacity);
// Calculate uptime percentage
if !nodes.is_empty() {
let avg_uptime = nodes.iter().map(|n| n.uptime_percentage).sum::<f32>() / nodes.len() as f32;
self.uptime_percentage = Some(avg_uptime);
}
}
if let Some(ref earnings) = self.earnings_history {
let total: i32 = earnings.iter().map(|e| e.amount.to_string().parse::<i32>().unwrap_or(0)).sum();
self.total_earnings = Some(total);
self.monthly_earnings = Some(total); // Set monthly earnings as well
}
self
}
pub fn build(self) -> Result<crate::models::user::FarmerData, String> {
Ok(crate::models::user::FarmerData {
total_nodes: self.total_nodes.unwrap_or(0),
online_nodes: self.online_nodes.unwrap_or(0),
total_capacity: self.total_capacity.unwrap_or(crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
}),
used_capacity: self.used_capacity.unwrap_or(crate::models::user::NodeCapacity {
cpu_cores: 0,
memory_gb: 0,
storage_gb: 0,
bandwidth_mbps: 0,
ssd_storage_gb: 0,
hdd_storage_gb: 0,
}),
monthly_earnings_usd: self.monthly_earnings.unwrap_or(0),
total_earnings_usd: self.total_earnings.unwrap_or(0),
uptime_percentage: self.uptime_percentage.unwrap_or(0.0),
nodes: self.nodes.unwrap_or_default(),
earnings_history: self.earnings_history.unwrap_or_default(),
slice_templates: Vec::default(), // Will be populated separately
active_slices: self.active_slices.unwrap_or(0),
})
}
}
// =============================================================================
// SERVICE BOOKING BUILDER
// =============================================================================
#[derive(Default)]
pub struct SpendingRecordBuilder {
date: Option<String>,
amount: Option<i32>,
service_name: Option<String>,
provider_name: Option<String>,
}
impl SpendingRecordBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn date(mut self, date: &str) -> Self {
self.date = Some(date.to_string());
self
}
pub fn amount(mut self, amount: i32) -> Self {
self.amount = Some(amount);
self
}
pub fn service_name(mut self, name: &str) -> Self {
self.service_name = Some(name.to_string());
self
}
pub fn provider_name(mut self, name: &str) -> Self {
self.provider_name = Some(name.to_string());
self
}
pub fn build(self) -> Result<crate::models::user::SpendingRecord, String> {
Ok(crate::models::user::SpendingRecord {
date: self.date.ok_or("Date is required")?,
amount: self.amount.unwrap_or(0),
service_name: self.service_name.ok_or("Service name is required")?,
provider_name: self.provider_name.ok_or("Provider name is required")?,
})
}
}
impl crate::models::user::SpendingRecord {
pub fn builder() -> SpendingRecordBuilder {
SpendingRecordBuilder::new()
}
}
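For illustration, building a SpendingRecord with the builder above (values are made up):
let record = crate::models::user::SpendingRecord::builder()
    .date("2025-08-01")
    .amount(25)
    .service_name("Managed Postgres")
    .provider_name("ExampleHost")
    .build()
    .expect("required fields are set");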
// =============================================================================
// AUTO TOP-UP BUILDERS
// =============================================================================
#[derive(Default)]
pub struct AutoTopUpSettingsBuilder {
enabled: Option<bool>,
threshold_amount: Option<Decimal>,
topup_amount: Option<Decimal>,
    payment_method_id: Option<String>,
daily_limit: Option<Decimal>,
monthly_limit: Option<Decimal>,
}
impl AutoTopUpSettingsBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn enabled(mut self, enabled: bool) -> Self {
self.enabled = Some(enabled);
self
}
pub fn threshold_amount(mut self, amount: Decimal) -> Self {
self.threshold_amount = Some(amount);
self
}
pub fn topup_amount(mut self, amount: Decimal) -> Self {
self.topup_amount = Some(amount);
self
}
    pub fn payment_method_id(mut self, id: impl Into<String>) -> Self {
        self.payment_method_id = Some(id.into());
        self
    }
pub fn daily_limit(mut self, limit: Decimal) -> Self {
self.daily_limit = Some(limit);
self
}
pub fn monthly_limit(mut self, limit: Decimal) -> Self {
self.monthly_limit = Some(limit);
self
}
pub fn build(self) -> Result<crate::services::user_persistence::AutoTopUpSettings, String> {
Ok(crate::services::user_persistence::AutoTopUpSettings {
enabled: self.enabled.unwrap_or(false),
threshold_amount_usd: self.threshold_amount.unwrap_or(dec!(10.0)),
topup_amount_usd: self.topup_amount.unwrap_or(dec!(25.0)),
            // NOTE: target field name assumed from the original builder comment
            payment_method_id: self.payment_method_id.ok_or("payment_method_id is required")?,
daily_limit_usd: self.daily_limit,
monthly_limit_usd: self.monthly_limit,
// created_at: chrono::Utc::now() - moved to base_data,
// updated_at: chrono::Utc::now() - moved to base_data,
})
}
}

View File

@@ -0,0 +1,105 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Shopping Cart Models
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CartItem {
pub product_id: u32,
pub quantity: u32,
pub selected_specifications: HashMap<String, serde_json::Value>,
pub added_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Cart {
pub base_data: BaseModelData,
pub items: Vec<CartItem>,
}
impl Cart {
    pub fn new() -> Self {
        Self {
            base_data: BaseModelData::new(),
            items: Vec::default(),
        }
    }
pub fn add_item(&mut self, item: CartItem) {
// Check if item already exists and update quantity
if let Some(existing_item) = self.items.iter_mut()
.find(|i| i.product_id == item.product_id && i.selected_specifications == item.selected_specifications) {
existing_item.quantity += item.quantity;
} else {
self.items.push(item);
}
}
    pub fn remove_item(&mut self, product_id: u32) -> bool {
        let initial_len = self.items.len();
        self.items.retain(|item| item.product_id != product_id);
        if self.items.len() != initial_len {
            self.base_data.modified_at = Utc::now().timestamp();
            true
        } else {
            false
        }
    }
    pub fn update_item_quantity(&mut self, product_id: u32, quantity: u32) -> bool {
        if let Some(item) = self.items.iter_mut().find(|i| i.product_id == product_id) {
            if quantity == 0 {
                return self.remove_item(product_id);
            }
            item.quantity = quantity;
            self.base_data.modified_at = Utc::now().timestamp();
            true
        } else {
            false
        }
    }
pub fn clear(&mut self) {
self.items.clear();
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn get_total_items(&self) -> u32 {
self.items.iter().map(|item| item.quantity).sum()
}
pub fn is_empty(&self) -> bool {
self.items.is_empty()
}
}
impl CartItem {
    pub fn new(product_id: u32, quantity: u32) -> Self {
        Self {
            product_id,
            quantity,
            selected_specifications: HashMap::default(),
            added_at: Utc::now(),
        }
    }
    pub fn with_specifications(
        product_id: u32,
        quantity: u32,
        specifications: HashMap<String, serde_json::Value>,
    ) -> Self {
        Self {
            product_id,
            quantity,
            selected_specifications: specifications,
            added_at: Utc::now(),
        }
    }
}
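A short usage sketch of the cart API above, assuming the u32-based signatures as corrected here:
let mut cart = Cart::new();
cart.add_item(CartItem::new(42, 1));
cart.add_item(CartItem::new(42, 2)); // merged into the existing line item (same product, same specs)
assert_eq!(cart.get_total_items(), 3);
cart.update_item_quantity(42, 0); // quantity 0 removes the item
assert!(cart.is_empty());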

View File

@@ -0,0 +1,90 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::{CustomType, TypeBuilder};
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Configurable currency support for any currency type
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType)]
pub struct Currency {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
#[index]
pub code: String, // USD, EUR, BTC, ETH, etc.
pub name: String,
pub symbol: String,
pub currency_type: CurrencyType,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum CurrencyType {
Fiat,
Cryptocurrency,
Token,
Points, // For loyalty/reward systems
Custom(String), // For marketplace-specific currencies
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Price {
pub base_amount: Decimal, // Amount in marketplace base currency
pub base_currency: String,
pub display_currency: String,
pub display_amount: Decimal,
pub formatted_display: String,
pub conversion_rate: Decimal,
pub conversion_timestamp: DateTime<Utc>,
}
impl Currency {
pub fn new(
code: String,
name: String,
symbol: String,
currency_type: CurrencyType,
) -> Self {
Self {
base_data: BaseModelData::new(),
code,
name,
symbol,
currency_type,
}
}
}
impl Price {
pub fn new(
base_amount: Decimal,
base_currency: String,
display_currency: String,
conversion_rate: Decimal,
) -> Self {
let display_amount = base_amount * conversion_rate;
// Use proper currency symbol formatting - this will be updated by the currency service
Self {
base_amount,
base_currency: base_currency.clone(),
display_currency: display_currency.clone(),
display_amount,
formatted_display: format!("{} {}", display_amount.round_dp(2), display_currency),
conversion_rate,
conversion_timestamp: Utc::now(),
}
}
pub fn format_with_symbol(&self, symbol: &str) -> String {
format!("{} {}",
self.display_amount.round_dp(2),
symbol
)
}
pub fn update_formatted_display(&mut self, formatted: String) {
self.formatted_display = formatted;
}
}
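A quick illustration of the conversion logic above (the rate is made up; rust_decimal_macros is already used elsewhere in this change):
use rust_decimal_macros::dec;

let price = Price::new(dec!(10.00), "USD".to_string(), "EUR".to_string(), dec!(0.92));
assert_eq!(price.display_amount, dec!(9.20));
assert_eq!(price.format_with_symbol("€"), "9.20 €");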

View File

@@ -0,0 +1,30 @@
use serde::{Deserialize, Serialize};

/// Farmer-specific data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FarmerData {
pub total_nodes: i32,
pub online_nodes: i32,
pub total_capacity: NodeCapacity,
pub used_capacity: NodeCapacity,
pub monthly_earnings_usd: i32,
pub total_earnings_usd: i32,
pub uptime_percentage: f32,
pub nodes: Vec<FarmNode>,
pub earnings_history: Vec<EarningsRecord>,
pub slice_templates: Vec<crate::models::product::Product>,
pub active_slices: i32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FarmerSettings {
#[serde(default)]
pub auto_accept_deployments: bool,
#[serde(default = "default_maintenance_window")]
pub maintenance_window: String,
#[serde(default)]
pub notification_preferences: NotificationSettings,
pub minimum_deployment_duration: i32, // hours
pub preferred_regions: Vec<String>,
#[serde(default)]
pub default_slice_customizations: Option<std::collections::HashMap<String, serde_json::Value>>, // Placeholder for DefaultSliceFormat
}

View File

@@ -0,0 +1,17 @@
// Export models - starting with basic models first
// pub mod user;
// pub mod product;
// pub mod currency;
// pub mod order;
// pub mod pool;
// pub mod builders; // Re-enabled with essential builders only
// pub mod cart;
// pub mod payment;
// pub mod service;
// pub mod slice;
// pub mod node;
pub mod app;
// Re-export commonly used types for easier access
pub use app::{App, PublishedApp, PublishedAppBuilder, ResourceUtilization, AppBuilder, DeploymentStatus};
// pub mod node; // Temporarily disabled - has many service dependencies

File diff suppressed because it is too large

View File

@@ -0,0 +1,8 @@
# Notes
- All ids of base objects are u32.
- Cart is front-end specific.
- Currency and exchange rates should be calculated by the client.
- Presentation concerns such as decimal formatting shouldn't live in the base models.
- A purchase doesn't need to know whether it is instant or via the cart.
- All base objects contain created_at and updated_at, so these don't need to be added to every model.
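As a sketch of these conventions (field names assumed, mirroring the grid4 models above):

```rust
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)]
pub struct ExampleItem {
    /// id (u32), created_at and updated_at live in base_data
    pub base_data: BaseModelData,
    pub name: String,
    /// plain numeric amount; decimal/display formatting is a client concern
    pub price_cc: f64,
}
```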

View File

@@ -0,0 +1,402 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::CustomType;
use crate::models::tfmarketplace::user::ResourceUtilization;
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType)]
pub struct Order {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
#[index]
pub user_id: String,
pub items: Vec<OrderItem>,
pub subtotal_base: Decimal, // In base currency
pub total_base: Decimal, // In base currency
pub base_currency: String,
pub currency_used: String, // Currency user paid in
pub currency_total: Decimal, // Amount in user's currency
pub conversion_rate: Decimal, // Rate used for conversion
pub status: OrderStatus,
pub payment_method: String,
pub payment_details: Option<PaymentDetails>,
pub billing_address: Option<Address>,
pub shipping_address: Option<Address>,
pub notes: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct OrderItem {
pub product_id: String,
pub product_name: String,
pub product_category: String,
pub quantity: u32,
pub unit_price_base: Decimal, // In base currency
pub total_price_base: Decimal, // In base currency
pub specifications: HashMap<String, serde_json::Value>,
pub provider_id: String,
pub provider_name: String,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum OrderStatus {
Pending,
Confirmed,
Processing,
Deployed,
Completed,
Cancelled,
Refunded,
Failed,
}
/// Order summary for display purposes
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OrderSummary {
pub subtotal: Decimal,
pub tax: Decimal,
pub shipping: Decimal,
pub discount: Decimal,
pub total: Decimal,
pub currency: String,
pub item_count: u32,
}
impl Order {
pub fn new(
user_id: String,
base_currency: String,
currency_used: String,
conversion_rate: Decimal,
) -> Self {
Self {
base_data: BaseModelData::new(),
user_id,
items: Vec::default(),
subtotal_base: Decimal::from(0),
total_base: Decimal::from(0),
base_currency,
currency_used,
currency_total: Decimal::from(0),
conversion_rate,
status: OrderStatus::Pending,
payment_method: String::new(),
payment_details: None,
billing_address: None,
shipping_address: None,
notes: None,
}
}
pub fn add_item(&mut self, item: OrderItem) {
self.items.push(item);
self.calculate_totals();
}
pub fn calculate_totals(&mut self) {
self.subtotal_base = self.items.iter()
.map(|item| item.total_price_base)
.sum();
self.total_base = self.subtotal_base; // Add taxes, fees, etc. here
self.currency_total = self.total_base * self.conversion_rate;
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn update_status(&mut self, status: OrderStatus) {
self.status = status;
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn set_payment_details(&mut self, payment_details: PaymentDetails) {
self.payment_details = Some(payment_details);
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn get_item_count(&self) -> u32 {
self.items.iter().map(|item| item.quantity).sum()
}
}
impl OrderItem {
pub fn new(
product_id: String,
product_name: String,
product_category: String,
quantity: u32,
unit_price_base: Decimal,
provider_id: String,
provider_name: String,
) -> Self {
Self {
product_id,
product_name,
product_category,
quantity,
unit_price_base,
total_price_base: unit_price_base * Decimal::from(quantity),
specifications: HashMap::default(),
provider_id,
provider_name,
}
}
pub fn add_specification(&mut self, key: String, value: serde_json::Value) {
self.specifications.insert(key, value);
}
pub fn update_quantity(&mut self, quantity: u32) {
self.quantity = quantity;
self.total_price_base = self.unit_price_base * Decimal::from(quantity);
}
}
#[derive(Default)]
pub struct OrderBuilder {
// The order id lives in base_data and is assigned by the storage layer.
user_id: Option<String>,
items: Vec<OrderItem>,
subtotal_base: Option<Decimal>,
total_base: Option<Decimal>,
base_currency: Option<String>,
currency_used: Option<String>,
currency_total: Option<Decimal>,
conversion_rate: Option<Decimal>,
status: Option<OrderStatus>,
payment_method: Option<String>,
payment_details: Option<PaymentDetails>,
billing_address: Option<Address>,
shipping_address: Option<Address>,
notes: Option<String>,
// created_at: Option<DateTime<Utc>> - moved to base_data,
// updated_at: Option<DateTime<Utc>> - moved to base_data,
}
impl OrderBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn user_id(mut self, user_id: impl Into<String>) -> Self {
self.user_id = Some(user_id.into());
self
}
pub fn add_item(mut self, item: OrderItem) -> Self {
self.items.push(item);
self
}
pub fn items(mut self, items: Vec<OrderItem>) -> Self {
self.items = items;
self
}
pub fn subtotal_base(mut self, subtotal: Decimal) -> Self {
self.subtotal_base = Some(subtotal);
self
}
pub fn total_base(mut self, total: Decimal) -> Self {
self.total_base = Some(total);
self
}
pub fn base_currency(mut self, currency: impl Into<String>) -> Self {
self.base_currency = Some(currency.into());
self
}
pub fn currency_used(mut self, currency: impl Into<String>) -> Self {
self.currency_used = Some(currency.into());
self
}
pub fn currency_total(mut self, total: Decimal) -> Self {
self.currency_total = Some(total);
self
}
pub fn conversion_rate(mut self, rate: Decimal) -> Self {
self.conversion_rate = Some(rate);
self
}
pub fn status(mut self, status: OrderStatus) -> Self {
self.status = Some(status);
self
}
pub fn payment_method(mut self, method: impl Into<String>) -> Self {
self.payment_method = Some(method.into());
self
}
pub fn payment_details(mut self, details: PaymentDetails) -> Self {
self.payment_details = Some(details);
self
}
pub fn billing_address(mut self, address: Address) -> Self {
self.billing_address = Some(address);
self
}
pub fn shipping_address(mut self, address: Address) -> Self {
self.shipping_address = Some(address);
self
}
pub fn notes(mut self, notes: impl Into<String>) -> Self {
self.notes = Some(notes.into());
self
}
pub fn build(self) -> Result<Order, String> {
let subtotal = self.subtotal_base.unwrap_or_else(|| {
self.items.iter().map(|item| item.total_price_base).sum()
});
Ok(Order {
base_data: BaseModelData::new(),
user_id: self.user_id.ok_or("user_id is required")?,
items: self.items,
subtotal_base: subtotal,
total_base: self.total_base.unwrap_or(subtotal),
base_currency: self.base_currency.unwrap_or_else(|| "USD".to_string()),
currency_used: self.currency_used.unwrap_or_else(|| "USD".to_string()),
currency_total: self.currency_total.unwrap_or(subtotal),
conversion_rate: self.conversion_rate.unwrap_or_else(|| Decimal::from(1)),
status: self.status.unwrap_or(OrderStatus::Pending),
payment_method: self.payment_method.unwrap_or_else(|| "credit_card".to_string()),
payment_details: self.payment_details,
billing_address: self.billing_address,
shipping_address: self.shipping_address,
notes: self.notes,
})
}
}
impl Order {
pub fn builder() -> OrderBuilder {
OrderBuilder::new()
}
}
#[derive(Default)]
pub struct OrderItemBuilder {
product_id: Option<String>,
product_name: Option<String>,
product_category: Option<String>,
quantity: Option<u32>,
unit_price_base: Option<Decimal>,
total_price_base: Option<Decimal>,
specifications: HashMap<String, serde_json::Value>,
provider_id: Option<String>,
provider_name: Option<String>,
}
impl OrderItemBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn product_id(mut self, id: impl Into<String>) -> Self {
self.product_id = Some(id.into());
self
}
pub fn product_name(mut self, name: impl Into<String>) -> Self {
self.product_name = Some(name.into());
self
}
pub fn product_category(mut self, category: impl Into<String>) -> Self {
self.product_category = Some(category.into());
self
}
pub fn quantity(mut self, quantity: u32) -> Self {
self.quantity = Some(quantity);
self
}
pub fn unit_price_base(mut self, price: Decimal) -> Self {
self.unit_price_base = Some(price);
self
}
pub fn add_specification(mut self, key: impl Into<String>, value: serde_json::Value) -> Self {
self.specifications.insert(key.into(), value);
self
}
pub fn provider_id(mut self, id: impl Into<String>) -> Self {
self.provider_id = Some(id.into());
self
}
pub fn provider_name(mut self, name: impl Into<String>) -> Self {
self.provider_name = Some(name.into());
self
}
pub fn build(self) -> Result<OrderItem, String> {
let quantity = self.quantity.unwrap_or(1);
let unit_price = self.unit_price_base.ok_or("unit_price_base is required")?;
let total_price = self.total_price_base.unwrap_or(unit_price * Decimal::from(quantity));
Ok(OrderItem {
product_id: self.product_id.ok_or("product_id is required")?,
product_name: self.product_name.ok_or("product_name is required")?,
product_category: self.product_category.ok_or("product_category is required")?,
quantity,
unit_price_base: unit_price,
total_price_base: total_price,
specifications: self.specifications,
provider_id: self.provider_id.ok_or("provider_id is required")?,
provider_name: self.provider_name.ok_or("provider_name is required")?,
})
}
}
impl OrderItem {
pub fn builder() -> OrderItemBuilder {
OrderItemBuilder::new()
}
}
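A short usage sketch of the two builders above, inside a function returning Result<(), String>; all ids, names, and prices are made-up sample values:
let item = OrderItem::builder()
    .product_id("prod-123")
    .product_name("Compute Slice S")
    .product_category("compute_slices")
    .quantity(2)
    .unit_price_base(Decimal::from(10))
    .provider_id("farmer-1")
    .provider_name("Example Farm")
    .build()?;

let order = Order::builder()
    .user_id("user-42")
    .add_item(item)
    .base_currency("USD")
    .currency_used("USD")
    .build()?;

assert_eq!(order.get_item_count(), 2);
assert_eq!(order.subtotal_base, Decimal::from(20));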


@@ -0,0 +1,77 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaymentDetails {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub payment_method: PaymentMethod,
pub transaction_id: Option<String>,
pub payment_status: PaymentStatus,
pub payment_timestamp: Option<DateTime<Utc>>,
pub failure_reason: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PaymentMethod {
CreditCard {
last_four: String,
card_type: String,
},
BankTransfer {
bank_name: String,
account_last_four: String,
},
Cryptocurrency {
currency: String,
wallet_address: String,
},
Token {
token_type: String,
wallet_address: String,
},
Mock {
method_name: String,
},
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PaymentStatus {
Pending,
Processing,
Completed,
Failed,
Cancelled,
Refunded,
}
impl PaymentDetails {
pub fn new(payment_method: PaymentMethod) -> Self {
Self {
base_data: BaseModelData::new(),
payment_method,
transaction_id: None,
payment_status: PaymentStatus::Pending,
payment_timestamp: None,
failure_reason: None,
}
}
pub fn mark_completed(&mut self, transaction_id: String) {
self.transaction_id = Some(transaction_id);
self.payment_status = PaymentStatus::Completed;
self.payment_timestamp = Some(Utc::now());
}
pub fn mark_failed(&mut self, reason: String) {
self.payment_status = PaymentStatus::Failed;
self.failure_reason = Some(reason);
self.payment_timestamp = Some(Utc::now());
}
}
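A minimal lifecycle sketch for the type above; the card details and transaction id are made-up sample values:
let mut details = PaymentDetails::new(PaymentMethod::CreditCard {
    last_four: "4242".to_string(),
    card_type: "visa".to_string(),
});
details.mark_completed("txn_0001".to_string());
assert!(matches!(details.payment_status, PaymentStatus::Completed));
assert_eq!(details.transaction_id.as_deref(), Some("txn_0001"));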


@@ -0,0 +1,105 @@
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LiquidityPool {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub name: String,
pub token_a: String,
pub token_b: String,
pub reserve_a: Decimal,
pub reserve_b: Decimal,
pub exchange_rate: Decimal,
pub liquidity: Decimal,
pub volume_24h: Decimal,
pub fee_percentage: Decimal,
pub status: PoolStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PoolStatus {
Active,
Paused,
Maintenance,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExchangeRequest {
pub pool_id: String,
pub from_token: String,
pub to_token: String,
pub amount: Decimal,
pub min_receive: Option<Decimal>,
pub slippage_tolerance: Option<Decimal>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExchangeResponse {
pub success: bool,
pub message: String,
pub transaction_id: Option<String>,
pub from_amount: Option<Decimal>,
pub to_amount: Option<Decimal>,
pub exchange_rate: Option<Decimal>,
pub fee: Option<Decimal>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StakeRequest {
pub amount: Decimal,
pub duration_months: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StakePosition {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub user_id: String,
pub amount: Decimal,
pub start_date: DateTime<Utc>,
pub end_date: DateTime<Utc>,
pub discount_percentage: Decimal,
pub reputation_bonus: i32,
pub status: StakeStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StakeStatus {
Active,
Completed,
Withdrawn,
}
/// Pool analytics data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolAnalytics {
pub price_history: Vec<PricePoint>,
pub volume_history: Vec<VolumePoint>,
pub liquidity_distribution: HashMap<String, Decimal>,
pub staking_distribution: HashMap<String, i32>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PricePoint {
pub timestamp: DateTime<Utc>,
pub price: Decimal,
pub volume: Decimal,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VolumePoint {
pub date: String,
pub volume: Decimal,
}


@@ -0,0 +1,660 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use heromodels_derive::model;
use rhai::CustomType;
/// Generic product structure that can represent any marketplace item
#[model]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, CustomType)]
pub struct Product {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
#[index]
pub name: String,
pub category: ProductCategory,
pub description: String,
pub price: Price,
pub attributes: HashMap<String, ProductAttribute>, // Generic attributes
pub provider_id: String,
pub provider_name: String,
pub availability: ProductAvailability,
pub metadata: ProductMetadata, // Extensible metadata
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Price {
pub base_amount: Decimal,
pub currency: u32,
}
/// Configurable product categories
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ProductCategory {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub name: String,
pub display_name: String,
pub description: String,
pub attribute_schema: Vec<AttributeDefinition>, // Defines allowed attributes
pub parent_category: Option<String>,
pub is_active: bool,
}
/// Generic attribute system for any product type
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ProductAttribute {
pub key: String,
pub value: serde_json::Value,
pub attribute_type: AttributeType,
pub is_searchable: bool,
pub is_filterable: bool,
pub display_order: Option<u32>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum AttributeType {
Text,
Number,
SliceConfiguration,
Boolean,
Select(Vec<String>), // Predefined options
MultiSelect(Vec<String>),
Range { min: f64, max: f64 },
Custom(String), // For marketplace-specific types
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct AttributeDefinition {
pub key: String,
pub name: String,
pub attribute_type: AttributeType,
pub is_required: bool,
pub is_searchable: bool,
pub is_filterable: bool,
pub validation_rules: Vec<ValidationRule>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ValidationRule {
MinLength(usize),
MaxLength(usize),
MinValue(f64),
MaxValue(f64),
Pattern(String),
Custom(String),
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ProductAvailability {
Available,
Limited,
Unavailable,
PreOrder,
Custom(String), // For marketplace-specific availability states
}
impl Default for ProductAvailability {
fn default() -> Self {
ProductAvailability::Available
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ProductVisibility {
Public,
Private,
Draft,
Archived,
}
impl Default for ProductVisibility {
fn default() -> Self {
ProductVisibility::Public
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct ProductMetadata {
pub tags: Vec<String>,
pub location: Option<String>,
pub rating: Option<f32>,
pub review_count: u32,
pub featured: bool,
pub last_updated: chrono::DateTime<chrono::Utc>,
pub visibility: ProductVisibility,
pub seo_keywords: Vec<String>,
pub custom_fields: HashMap<String, serde_json::Value>,
}
/// Support for different pricing models
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PricingModel {
OneTime, // Single purchase
Recurring { interval: String }, // Subscription
UsageBased { unit: String }, // Pay per use
Tiered(Vec<PriceTier>), // Volume discounts
Custom(String), // Marketplace-specific
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PriceTier {
pub min_quantity: u32,
pub max_quantity: Option<u32>,
pub price_per_unit: Decimal,
pub discount_percentage: Option<f32>,
}
impl Product {
pub fn new(
name: String,
category: ProductCategory,
description: String,
price: Price,
provider_id: String,
provider_name: String,
) -> Self {
Self {
base_data: BaseModelData::new(),
name,
category,
description,
price,
attributes: HashMap::default(),
provider_id,
provider_name,
availability: ProductAvailability::Available,
metadata: ProductMetadata {
tags: Vec::default(),
location: None,
rating: None,
review_count: 0,
featured: false,
last_updated: chrono::Utc::now(),
visibility: ProductVisibility::Public,
seo_keywords: Vec::new(),
custom_fields: HashMap::default(),
},
}
}
pub fn add_attribute(&mut self, key: String, value: serde_json::Value, attribute_type: AttributeType) {
let attribute = ProductAttribute {
key: key.clone(),
value,
attribute_type,
is_searchable: true,
is_filterable: true,
display_order: None,
};
self.attributes.insert(key, attribute);
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn set_featured(&mut self, featured: bool) {
self.metadata.featured = featured;
self.base_data.modified_at = Utc::now().timestamp();
}
pub fn add_tag(&mut self, tag: String) {
if !self.metadata.tags.contains(&tag) {
self.metadata.tags.push(tag);
self.base_data.modified_at = Utc::now().timestamp();
}
}
pub fn set_rating(&mut self, rating: f32, review_count: u32) {
self.metadata.rating = Some(rating);
self.metadata.review_count = review_count;
self.base_data.modified_at = Utc::now().timestamp();
}
}
impl ProductCategory {
pub fn new(name: String, display_name: String, description: String) -> Self {
Self {
base_data: BaseModelData::new(),
name,
display_name,
description,
attribute_schema: Vec::default(),
parent_category: None,
is_active: true,
}
}
/// Add attribute definition to category schema
pub fn add_attribute_definition(&mut self, definition: AttributeDefinition) {
self.attribute_schema.push(definition);
}
}
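A short usage sketch of the constructors and attribute helpers above; the category, provider id, and price values are illustrative:
let category = ProductCategory::new(
    "Compute Slices".to_string(),
    "Compute Slices".to_string(),
    "Virtual compute resources".to_string(),
);
let mut product = Product::new(
    "Small Slice".to_string(),
    category,
    "2 vCPU / 4 GB RAM / 50 GB storage".to_string(),
    Price { base_amount: Decimal::from(5), currency: 1 }, // currency 1 assumed to be the USD currency ID
    "farmer-1".to_string(),
    "Example Farm".to_string(),
);
product.add_attribute(
    "cpu_cores".to_string(),
    serde_json::Value::Number(serde_json::Number::from(2)),
    AttributeType::Number,
);
product.add_tag("compute".to_string());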
impl Product {
/// Create a slice product from farmer configuration
pub fn create_slice_product(
farmer_id: String,
farmer_name: String,
slice_name: String,
slice_config: SliceConfiguration,
price_per_hour: Decimal,
) -> Self {
let category = ProductCategory {
base_data: BaseModelData::new(),
// id: "compute_slices".to_string() - moved to base_data,
name: "Compute Slices".to_string(),
display_name: "Compute Slices".to_string(),
description: "Virtual compute resources".to_string(),
attribute_schema: Vec::new(),
parent_category: None,
is_active: true,
};
let price = Price {
base_amount: price_per_hour,
currency: 1, // USD currency ID
};
let mut product = Self::new(
slice_name,
category,
format!("Compute slice with {} vCPU, {}GB RAM, {}GB storage",
slice_config.cpu_cores, slice_config.memory_gb, slice_config.storage_gb),
price,
farmer_id,
farmer_name,
);
// Add slice-specific attributes
product.add_attribute(
"cpu_cores".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.cpu_cores)),
AttributeType::Number,
);
product.add_attribute(
"memory_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.memory_gb)),
AttributeType::Number,
);
product.add_attribute(
"storage_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.storage_gb)),
AttributeType::Number,
);
product.add_attribute(
"bandwidth_mbps".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.bandwidth_mbps)),
AttributeType::Number,
);
product.add_attribute(
"min_uptime_sla".to_string(),
serde_json::Value::Number(serde_json::Number::from_f64(slice_config.min_uptime_sla as f64).unwrap()),
AttributeType::Number,
);
product.add_attribute(
"public_ips".to_string(),
serde_json::Value::Number(serde_json::Number::from(slice_config.public_ips)),
AttributeType::Number,
);
if let Some(ref node_id) = slice_config.node_id {
product.add_attribute(
"node_id".to_string(),
serde_json::Value::String(node_id.clone()),
AttributeType::Text,
);
}
product.add_attribute(
"slice_type".to_string(),
serde_json::Value::String(format!("{:?}", slice_config.slice_type)),
AttributeType::Text,
);
// Add slice configuration as a complex attribute
product.add_attribute(
"slice_configuration".to_string(),
serde_json::to_value(&slice_config).unwrap(),
AttributeType::SliceConfiguration,
);
// Add relevant tags
product.add_tag("compute".to_string());
product.add_tag("slice".to_string());
product.add_tag(format!("{:?}", slice_config.slice_type).to_lowercase());
product
}
/// Check if this product is a slice
pub fn is_slice(&self) -> bool {
self.category.name == "Compute Slices" ||
self.attributes.contains_key("slice_configuration")
}
/// Get slice configuration from product attributes
pub fn get_slice_configuration(&self) -> Option<SliceConfiguration> {
self.attributes.get("slice_configuration")
.and_then(|attr| serde_json::from_value(attr.value.clone()).ok())
}
/// Update slice configuration
pub fn update_slice_configuration(&mut self, config: SliceConfiguration) {
if self.is_slice() {
self.add_attribute(
"slice_configuration".to_string(),
serde_json::to_value(&config).unwrap(),
AttributeType::SliceConfiguration,
);
// Update individual attributes for searchability
self.add_attribute(
"cpu_cores".to_string(),
serde_json::Value::Number(serde_json::Number::from(config.cpu_cores)),
AttributeType::Number,
);
self.add_attribute(
"memory_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(config.memory_gb)),
AttributeType::Number,
);
self.add_attribute(
"storage_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(config.storage_gb)),
AttributeType::Number,
);
}
}
/// Check if slice fits within node capacity
pub fn slice_fits_in_node(&self, node_capacity: &crate::models::user::NodeCapacity) -> bool {
if let Some(config) = self.get_slice_configuration() {
config.cpu_cores <= node_capacity.cpu_cores &&
config.memory_gb <= node_capacity.memory_gb &&
config.storage_gb <= node_capacity.storage_gb &&
config.bandwidth_mbps <= node_capacity.bandwidth_mbps
} else {
false
}
}
/// Create a full node product from a FarmNode
pub fn create_full_node_product(
node: &crate::models::user::FarmNode,
farmer_email: &str,
farmer_name: &str,
) -> Self {
let category = ProductCategory {
base_data: BaseModelData::new(),
// id: "3nodes".to_string() - moved to base_data,
name: "3Nodes".to_string(),
display_name: "3Nodes".to_string(),
description: "Full node rentals".to_string(),
attribute_schema: Vec::new(),
parent_category: None,
is_active: true,
};
let price = Price {
base_amount: node.rental_options
.as_ref()
.and_then(|opts| opts.full_node_pricing.as_ref())
.map(|pricing| pricing.monthly)
.unwrap_or_else(|| Decimal::from(200)), // Default price
currency: 1, // USD currency ID
};
let mut product = Product {
base_data: BaseModelData::new(),
name: format!("Full Node: {}", node.name),
category,
description: format!(
"Exclusive access to {} with {} CPU cores, {}GB RAM, {}GB storage in {}",
node.name, node.capacity.cpu_cores, node.capacity.memory_gb,
node.capacity.storage_gb, node.location
),
price,
attributes: HashMap::new(),
provider_id: farmer_email.to_string(),
provider_name: farmer_name.to_string(),
availability: match node.availability_status {
crate::models::user::NodeAvailabilityStatus::Available => ProductAvailability::Available,
crate::models::user::NodeAvailabilityStatus::PartiallyRented => ProductAvailability::Limited,
_ => ProductAvailability::Unavailable,
},
metadata: ProductMetadata {
tags: vec!["full-node".to_string(), "exclusive".to_string(), node.region.clone()],
location: Some(node.location.clone()),
rating: None,
review_count: 0,
featured: false,
last_updated: chrono::Utc::now(),
visibility: ProductVisibility::Public,
seo_keywords: Vec::new(),
custom_fields: HashMap::new(),
},
};
// Add node-specific attributes
product.add_attribute(
"node_id".to_string(),
serde_json::Value::String(node.id.clone()),
AttributeType::Text,
);
product.add_attribute(
"rental_type".to_string(),
serde_json::Value::String("full_node".to_string()),
AttributeType::Text,
);
product.add_attribute(
"cpu_cores".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.cpu_cores)),
AttributeType::Number,
);
product.add_attribute(
"memory_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.memory_gb)),
AttributeType::Number,
);
product.add_attribute(
"storage_gb".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.storage_gb)),
AttributeType::Number,
);
product.add_attribute(
"bandwidth_mbps".to_string(),
serde_json::Value::Number(serde_json::Number::from(node.capacity.bandwidth_mbps)),
AttributeType::Number,
);
product.add_attribute(
"location".to_string(),
serde_json::Value::String(node.location.clone()),
AttributeType::Text,
);
product.add_attribute(
"uptime_percentage".to_string(),
serde_json::Value::Number(serde_json::Number::from_f64(node.uptime_percentage as f64).unwrap_or_else(|| serde_json::Number::from(0))),
AttributeType::Number,
);
product.add_attribute(
"health_score".to_string(),
serde_json::Value::Number(serde_json::Number::from_f64(node.health_score as f64).unwrap_or_else(|| serde_json::Number::from(0))),
AttributeType::Number,
);
product
}
/// Check if this product represents a full node
pub fn is_full_node(&self) -> bool {
self.attributes.get("rental_type")
.and_then(|attr| attr.value.as_str())
.map(|s| s == "full_node")
.unwrap_or(false)
}
/// Get the node ID if this is a node product
pub fn get_node_id(&self) -> Option<String> {
self.attributes.get("node_id")
.and_then(|attr| attr.value.as_str())
.map(|s| s.to_string())
}
}
impl ProductCategory {
pub fn set_parent_category(&mut self, parent_id: String) {
self.parent_category = Some(parent_id);
}
}
impl AttributeDefinition {
pub fn new(
key: String,
name: String,
attribute_type: AttributeType,
is_required: bool,
) -> Self {
Self {
key,
name,
attribute_type,
is_required,
is_searchable: true,
is_filterable: true,
validation_rules: Vec::default(),
}
}
pub fn add_validation_rule(&mut self, rule: ValidationRule) {
self.validation_rules.push(rule);
}
}
#[derive(Default)]
pub struct ProductBuilder {
// The product id lives in base_data and is assigned by the storage layer.
name: Option<String>,
category: Option<ProductCategory>,
description: Option<String>,
price: Option<Price>,
attributes: HashMap<String, ProductAttribute>,
provider_id: Option<String>,
provider_name: Option<String>,
availability: Option<ProductAvailability>,
metadata: Option<ProductMetadata>,
}
impl ProductBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn name(mut self, name: impl Into<String>) -> Self {
self.name = Some(name.into());
self
}
pub fn category(mut self, category: ProductCategory) -> Self {
self.category = Some(category);
self
}
pub fn description(mut self, description: impl Into<String>) -> Self {
self.description = Some(description.into());
self
}
pub fn price(mut self, price: Price) -> Self {
self.price = Some(price);
self
}
pub fn add_attribute(mut self, key: impl Into<String>, attribute: ProductAttribute) -> Self {
self.attributes.insert(key.into(), attribute);
self
}
pub fn provider_id(mut self, provider_id: impl Into<String>) -> Self {
self.provider_id = Some(provider_id.into());
self
}
pub fn provider_name(mut self, provider_name: impl Into<String>) -> Self {
self.provider_name = Some(provider_name.into());
self
}
pub fn availability(mut self, availability: ProductAvailability) -> Self {
self.availability = Some(availability);
self
}
pub fn metadata(mut self, metadata: ProductMetadata) -> Self {
self.metadata = Some(metadata);
self
}
pub fn build(self) -> Result<Product, String> {
Ok(Product {
base_data: BaseModelData::new(),
name: self.name.ok_or("name is required")?,
category: self.category.ok_or("category is required")?,
description: self.description.unwrap_or_default(),
price: self.price.ok_or("price is required")?,
attributes: self.attributes,
provider_id: self.provider_id.ok_or("provider_id is required")?,
provider_name: self.provider_name.ok_or("provider_name is required")?,
availability: self.availability.unwrap_or_default(),
metadata: self.metadata.unwrap_or_default(),
})
}
}
impl Product {
pub fn builder() -> ProductBuilder {
ProductBuilder::new()
}
}


@@ -0,0 +1,297 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize, Deserializer};
use rust_decimal::Decimal;
use std::str::FromStr;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Service Provider-specific data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceProviderData {
pub active_services: i32,
pub total_clients: i32,
pub monthly_revenue_usd: i32,
pub total_revenue_usd: i32,
pub service_rating: f32,
pub services: Vec<Service>,
pub client_requests: Vec<ServiceRequest>,
pub revenue_history: Vec<RevenueRecord>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Service {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub name: String,
pub category: String,
pub description: String,
pub price_per_hour_usd: i32,
pub status: String,
pub clients: i32,
pub rating: f32,
pub total_hours: i32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceRequest {
/// Base model data (includes id, created_at, updated_at)
pub base_data: BaseModelData,
pub client_name: String,
pub service_name: String,
pub status: String,
pub requested_date: String,
pub estimated_hours: i32,
pub budget: i32,
pub priority: String,
#[serde(default)]
pub progress: Option<i32>,
#[serde(default)]
pub completed_date: Option<String>,
#[serde(default)]
pub client_email: Option<String>,
#[serde(default)]
pub client_phone: Option<String>,
#[serde(default)]
pub description: Option<String>,
#[serde(default)]
pub created_date: Option<String>,
}
/// Service booking record for customers who purchase services
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceBooking {
/// Base model data (includes id, created_at, updated_at); the id matches the ServiceRequest for cross-reference
pub base_data: BaseModelData,
pub service_id: String, // Reference to the original service
pub service_name: String,
pub provider_email: String, // Who provides the service
pub customer_email: String, // Who booked the service
pub budget: i32,
pub estimated_hours: i32,
pub status: String, // "Pending", "In Progress", "Completed"
pub requested_date: String,
pub priority: String,
pub description: Option<String>,
pub booking_date: String, // When customer booked
pub client_phone: Option<String>,
pub progress: Option<i32>,
pub completed_date: Option<String>,
}
/// Customer Service-specific data (for users who book services)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CustomerServiceData {
pub active_bookings: i32,
pub completed_bookings: i32,
pub total_spent: i32,
pub monthly_spending: i32,
pub average_rating_given: f32,
pub service_bookings: Vec<ServiceBooking>,
pub favorite_providers: Vec<String>,
pub spending_history: Vec<SpendingRecord>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpendingRecord {
pub date: String,
pub amount: i32,
pub service_name: String,
pub provider_name: String,
}
#[derive(Default)]
pub struct ServiceBookingBuilder {
// The booking id lives in base_data and is assigned by the storage layer.
service_id: Option<String>,
service_name: Option<String>,
provider_email: Option<String>,
customer_email: Option<String>,
budget: Option<i32>,
estimated_hours: Option<i32>,
status: Option<String>,
requested_date: Option<String>,
priority: Option<String>,
description: Option<String>,
booking_date: Option<String>,
}
impl ServiceBookingBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn service_id(mut self, service_id: &str) -> Self {
self.service_id = Some(service_id.to_string());
self
}
pub fn service_name(mut self, service_name: &str) -> Self {
self.service_name = Some(service_name.to_string());
self
}
pub fn provider_email(mut self, provider_email: &str) -> Self {
self.provider_email = Some(provider_email.to_string());
self
}
pub fn customer_email(mut self, customer_email: &str) -> Self {
self.customer_email = Some(customer_email.to_string());
self
}
pub fn budget(mut self, budget: i32) -> Self {
self.budget = Some(budget);
self
}
pub fn estimated_hours(mut self, hours: i32) -> Self {
self.estimated_hours = Some(hours);
self
}
pub fn status(mut self, status: &str) -> Self {
self.status = Some(status.to_string());
self
}
pub fn requested_date(mut self, date: &str) -> Self {
self.requested_date = Some(date.to_string());
self
}
pub fn priority(mut self, priority: &str) -> Self {
self.priority = Some(priority.to_string());
self
}
pub fn description(mut self, description: Option<String>) -> Self {
self.description = description;
self
}
pub fn booking_date(mut self, date: &str) -> Self {
self.booking_date = Some(date.to_string());
self
}
pub fn build(self) -> Result<ServiceBooking, String> {
Ok(ServiceBooking {
base_data: BaseModelData::new(),
service_id: self.service_id.ok_or("Service ID is required")?,
service_name: self.service_name.ok_or("Service name is required")?,
provider_email: self.provider_email.ok_or("Provider email is required")?,
customer_email: self.customer_email.ok_or("Customer email is required")?,
budget: self.budget.unwrap_or(0),
estimated_hours: self.estimated_hours.unwrap_or(0),
status: self.status.unwrap_or_else(|| "Pending".to_string()),
requested_date: self.requested_date.unwrap_or_else(|| chrono::Utc::now().format("%Y-%m-%d").to_string()),
priority: self.priority.unwrap_or_else(|| "Medium".to_string()),
description: self.description,
booking_date: self.booking_date.unwrap_or_else(|| chrono::Utc::now().format("%Y-%m-%d").to_string()),
client_phone: None,
progress: None,
completed_date: None,
})
}
}
impl ServiceBooking {
pub fn builder() -> ServiceBookingBuilder {
ServiceBookingBuilder::new()
}
}
// =============================================================================
// CUSTOMER SERVICE DATA BUILDER
// =============================================================================
#[derive(Default)]
pub struct CustomerServiceDataBuilder {
active_bookings: Option<i32>,
completed_bookings: Option<i32>,
total_spent: Option<i32>,
monthly_spending: Option<i32>,
average_rating_given: Option<f32>,
service_bookings: Option<Vec<crate::models::user::ServiceBooking>>,
favorite_providers: Option<Vec<String>>,
spending_history: Option<Vec<crate::models::user::SpendingRecord>>,
}
impl CustomerServiceDataBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn active_bookings(mut self, count: i32) -> Self {
self.active_bookings = Some(count);
self
}
pub fn completed_bookings(mut self, count: i32) -> Self {
self.completed_bookings = Some(count);
self
}
pub fn total_spent(mut self, amount: i32) -> Self {
self.total_spent = Some(amount);
self
}
pub fn monthly_spending(mut self, amount: i32) -> Self {
self.monthly_spending = Some(amount);
self
}
pub fn average_rating_given(mut self, rating: f32) -> Self {
self.average_rating_given = Some(rating);
self
}
pub fn service_bookings(mut self, bookings: Vec<crate::models::user::ServiceBooking>) -> Self {
self.service_bookings = Some(bookings);
self
}
pub fn favorite_providers(mut self, providers: Vec<String>) -> Self {
self.favorite_providers = Some(providers);
self
}
pub fn spending_history(mut self, history: Vec<crate::models::user::SpendingRecord>) -> Self {
self.spending_history = Some(history);
self
}
pub fn build(self) -> Result<crate::models::user::CustomerServiceData, String> {
Ok(crate::models::user::CustomerServiceData {
active_bookings: self.active_bookings.unwrap_or(0),
completed_bookings: self.completed_bookings.unwrap_or(0),
total_spent: self.total_spent.unwrap_or(0),
monthly_spending: self.monthly_spending.unwrap_or(0),
average_rating_given: self.average_rating_given.unwrap_or(0.0),
service_bookings: self.service_bookings.unwrap_or_default(),
favorite_providers: self.favorite_providers.unwrap_or_default(),
spending_history: self.spending_history.unwrap_or_default(),
})
}
}
impl crate::models::user::CustomerServiceData {
pub fn builder() -> CustomerServiceDataBuilder {
CustomerServiceDataBuilder::new()
}
}

View File

@@ -0,0 +1,200 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use std::collections::HashMap;
use heromodels_core::BaseModelData;
use crate::models::tfmarketplace::user::ResourceUtilization;
/// Slice configuration data structure for product attributes
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SliceConfiguration {
pub cpu_cores: i32,
pub memory_gb: i32,
pub storage_gb: i32,
pub bandwidth_mbps: i32,
pub min_uptime_sla: f32,
pub public_ips: i32,
pub node_id: Option<String>,
pub slice_type: SliceType,
#[serde(default)]
pub pricing: SlicePricing,
}
/// Enhanced pricing structure for slices with multiple time periods
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SlicePricing {
pub hourly: Decimal,
pub daily: Decimal,
pub monthly: Decimal,
pub yearly: Decimal,
}
impl Default for SlicePricing {
fn default() -> Self {
Self {
hourly: Decimal::ZERO,
daily: Decimal::ZERO,
monthly: Decimal::ZERO,
yearly: Decimal::ZERO,
}
}
}
impl SlicePricing {
/// Create pricing from hourly rate with automatic calculation
pub fn from_hourly(hourly_rate: Decimal, daily_discount: f32, monthly_discount: f32, yearly_discount: f32) -> Self {
let base_daily = hourly_rate * Decimal::from(24);
let base_monthly = hourly_rate * Decimal::from(24 * 30);
let base_yearly = hourly_rate * Decimal::from(24 * 365);
Self {
hourly: hourly_rate,
daily: base_daily * Decimal::try_from(1.0 - daily_discount / 100.0).unwrap_or(Decimal::ONE),
monthly: base_monthly * Decimal::try_from(1.0 - monthly_discount / 100.0).unwrap_or(Decimal::ONE),
yearly: base_yearly * Decimal::try_from(1.0 - yearly_discount / 100.0).unwrap_or(Decimal::ONE),
}
}
/// Calculate savings compared to hourly rate
pub fn calculate_savings(&self) -> (Decimal, Decimal, Decimal) {
let hourly_equivalent_daily = self.hourly * Decimal::from(24);
let hourly_equivalent_monthly = self.hourly * Decimal::from(24 * 30);
let hourly_equivalent_yearly = self.hourly * Decimal::from(24 * 365);
let daily_savings = hourly_equivalent_daily - self.daily;
let monthly_savings = hourly_equivalent_monthly - self.monthly;
let yearly_savings = hourly_equivalent_yearly - self.yearly;
(daily_savings, monthly_savings, yearly_savings)
}
}
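A worked example of the discount math above; the hourly rate and the `rust_decimal_macros::dec!` helper are assumptions for illustration:
use rust_decimal_macros::dec;

// 0.10 per hour with the 5% / 15% / 25% period discounts used by SliceProductBuilder below.
let pricing = SlicePricing::from_hourly(dec!(0.10), 5.0, 15.0, 25.0);
// hourly  = 0.10
// daily   = 0.10 * 24   * 0.95 = 2.28
// monthly = 0.10 * 720  * 0.85 = 61.20
// yearly  = 0.10 * 8760 * 0.75 = 657.00
let (daily_savings, monthly_savings, yearly_savings) = pricing.calculate_savings();
// Savings versus paying the hourly rate: 0.12, 10.80, and 219.00 respectively.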
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SliceType {
Basic,
Standard,
Premium,
Custom,
}
#[derive(Default)]
pub struct SliceProductBuilder {
farmer_id: Option<String>,
farmer_name: Option<String>,
slice_name: Option<String>,
cpu_cores: Option<i32>,
memory_gb: Option<i32>,
storage_gb: Option<i32>,
bandwidth_mbps: Option<i32>,
min_uptime_sla: Option<f32>,
public_ips: Option<i32>,
node_id: Option<String>,
slice_type: Option<crate::models::tfmarketplace::product::SliceType>,
price_per_hour: Option<rust_decimal::Decimal>,
}
impl SliceProductBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn farmer_id(mut self, farmer_id: &str) -> Self {
self.farmer_id = Some(farmer_id.into());
self
}
pub fn farmer_name(mut self, farmer_name: impl Into<String>) -> Self {
self.farmer_name = Some(farmer_name.into());
self
}
pub fn slice_name(mut self, slice_name: impl Into<String>) -> Self {
self.slice_name = Some(slice_name.into());
self
}
pub fn cpu_cores(mut self, cpu_cores: i32) -> Self {
self.cpu_cores = Some(cpu_cores);
self
}
pub fn memory_gb(mut self, memory_gb: i32) -> Self {
self.memory_gb = Some(memory_gb);
self
}
pub fn storage_gb(mut self, storage_gb: i32) -> Self {
self.storage_gb = Some(storage_gb);
self
}
pub fn bandwidth_mbps(mut self, bandwidth_mbps: i32) -> Self {
self.bandwidth_mbps = Some(bandwidth_mbps);
self
}
pub fn min_uptime_sla(mut self, min_uptime_sla: f32) -> Self {
self.min_uptime_sla = Some(min_uptime_sla);
self
}
pub fn public_ips(mut self, public_ips: i32) -> Self {
self.public_ips = Some(public_ips);
self
}
pub fn node_id(mut self, node_id: &str) -> Self {
self.node_id = Some(node_id.into());
self
}
pub fn slice_type(mut self, slice_type: crate::models::tfmarketplace::product::SliceType) -> Self {
self.slice_type = Some(slice_type);
self
}
pub fn price_per_hour(mut self, price_per_hour: rust_decimal::Decimal) -> Self {
self.price_per_hour = Some(price_per_hour);
self
}
pub fn build(self) -> Result<crate::models::tfmarketplace::product::Product, String> {
let farmer_id = self.farmer_id.ok_or("farmer_id is required")?;
let farmer_name = self.farmer_name.ok_or("farmer_name is required")?;
let slice_name = self.slice_name.ok_or("slice_name is required")?;
let cpu_cores = self.cpu_cores.ok_or("cpu_cores is required")?;
let memory_gb = self.memory_gb.ok_or("memory_gb is required")?;
let storage_gb = self.storage_gb.ok_or("storage_gb is required")?;
let bandwidth_mbps = self.bandwidth_mbps.ok_or("bandwidth_mbps is required")?;
let price_per_hour = self.price_per_hour.ok_or("price_per_hour is required")?;
let slice_config = crate::models::tfmarketplace::product::SliceConfiguration {
cpu_cores,
memory_gb,
storage_gb,
bandwidth_mbps,
min_uptime_sla: self.min_uptime_sla.unwrap_or(99.0),
public_ips: self.public_ips.unwrap_or(0),
node_id: self.node_id,
slice_type: self.slice_type.unwrap_or(crate::models::tfmarketplace::product::SliceType::Basic),
pricing: crate::models::tfmarketplace::product::SlicePricing::from_hourly(
price_per_hour,
5.0, // 5% daily discount
15.0, // 15% monthly discount
25.0 // 25% yearly discount
),
};
Ok(crate::models::tfmarketplace::product::Product::create_slice_product(
farmer_id,
farmer_name,
slice_name,
slice_config,
price_per_hour,
))
}
}
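A usage sketch of the builder above, inside a function returning a Result; all values are illustrative:
let slice_product = SliceProductBuilder::new()
    .farmer_id("farmer-1")
    .farmer_name("Example Farm")
    .slice_name("Standard Slice")
    .cpu_cores(4)
    .memory_gb(8)
    .storage_gb(100)
    .bandwidth_mbps(1000)
    .price_per_hour(rust_decimal::Decimal::new(25, 2)) // 0.25 per hour
    .build()?;
assert!(slice_product.is_slice());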

File diff suppressed because it is too large


@@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Config matches examples/tests
PGHOST=${PGHOST:-localhost}
PGPORT=${PGPORT:-5432}
PGUSER=${PGUSER:-postgres}
PGPASSWORD=${PGPASSWORD:-test123}
export PGPASSWORD
echo "[test.sh] Checking Postgres at ${PGHOST}:${PGPORT} (user=${PGUSER})..."
# Require pg_isready
if ! command -v pg_isready >/dev/null 2>&1; then
echo "[test.sh] ERROR: pg_isready not found. Install PostgreSQL client tools (e.g., brew install libpq && brew link --force libpq)." >&2
exit 1
fi
# Wait for Postgres to be ready (30s timeout)
ATTEMPTS=30
until pg_isready -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" >/dev/null 2>&1; do
((ATTEMPTS--)) || {
echo "[test.sh] ERROR: Postgres not ready after 30s. Ensure it's running with user=$PGUSER password=$PGPASSWORD host=$PGHOST port=$PGPORT." >&2
exit 1
}
sleep 1
echo "[test.sh] Waiting for Postgres..."
done
echo "[test.sh] Postgres is ready. Running tests..."
# Run fast OurDB test first (no Postgres dependency)
echo "[test.sh] Running OurDB test: grid4_ourdb"
cargo test -p heromodels --test grid4_ourdb
# Run Postgres-backed tests (marked ignored)
echo "[test.sh] Running Postgres test: heroledger_postgres (ignored)"
cargo test -p heromodels --test heroledger_postgres -- --ignored
echo "[test.sh] Running Postgres test: grid4_postgres (ignored)"
cargo test -p heromodels --test grid4_postgres -- --ignored
echo "[test.sh] Done."


@@ -1,117 +0,0 @@
use serde_json;
use heromodels::models::grid4::{
ComputeSlice, DeviceInfo, Node, NodeCapacity, PricingPolicy, Reservation, ReservationStatus,
SLAPolicy, StorageDevice, StorageSlice,
};
#[test]
fn build_and_serde_roundtrip_compute_storage_slices() {
let pricing = PricingPolicy::new()
.marketplace_year_discounts(vec![20, 30, 40])
.volume_discounts(vec![5, 10, 15])
.build();
let sla = SLAPolicy::new()
.sla_uptime(99)
.sla_bandwidth_mbit(1000)
.sla_penalty(150)
.build();
let cs = ComputeSlice::new()
.nodeid(42)
.slice_id(1)
.mem_gb(16.0)
.storage_gb(200.0)
.passmark(5000)
.vcores(8)
.cpu_oversubscription(2)
.storage_oversubscription(1)
.price_range(vec![0.5, 2.0])
.gpus(1)
.price_cc(1.25)
.pricing_policy(pricing.clone())
.sla_policy(sla.clone());
let ss = StorageSlice::new()
.nodeid(42)
.slice_id(2)
.price_cc(0.15)
.pricing_policy(pricing)
.sla_policy(sla);
// serde roundtrip compute slice
let s = serde_json::to_string(&cs).expect("serialize compute slice");
let cs2: ComputeSlice = serde_json::from_str(&s).expect("deserialize compute slice");
assert_eq!(cs, cs2);
// serde roundtrip storage slice
let s2 = serde_json::to_string(&ss).expect("serialize storage slice");
let ss2: StorageSlice = serde_json::from_str(&s2).expect("deserialize storage slice");
assert_eq!(ss, ss2);
}
#[test]
fn build_and_serde_roundtrip_node() {
let dev = DeviceInfo {
vendor: "AcmeVendor".into(),
storage: vec![StorageDevice { id: "sda".into(), size_gb: 512.0, description: "NVMe".into() }],
memory: vec![],
cpu: vec![],
gpu: vec![],
network: vec![],
};
let cap = NodeCapacity { storage_gb: 2048.0, mem_gb: 128.0, mem_gb_gpu: 24.0, passmark: 12000, vcores: 32 };
let cs = ComputeSlice::new().nodeid(1).slice_id(1).mem_gb(8.0).storage_gb(100.0).passmark(2500).vcores(4);
let ss = StorageSlice::new().nodeid(1).slice_id(2).price_cc(0.2);
let node = Node::new()
.nodegroupid(7)
.uptime(99)
.add_compute_slice(cs)
.add_storage_slice(ss)
.devices(dev)
.country("NL")
.capacity(cap)
.provisiontime(1710000000)
.pubkey("node_pubkey")
.signature_node("sig_node")
.signature_farmer("sig_farmer");
let s = serde_json::to_string(&node).expect("serialize node");
let node2: Node = serde_json::from_str(&s).expect("deserialize node");
assert_eq!(node.nodegroupid, node2.nodegroupid);
assert_eq!(node.uptime, node2.uptime);
assert_eq!(node.country, node2.country);
assert_eq!(node.pubkey, node2.pubkey);
assert_eq!(node.signature_node, node2.signature_node);
assert_eq!(node.signature_farmer, node2.signature_farmer);
assert_eq!(node.computeslices.len(), node2.computeslices.len());
assert_eq!(node.storageslices.len(), node2.storageslices.len());
}
#[test]
fn build_and_serde_roundtrip_reservation() {
let reservation = Reservation::new()
.customer_id(1234)
.add_compute_slice(11)
.add_storage_slice(22)
.status(ReservationStatus::Confirmed)
.obligation(true)
.start_date(1_710_000_000)
.end_date(1_720_000_000);
let s = serde_json::to_string(&reservation).expect("serialize reservation");
let reservation2: Reservation = serde_json::from_str(&s).expect("deserialize reservation");
assert_eq!(reservation.customer_id, reservation2.customer_id);
assert_eq!(reservation.status, reservation2.status);
assert_eq!(reservation.obligation, reservation2.obligation);
assert_eq!(reservation.start_date, reservation2.start_date);
assert_eq!(reservation.end_date, reservation2.end_date);
assert_eq!(reservation.compute_slices, reservation2.compute_slices);
assert_eq!(reservation.storage_slices, reservation2.storage_slices);
}


@@ -1,82 +0,0 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey};
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use heromodels_core::Model;
use std::sync::Arc;
fn create_test_db() -> Arc<OurDB> {
let ts = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos();
let path = format!("/tmp/grid4_node_test_{}", ts);
let _ = std::fs::remove_dir_all(&path);
Arc::new(OurDB::new(path, true).expect("create OurDB"))
}
#[test]
fn grid4_node_basic_roundtrip_and_indexes() {
let db = create_test_db();
let nodes = db.collection::<Node>().expect("open node collection");
// Clean any leftover
if let Ok(existing) = nodes.get_all() {
for n in existing {
let _ = nodes.delete_by_id(n.get_id());
}
}
// Build a node with some compute slices and device info
let cs = ComputeSlice::new()
.nodeid(1)
.slice_id(1)
.mem_gb(32.0)
.storage_gb(512.0)
.passmark(5000)
.vcores(16)
.gpus(1)
.price_cc(0.25);
let dev = DeviceInfo {
vendor: "ACME".into(),
..Default::default()
};
let n = Node::new()
.nodegroupid(42)
.uptime(99)
.add_compute_slice(cs)
.devices(dev)
.country("BE")
.pubkey("PUB_NODE_1")
.build();
let (id, stored) = nodes.set(&n).expect("store node");
assert!(id > 0);
assert_eq!(stored.country, "BE");
// get by id
let fetched = nodes.get_by_id(id).expect("get by id").expect("exists");
assert_eq!(fetched.pubkey, "PUB_NODE_1");
// query by top-level indexes
let by_country = nodes.get::<country, _>("BE").expect("query country");
assert_eq!(by_country.len(), 1);
assert_eq!(by_country[0].get_id(), id);
let by_group = nodes.get::<nodegroupid, _>(&42).expect("query group");
assert_eq!(by_group.len(), 1);
let by_pubkey = nodes.get::<pubkey, _>("PUB_NODE_1").expect("query pubkey");
assert_eq!(by_pubkey.len(), 1);
// update
let updated = fetched.clone().country("NL");
let (_, back) = nodes.set(&updated).expect("update node");
assert_eq!(back.country, "NL");
// delete
nodes.delete_by_id(id).expect("delete");
assert!(nodes.get_by_id(id).expect("get after delete").is_none());
}


@@ -1,125 +0,0 @@
use heromodels::db::postgres::{Config, Postgres};
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey};
use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node};
use heromodels_core::Model;
// Requires local Postgres (user=postgres password=test123 host=localhost port=5432)
// Run with: cargo test -p heromodels --test grid4_postgres -- --ignored
#[test]
#[ignore]
fn grid4_node_postgres_roundtrip_like_example() {
let db = Postgres::new(
Config::new()
.user(Some("postgres".into()))
.password(Some("test123".into()))
.host(Some("localhost".into()))
.port(Some(5432)),
)
.expect("can connect to Postgres");
let nodes = db.collection::<Node>().expect("open node collection");
// Clean existing
if let Ok(existing) = nodes.get_all() {
for n in existing {
let _ = nodes.delete_by_id(n.get_id());
}
}
// Build and store multiple nodes via builder and then persist via collection.set(), like examples
let cs1 = ComputeSlice::new()
.nodeid(10)
.slice_id(1)
.mem_gb(32.0)
.storage_gb(512.0)
.passmark(5000)
.vcores(16)
.gpus(1)
.price_cc(0.25);
let cs2 = ComputeSlice::new()
.nodeid(10)
.slice_id(2)
.mem_gb(64.0)
.storage_gb(2048.0)
.passmark(7000)
.vcores(24)
.gpus(2)
.price_cc(0.50);
let cs3 = ComputeSlice::new()
.nodeid(11)
.slice_id(1)
.mem_gb(16.0)
.storage_gb(256.0)
.passmark(3000)
.vcores(8)
.gpus(0)
.price_cc(0.10);
let dev = DeviceInfo { vendor: "ACME".into(), ..Default::default() };
let n1 = Node::new()
.nodegroupid(99)
.uptime(97)
.add_compute_slice(cs1)
.devices(dev.clone())
.country("BE")
.pubkey("PG_NODE_1")
.build();
let n2 = Node::new()
.nodegroupid(99)
.uptime(96)
.add_compute_slice(cs2)
.devices(dev.clone())
.country("NL")
.pubkey("PG_NODE_2")
.build();
let n3 = Node::new()
.nodegroupid(7)
.uptime(95)
.add_compute_slice(cs3)
.devices(dev)
.country("BE")
.pubkey("PG_NODE_3")
.build();
let (id1, s1) = nodes.set(&n1).expect("store n1");
let (id2, s2) = nodes.set(&n2).expect("store n2");
let (id3, s3) = nodes.set(&n3).expect("store n3");
assert!(id1 > 0 && id2 > 0 && id3 > 0);
// Query by top-level indexes similar to the example style
let be_nodes = nodes.get::<country, _>("BE").expect("by country");
assert_eq!(be_nodes.len(), 2);
let grp_99 = nodes.get::<nodegroupid, _>(&99).expect("by group");
assert_eq!(grp_99.len(), 2);
let by_key = nodes.get::<pubkey, _>("PG_NODE_2").expect("by pubkey");
assert_eq!(by_key.len(), 1);
assert_eq!(by_key[0].get_id(), id2);
// Update: change country of n1
let updated = s1.clone().country("DE");
let (_, back) = nodes.set(&updated).expect("update n1");
assert_eq!(back.country, "DE");
// Cardinality after update
let de_nodes = nodes.get::<country, _>("DE").expect("by country DE");
assert_eq!(de_nodes.len(), 1);
// Delete by id and by index
nodes.delete_by_id(id2).expect("delete n2 by id");
assert!(nodes.get_by_id(id2).unwrap().is_none());
nodes.delete::<pubkey, _>("PG_NODE_3").expect("delete n3 by pubkey");
assert!(nodes.get_by_id(id3).unwrap().is_none());
// Remaining should be updated n1 only; verify via targeted queries
let de_nodes = nodes.get::<country, _>("DE").expect("country DE after deletes");
assert_eq!(de_nodes.len(), 1);
assert_eq!(de_nodes[0].get_id(), id1);
let by_key = nodes.get::<pubkey, _>("PG_NODE_1").expect("by pubkey PG_NODE_1");
assert_eq!(by_key.len(), 1);
assert_eq!(by_key[0].get_id(), id1);
}


@@ -1,97 +0,0 @@
use heromodels::db::postgres::{Config, Postgres};
use heromodels::db::{Collection, Db};
use heromodels::models::heroledger::user::user_index::username;
use heromodels::models::heroledger::user::User;
use heromodels_core::Model;
// NOTE: Requires a local Postgres running with user=postgres password=test123 host=localhost port=5432
// Marked ignored by default. Run with: cargo test -p heromodels --test heroledger_postgres -- --ignored
#[test]
#[ignore]
fn heroledger_user_postgres_roundtrip() {
// Connect
let db = Postgres::new(
Config::new()
.user(Some("postgres".into()))
.password(Some("test123".into()))
.host(Some("localhost".into()))
.port(Some(5432)),
)
.expect("can connect to Postgres");
// Open collection (will create table and indexes for top-level fields)
let users = db.collection::<User>().expect("can open user collection");
// Clean slate
if let Ok(existing) = users.get_all() {
for u in existing {
let _ = users.delete_by_id(u.get_id());
}
}
// Unique suffix to avoid collisions with any pre-existing rows
let uniq = format!("{}", std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos());
let alice = format!("alice_{}", uniq);
let bob = format!("bob_{}", uniq);
let carol = format!("carol_{}", uniq);
// Build and store multiple users
let u1 = User::new(0)
.username(&alice)
.pubkey("PUBKEY_A")
.add_email("alice@example.com")
.build();
let u2 = User::new(0)
.username(&bob)
.pubkey("PUBKEY_B")
.add_email("bob@example.com")
.build();
let u3 = User::new(0)
.username(&carol)
.pubkey("PUBKEY_C")
.add_email("carol@example.com")
.build();
let (id1, db_u1) = users.set(&u1).expect("store u1");
let (id2, db_u2) = users.set(&u2).expect("store u2");
let (id3, db_u3) = users.set(&u3).expect("store u3");
assert!(id1 > 0 && id2 > 0 && id3 > 0);
// Fetch by id
assert_eq!(users.get_by_id(id1).unwrap().unwrap().username, alice);
assert_eq!(users.get_by_id(id2).unwrap().unwrap().username, bob);
assert_eq!(users.get_by_id(id3).unwrap().unwrap().username, carol);
// Fetch by index (top-level username)
let by_username = users.get::<username, _>(&alice).expect("by username");
assert_eq!(by_username.len(), 1);
assert_eq!(by_username[0].get_id(), id1);
// Update one
let updated = db_u1.clone().add_email("work@alice.example");
let (id1b, updated_back) = users.set(&updated).expect("update alice");
assert_eq!(id1b, id1);
assert!(updated_back.email.len() >= 2);
// Targeted queries to avoid legacy rows in the same table
// Verify three users exist via index queries
assert_eq!(users.get::<username, _>(&alice).unwrap().len(), 1);
assert_eq!(users.get::<username, _>(&bob).unwrap().len(), 1);
assert_eq!(users.get::<username, _>(&carol).unwrap().len(), 1);
// Delete by id
users.delete_by_id(id2).expect("delete bob by id");
assert!(users.get_by_id(id2).unwrap().is_none());
// Delete by index (username)
users.delete::<username, _>(&carol).expect("delete carol by username");
assert!(users.get_by_id(id3).unwrap().is_none());
// Remaining should be just alice; verify via index
let remain = users.get::<username, _>(&alice).expect("get alice after delete");
assert_eq!(remain.len(), 1);
assert_eq!(remain[0].get_id(), id1);
}

View File

@@ -1,5 +1,4 @@
use heromodels::db::Collection;
use heromodels::db::Db;
use heromodels::db::hero::OurDB;
use heromodels::models::biz::{BusinessType, Company, CompanyStatus, Payment, PaymentStatus};
use heromodels_core::Model;
@@ -198,18 +197,12 @@ fn test_payment_database_persistence() {
);
// Save payment
let (payment_id, saved_payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to save payment");
let (payment_id, saved_payment) = db.set(&payment).expect("Failed to save payment");
assert!(payment_id > 0);
assert_eq!(saved_payment.payment_intent_id, "pi_db_test");
// Retrieve payment
let retrieved_payment: Payment = db
.collection::<Payment>()
.expect("open payment collection")
.get_by_id(payment_id)
.expect("Failed to get payment")
.unwrap();
@@ -231,34 +224,20 @@ fn test_payment_status_transitions() {
1360.0,
);
let (payment_id, mut payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to save payment");
let (payment_id, mut payment) = db.set(&payment).expect("Failed to save payment");
// Test pending -> completed
payment = payment.complete_payment(Some("cus_transition_test".to_string()));
let (_, mut payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to update payment");
let (_, mut payment) = db.set(&payment).expect("Failed to update payment");
assert!(payment.is_completed());
// Test completed -> refunded
payment = payment.refund_payment();
let (_, payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to update payment");
let (_, payment) = db.set(&payment).expect("Failed to update payment");
assert!(payment.is_refunded());
// Verify final state in database
let final_payment: Payment = db
.collection::<Payment>()
.expect("open payment collection")
.get_by_id(payment_id)
.expect("Failed to get payment")
.unwrap();
@@ -291,18 +270,15 @@ fn test_company_payment_integration() {
let db = create_test_db();
// Create company with default PendingPayment status
let company = Company::new()
.name("Integration Test Corp")
.registration_number("ITC-001")
.incorporation_date(chrono::Utc::now().timestamp())
.email("test@integration.com")
.business_type(BusinessType::Starter);
let company = Company::new(
"Integration Test Corp".to_string(),
"ITC-001".to_string(),
chrono::Utc::now().timestamp(),
)
.email("test@integration.com".to_string())
.business_type(BusinessType::Starter);
let (company_id, company) = db
.collection::<Company>()
.expect("open company collection")
.set(&company)
.expect("Failed to save company");
let (company_id, company) = db.set(&company).expect("Failed to save company");
assert_eq!(company.status, CompanyStatus::PendingPayment);
// Create payment for the company
@@ -315,28 +291,18 @@ fn test_company_payment_integration() {
305.0,
);
let (_payment_id, payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&payment)
.expect("Failed to save payment");
let (_payment_id, payment) = db.set(&payment).expect("Failed to save payment");
assert_eq!(payment.company_id, company_id);
// Complete payment
let completed_payment = payment.complete_payment(Some("cus_integration_test".to_string()));
let (_, completed_payment) = db
.collection::<Payment>()
.expect("open payment collection")
.set(&completed_payment)
.expect("Failed to update payment");
// Update company status to Active
let active_company = company.status(CompanyStatus::Active);
let (_, active_company) = db
.collection::<Company>()
.expect("open company collection")
.set(&active_company)
.expect("Failed to update company");
let (_, active_company) = db.set(&active_company).expect("Failed to update company");
// Verify final states
assert!(completed_payment.is_completed());

View File

@@ -1,345 +0,0 @@
### 2.1 Accounts
* **id**: `BIGINT` identity (non-negative), unique account id
* **pubkey**: `BYTEA` unique public key for signing/encryption
* **display\_name**: `TEXT` (optional)
* **created\_at**: `TIMESTAMPTZ`
### 2.2 Currencies
* **asset\_code**: `TEXT` PK (e.g., `USDC-ETH`, `EUR`, `LND`)
* **name**: `TEXT`
* **symbol**: `TEXT`
* **decimals**: `INT` (default 2)
---
## 3) Services & Groups
### 3.1 Services
* **id**: `BIGINT` identity
* **name**: `TEXT` unique
* **description**: `TEXT`
* **default\_billing\_mode**: `ENUM('per_second','per_request')`
* **default\_price**: `NUMERIC(38,18)` (≥0)
* **default\_currency**: FK → `currencies(asset_code)`
* **max\_request\_seconds**: `INT` (>0 or `NULL`)
* **schema\_heroscript**: `TEXT`
* **schema\_json**: `JSONB`
* **created\_at**: `TIMESTAMPTZ`
#### Accepted Currencies (per service)
* **service\_id**: FK → `services(id)`
* **asset\_code**: FK → `currencies(asset_code)`
* **price\_override**: `NUMERIC(38,18)` (optional)
* **billing\_mode\_override**: `ENUM` (optional)
Primary key: `(service_id, asset_code)`
### 3.2 Service Groups
* **id**: `BIGINT` identity
* **name**: `TEXT` unique
* **description**: `TEXT`
* **created\_at**: `TIMESTAMPTZ`
#### Group Memberships
* **group\_id**: FK → `service_groups(id)`
* **service\_id**: FK → `services(id)`
Primary key: `(group_id, service_id)`
---
## 4) Providers & Runners
### 4.1 Service Providers
* **id**: `BIGINT` identity
* **account\_id**: FK → `accounts(id)` (the owning account)
* **name**: `TEXT` unique
* **description**: `TEXT`
* **created\_at**: `TIMESTAMPTZ`
#### Providers Offer Groups
* **provider\_id**: FK → `service_providers(id)`
* **group\_id**: FK → `service_groups(id)`
Primary key: `(provider_id, group_id)`
#### Provider Pricing Overrides (optional)
* **provider\_id**: FK → `service_providers(id)`
* **service\_id**: FK → `services(id)`
* **asset\_code**: FK → `currencies(asset_code)` (nullable for currency-agnostic override)
* **price\_override**: `NUMERIC(38,18)` (optional)
* **billing\_mode\_override**: `ENUM` (optional)
* **max\_request\_seconds\_override**: `INT` (optional)
Primary key: `(provider_id, service_id, asset_code)`
### 4.2 Runners
* **id**: `BIGINT` identity
* **address**: `INET` (must be IPv6)
* **name**: `TEXT`
* **description**: `TEXT`
* **pubkey**: `BYTEA` (optional)
* **created\_at**: `TIMESTAMPTZ`
#### Runner Ownership (many-to-many)
* **runner\_id**: FK → `runners(id)`
* **provider\_id**: FK → `service_providers(id)`
Primary key: `(runner_id, provider_id)`
#### Routing (provider → service/service\_group → runners)
* **provider\_service\_runners**: `(provider_id, service_id, runner_id)` PK
* **provider\_service\_group\_runners**: `(provider_id, group_id, runner_id)` PK
---
## 5) Subscriptions & Spend Control
A subscription authorizes an **account** to use either a **service** **or** a **service group**, with optional spend limits and allowed providers.
* **id**: `BIGINT` identity
* **account\_id**: FK → `accounts(id)`
* **service\_id** *xor* **group\_id**: FK (exactly one must be set)
* **secret**: `BYTEA` (random, provided by subscriber; recommend storing a hash)
* **subscription\_data**: `JSONB` (free-form)
* **limit\_amount**: `NUMERIC(38,18)` (optional)
* **limit\_currency**: FK → `currencies(asset_code)` (optional)
* **limit\_period**: `ENUM('hour','day','month')` (optional)
* **active**: `BOOLEAN` default `TRUE`
* **created\_at**: `TIMESTAMPTZ`
#### Allowed Providers per Subscription
* **subscription\_id**: FK → `subscriptions(id)`
* **provider\_id**: FK → `service_providers(id)`
Primary key: `(subscription_id, provider_id)`
**Intended Use:**
* Subscribers cap spending by amount, currency, and period (see the creation sketch after this list).
* Merchant (provider) can claim charges for requests fulfilled under an active subscription, within limits, and only if listed in `subscription_providers`.
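For illustration, a subscription with a monthly cap and a single allowed provider could be created like this (a minimal sketch against the tables above; the ids, the currency, and the use of pgcrypto's `digest` for the recommended secret hashing are placeholders):
```sql
-- Assumes account 42, service 7, provider 3, and currency 'USDC-ETH' already exist (illustrative ids).
WITH sub AS (
  INSERT INTO subscriptions (account_id, service_id, secret, limit_amount, limit_currency, limit_period)
  VALUES (42, 7, digest('client-chosen-secret', 'sha256'), 100, 'USDC-ETH', 'month')
  RETURNING id
)
INSERT INTO subscription_providers (subscription_id, provider_id)
SELECT id, 3 FROM sub;
```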
---
## 6) Requests & Billing
### 6.1 Request Lifecycle
* **id**: `BIGINT` identity
* **account\_id**: FK → `accounts(id)`
* **subscription\_id**: FK → `subscriptions(id)`
* **provider\_id**: FK → `service_providers(id)`
* **service\_id**: FK → `services(id)`
* **runner\_id**: FK → `runners(id)` (nullable)
* **request\_schema**: `JSONB` (payload matching `schema_json`/`schema_heroscript`)
* **started\_at**, **ended\_at**: `TIMESTAMPTZ`
* **status**: `ENUM('pending','running','succeeded','failed','canceled')`
* **created\_at**: `TIMESTAMPTZ`
### 6.2 Billing Ledger (append-only)
* **id**: `BIGINT` identity
* **account\_id**: FK → `accounts(id)`
* **provider\_id**: FK → `service_providers(id)` (nullable)
* **service\_id**: FK → `services(id)` (nullable)
* **request\_id**: FK → `requests(id)` (nullable)
* **amount**: `NUMERIC(38,18)` (debit = positive, credit/refund = negative)
* **asset\_code**: FK → `currencies(asset_code)`
* **entry\_type**: `ENUM('debit','credit','adjustment')`
* **description**: `TEXT`
* **created\_at**: `TIMESTAMPTZ`
**Balances View (example):**
* `account_balances(account_id, asset_code, balance)` as a view over `billing_ledger`.
---
## 7) Pricing Precedence
When computing the **effective** pricing, billing mode, and max duration for a `(provider, service, currency)`:
1. **Provider override for (service, asset\_code)** — if present, use it.
2. **Service accepted currency override** — if present, use it.
3. **Service defaults** — fallback.
If `billing_mode` or `max_request_seconds` is not overridden at step (1) or (2), it is inherited from the next step down.
---
## 8) Key Constraints & Validations
* All identity ids are non-negative (`CHECK (id >= 0)`).
* Runner IPv6 enforcement: `CHECK (family(address) = 6)`.
* Subscriptions must point to **exactly one** of `service_id` or `group_id`.
* Prices and limits must be non-negative if set.
* Unique natural keys where appropriate: service names, provider names, currency asset codes, account pubkeys.
---
## 9) Mermaid Diagrams
### 9.1 Entity-Relationship Overview
```mermaid
erDiagram
ACCOUNTS ||--o{ SERVICE_PROVIDERS : "owns via account_id"
ACCOUNTS ||--o{ SUBSCRIPTIONS : has
CURRENCIES ||--o{ SERVICES : "default_currency"
CURRENCIES ||--o{ SERVICE_ACCEPTED_CURRENCIES : "asset_code"
CURRENCIES ||--o{ PROVIDER_SERVICE_OVERRIDES : "asset_code"
CURRENCIES ||--o{ BILLING_LEDGER : "asset_code"
SERVICES ||--o{ SERVICE_ACCEPTED_CURRENCIES : has
SERVICES ||--o{ SERVICE_GROUP_MEMBERS : member_of
SERVICE_GROUPS ||--o{ SERVICE_GROUP_MEMBERS : contains
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_GROUPS : offers
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_OVERRIDES : sets
SERVICE_PROVIDERS ||--o{ RUNNER_OWNERS : owns
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_RUNNERS : routes
SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_GROUP_RUNNERS : routes
RUNNERS ||--o{ RUNNER_OWNERS : owned_by
RUNNERS ||--o{ PROVIDER_SERVICE_RUNNERS : executes
RUNNERS ||--o{ PROVIDER_SERVICE_GROUP_RUNNERS : executes
SUBSCRIPTIONS ||--o{ SUBSCRIPTION_PROVIDERS : allow
SERVICE_PROVIDERS ||--o{ SUBSCRIPTION_PROVIDERS : allowed
REQUESTS }o--|| ACCOUNTS : by
REQUESTS }o--|| SUBSCRIPTIONS : under
REQUESTS }o--|| SERVICE_PROVIDERS : via
REQUESTS }o--|| SERVICES : for
REQUESTS }o--o{ RUNNERS : executed_by
BILLING_LEDGER }o--|| ACCOUNTS : charges
BILLING_LEDGER }o--o{ SERVICES : reference
BILLING_LEDGER }o--o{ SERVICE_PROVIDERS : reference
BILLING_LEDGER }o--o{ REQUESTS : reference
```
### 9.2 Request Flow (Happy Path)
```mermaid
sequenceDiagram
autonumber
participant AC as Account
participant API as Broker/API
participant PR as Provider
participant RU as Runner
participant DB as PostgreSQL
AC->>API: Submit request (subscription_id, service_id, payload, secret)
API->>DB: Validate subscription (active, provider allowed, spend limits)
DB-->>API: OK + effective pricing (resolve precedence)
API->>PR: Dispatch request (service, payload)
PR->>DB: Select runner (provider_service_runners / group runners)
PR->>RU: Start job (payload)
RU-->>PR: Job started (started_at)
PR->>DB: Update REQUESTS (status=running, started_at)
RU-->>PR: Job finished (duration, result)
PR->>DB: Update REQUESTS (status=succeeded, ended_at)
API->>DB: Insert BILLING_LEDGER (debit per effective price)
DB-->>API: Ledger entry id
API-->>AC: Return result + charge info
```
### 9.3 Pricing Resolution
```mermaid
flowchart TD
A[Input: provider_id, service_id, asset_code] --> B{"Provider override exists for (service, asset_code)?"}
B -- Yes --> P1[Use provider price/mode/max]
B -- No --> C{Service accepted currency override exists?}
C -- Yes --> P2[Use service currency price/mode]
C -- No --> P3[Use service defaults]
P1 --> OUT[Effective pricing]
P2 --> OUT
P3 --> OUT
```
---
## 10) Operational Notes
* **Secrets:** store a hash (e.g., `digest(secret,'sha256')`) rather than the raw `secret`; keep the original only client-side (see the sketch after this list).
* **Limits enforcement:** before insert of a debit ledger entry, compute period window (hour/day/month UTC or tenant TZ) and enforce `SUM(amount) + new_amount ≤ limit_amount`.
* **Durations:** enforce `max_request_seconds` (effective) at orchestration and/or via DB trigger on `REQUESTS` when transitioning to `running/succeeded`.
* **Routing:** prefer `provider_service_runners` when a request targets a service directly; otherwise use the union of runners from `provider_service_group_runners` for the group.
* **Balances:** serve balance queries via the `account_balances` view or a materialized cache updated by triggers/jobs.
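A minimal sketch of the secrets note above, assuming pgcrypto and placeholder `:parameters`: hash on write, compare digests on use.
```sql
-- Store only the SHA-256 digest of the subscriber-chosen secret.
UPDATE subscriptions
SET secret = digest(:raw_secret, 'sha256')
WHERE id = :subscription_id;

-- On use: compare the presented secret's digest against the stored one.
SELECT EXISTS (
  SELECT 1
  FROM subscriptions
  WHERE id = :subscription_id
    AND active
    AND secret = digest(:presented_secret, 'sha256')
) AS secret_ok;
```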
---
## 11) Example Effective Pricing Query (sketch)
```sql
-- Inputs: :provider_id, :service_id, :asset_code
WITH p AS (
SELECT price_override, billing_mode_override, max_request_seconds_override
FROM provider_service_overrides
WHERE provider_id = :provider_id
AND service_id = :service_id
AND (asset_code = :asset_code)
),
sac AS (
SELECT price_override, billing_mode_override
FROM service_accepted_currencies
WHERE service_id = :service_id AND asset_code = :asset_code
),
svc AS (
SELECT default_price AS price, default_billing_mode AS mode, max_request_seconds
FROM services WHERE id = :service_id
)
SELECT
  COALESCE(p.price_override, sac.price_override, svc.price) AS effective_price,
  COALESCE(p.billing_mode_override, sac.billing_mode_override, svc.mode) AS effective_mode,
  COALESCE(p.max_request_seconds_override, svc.max_request_seconds) AS effective_max_seconds
FROM svc
LEFT JOIN sac ON TRUE
LEFT JOIN p ON TRUE;
```
---
## 12) Indices (non-exhaustive)
* `services(default_currency)`
* `service_accepted_currencies(service_id)`
* `provider_service_overrides(service_id, provider_id)`
* `requests(account_id)`, `requests(provider_id)`, `requests(service_id)`
* `billing_ledger(account_id, asset_code)`
* `subscriptions(account_id) WHERE active`
---
## 13) Migration & Compatibility
* Prefer additive migrations (new columns/tables) to avoid downtime.
* Use `ENUM` via `CREATE TYPE`; when extending, plan for `ALTER TYPE ... ADD VALUE`.
* For high-write ledgers, consider partitioning `billing_ledger` by `created_at` (monthly) and indexing partitions, as sketched below.
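Two illustrative migration snippets (sketches, not part of the schema: `per_gb` is a hypothetical new value, and the partition example assumes `billing_ledger` was declared `PARTITION BY RANGE (created_at)`):
```sql
-- Extend an enum; on older Postgres versions this cannot run inside a transaction block.
ALTER TYPE billing_mode ADD VALUE IF NOT EXISTS 'per_gb';

-- Attach a monthly partition for a high-write ledger.
CREATE TABLE billing_ledger_2025_09 PARTITION OF billing_ledger
  FOR VALUES FROM ('2025-09-01') TO ('2025-10-01');
CREATE INDEX ON billing_ledger_2025_09 (account_id, asset_code);
```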
---
## 14) Non-Goals
* Wallet custody and on-chain settlement are out of scope.
* SLA tracking and detailed observability (metrics/log schema) are not part of this spec.
---
## 15) Acceptance Criteria
* Can represent services, groups, and providers with currency-specific pricing.
* Can route requests to runners by service or group.
* Can authorize usage via subscriptions, enforce spend limits, and record charges.
* Can reconstruct balances and audit via append-only ledger.
---
**End of Spec**

View File

@@ -1,225 +0,0 @@
# Concept Note: Generic Billing & Tracking Framework
## 1) Purpose
The model is designed to support a **flexible, generic, and auditable** billing environment that can be applied across diverse services and providers — from compute time billing to per-request API usage, across multiple currencies, with dynamic provider-specific overrides.
It is **not tied to a single business domain** — the same framework can be used for:
* Cloud compute time (per second)
* API transactions (per request)
* Data transfer charges
* Managed service subscriptions
* Brokered third-party service reselling
---
## 2) Key Concepts
### 2.1 Accounts
An **account** represents an economic actor in the system — typically a customer or a service provider.
* Identified by a **public key** (for authentication & cryptographic signing).
* Every billing action traces back to an account.
---
### 2.2 Currencies & Asset Codes
The system supports **multiple currencies** (crypto or fiat) via **asset codes**.
* Asset codes identify the unit of billing (e.g. `USDC-ETH`, `EUR`, `LND`).
* Currencies are **decoupled from services** so you can add or remove supported assets at any time.
---
### 2.3 Services & Groups
* **Service** = a billable offering (e.g., "Speech-to-Text", "VM Hosting").
* Has a **billing mode** (`per_second` or `per_request`).
* Has a **default price** and **default currency**.
* Supports **multiple accepted currencies** with optional per-currency pricing overrides.
* Has execution constraints (e.g. `max_request_seconds`).
* Includes structured schemas for request payloads.
* **Service Group** = a logical grouping of services.
* Groups make it easy to **bundle related services** and manage them together.
* Providers can offer entire groups rather than individual services.
---
### 2.4 Service Providers
A **service provider** is an **account** that offers services or service groups.
They can:
* Override **pricing** for their offered services (per currency).
* Route requests to their own **runners** (execution agents).
* Manage multiple **service groups** under one provider identity.
---
### 2.5 Runners
A **runner** is an execution agent — a node, VM, or service endpoint that can fulfill requests.
* Identified by an **IPv6 address** (supports Mycelium or other overlay networks).
* Can be owned by one or multiple providers.
* Providers map **services/groups → runners** to define routing.
---
### 2.6 Subscriptions
A **subscription** is **the authorization mechanism** for usage and spending control:
* Links an **account** to a **service** or **service group**.
* Defines **spending limits** (amount, currency, period: hour/day/month).
* Restricts which **providers** are allowed to serve the subscription.
* Uses a **secret** chosen by the subscriber — providers use this to claim charges.
---
### 2.7 Requests
A **request** represents a single execution under a subscription:
* Tied to **account**, **subscription**, **provider**, **service**, and optionally **runner**.
* Has **status** (`pending`, `running`, `succeeded`, `failed`, `canceled`).
* Records start/end times for duration-based billing.
---
### 2.8 Billing Ledger
The **ledger** is **append-only** — the source of truth for all charges and credits.
* Each entry records:
* `amount` (positive = debit, negative = credit/refund)
* `asset_code`
* Links to `account`, `provider`, `service`, and/or `request`
* From the ledger, **balances** can be reconstructed at any time.
---
## 3) How Billing Works — Step by Step
### 3.1 Setup
1. **Define services** with default pricing & schemas.
2. **Define currencies** and accepted currencies for services.
3. **Group services** into service groups.
4. **Onboard providers** (accounts) and associate them with service groups.
5. **Assign runners** to services or groups for execution routing.
---
### 3.2 Subscription Creation
1. Customer **creates a subscription**:
* Chooses service or service group.
* Sets **spending limit** (amount, currency, period).
* Chooses **secret**.
* Selects **allowed providers**.
2. Subscription is stored in DB.
---
### 3.3 Request Execution
1. Customer sends a request to broker/API with:
* `subscription_id`
* Target `service_id`
* Payload + signature using account pubkey.
2. Broker:
* Validates **subscription active**.
* Validates **provider allowed**.
* Checks the **spend limit** hasn't been exceeded for the current period.
* Resolves **effective price** via:
1. Provider override (currency-specific)
2. Service accepted currency override
3. Service default
3. Broker selects a **runner** from the provider's routing tables (see the sketch after this list).
4. Runner executes request and returns result.
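One way to pick a runner in step 3 (a sketch; direct service routing is preferred, group routing is the fallback, and the tie-break is arbitrary):
```sql
-- Prefer runners routed for the service directly; otherwise fall back to runners
-- routed for any group that contains the service.
SELECT runner_id
FROM (
  SELECT psr.runner_id, 0 AS pref
  FROM provider_service_runners psr
  WHERE psr.provider_id = :provider_id AND psr.service_id = :service_id
  UNION ALL
  SELECT pgr.runner_id, 1 AS pref
  FROM provider_service_group_runners pgr
  JOIN service_group_members m ON m.group_id = pgr.group_id
  WHERE pgr.provider_id = :provider_id AND m.service_id = :service_id
) candidates
ORDER BY pref
LIMIT 1;
```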
---
### 3.4 Billing Entry
1. When the request completes:
* If `per_second` mode → calculate `duration × rate`.
* If `per_request` mode → apply flat rate.
2. Broker **inserts ledger entry**:
* Debit from customer account.
* Credit to provider account (can be separate entries or aggregated; see the sketch after this list).
3. Ledger is append-only — historical billing cannot be altered.
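A possible shape for step 2 as a double entry (a sketch; whether provider credits are separate rows or aggregated is a policy choice, and the `:parameters` are placeholders):
```sql
BEGIN;
-- Debit the customer for the completed request (positive amount = debit).
INSERT INTO billing_ledger (account_id, provider_id, service_id, request_id, amount, asset_code, entry_type, description)
VALUES (:customer_account_id, :provider_id, :service_id, :request_id, :charge, :asset_code, 'debit', 'charge for request');

-- Credit the provider's account (negative amount = credit in this ledger convention).
INSERT INTO billing_ledger (account_id, provider_id, service_id, request_id, amount, asset_code, entry_type, description)
VALUES (:provider_account_id, :provider_id, :service_id, :request_id, -:charge, :asset_code, 'credit', 'payout accrual for request');
COMMIT;
```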
---
### 3.5 Balance & Tracking
* **Current balances** are a sum of all ledger entries per account+currency.
* Spend limits are enforced by **querying the ledger** for the current period before each charge.
* Audit trails are guaranteed via immutable ledger entries.
---
## 4) Why This is Generic & Reusable
This design **decouples**:
* **Service definition** from **provider pricing** → multiple providers can sell the same service at different rates.
* **Execution agents** (runners) from **service definitions** → easy scaling or outsourcing of execution.
* **Billing rules** (per-second vs per-request) from **subscription limits** → same service can be sold in different billing modes.
* **Currencies** from the service → enabling multi-asset billing without changing the service definition.
Because of these separations, you can:
* Reuse the model for **compute**, **APIs**, **storage**, **SaaS features**, etc.
* Plug in different **payment backends** (on-chain, centralized payment processor, prepaid balance).
* Use the same model for **internal cost allocation** or **external customer billing**.
---
## 5) Potential Extensions
* **Prepaid model**: enforce that ledger debits can't exceed the balance.
* **On-chain settlement**: periodically export ledger entries to blockchain transactions.
* **Discount models**: percentage or fixed-amount discounts per subscription.
* **Usage analytics**: aggregate requests/billing by time period, provider, or service.
* **SLAs**: link billing adjustments to performance metrics in requests.
---
## 6) Conceptual Diagram — Billing Flow
```mermaid
sequenceDiagram
participant C as Customer Account
participant B as Broker/API
participant P as Provider
participant R as Runner
participant DB as Ledger DB
C->>B: Request(service, subscription, payload, secret)
B->>DB: Validate subscription & spend limit
DB-->>B: OK + effective pricing
B->>P: Forward request
P->>R: Execute request
R-->>P: Result + execution time
P->>B: Return result
B->>DB: Insert debit (customer) + credit (provider)
DB-->>B: Ledger updated
B-->>C: Return result + charge info
```

View File

@@ -1,234 +0,0 @@
-- Enable useful extensions (optional)
CREATE EXTENSION IF NOT EXISTS pgcrypto; -- for digests/hashes if you want
CREATE EXTENSION IF NOT EXISTS btree_gist; -- for exclusion/partial indexes
-- =========================
-- Core: Accounts & Currency
-- =========================
CREATE TABLE accounts (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
pubkey BYTEA NOT NULL UNIQUE,
display_name TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
CHECK (id >= 0)
);
CREATE TABLE currencies (
asset_code TEXT PRIMARY KEY, -- e.g. "USDC-ETH", "EUR", "LND"
name TEXT NOT NULL,
symbol TEXT, -- e.g. "$", "€"
decimals INT NOT NULL DEFAULT 2, -- how many decimal places
UNIQUE (name)
);
-- =========================
-- Services & Groups
-- =========================
CREATE TYPE billing_mode AS ENUM ('per_second', 'per_request');
CREATE TABLE services (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
description TEXT,
default_billing_mode billing_mode NOT NULL,
default_price NUMERIC(38, 18) NOT NULL, -- default price in "unit currency" (see accepted currencies)
default_currency TEXT NOT NULL REFERENCES currencies(asset_code) ON UPDATE CASCADE,
max_request_seconds INTEGER, -- nullable means no cap
schema_heroscript TEXT,
schema_json JSONB,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
CHECK (id >= 0),
CHECK (default_price >= 0),
CHECK (max_request_seconds IS NULL OR max_request_seconds > 0)
);
-- Accepted currencies for a service (subset + optional specific price per currency)
CREATE TABLE service_accepted_currencies (
service_id BIGINT NOT NULL REFERENCES services(id) ON DELETE CASCADE,
asset_code TEXT NOT NULL REFERENCES currencies(asset_code) ON UPDATE CASCADE,
price_override NUMERIC(38, 18), -- if set, overrides default_price for this currency
billing_mode_override billing_mode, -- if set, overrides default_billing_mode
PRIMARY KEY (service_id, asset_code),
CHECK (price_override IS NULL OR price_override >= 0)
);
CREATE TABLE service_groups (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
description TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
CHECK (id >= 0)
);
CREATE TABLE service_group_members (
group_id BIGINT NOT NULL REFERENCES service_groups(id) ON DELETE CASCADE,
service_id BIGINT NOT NULL REFERENCES services(id) ON DELETE RESTRICT,
PRIMARY KEY (group_id, service_id)
);
-- =========================
-- Providers, Runners, Routing
-- =========================
CREATE TABLE service_providers (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE, -- provider is an account
name TEXT NOT NULL,
description TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
UNIQUE (name),
CHECK (id >= 0)
);
-- Providers can offer groups (which imply their services)
CREATE TABLE provider_service_groups (
provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE,
group_id BIGINT NOT NULL REFERENCES service_groups(id) ON DELETE CASCADE,
PRIMARY KEY (provider_id, group_id)
);
-- Providers may set per-service overrides (price/mode/max seconds) (optionally per currency)
CREATE TABLE provider_service_overrides (
provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE,
service_id BIGINT NOT NULL REFERENCES services(id) ON DELETE CASCADE,
asset_code TEXT REFERENCES currencies(asset_code) ON UPDATE CASCADE,
price_override NUMERIC(38, 18),
billing_mode_override billing_mode,
max_request_seconds_override INTEGER,
PRIMARY KEY (provider_id, service_id, asset_code),
CHECK (price_override IS NULL OR price_override >= 0),
CHECK (max_request_seconds_override IS NULL OR max_request_seconds_override > 0)
);
-- Runners
CREATE TABLE runners (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
address INET NOT NULL, -- IPv6 (INET supports both IPv4/IPv6; require v6 via CHECK below if you like)
name TEXT NOT NULL,
description TEXT,
pubkey BYTEA, -- optional
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
UNIQUE (address),
CHECK (id >= 0),
CHECK (family(address) = 6) -- ensure IPv6
);
-- Runner ownership: a runner can be owned by multiple providers
CREATE TABLE runner_owners (
runner_id BIGINT NOT NULL REFERENCES runners(id) ON DELETE CASCADE,
provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE,
PRIMARY KEY (runner_id, provider_id)
);
-- Routing: link providers' services to specific runners
CREATE TABLE provider_service_runners (
provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE,
service_id BIGINT NOT NULL REFERENCES services(id) ON DELETE CASCADE,
runner_id BIGINT NOT NULL REFERENCES runners(id) ON DELETE CASCADE,
PRIMARY KEY (provider_id, service_id, runner_id)
);
-- Routing: link providers' service groups to runners
CREATE TABLE provider_service_group_runners (
provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE,
group_id BIGINT NOT NULL REFERENCES service_groups(id) ON DELETE CASCADE,
runner_id BIGINT NOT NULL REFERENCES runners(id) ON DELETE CASCADE,
PRIMARY KEY (provider_id, group_id, runner_id)
);
-- =========================
-- Subscriptions & Spend Control
-- =========================
CREATE TYPE spend_period AS ENUM ('hour', 'day', 'month');
-- A subscription ties an account to a specific service OR a service group, with spend limits and allowed providers
CREATE TABLE subscriptions (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
service_id BIGINT REFERENCES services(id) ON DELETE CASCADE,
group_id BIGINT REFERENCES service_groups(id) ON DELETE CASCADE,
secret BYTEA NOT NULL, -- caller-chosen secret (consider storing a hash instead)
subscription_data JSONB, -- arbitrary client-supplied info
limit_amount NUMERIC(38, 18), -- allowed spend in the selected currency per period
limit_currency TEXT REFERENCES currencies(asset_code) ON UPDATE CASCADE,
limit_period spend_period, -- period for the limit
active BOOLEAN NOT NULL DEFAULT TRUE,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
-- Ensure exactly one of service_id or group_id
CHECK ( (service_id IS NOT NULL) <> (group_id IS NOT NULL) ),
CHECK (limit_amount IS NULL OR limit_amount >= 0),
CHECK (id >= 0)
);
-- Providers that are allowed to serve under a subscription
CREATE TABLE subscription_providers (
subscription_id BIGINT NOT NULL REFERENCES subscriptions(id) ON DELETE CASCADE,
provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE,
PRIMARY KEY (subscription_id, provider_id)
);
-- =========================
-- Usage, Requests & Billing
-- =========================
-- A request lifecycle record (optional but useful for auditing and max duration enforcement)
CREATE TYPE request_status AS ENUM ('pending', 'running', 'succeeded', 'failed', 'canceled');
CREATE TABLE requests (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
subscription_id BIGINT NOT NULL REFERENCES subscriptions(id) ON DELETE RESTRICT,
provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE RESTRICT,
service_id BIGINT NOT NULL REFERENCES services(id) ON DELETE RESTRICT,
runner_id BIGINT REFERENCES runners(id) ON DELETE SET NULL,
request_schema JSONB, -- concrete task payload (conforms to schema_json/heroscript)
started_at TIMESTAMPTZ,
ended_at TIMESTAMPTZ,
status request_status NOT NULL DEFAULT 'pending',
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
CHECK (id >= 0),
CHECK (ended_at IS NULL OR started_at IS NULL OR ended_at >= started_at)
);
-- Billing ledger (debits/credits). Positive amount = debit to account (charge). Negative = credit/refund.
CREATE TYPE ledger_entry_type AS ENUM ('debit', 'credit', 'adjustment');
CREATE TABLE billing_ledger (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
provider_id BIGINT REFERENCES service_providers(id) ON DELETE SET NULL,
service_id BIGINT REFERENCES services(id) ON DELETE SET NULL,
request_id BIGINT REFERENCES requests(id) ON DELETE SET NULL,
amount NUMERIC(38, 18) NOT NULL, -- positive for debit, negative for credit
asset_code TEXT NOT NULL REFERENCES currencies(asset_code) ON UPDATE CASCADE,
entry_type ledger_entry_type NOT NULL,
description TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
CHECK (id >= 0)
);
-- Optional: running balances per account/currency (materialized view or real-time view)
-- This is a plain view; for performance, you might maintain a cached table.
CREATE VIEW account_balances AS
SELECT
account_id,
asset_code,
SUM(amount) AS balance
FROM billing_ledger
GROUP BY account_id, asset_code;
-- =========================
-- Helpful Indexes
-- =========================
CREATE INDEX idx_services_default_currency ON services(default_currency);
CREATE INDEX idx_service_accepted_currencies_service ON service_accepted_currencies(service_id);
CREATE INDEX idx_provider_overrides_service ON provider_service_overrides(service_id);
CREATE INDEX idx_requests_account ON requests(account_id);
CREATE INDEX idx_requests_provider ON requests(provider_id);
CREATE INDEX idx_requests_service ON requests(service_id);
CREATE INDEX idx_billing_account_currency ON billing_ledger(account_id, asset_code);
CREATE INDEX idx_subscriptions_account_active ON subscriptions(account_id) WHERE active;

View File

@@ -1,266 +0,0 @@
# Billing Logic — Whiteboard Version (for Devs)
## 1) Inputs You Always Need
* `account_id`, `subscription_id`
* `service_id` (or group → resolved to a service at dispatch)
* `provider_id`, `asset_code`
* `payload` (validated against service schema)
* (Optional) `runner_id`
* Idempotency key for the request (client-provided)
---
## 2) Gatekeeping (Hard Checks)
1. **Subscription**
* Must be `active`.
* Must target **exactly one** of {service, group}.
* If group: ensure `service_id` is a member.
2. **Provider Allowlist**
* If `subscription_providers` exists → `provider_id` must be listed (a combined check for items 1–2 is sketched after this list).
3. **Spend Limit** (if set)
* Compute window by `limit_period` (`hour`/`day`/`month`, UTC unless tenant TZ).
* Current period spend = `SUM(ledger.amount WHERE account & currency & period)`.
* `current_spend + estimated_charge ≤ limit_amount`.
4. **Max Duration** (effective; see §3):
* If billing mode is `per_second`, reject if requested/max exceeds effective cap.
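Checks 1 and 2 can be collapsed into a single query (a sketch; the spend-limit window is handled in §7 and the duration cap at dispatch):
```sql
-- Returns a row only if the subscription is active, targets the requested service
-- (directly or via group membership), and the provider is allowlisted (or no allowlist exists).
SELECT s.id
FROM subscriptions s
LEFT JOIN service_group_members m ON m.group_id = s.group_id
WHERE s.id = :subscription_id
  AND s.account_id = :account_id
  AND s.active
  AND (s.service_id = :service_id OR m.service_id = :service_id)
  AND (
        NOT EXISTS (SELECT 1 FROM subscription_providers sp WHERE sp.subscription_id = s.id)
        OR EXISTS (SELECT 1 FROM subscription_providers sp
                   WHERE sp.subscription_id = s.id AND sp.provider_id = :provider_id)
      );
```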
---
## 3) Effective Pricing (Single Resolution Function)
Inputs: `provider_id`, `service_id`, `asset_code`
Precedence:
1. `provider_service_overrides` for `(service_id, asset_code)`
2. `service_accepted_currencies` for `(service_id, asset_code)`
3. `services` defaults
Outputs:
* `effective_billing_mode ∈ {per_request, per_second}`
* `effective_price` (NUMERIC)
* `effective_max_request_seconds` (nullable)
---
## 4) Request Lifecycle (States)
* `pending` → `running` → (`succeeded` | `failed` | `canceled`)
* Timestamps: set `started_at` on `running`, `ended_at` on terminal states.
* Enforce `ended_at ≥ started_at` and `duration ≤ effective_max_request_seconds` (if set).
---
## 5) Charging Rules
### A) Per Request
```
charge = effective_price
```
### B) Per Second
```
duration_seconds = ceil(extract(epoch from (ended_at - started_at)))
charge = duration_seconds * effective_price
```
* Cap with `effective_max_request_seconds` if present (see the SQL sketch below).
* If ended early/failed before `started_at`: charge = 0.
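The per-second rule with the cap applied, as one SQL expression (a sketch; assumes both timestamps are set and the effective values were resolved via §3):
```sql
-- Duration-based charge, capped by the effective max seconds when present.
SELECT LEAST(
         CEIL(EXTRACT(EPOCH FROM (ended_at - started_at))),
         COALESCE(:effective_max_request_seconds, CEIL(EXTRACT(EPOCH FROM (ended_at - started_at))))
       ) * :effective_price AS charge
FROM requests
WHERE id = :request_id;
```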
---
## 6) Idempotency & Atomicity
* **Idempotency key** per `(account_id, subscription_id, provider_id, service_id, request_external_id)`; store on `requests` and enforce a unique index (a sketch follows this list).
* **Single transaction** to:
1. finalize `REQUESTS` status + timestamps,
2. insert **one** debit entry into `billing_ledger`.
* Never mutate ledger entries; use compensating **credit** entries for adjustments/refunds.
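The schema above does not define an idempotency column on `requests`; one hypothetical way to add it (column and index names are illustrative):
```sql
-- Hypothetical: the DDL above has no idempotency_key column on requests.
ALTER TABLE requests ADD COLUMN idempotency_key TEXT;
CREATE UNIQUE INDEX uq_requests_idempotency
  ON requests (account_id, subscription_id, provider_id, service_id, idempotency_key)
  WHERE idempotency_key IS NOT NULL;
```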
---
## 7) Spend-Limit Enforcement (Before Charging)
Pseudocode (SQL-ish):
```sql
-- period_start/period_end are helper functions; one possible definition is sketched below.
WITH win AS (
  SELECT tstzrange(period_start(:limit_period), period_end(:limit_period)) AS w
),
spent AS (
  SELECT COALESCE(SUM(amount), 0) AS total
  FROM billing_ledger, win
  WHERE account_id = :account_id
    AND asset_code = :asset_code
    AND created_at <@ (SELECT w FROM win)
)
SELECT (spent.total + :estimated_charge) <= :limit_amount AS ok
FROM spent;
```
* If not ok → reject before dispatch, or allow but **set hard cap** on max seconds and auto-stop at limit.
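The `period_start`/`period_end` helpers are not defined anywhere in the schema; a minimal sketch assuming UTC windows and the `spend_period` enum from the DDL:
```sql
-- Hypothetical helpers (not in the DDL above); compute the current UTC window for a spend_period.
CREATE FUNCTION period_start(p spend_period) RETURNS timestamptz
LANGUAGE sql STABLE AS $$
  SELECT date_trunc(p::text, now() AT TIME ZONE 'UTC') AT TIME ZONE 'UTC';
$$;

CREATE FUNCTION period_end(p spend_period) RETURNS timestamptz
LANGUAGE sql STABLE AS $$
  SELECT period_start(p) + ('1 ' || p::text)::interval;
$$;
```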
---
## 8) Suggested DB Operations (Happy Path)
1. **Create request**
```sql
INSERT INTO requests (...)
VALUES (...)
ON CONFLICT (idempotency_key) DO NOTHING
RETURNING id;
```
2. **Start execution**
```sql
UPDATE requests
SET status='running', started_at=now()
WHERE id=:id AND status='pending';
```
3. **Finish & bill** (single transaction)
```sql
BEGIN;
-- lock for update to avoid double-billing
UPDATE requests
SET status=:final_status, ended_at=now()
WHERE id=:id AND status='running'
RETURNING started_at, ended_at;
-- compute charge in app (see §5), re-check spend window here
INSERT INTO billing_ledger (
account_id, provider_id, service_id, request_id,
amount, asset_code, entry_type, description
) VALUES (
:account_id, :provider_id, :service_id, :id,
:charge, :asset_code, 'debit', :desc
);
COMMIT;
```
---
## 9) Balances & Reporting
* **Current balance** = `SUM(billing_ledger.amount) GROUP BY account_id, asset_code`.
* Keep a **view** or **materialized view**; refresh asynchronously if needed (sketched below).
* Never rely on cached balance for hard checks — re-check within the billing transaction if **prepaid** semantics are required.
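A materialized variant of the balances view, refreshed asynchronously (a sketch; the cache name is illustrative):
```sql
CREATE MATERIALIZED VIEW account_balances_cache AS
SELECT account_id, asset_code, SUM(amount) AS balance
FROM billing_ledger
GROUP BY account_id, asset_code;

-- CONCURRENTLY requires a unique index on the materialized view.
CREATE UNIQUE INDEX ON account_balances_cache (account_id, asset_code);

-- Run from a cron job or worker.
REFRESH MATERIALIZED VIEW CONCURRENTLY account_balances_cache;
```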
---
## 10) Error & Edge Rules
* If runner fails before `running` → no charge.
* If runner starts, then fails:
* **per\_second**: bill actual seconds (can be 0).
* **per\_request**: default is **no charge** unless policy says otherwise; if charging partials, document it.
* Partial refunds/adjustments → insert **negative** ledger entries (type `credit`/`adjustment`) tied to the original `request_id`, as sketched below.
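For example, a partial refund tied to the original request (a sketch; the `:parameters` are placeholders):
```sql
INSERT INTO billing_ledger (account_id, provider_id, service_id, request_id, amount, asset_code, entry_type, description)
VALUES (:account_id, :provider_id, :service_id, :request_id, -:refund_amount, :asset_code, 'credit', 'partial refund');
```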
---
## 11) Minimal Pricing Resolver (Sketch)
```sql
WITH p AS (
SELECT price_override AS price,
billing_mode_override AS mode,
max_request_seconds_override AS maxsec
FROM provider_service_overrides
WHERE provider_id = :pid AND service_id = :sid AND asset_code = :asset
LIMIT 1
),
sac AS (
SELECT price_override AS price,
billing_mode_override AS mode
FROM service_accepted_currencies
WHERE service_id = :sid AND asset_code = :asset
LIMIT 1
),
svc AS (
SELECT default_price AS price,
default_billing_mode AS mode,
max_request_seconds AS maxsec
FROM services WHERE id = :sid
)
SELECT
  COALESCE(p.price, sac.price, svc.price) AS price,
  COALESCE(p.mode, sac.mode, svc.mode) AS mode,
  COALESCE(p.maxsec, svc.maxsec) AS max_seconds
FROM svc
LEFT JOIN sac ON TRUE
LEFT JOIN p ON TRUE;
```
---
## 12) Mermaid — Decision Trees
### Pricing & Duration
```mermaid
flowchart TD
A[provider_id, service_id, asset_code] --> B{Provider override exists?}
B -- yes --> P[Use provider price/mode/max]
B -- no --> C{Service currency override?}
C -- yes --> S[Use service currency price/mode]
C -- no --> D[Use service defaults]
P --> OUT[effective price/mode/max]
S --> OUT
D --> OUT
```
### Spend Check & Charge
```mermaid
flowchart TD
S[Has subscription limit?] -->|No| D1[Dispatch]
S -->|Yes| C{current_spend + est_charge <= limit?}
C -->|No| REJ[Reject or cap duration]
C -->|Yes| D1[Dispatch]
D1 --> RUN[Run request]
RUN --> DONE[Finalize + insert ledger]
```
---
## 13) Security Posture
* Store **hash of subscription secret**; compare hash on use.
* Sign client requests with **account pubkey**; verify before dispatch.
* Limit **request schema** to validated fields; reject unknowns.
* Enforce **IPv6** for runners where required.
---
## 14) What To Implement First
1. Pricing resolver (single function).
2. Spend-window checker (single query).
3. Request lifecycle + idempotency.
4. Ledger write (append-only) + balances view.
Everything else layers on top.
---

Some files were not shown because too many files have changed in this diff.