Compare commits

cedea2f305 ... developmen (14 commits)

- 53e9a2d4f0
- cb1fb0f0ec
- 97c24d146b
- 6bff52e8b7
- 7a999b7b6e
- 095a4d0c69
- a7c978efd4
- 0b0d546b4e
- 2f5e18df98
- 77169c073c
- ce12f26a91
- 130822b69b
- 7439980b33
- 453e86edd2

Cargo.lock (generated, 114 changed lines)
							| @@ -65,13 +65,13 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "async-trait" | name = "async-trait" | ||||||
| version = "0.1.88" | version = "0.1.89" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" | checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -123,9 +123,9 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "bitflags" | name = "bitflags" | ||||||
| version = "2.9.1" | version = "2.9.2" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" | checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29" | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "bitvec" | name = "bitvec" | ||||||
| @@ -168,7 +168,7 @@ dependencies = [ | |||||||
|  "proc-macro-crate", |  "proc-macro-crate", | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -213,18 +213,18 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "cc" | name = "cc" | ||||||
| version = "1.2.31" | version = "1.2.33" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2" | checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "shlex", |  "shlex", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "cfg-if" | name = "cfg-if" | ||||||
| version = "1.0.1" | version = "1.0.3" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" | checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "cfg_aliases" | name = "cfg_aliases" | ||||||
| @@ -372,7 +372,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -452,9 +452,9 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "hashbrown" | name = "hashbrown" | ||||||
| version = "0.15.4" | version = "0.15.5" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" | checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "heck" | name = "heck" | ||||||
| @@ -491,10 +491,12 @@ dependencies = [ | |||||||
| name = "heromodels-derive" | name = "heromodels-derive" | ||||||
| version = "0.1.0" | version = "0.1.0" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  |  "heromodels_core", | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "serde", |  "serde", | ||||||
|  "syn 2.0.104", |  "serde_json", | ||||||
|  |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -545,7 +547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" | |||||||
| checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" | checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "equivalent", |  "equivalent", | ||||||
|  "hashbrown 0.15.4", |  "hashbrown 0.15.5", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -597,7 +599,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -627,9 +629,9 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "jsonb" | name = "jsonb" | ||||||
| version = "0.5.3" | version = "0.5.4" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "96cbb4fba292867a2d86ed83dbe5f9d036f423bf6a491b7d884058b2fde42fcd" | checksum = "a452366d21e8d3cbca680c41388e01d6a88739afef7877961946a6da409f9ccd" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "byteorder", |  "byteorder", | ||||||
|  "ethnum", |  "ethnum", | ||||||
| @@ -647,9 +649,20 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "libc" | name = "libc" | ||||||
| version = "0.2.174" | version = "0.2.175" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" | checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" | ||||||
|  |  | ||||||
|  | [[package]] | ||||||
|  | name = "libredox" | ||||||
|  | version = "0.1.9" | ||||||
|  | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
|  | checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" | ||||||
|  | dependencies = [ | ||||||
|  |  "bitflags", | ||||||
|  |  "libc", | ||||||
|  |  "redox_syscall", | ||||||
|  | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "lock_api" | name = "lock_api" | ||||||
| @@ -760,6 +773,7 @@ dependencies = [ | |||||||
| [[package]] | [[package]] | ||||||
| name = "ourdb" | name = "ourdb" | ||||||
| version = "0.1.0" | version = "0.1.0" | ||||||
|  | source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "crc32fast", |  "crc32fast", | ||||||
|  "log", |  "log", | ||||||
| @@ -792,9 +806,9 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "percent-encoding" | name = "percent-encoding" | ||||||
| version = "2.3.1" | version = "2.3.2" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" | checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "phf" | name = "phf" | ||||||
| @@ -906,9 +920,9 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "proc-macro2" | name = "proc-macro2" | ||||||
| version = "1.0.95" | version = "1.0.101" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" | checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "unicode-ident", |  "unicode-ident", | ||||||
| ] | ] | ||||||
| @@ -1079,12 +1093,13 @@ checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "rhailib-macros" | name = "rhailib-macros" | ||||||
| version = "0.1.0" | version = "0.1.0" | ||||||
|  | source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "rhai", |  "rhai", | ||||||
|  "serde", |  "serde", | ||||||
| @@ -1143,9 +1158,9 @@ checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "rustversion" | name = "rustversion" | ||||||
| version = "1.0.21" | version = "1.0.22" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" | checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "ryu" | name = "ryu" | ||||||
| @@ -1191,14 +1206,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "serde_json" | name = "serde_json" | ||||||
| version = "1.0.142" | version = "1.0.143" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" | checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "indexmap", |  "indexmap", | ||||||
|  "itoa", |  "itoa", | ||||||
| @@ -1247,9 +1262,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "slab" | name = "slab" | ||||||
| version = "0.4.10" | version = "0.4.11" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" | checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "smallvec" | name = "smallvec" | ||||||
| @@ -1327,7 +1342,7 @@ dependencies = [ | |||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "rustversion", |  "rustversion", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -1349,9 +1364,9 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "syn" | name = "syn" | ||||||
| version = "2.0.104" | version = "2.0.106" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" | checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
| @@ -1387,7 +1402,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -1401,9 +1416,9 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "tinyvec" | name = "tinyvec" | ||||||
| version = "1.9.0" | version = "1.10.0" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" | checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "tinyvec_macros", |  "tinyvec_macros", | ||||||
| ] | ] | ||||||
| @@ -1442,7 +1457,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -1504,6 +1519,7 @@ dependencies = [ | |||||||
| [[package]] | [[package]] | ||||||
| name = "tst" | name = "tst" | ||||||
| version = "0.1.0" | version = "0.1.0" | ||||||
|  | source = "git+https://git.ourworld.tf/herocode/herolib_rust#aa0248ef17cb0117bb69f1d9f278f995bb417f16" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "ourdb", |  "ourdb", | ||||||
|  "thiserror", |  "thiserror", | ||||||
| @@ -1550,9 +1566,9 @@ checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae" | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "uuid" | name = "uuid" | ||||||
| version = "1.17.0" | version = "1.18.0" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" | checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "getrandom 0.3.3", |  "getrandom 0.3.3", | ||||||
|  "js-sys", |  "js-sys", | ||||||
| @@ -1614,7 +1630,7 @@ dependencies = [ | |||||||
|  "log", |  "log", | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
|  "wasm-bindgen-shared", |  "wasm-bindgen-shared", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| @@ -1636,7 +1652,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
|  "wasm-bindgen-backend", |  "wasm-bindgen-backend", | ||||||
|  "wasm-bindgen-shared", |  "wasm-bindgen-shared", | ||||||
| ] | ] | ||||||
| @@ -1662,11 +1678,11 @@ dependencies = [ | |||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| name = "whoami" | name = "whoami" | ||||||
| version = "1.6.0" | version = "1.6.1" | ||||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | source = "registry+https://github.com/rust-lang/crates.io-index" | ||||||
| checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7" | checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d" | ||||||
| dependencies = [ | dependencies = [ | ||||||
|  "redox_syscall", |  "libredox", | ||||||
|  "wasite", |  "wasite", | ||||||
|  "web-sys", |  "web-sys", | ||||||
| ] | ] | ||||||
| @@ -1692,7 +1708,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -1703,7 +1719,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|  |  | ||||||
| [[package]] | [[package]] | ||||||
| @@ -1856,5 +1872,5 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" | |||||||
| dependencies = [ | dependencies = [ | ||||||
|  "proc-macro2", |  "proc-macro2", | ||||||
|  "quote", |  "quote", | ||||||
|  "syn 2.0.104", |  "syn 2.0.106", | ||||||
| ] | ] | ||||||
|   | |||||||
| @@ -14,4 +14,6 @@ quote = "1.0" | |||||||
| proc-macro2 = "1.0" | proc-macro2 = "1.0" | ||||||
|  |  | ||||||
| [dev-dependencies] | [dev-dependencies] | ||||||
| serde = { version = "1.0", features = ["derive"] } | serde = { version = "1.0", features = ["derive"] } | ||||||
|  | serde_json = "1.0" | ||||||
|  | heromodels_core = { path = "../heromodels_core" } | ||||||
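For readability, the derive crate's `[dev-dependencies]` table after this hunk would read roughly as follows; this is just the right-hand column of the diff consolidated, assuming nothing else in that manifest changed:

```toml
[dev-dependencies]
serde = { version = "1.0", features = ["derive"] }
# New: the expanded db_keys() code refers to ::serde_json, so the
# derive crate's own tests need it available.
serde_json = "1.0"
# New: workspace core crate made available to the tests.
heromodels_core = { path = "../heromodels_core" }
```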
| @@ -1,6 +1,6 @@ | |||||||
| use proc_macro::TokenStream; | use proc_macro::TokenStream; | ||||||
| use quote::{format_ident, quote}; | use quote::{format_ident, quote}; | ||||||
| use syn::{Data, DeriveInput, Fields, parse_macro_input}; | use syn::{parse_macro_input, Data, DeriveInput, Fields, Lit, Meta, MetaList, MetaNameValue}; | ||||||
|  |  | ||||||
| /// Convert a string to snake_case | /// Convert a string to snake_case | ||||||
| fn to_snake_case(s: &str) -> String { | fn to_snake_case(s: &str) -> String { | ||||||
| @@ -47,86 +47,165 @@ pub fn model(_attr: TokenStream, item: TokenStream) -> TokenStream { | |||||||
|     let db_prefix = to_snake_case(&name_str); |     let db_prefix = to_snake_case(&name_str); | ||||||
|  |  | ||||||
|     // Extract fields with #[index] attribute |     // Extract fields with #[index] attribute | ||||||
|     let mut indexed_fields = Vec::new(); |     // Supports both top-level (no args) and nested path-based indexes declared on a field | ||||||
|     let mut custom_index_names = std::collections::HashMap::new(); |     #[derive(Clone)] | ||||||
|  |     enum IndexDecl { | ||||||
|  |         TopLevel { | ||||||
|  |             field_ident: syn::Ident, | ||||||
|  |             field_ty: syn::Type, | ||||||
|  |         }, | ||||||
|  |         NestedPath { | ||||||
|  |             on_field_ident: syn::Ident, | ||||||
|  |             path: String, // dotted path relative to the field | ||||||
|  |         }, | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     let mut index_decls: Vec<IndexDecl> = Vec::new(); | ||||||
|  |  | ||||||
|     if let Data::Struct(ref mut data_struct) = input.data { |     if let Data::Struct(ref mut data_struct) = input.data { | ||||||
|         if let Fields::Named(ref mut fields_named) = data_struct.fields { |         if let Fields::Named(ref mut fields_named) = data_struct.fields { | ||||||
|             for field in &mut fields_named.named { |             for field in &mut fields_named.named { | ||||||
|                 let mut attr_idx = None; |                 let mut to_remove: Vec<usize> = Vec::new(); | ||||||
|                 for (i, attr) in field.attrs.iter().enumerate() { |                 for (i, attr) in field.attrs.iter().enumerate() { | ||||||
|                     if attr.path().is_ident("index") { |                     if !attr.path().is_ident("index") { | ||||||
|                         attr_idx = Some(i); |                         continue; | ||||||
|                         if let Some(ref field_name) = field.ident { |                     } | ||||||
|                             // Check if the attribute has parameters |                     to_remove.push(i); | ||||||
|                             let mut custom_name = None; |  | ||||||
|  |  | ||||||
|                             // Parse attribute arguments if any |                     if let Some(ref field_name) = field.ident { | ||||||
|                             let meta = attr.meta.clone(); |                         match &attr.meta { | ||||||
|                             if let syn::Meta::List(list) = meta { |                             Meta::Path(_) => { | ||||||
|                                 if let Ok(nested) = list.parse_args_with(syn::punctuated::Punctuated::<syn::Meta, syn::Token![,]>::parse_terminated) { |                                 // Simple top-level index on this field | ||||||
|                                         for meta in nested { |                                 index_decls.push(IndexDecl::TopLevel { | ||||||
|                                             if let syn::Meta::NameValue(name_value) = meta { |                                     field_ident: field_name.clone(), | ||||||
|                                                 if name_value.path.is_ident("name") { |                                     field_ty: field.ty.clone(), | ||||||
|                                                     if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Str(lit_str), .. }) = name_value.value { |                                 }); | ||||||
|                                                         custom_name = Some(lit_str.value()); |                             } | ||||||
|                                                     } |                             Meta::List(MetaList { .. }) => { | ||||||
|  |                                 // Parse for path = "..."; name is assumed equal to path | ||||||
|  |                                 // We support syntax: #[index(path = "a.b.c")] | ||||||
|  |                                 if let Ok(nested) = attr.parse_args_with( | ||||||
|  |                                     syn::punctuated::Punctuated::<Meta, syn::Token![,]>::parse_terminated, | ||||||
|  |                                 ) { | ||||||
|  |                                     for meta in nested { | ||||||
|  |                                         if let Meta::NameValue(MetaNameValue { path, value, .. }) = meta { | ||||||
|  |                                             if path.is_ident("path") { | ||||||
|  |                                                 if let syn::Expr::Lit(syn::ExprLit { lit: Lit::Str(lit_str), .. }) = value { | ||||||
|  |                                                     let p = lit_str.value(); | ||||||
|  |                                                     index_decls.push(IndexDecl::NestedPath { | ||||||
|  |                                                         on_field_ident: field_name.clone(), | ||||||
|  |                                                         path: p, | ||||||
|  |                                                     }); | ||||||
|                                                 } |                                                 } | ||||||
|                                             } |                                             } | ||||||
|                                         } |                                         } | ||||||
|                                     } |                                     } | ||||||
|  |                                 } | ||||||
|                             } |                             } | ||||||
|  |                             _ => {} | ||||||
|                             indexed_fields.push((field_name.clone(), field.ty.clone())); |  | ||||||
|  |  | ||||||
|                             if let Some(name) = custom_name { |  | ||||||
|                                 custom_index_names.insert(field_name.to_string(), name); |  | ||||||
|                             } |  | ||||||
|                         } |                         } | ||||||
|                     } |                     } | ||||||
|                 } |                 } | ||||||
|  |  | ||||||
|                 if let Some(idx) = attr_idx { |                 // remove all #[index] attributes we processed | ||||||
|  |                 // remove from the back to keep indices valid | ||||||
|  |                 to_remove.sort_unstable(); | ||||||
|  |                 to_remove.drain(..).rev().for_each(|idx| { | ||||||
|                     field.attrs.remove(idx); |                     field.attrs.remove(idx); | ||||||
|                 } |                 }); | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     // Generate Model trait implementation |     // Generate Model trait implementation | ||||||
|     let db_keys_impl = if indexed_fields.is_empty() { |     let db_keys_impl = if index_decls.is_empty() { | ||||||
|         quote! { |         quote! { | ||||||
|             fn db_keys(&self) -> Vec<heromodels_core::IndexKey> { |             fn db_keys(&self) -> Vec<heromodels_core::IndexKey> { | ||||||
|                 Vec::new() |                 Vec::new() | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|     } else { |     } else { | ||||||
|         let field_keys = indexed_fields.iter().map(|(field_name, _)| { |         // Build code for keys from each index declaration | ||||||
|             let name_str = custom_index_names |         let mut key_snippets: Vec<proc_macro2::TokenStream> = Vec::new(); | ||||||
|                 .get(&field_name.to_string()) |  | ||||||
|                 .cloned() |         for decl in &index_decls { | ||||||
|                 .unwrap_or(field_name.to_string()); |             match decl.clone() { | ||||||
|             quote! { |                 IndexDecl::TopLevel { field_ident, .. } => { | ||||||
|                 heromodels_core::IndexKey { |                     let name_str = field_ident.to_string(); | ||||||
|                     name: #name_str, |                     key_snippets.push(quote! { | ||||||
|                     value: self.#field_name.to_string(), |                         keys.push(heromodels_core::IndexKey { | ||||||
|  |                             name: #name_str, | ||||||
|  |                             value: self.#field_ident.to_string(), | ||||||
|  |                         }); | ||||||
|  |                     }); | ||||||
|  |                 } | ||||||
|  |                 IndexDecl::NestedPath { on_field_ident, path } => { | ||||||
|  |                     // Name is equal to provided path | ||||||
|  |                     let name_str = path.clone(); | ||||||
|  |                     // Generate traversal code using serde_json to support arrays and objects generically | ||||||
|  |                     // Split the path into static segs for iteration | ||||||
|  |                     let segs: Vec<String> = path.split('.').map(|s| s.to_string()).collect(); | ||||||
|  |                     let segs_iter = segs.iter().map(|s| s.as_str()); | ||||||
|  |                     let segs_array = quote! { [ #( #segs_iter ),* ] }; | ||||||
|  |  | ||||||
|  |                     key_snippets.push(quote! { | ||||||
|  |                         // Serialize the target field to JSON for generic traversal | ||||||
|  |                         let __hm_json_val = ::serde_json::to_value(&self.#on_field_ident).unwrap_or(::serde_json::Value::Null); | ||||||
|  |                         let mut __hm_stack: Vec<&::serde_json::Value> = vec![&__hm_json_val]; | ||||||
|  |                         for __hm_seg in #segs_array.iter() { | ||||||
|  |                             let mut __hm_next: Vec<&::serde_json::Value> = Vec::new(); | ||||||
|  |                             for __hm_v in &__hm_stack { | ||||||
|  |                                 match __hm_v { | ||||||
|  |                                     ::serde_json::Value::Array(arr) => { | ||||||
|  |                                         for __hm_e in arr { | ||||||
|  |                                             if let ::serde_json::Value::Object(map) = __hm_e { | ||||||
|  |                                                 if let Some(x) = map.get(*__hm_seg) { __hm_next.push(x); } | ||||||
|  |                                             } | ||||||
|  |                                         } | ||||||
|  |                                     } | ||||||
|  |                                     ::serde_json::Value::Object(map) => { | ||||||
|  |                                         if let Some(x) = map.get(*__hm_seg) { __hm_next.push(x); } | ||||||
|  |                                     } | ||||||
|  |                                     _ => {} | ||||||
|  |                                 } | ||||||
|  |                             } | ||||||
|  |                             __hm_stack = __hm_next; | ||||||
|  |                             if __hm_stack.is_empty() { break; } | ||||||
|  |                         } | ||||||
|  |                         for __hm_leaf in __hm_stack { | ||||||
|  |                             match __hm_leaf { | ||||||
|  |                                 ::serde_json::Value::Null => {}, | ||||||
|  |                                 ::serde_json::Value::Array(_) => {}, | ||||||
|  |                                 ::serde_json::Value::Object(_) => {}, | ||||||
|  |                                 other => { | ||||||
|  |                                     // Convert primitives to string without surrounding quotes for strings | ||||||
|  |                                     let mut s = other.to_string(); | ||||||
|  |                                     if let ::serde_json::Value::String(_) = other { s = s.trim_matches('"').to_string(); } | ||||||
|  |                                     keys.push(heromodels_core::IndexKey { name: #name_str, value: s }); | ||||||
|  |                                 } | ||||||
|  |                             } | ||||||
|  |                         } | ||||||
|  |                     }); | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
|         }); |         } | ||||||
|  |  | ||||||
|         quote! { |         quote! { | ||||||
|             fn db_keys(&self) -> Vec<heromodels_core::IndexKey> { |             fn db_keys(&self) -> Vec<heromodels_core::IndexKey> { | ||||||
|                 vec![ |                 let mut keys: Vec<heromodels_core::IndexKey> = Vec::new(); | ||||||
|                     #(#field_keys),* |                 #(#key_snippets)* | ||||||
|                 ] |                 keys | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|     }; |     }; | ||||||
|  |  | ||||||
|     let indexed_field_names = indexed_fields |     let indexed_field_names: Vec<String> = index_decls | ||||||
|         .iter() |         .iter() | ||||||
|         .map(|f| f.0.to_string()) |         .map(|d| match d { | ||||||
|         .collect::<Vec<_>>(); |             IndexDecl::TopLevel { field_ident, .. } => field_ident.to_string(), | ||||||
|  |             IndexDecl::NestedPath { path, .. } => path.clone(), | ||||||
|  |         }) | ||||||
|  |         .collect(); | ||||||
|  |  | ||||||
|     let model_impl = quote! { |     let model_impl = quote! { | ||||||
|         impl heromodels_core::Model for #struct_name { |         impl heromodels_core::Model for #struct_name { | ||||||
| @@ -152,51 +231,33 @@ pub fn model(_attr: TokenStream, item: TokenStream) -> TokenStream { | |||||||
|         } |         } | ||||||
|     }; |     }; | ||||||
|  |  | ||||||
|     // Generate Index trait implementations |     // Generate Index trait implementations only for top-level fields, keep existing behavior | ||||||
|     let mut index_impls = proc_macro2::TokenStream::new(); |     let mut index_impls = proc_macro2::TokenStream::new(); | ||||||
|  |     for decl in &index_decls { | ||||||
|  |         if let IndexDecl::TopLevel { field_ident, field_ty } = decl { | ||||||
|  |             let name_str = field_ident.to_string(); | ||||||
|  |             let index_struct_name = format_ident!("{}", &name_str); | ||||||
|  |             let field_type = field_ty.clone(); | ||||||
|  |  | ||||||
|     for (field_name, field_type) in &indexed_fields { |             let index_impl = quote! { | ||||||
|         let name_str = field_name.to_string(); |                 pub struct #index_struct_name; | ||||||
|  |  | ||||||
|         // Get custom index name if specified, otherwise use field name |                 impl heromodels_core::Index for #index_struct_name { | ||||||
|         let index_key = match custom_index_names.get(&name_str) { |                     type Model = super::#struct_name; | ||||||
|             Some(custom_name) => custom_name.clone(), |                     type Key = #field_type; | ||||||
|             None => name_str.clone(), |  | ||||||
|         }; |  | ||||||
|  |  | ||||||
|         // Convert field name to PascalCase for struct name |                     fn key() -> &'static str { #name_str } | ||||||
|         // let struct_name_str = to_pascal_case(&name_str); |  | ||||||
|         // let index_struct_name = format_ident!("{}", struct_name_str); |  | ||||||
|         let index_struct_name = format_ident!("{}", &name_str); |  | ||||||
|  |  | ||||||
|         // Default to str for key type |                     fn field_name() -> &'static str { #name_str } | ||||||
|         let index_impl = quote! { |  | ||||||
|             pub struct #index_struct_name; |  | ||||||
|  |  | ||||||
|             impl heromodels_core::Index for #index_struct_name { |  | ||||||
|                 type Model = super::#struct_name; |  | ||||||
|                 type Key = #field_type; |  | ||||||
|  |  | ||||||
|                 fn key() -> &'static str { |  | ||||||
|                     #index_key |  | ||||||
|                 } |                 } | ||||||
|  |             }; | ||||||
|                 fn field_name() -> &'static str { |             index_impls.extend(index_impl); | ||||||
|                     #name_str |         } | ||||||
|                 } |  | ||||||
|             } |  | ||||||
|         }; |  | ||||||
|  |  | ||||||
|         index_impls.extend(index_impl); |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     if !index_impls.is_empty() { |     if !index_impls.is_empty() { | ||||||
|         let index_mod_name = format_ident!("{}_index", db_prefix); |         let index_mod_name = format_ident!("{}_index", db_prefix); | ||||||
|         index_impls = quote! { |         index_impls = quote! { pub mod #index_mod_name { #index_impls } }; | ||||||
|             pub mod #index_mod_name { |  | ||||||
|                 #index_impls |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     // Combine the original struct with the generated implementations |     // Combine the original struct with the generated implementations | ||||||
|   | |||||||
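The nested-path branch above serializes the annotated field with serde_json and walks the dotted path, fanning out across arrays, so a single `#[index(path = "cpu.cpu_brand")]` can yield several index keys. Below is a standalone sketch of that traversal, written as a plain function instead of the quoted token stream; `collect_path_values` is an illustrative name, not an API of this crate:

```rust
use serde::Serialize;
use serde_json::Value;

/// Walk a dot-separated `path` through the JSON form of `root`,
/// descending into arrays element-wise, and return the leaf scalars
/// as strings. Mirrors what `#[index(path = "...")]` generates.
fn collect_path_values<T: Serialize>(root: &T, path: &str) -> Vec<String> {
    let json = serde_json::to_value(root).unwrap_or(Value::Null);
    let mut stack: Vec<&Value> = vec![&json];
    for seg in path.split('.') {
        let mut next: Vec<&Value> = Vec::new();
        for v in &stack {
            match v {
                Value::Array(arr) => {
                    // Fan out: look the segment up in every object element.
                    for e in arr {
                        if let Value::Object(map) = e {
                            if let Some(x) = map.get(seg) {
                                next.push(x);
                            }
                        }
                    }
                }
                Value::Object(map) => {
                    if let Some(x) = map.get(seg) {
                        next.push(x);
                    }
                }
                _ => {} // scalars cannot be descended into
            }
        }
        stack = next;
        if stack.is_empty() {
            break;
        }
    }
    stack
        .into_iter()
        .filter_map(|leaf| match leaf {
            // Strings are emitted without the surrounding JSON quotes.
            Value::String(s) => Some(s.clone()),
            Value::Null | Value::Array(_) | Value::Object(_) => None,
            other => Some(other.to_string()),
        })
        .collect()
}
```

For the DeviceInfo fixture in the tests below, `collect_path_values(&devices, "gpu.gpu_brand")` returns `["NVIDIA", "AMD"]`, which the generated code turns into two IndexKey entries, both named `gpu.gpu_brand`.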
| @@ -1,7 +1,38 @@ | |||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
| // Define the necessary structs and traits for testing | // Make the current crate visible as an extern crate named `heromodels_core` | ||||||
|  | extern crate self as heromodels_core; | ||||||
|  | extern crate serde_json; // ensure ::serde_json path resolves | ||||||
|  |  | ||||||
|  | // Mock the heromodels_core API at crate root (visible via the alias above) | ||||||
|  | #[derive(Debug, Clone, PartialEq, Eq)] | ||||||
|  | pub struct IndexKey { | ||||||
|  |     pub name: &'static str, | ||||||
|  |     pub value: String, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub trait Model: std::fmt::Debug + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static { | ||||||
|  |     fn db_prefix() -> &'static str | ||||||
|  |     where | ||||||
|  |         Self: Sized; | ||||||
|  |     fn get_id(&self) -> u32; | ||||||
|  |     fn base_data_mut(&mut self) -> &mut BaseModelData; | ||||||
|  |     fn db_keys(&self) -> Vec<IndexKey> { | ||||||
|  |         Vec::new() | ||||||
|  |     } | ||||||
|  |     fn indexed_fields() -> Vec<&'static str> { | ||||||
|  |         Vec::new() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub trait Index { | ||||||
|  |     type Model: Model; | ||||||
|  |     type Key: ToString + ?Sized; | ||||||
|  |     fn key() -> &'static str; | ||||||
|  |     fn field_name() -> &'static str; | ||||||
|  | } | ||||||
|  |  | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
| pub struct BaseModelData { | pub struct BaseModelData { | ||||||
|     pub id: u32, |     pub id: u32, | ||||||
| @@ -11,41 +42,18 @@ pub struct BaseModelData { | |||||||
| } | } | ||||||
|  |  | ||||||
| impl BaseModelData { | impl BaseModelData { | ||||||
|     pub fn new(id: u32) -> Self { |     pub fn new() -> Self { | ||||||
|         let now = 1000; // Mock timestamp |         let now = 1000; | ||||||
|         Self { |         Self { id: 0, created_at: now, modified_at: now, comments: Vec::new() } | ||||||
|             id, |  | ||||||
|             created_at: now, |  | ||||||
|             modified_at: now, |  | ||||||
|             comments: Vec::new(), |  | ||||||
|         } |  | ||||||
|     } |     } | ||||||
|  |     pub fn update_modified(&mut self) { self.modified_at += 1; } | ||||||
| } | } | ||||||
|  |  | ||||||
| #[derive(Debug, Clone, PartialEq, Eq)] | // Top-level field index tests | ||||||
| pub struct IndexKey { |  | ||||||
|     pub name: &'static str, |  | ||||||
|     pub value: String, |  | ||||||
| } |  | ||||||
|  |  | ||||||
| pub trait Model: std::fmt::Debug + Clone { |  | ||||||
|     fn db_prefix() -> &'static str; |  | ||||||
|     fn get_id(&self) -> u32; |  | ||||||
|     fn base_data_mut(&mut self) -> &mut BaseModelData; |  | ||||||
|     fn db_keys(&self) -> Vec<IndexKey>; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| pub trait Index { |  | ||||||
|     type Model: Model; |  | ||||||
|     type Key: ?Sized; |  | ||||||
|     fn key() -> &'static str; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| // Test struct using the model macro |  | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
| #[model] | #[model] | ||||||
| struct TestUser { | pub struct TestUser { | ||||||
|     base_data: BaseModelData, |     base_data: heromodels_core::BaseModelData, | ||||||
|  |  | ||||||
|     #[index] |     #[index] | ||||||
|     username: String, |     username: String, | ||||||
| @@ -54,25 +62,12 @@ struct TestUser { | |||||||
|     is_active: bool, |     is_active: bool, | ||||||
| } | } | ||||||
|  |  | ||||||
| // Test struct with custom index name |  | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize)] |  | ||||||
| #[model] |  | ||||||
| struct TestUserWithCustomIndex { |  | ||||||
|     base_data: BaseModelData, |  | ||||||
|  |  | ||||||
|     #[index(name = "custom_username")] |  | ||||||
|     username: String, |  | ||||||
|  |  | ||||||
|     #[index] |  | ||||||
|     is_active: bool, |  | ||||||
| } |  | ||||||
|  |  | ||||||
| #[test] | #[test] | ||||||
| fn test_basic_model() { | fn test_basic_model() { | ||||||
|     assert_eq!(TestUser::db_prefix(), "test_user"); |     assert_eq!(TestUser::db_prefix(), "test_user"); | ||||||
|  |  | ||||||
|     let user = TestUser { |     let user = TestUser { | ||||||
|         base_data: BaseModelData::new(1), |         base_data: heromodels_core::BaseModelData::new(), | ||||||
|         username: "test".to_string(), |         username: "test".to_string(), | ||||||
|         is_active: true, |         is_active: true, | ||||||
|     }; |     }; | ||||||
| @@ -85,22 +80,47 @@ fn test_basic_model() { | |||||||
|     assert_eq!(keys[1].value, "true"); |     assert_eq!(keys[1].value, "true"); | ||||||
| } | } | ||||||
|  |  | ||||||
|  | // Nested path index tests (including vector traversal) | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, Default)] | ||||||
|  | struct GPU { gpu_brand: String } | ||||||
|  |  | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, Default)] | ||||||
|  | struct CPU { cpu_brand: String } | ||||||
|  |  | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, Default)] | ||||||
|  | struct DeviceInfo { vendor: String, cpu: Vec<CPU>, gpu: Vec<GPU> } | ||||||
|  |  | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize)] | ||||||
|  | #[model] | ||||||
|  | pub struct NodeLike { | ||||||
|  |     base_data: heromodels_core::BaseModelData, | ||||||
|  |  | ||||||
|  |     #[index(path = "vendor")] | ||||||
|  |     #[index(path = "cpu.cpu_brand")] | ||||||
|  |     #[index(path = "gpu.gpu_brand")] | ||||||
|  |     devices: DeviceInfo, | ||||||
|  | } | ||||||
|  |  | ||||||
| #[test] | #[test] | ||||||
| fn test_custom_index_name() { | fn test_nested_indexes() { | ||||||
|     let user = TestUserWithCustomIndex { |     let n = NodeLike { | ||||||
|         base_data: BaseModelData::new(1), |         base_data: heromodels_core::BaseModelData::new(), | ||||||
|         username: "test".to_string(), |         devices: DeviceInfo { | ||||||
|         is_active: true, |             vendor: "SuperVendor".to_string(), | ||||||
|  |             cpu: vec![CPU { cpu_brand: "Intel".into() }, CPU { cpu_brand: "AMD".into() }], | ||||||
|  |             gpu: vec![GPU { gpu_brand: "NVIDIA".into() }, GPU { gpu_brand: "AMD".into() }], | ||||||
|  |         }, | ||||||
|     }; |     }; | ||||||
|  |  | ||||||
|     // Check that the Username struct uses the custom index name |     let mut keys = n.db_keys(); | ||||||
|     assert_eq!(Username::key(), "custom_username"); |     // Sort for deterministic assertions | ||||||
|  |     keys.sort_by(|a,b| a.name.cmp(b.name).then(a.value.cmp(&b.value))); | ||||||
|  |  | ||||||
|     // Check that the db_keys method returns the correct keys |     // Expect 1 (vendor) + 2 (cpu brands) + 2 (gpu brands) = 5 keys | ||||||
|     let keys = user.db_keys(); |     assert_eq!(keys.len(), 5); | ||||||
|     assert_eq!(keys.len(), 2); |     assert!(keys.iter().any(|k| k.name == "vendor" && k.value == "SuperVendor")); | ||||||
|     assert_eq!(keys[0].name, "custom_username"); |     assert!(keys.iter().any(|k| k.name == "cpu.cpu_brand" && k.value == "Intel")); | ||||||
|     assert_eq!(keys[0].value, "test"); |     assert!(keys.iter().any(|k| k.name == "cpu.cpu_brand" && k.value == "AMD")); | ||||||
|     assert_eq!(keys[1].name, "is_active"); |     assert!(keys.iter().any(|k| k.name == "gpu.gpu_brand" && k.value == "NVIDIA")); | ||||||
|     assert_eq!(keys[1].value, "true"); |     assert!(keys.iter().any(|k| k.name == "gpu.gpu_brand" && k.value == "AMD")); | ||||||
| } | } | ||||||
|   | |||||||
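Outside the test harness (which aliases the test crate itself as `heromodels_core`), a model in the workspace would combine the two attribute forms as sketched below. Struct and field names are illustrative, not taken from the PR, and the sketch assumes the real heromodels_core exposes BaseModelData and the Model/Index traits the way the mock above does. Key names follow the macro's rule: the field name for a bare `#[index]`, the dotted path for `#[index(path = "...")]`.

```rust
use heromodels_derive::model;
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Disk {
    pub serial: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[model]
pub struct Machine {
    base_data: heromodels_core::BaseModelData,

    // Top-level index: one IndexKey named "hostname" per instance.
    #[index]
    hostname: String,

    // Nested index: the field is serialized to JSON and the path is
    // walked per element, so every Disk contributes an IndexKey named
    // "serial" with that disk's serial number as the value.
    #[index(path = "serial")]
    disks: Vec<Disk>,
}
```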
							
								
								
									
heromodels/Cargo.lock (generated, 1637 changed lines): file diff suppressed because it is too large.
							| @@ -10,11 +10,11 @@ serde = { version = "1.0", features = ["derive"] } | |||||||
| serde_json = "1.0" | serde_json = "1.0" | ||||||
| bincode = { version = "2", features = ["serde"] } | bincode = { version = "2", features = ["serde"] } | ||||||
| chrono = { version = "0.4", features = ["serde"] } | chrono = { version = "0.4", features = ["serde"] } | ||||||
| ourdb = { path = "../../herolib_rust/packages/data/ourdb" } | ourdb = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "ourdb" } | ||||||
| tst = { path = "../../herolib_rust/packages/data/tst" } | tst = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "tst" } | ||||||
| heromodels-derive = { path = "../heromodels-derive" } | heromodels-derive = { path = "../heromodels-derive" } | ||||||
| heromodels_core = { path = "../heromodels_core" } | heromodels_core = { path = "../heromodels_core" } | ||||||
| rhailib-macros = { path = "../../herolib_rust/rhailib/src/macros" } | rhailib-macros = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "rhailib-macros" } | ||||||
| rhai = { version = "1.21.0", features = [ | rhai = { version = "1.21.0", features = [ | ||||||
|   "std", |   "std", | ||||||
|   "sync", |   "sync", | ||||||
| @@ -53,11 +53,19 @@ path = "examples/finance_example/main.rs" | |||||||
| name = "flow_example" | name = "flow_example" | ||||||
| path = "examples/flow_example.rs" | path = "examples/flow_example.rs" | ||||||
|  |  | ||||||
| [[example]] | # [[example]] | ||||||
| name = "biz_rhai" | # name = "biz_rhai" | ||||||
| path = "examples/biz_rhai/example.rs" | # path = "examples/biz_rhai/example.rs" | ||||||
| required-features = ["rhai"] | # required-features = ["rhai"] | ||||||
|  |  | ||||||
| [[example]] | [[example]] | ||||||
| name = "postgres_model_example" | name = "postgres_model_example" | ||||||
| path = "examples/postgres_example/example.rs" | path = "examples/postgres_example/example.rs" | ||||||
|  |  | ||||||
|  | [[example]] | ||||||
|  | name = "heroledger_example" | ||||||
|  | path = "examples/heroledger_example/example.rs" | ||||||
|  |  | ||||||
|  | [[example]] | ||||||
|  | name = "grid4_example" | ||||||
|  | path = "examples/grid4_example/example.rs" | ||||||
|   | |||||||
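The three dependencies switched from local paths to git above are resolved in the lockfile at commit aa0248ef17cb0117bb69f1d9f278f995bb417f16. If the build should not silently track the repository's default branch, the same pin can be stated in the manifest with Cargo's standard `rev` key; a sketch, using the commit the lockfile already records:

```toml
[dependencies]
ourdb = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "ourdb", rev = "aa0248ef17cb0117bb69f1d9f278f995bb417f16" }
tst = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "tst", rev = "aa0248ef17cb0117bb69f1d9f278f995bb417f16" }
rhailib-macros = { git = "https://git.ourworld.tf/herocode/herolib_rust", package = "rhailib-macros", rev = "aa0248ef17cb0117bb69f1d9f278f995bb417f16" }
```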
| @@ -1,10 +1,25 @@ | |||||||
| use chrono::{Duration, Utc}; | use chrono::{Duration, Utc, NaiveDateTime}; | ||||||
| use heromodels::db::{Collection, Db}; | use heromodels::db::{Collection, Db}; | ||||||
| use heromodels::models::User; | use heromodels::models::User; | ||||||
| use heromodels::models::calendar::{AttendanceStatus, Attendee, Calendar, Event, EventStatus}; | use heromodels::models::calendar::{AttendanceStatus, Attendee, Calendar, Event, EventStatus}; | ||||||
| use heromodels_core::Model; | use heromodels_core::Model; | ||||||
|  |  | ||||||
| fn main() { | fn main() { | ||||||
|  |     // Helper to format i64 timestamps | ||||||
|  |     let fmt_time = |ts: i64| -> String { | ||||||
|  |         let ndt = NaiveDateTime::from_timestamp_opt(ts, 0) | ||||||
|  |             .unwrap_or(NaiveDateTime::from_timestamp_opt(0, 0).unwrap()); | ||||||
|  |         chrono::DateTime::<Utc>::from_utc(ndt, Utc) | ||||||
|  |             .format("%Y-%m-%d %H:%M") | ||||||
|  |             .to_string() | ||||||
|  |     }; | ||||||
|  |     let fmt_date = |ts: i64| -> String { | ||||||
|  |         let ndt = NaiveDateTime::from_timestamp_opt(ts, 0) | ||||||
|  |             .unwrap_or(NaiveDateTime::from_timestamp_opt(0, 0).unwrap()); | ||||||
|  |         chrono::DateTime::<Utc>::from_utc(ndt, Utc) | ||||||
|  |             .format("%Y-%m-%d") | ||||||
|  |             .to_string() | ||||||
|  |     }; | ||||||
|     // Create a new DB instance, reset before every run |     // Create a new DB instance, reset before every run | ||||||
|     let db_path = "/tmp/ourdb_calendar_example"; |     let db_path = "/tmp/ourdb_calendar_example"; | ||||||
|     let db = heromodels::db::hero::OurDB::new(db_path, true).expect("Can create DB"); |     let db = heromodels::db::hero::OurDB::new(db_path, true).expect("Can create DB"); | ||||||
| @@ -47,50 +62,21 @@ fn main() { | |||||||
|     println!("- User 2 (ID: {}): {}", user2_id, stored_user2.full_name); |     println!("- User 2 (ID: {}): {}", user2_id, stored_user2.full_name); | ||||||
|     println!("- User 3 (ID: {}): {}", user3_id, stored_user3.full_name); |     println!("- User 3 (ID: {}): {}", user3_id, stored_user3.full_name); | ||||||
|  |  | ||||||
|     // --- Create Attendees --- |     // --- Create Attendees (embedded in events, not stored separately) --- | ||||||
|     println!("\n--- Creating Attendees ---"); |     println!("\n--- Creating Attendees ---"); | ||||||
|     let attendee1 = Attendee::new(user1_id).status(AttendanceStatus::Accepted); |     let attendee1 = Attendee::new(user1_id).status(AttendanceStatus::Accepted); | ||||||
|     let attendee2 = Attendee::new(user2_id).status(AttendanceStatus::Tentative); |     let attendee2 = Attendee::new(user2_id).status(AttendanceStatus::Tentative); | ||||||
|     let attendee3 = Attendee::new(user3_id); // Default NoResponse |     let attendee3 = Attendee::new(user3_id); // Default NoResponse | ||||||
|  |  | ||||||
|     // Store attendees in database and get their IDs |  | ||||||
|     let attendee_collection = db |  | ||||||
|         .collection::<Attendee>() |  | ||||||
|         .expect("can open attendee collection"); |  | ||||||
|  |  | ||||||
|     let (attendee1_id, stored_attendee1) = attendee_collection |  | ||||||
|         .set(&attendee1) |  | ||||||
|         .expect("can set attendee1"); |  | ||||||
|     let (attendee2_id, stored_attendee2) = attendee_collection |  | ||||||
|         .set(&attendee2) |  | ||||||
|         .expect("can set attendee2"); |  | ||||||
|     let (attendee3_id, stored_attendee3) = attendee_collection |  | ||||||
|         .set(&attendee3) |  | ||||||
|         .expect("can set attendee3"); |  | ||||||
|  |  | ||||||
|     println!("Created attendees:"); |  | ||||||
|     println!( |  | ||||||
|         "- Attendee 1 (ID: {}): Contact ID {}, Status: {:?}", |  | ||||||
|         attendee1_id, stored_attendee1.contact_id, stored_attendee1.status |  | ||||||
|     ); |  | ||||||
|     println!( |  | ||||||
|         "- Attendee 2 (ID: {}): Contact ID {}, Status: {:?}", |  | ||||||
|         attendee2_id, stored_attendee2.contact_id, stored_attendee2.status |  | ||||||
|     ); |  | ||||||
|     println!( |  | ||||||
|         "- Attendee 3 (ID: {}): Contact ID {}, Status: {:?}", |  | ||||||
|         attendee3_id, stored_attendee3.contact_id, stored_attendee3.status |  | ||||||
|     ); |  | ||||||
|  |  | ||||||
|     // --- Create Events with Attendees --- |     // --- Create Events with Attendees --- | ||||||
|     println!("\n--- Creating Events with Enhanced Features ---"); |     println!("\n--- Creating Events with Enhanced Features ---"); | ||||||
|     let now = Utc::now(); |     let now = Utc::now(); | ||||||
|  |     let event1_start = (now + Duration::hours(1)).timestamp(); | ||||||
|  |     let event1_end = (now + Duration::hours(2)).timestamp(); | ||||||
|  |  | ||||||
|     let event1 = Event::new( |     let event1 = Event::new() | ||||||
|         "Team Meeting", |     .title("Team Meeting") | ||||||
|         now + Duration::hours(1), |     .reschedule(event1_start, event1_end) | ||||||
|         now + Duration::hours(2), |  | ||||||
|     ) |  | ||||||
|     .description("Weekly sync-up meeting to discuss project progress.") |     .description("Weekly sync-up meeting to discuss project progress.") | ||||||
|     .location("Conference Room A") |     .location("Conference Room A") | ||||||
|     .color("#FF5722") // Red-orange color |     .color("#FF5722") // Red-orange color | ||||||
| @@ -99,14 +85,14 @@ fn main() { | |||||||
|     .category("Work") |     .category("Work") | ||||||
|     .reminder_minutes(15) |     .reminder_minutes(15) | ||||||
|     .timezone("UTC") |     .timezone("UTC") | ||||||
|     .add_attendee(attendee1_id) |     .add_attendee(attendee1.clone()) | ||||||
|     .add_attendee(attendee2_id); |     .add_attendee(attendee2.clone()); | ||||||
|  |  | ||||||
|     let event2 = Event::new( |     let event2_start = (now + Duration::days(1)).timestamp(); | ||||||
|         "Project Brainstorm", |     let event2_end = (now + Duration::days(1) + Duration::minutes(90)).timestamp(); | ||||||
|         now + Duration::days(1), |     let event2 = Event::new() | ||||||
|         now + Duration::days(1) + Duration::minutes(90), |     .title("Project Brainstorm") | ||||||
|     ) |     .reschedule(event2_start, event2_end) | ||||||
|     .description("Brainstorming session for new project features.") |     .description("Brainstorming session for new project features.") | ||||||
|     .location("Innovation Lab") |     .location("Innovation Lab") | ||||||
|     .color("#4CAF50") // Green color |     .color("#4CAF50") // Green color | ||||||
| @@ -115,28 +101,28 @@ fn main() { | |||||||
|     .category("Planning") |     .category("Planning") | ||||||
|     .reminder_minutes(30) |     .reminder_minutes(30) | ||||||
|     .is_recurring(true) |     .is_recurring(true) | ||||||
|     .add_attendee(attendee1_id) |     .add_attendee(attendee1.clone()) | ||||||
|     .add_attendee(attendee3_id); |     .add_attendee(attendee3.clone()); | ||||||
|  |  | ||||||
|     let event3 = Event::new( |     let event3_start = (now + Duration::days(2)).timestamp(); | ||||||
|         "Client Call", |     let event3_end = (now + Duration::days(2) + Duration::hours(1)).timestamp(); | ||||||
|         now + Duration::days(2), |     let event3 = Event::new() | ||||||
|         now + Duration::days(2) + Duration::hours(1), |     .title("Client Call") | ||||||
|     ) |     .reschedule(event3_start, event3_end) | ||||||
|     .description("Quarterly review with key client.") |     .description("Quarterly review with key client.") | ||||||
|     .color("#9C27B0") // Purple color |     .color("#9C27B0") // Purple color | ||||||
|     .created_by(user3_id) |     .created_by(user3_id) | ||||||
|     .status(EventStatus::Published) |     .status(EventStatus::Published) | ||||||
|     .category("Client") |     .category("Client") | ||||||
|     .reminder_minutes(60) |     .reminder_minutes(60) | ||||||
|     .add_attendee(attendee2_id); |     .add_attendee(attendee2.clone()); | ||||||
|  |  | ||||||
|     // Create an all-day event |     // Create an all-day event | ||||||
|     let event4 = Event::new( |     let event4_start = (now + Duration::days(7)).timestamp(); | ||||||
|         "Company Holiday", |     let event4_end = (now + Duration::days(7) + Duration::hours(24)).timestamp(); | ||||||
|         now + Duration::days(7), |     let event4 = Event::new() | ||||||
|         now + Duration::days(7) + Duration::hours(24), |     .title("Company Holiday") | ||||||
|     ) |     .reschedule(event4_start, event4_end) | ||||||
|     .description("National holiday - office closed.") |     .description("National holiday - office closed.") | ||||||
|     .color("#FFC107") // Amber color |     .color("#FFC107") // Amber color | ||||||
|     .all_day(true) |     .all_day(true) | ||||||
| @@ -148,7 +134,7 @@ fn main() { | |||||||
|     println!( |     println!( | ||||||
|         "- Event 1: '{}' at {} with {} attendees", |         "- Event 1: '{}' at {} with {} attendees", | ||||||
|         event1.title, |         event1.title, | ||||||
|         event1.start_time.format("%Y-%m-%d %H:%M"), |         fmt_time(event1.start_time), | ||||||
|         event1.attendees.len() |         event1.attendees.len() | ||||||
|     ); |     ); | ||||||
|     println!( |     println!( | ||||||
| @@ -174,12 +160,19 @@ fn main() { | |||||||
|     ); |     ); | ||||||
|     println!("  All-day: {}", event1.all_day); |     println!("  All-day: {}", event1.all_day); | ||||||
|     println!("  Recurring: {}", event1.is_recurring); |     println!("  Recurring: {}", event1.is_recurring); | ||||||
|     println!("  Attendee IDs: {:?}", event1.attendees); |     println!( | ||||||
|  |         "  Attendee IDs: {:?}", | ||||||
|  |         event1 | ||||||
|  |             .attendees | ||||||
|  |             .iter() | ||||||
|  |             .map(|a| a.contact_id) | ||||||
|  |             .collect::<Vec<u32>>() | ||||||
|  |     ); | ||||||
|  |  | ||||||
|     println!( |     println!( | ||||||
|         "- Event 2: '{}' at {} with {} attendees", |         "- Event 2: '{}' at {} with {} attendees", | ||||||
|         event2.title, |         event2.title, | ||||||
|         event2.start_time.format("%Y-%m-%d %H:%M"), |         fmt_time(event2.start_time), | ||||||
|         event2.attendees.len() |         event2.attendees.len() | ||||||
|     ); |     ); | ||||||
|     println!( |     println!( | ||||||
| @@ -205,12 +198,19 @@ fn main() { | |||||||
|     ); |     ); | ||||||
|     println!("  All-day: {}", event2.all_day); |     println!("  All-day: {}", event2.all_day); | ||||||
|     println!("  Recurring: {}", event2.is_recurring); |     println!("  Recurring: {}", event2.is_recurring); | ||||||
|     println!("  Attendee IDs: {:?}", event2.attendees); |     println!( | ||||||
|  |         "  Attendee IDs: {:?}", | ||||||
|  |         event2 | ||||||
|  |             .attendees | ||||||
|  |             .iter() | ||||||
|  |             .map(|a| a.contact_id) | ||||||
|  |             .collect::<Vec<u32>>() | ||||||
|  |     ); | ||||||
|  |  | ||||||
|     println!( |     println!( | ||||||
|         "- Event 3: '{}' at {} with {} attendees", |         "- Event 3: '{}' at {} with {} attendees", | ||||||
|         event3.title, |         event3.title, | ||||||
|         event3.start_time.format("%Y-%m-%d %H:%M"), |         fmt_time(event3.start_time), | ||||||
|         event3.attendees.len() |         event3.attendees.len() | ||||||
|     ); |     ); | ||||||
|     println!( |     println!( | ||||||
| @@ -236,12 +236,19 @@ fn main() { | |||||||
|     ); |     ); | ||||||
|     println!("  All-day: {}", event3.all_day); |     println!("  All-day: {}", event3.all_day); | ||||||
|     println!("  Recurring: {}", event3.is_recurring); |     println!("  Recurring: {}", event3.is_recurring); | ||||||
|     println!("  Attendee IDs: {:?}", event3.attendees); |     println!( | ||||||
|  |         "  Attendee IDs: {:?}", | ||||||
|  |         event3 | ||||||
|  |             .attendees | ||||||
|  |             .iter() | ||||||
|  |             .map(|a| a.contact_id) | ||||||
|  |             .collect::<Vec<u32>>() | ||||||
|  |     ); | ||||||
|  |  | ||||||
|     println!( |     println!( | ||||||
|         "- Event 4: '{}' at {} (All-day: {})", |         "- Event 4: '{}' at {} (All-day: {})", | ||||||
|         event4.title, |         event4.title, | ||||||
|         event4.start_time.format("%Y-%m-%d"), |         fmt_date(event4.start_time), | ||||||
|         event4.all_day |         event4.all_day | ||||||
|     ); |     ); | ||||||
|     println!( |     println!( | ||||||
| @@ -262,25 +269,37 @@ fn main() { | |||||||
|     let new_start = now + Duration::hours(2); |     let new_start = now + Duration::hours(2); | ||||||
|     let new_end = now + Duration::hours(3); |     let new_end = now + Duration::hours(3); | ||||||
|     let mut updated_event1 = event1.clone(); |     let mut updated_event1 = event1.clone(); | ||||||
|     updated_event1 = updated_event1.reschedule(new_start, new_end); |     updated_event1 = updated_event1.reschedule(new_start.timestamp(), new_end.timestamp()); | ||||||
|     println!( |     println!( | ||||||
|         "Rescheduled '{}' to {}", |         "Rescheduled '{}' to {}", | ||||||
|         updated_event1.title, |         updated_event1.title, | ||||||
|         new_start.format("%Y-%m-%d %H:%M") |         fmt_time(new_start.timestamp()) | ||||||
|     ); |     ); | ||||||
|  |  | ||||||
|     // Remove an attendee |     // Remove an attendee | ||||||
|     updated_event1 = updated_event1.remove_attendee(attendee1_id); |     updated_event1 = updated_event1.remove_attendee(user1_id); | ||||||
|     println!( |     println!( | ||||||
|         "Removed attendee {} from '{}'. Remaining attendee IDs: {:?}", |         "Removed attendee {} from '{}'. Remaining attendee IDs: {:?}", | ||||||
|         attendee1_id, updated_event1.title, updated_event1.attendees |         user1_id, | ||||||
|  |         updated_event1.title, | ||||||
|  |         updated_event1 | ||||||
|  |             .attendees | ||||||
|  |             .iter() | ||||||
|  |             .map(|a| a.contact_id) | ||||||
|  |             .collect::<Vec<u32>>() | ||||||
|     ); |     ); | ||||||
|  |  | ||||||
|     // Add a new attendee |     // Add a new attendee | ||||||
|     updated_event1 = updated_event1.add_attendee(attendee3_id); |     updated_event1 = updated_event1.add_attendee(attendee3.clone()); | ||||||
|     println!( |     println!( | ||||||
|         "Added attendee {} to '{}'. Current attendee IDs: {:?}", |         "Added attendee {} to '{}'. Current attendee IDs: {:?}", | ||||||
|         attendee3_id, updated_event1.title, updated_event1.attendees |         user3_id, | ||||||
|  |         updated_event1.title, | ||||||
|  |         updated_event1 | ||||||
|  |             .attendees | ||||||
|  |             .iter() | ||||||
|  |             .map(|a| a.contact_id) | ||||||
|  |             .collect::<Vec<u32>>() | ||||||
|     ); |     ); | ||||||
|  |  | ||||||
|     // --- Demonstrate Event Status Changes --- |     // --- Demonstrate Event Status Changes --- | ||||||
| @@ -300,11 +319,11 @@ fn main() { | |||||||
|     println!("Cancelled event: '{}'", cancelled_event.title); |     println!("Cancelled event: '{}'", cancelled_event.title); | ||||||
|  |  | ||||||
|     // Update event with new features |     // Update event with new features | ||||||
|     let enhanced_event = Event::new( |     let enhanced_start = (now + Duration::days(5)).timestamp(); | ||||||
|         "Enhanced Meeting", |     let enhanced_end = (now + Duration::days(5) + Duration::hours(2)).timestamp(); | ||||||
|         now + Duration::days(5), |     let enhanced_event = Event::new() | ||||||
|         now + Duration::days(5) + Duration::hours(2), |     .title("Enhanced Meeting") | ||||||
|     ) |     .reschedule(enhanced_start, enhanced_end) | ||||||
|     .description("Meeting with all new features demonstrated.") |     .description("Meeting with all new features demonstrated.") | ||||||
|     .location("Virtual - Zoom") |     .location("Virtual - Zoom") | ||||||
|     .color("#673AB7") // Deep purple |     .color("#673AB7") // Deep purple | ||||||
| @@ -314,9 +333,9 @@ fn main() { | |||||||
|     .reminder_minutes(45) |     .reminder_minutes(45) | ||||||
|     .timezone("America/New_York") |     .timezone("America/New_York") | ||||||
|     .is_recurring(true) |     .is_recurring(true) | ||||||
|     .add_attendee(attendee1_id) |     .add_attendee(attendee1) | ||||||
|     .add_attendee(attendee2_id) |     .add_attendee(attendee2) | ||||||
|     .add_attendee(attendee3_id); |     .add_attendee(attendee3); | ||||||
|  |  | ||||||
|     println!("Created enhanced event with all features:"); |     println!("Created enhanced event with all features:"); | ||||||
|     println!("  Title: {}", enhanced_event.title); |     println!("  Title: {}", enhanced_event.title); | ||||||
| @@ -485,13 +504,13 @@ fn main() { | |||||||
|     println!("\n--- Modifying Calendar ---"); |     println!("\n--- Modifying Calendar ---"); | ||||||
|  |  | ||||||
|     // Create and store a new event |     // Create and store a new event | ||||||
|     let new_event = Event::new( |     let ne_start = (now + Duration::days(3)).timestamp(); | ||||||
|         "1-on-1 Meeting", |     let ne_end = (now + Duration::days(3) + Duration::minutes(30)).timestamp(); | ||||||
|         now + Duration::days(3), |     let new_event = Event::new() | ||||||
|         now + Duration::days(3) + Duration::minutes(30), |         .title("1-on-1 Meeting") | ||||||
|     ) |         .reschedule(ne_start, ne_end) | ||||||
|     .description("One-on-one meeting with team member.") |         .description("One-on-one meeting with team member.") | ||||||
|     .location("Office"); |         .location("Office"); | ||||||
|  |  | ||||||
|     let (new_event_id, _stored_new_event) = |     let (new_event_id, _stored_new_event) = | ||||||
|         event_collection.set(&new_event).expect("can set new event"); |         event_collection.set(&new_event).expect("can set new event"); | ||||||
| @@ -565,7 +584,7 @@ fn main() { | |||||||
|             "- Event ID: {}, Title: '{}', Start: {}, Attendees: {}", |             "- Event ID: {}, Title: '{}', Start: {}, Attendees: {}", | ||||||
|             event.get_id(), |             event.get_id(), | ||||||
|             event.title, |             event.title, | ||||||
|             event.start_time.format("%Y-%m-%d %H:%M"), |             fmt_time(event.start_time), | ||||||
|             event.attendees.len() |             event.attendees.len() | ||||||
|         ); |         ); | ||||||
|     } |     } | ||||||
| @@ -583,22 +602,16 @@ fn main() { | |||||||
|             retrieved_event1.attendees.len() |             retrieved_event1.attendees.len() | ||||||
|         ); |         ); | ||||||
|  |  | ||||||
|         // Look up attendee details for each attendee ID |         // Look up attendee details directly from embedded attendees | ||||||
|         for &attendee_id in &retrieved_event1.attendees { |         for attendee in &retrieved_event1.attendees { | ||||||
|             if let Some(attendee) = attendee_collection |             if let Some(user) = user_collection | ||||||
|                 .get_by_id(attendee_id) |                 .get_by_id(attendee.contact_id) | ||||||
|                 .expect("can try to get attendee") |                 .expect("can try to get user") | ||||||
|             { |             { | ||||||
|                 // Look up user details for the attendee's contact_id |                 println!( | ||||||
|                 if let Some(user) = user_collection |                     "  - User {}: {} (Status: {:?})", | ||||||
|                     .get_by_id(attendee.contact_id) |                     attendee.contact_id, user.full_name, attendee.status | ||||||
|                     .expect("can try to get user") |                 ); | ||||||
|                 { |  | ||||||
|                     println!( |  | ||||||
|                         "  - Attendee ID {}: {} (User: {}, Status: {:?})", |  | ||||||
|                         attendee_id, user.full_name, attendee.contact_id, attendee.status |  | ||||||
|                     ); |  | ||||||
|                 } |  | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|   | |||||||
| @@ -1,26 +1,26 @@ | |||||||
| use circles_launcher::{new_launcher}; | use circles_launcher::new_launcher; | ||||||
| use heromodels::models::circle::circle::{new_circle}; | use heromodels::models::circle::circle::new_circle; | ||||||
| use secp256k1::{Secp256k1, SecretKey, PublicKey}; |  | ||||||
| use rand::rngs::OsRng; | use rand::rngs::OsRng; | ||||||
|  | use secp256k1::{PublicKey, Secp256k1, SecretKey}; | ||||||
|  |  | ||||||
| #[tokio::main] | #[tokio::main] | ||||||
| async fn main() -> Result<(), Box<dyn std::error::Error>> {     | async fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||||
|     // Generate valid secp256k1 keypairs for testing |     // Generate valid secp256k1 keypairs for testing | ||||||
|     let secp = Secp256k1::new(); |     let secp = Secp256k1::new(); | ||||||
|     let mut rng = OsRng; |     let mut rng = OsRng; | ||||||
|      |  | ||||||
|     let secret_key1 = SecretKey::new(&mut rng); |     let secret_key1 = SecretKey::new(&mut rng); | ||||||
|     let public_key1 = PublicKey::from_secret_key(&secp, &secret_key1); |     let public_key1 = PublicKey::from_secret_key(&secp, &secret_key1); | ||||||
|     let pk1_hex = hex::encode(public_key1.serialize()); |     let pk1_hex = hex::encode(public_key1.serialize()); | ||||||
|      |  | ||||||
|     let secret_key2 = SecretKey::new(&mut rng); |     let secret_key2 = SecretKey::new(&mut rng); | ||||||
|     let public_key2 = PublicKey::from_secret_key(&secp, &secret_key2); |     let public_key2 = PublicKey::from_secret_key(&secp, &secret_key2); | ||||||
|     let pk2_hex = hex::encode(public_key2.serialize()); |     let pk2_hex = hex::encode(public_key2.serialize()); | ||||||
|      |  | ||||||
|     let secret_key3 = SecretKey::new(&mut rng); |     let secret_key3 = SecretKey::new(&mut rng); | ||||||
|     let public_key3 = PublicKey::from_secret_key(&secp, &secret_key3); |     let public_key3 = PublicKey::from_secret_key(&secp, &secret_key3); | ||||||
|     let pk3_hex = hex::encode(public_key3.serialize()); |     let pk3_hex = hex::encode(public_key3.serialize()); | ||||||
|      |  | ||||||
|     println!("Generated test public keys:"); |     println!("Generated test public keys:"); | ||||||
|     println!("  PK1: {}", pk1_hex); |     println!("  PK1: {}", pk1_hex); | ||||||
|     println!("  PK2: {}", pk2_hex); |     println!("  PK2: {}", pk2_hex); | ||||||
| @@ -36,4 +36,4 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { | |||||||
|         .save(); |         .save(); | ||||||
|  |  | ||||||
|     Ok(()) |     Ok(()) | ||||||
| } | } | ||||||
|   | |||||||
							
								
								
									
199 heromodels/examples/grid4_bid_example.rs Normal file
							| @@ -0,0 +1,199 @@ | |||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::{Bid, BidStatus, BillingPeriod}; | ||||||
|  | use heromodels::models::grid4::bid::bid_index::customer_id; | ||||||
|  | use heromodels_core::Model; | ||||||
|  |  | ||||||
|  | // Helper function to print bid details | ||||||
|  | fn print_bid_details(bid: &Bid) { | ||||||
|  |     println!("\n--- Bid Details ---"); | ||||||
|  |     println!("ID: {}", bid.get_id()); | ||||||
|  |     println!("Customer ID: {}", bid.customer_id); | ||||||
|  |     println!("Compute Slices: {}", bid.compute_slices_nr); | ||||||
|  |     println!("Compute Slice Price: ${:.2}", bid.compute_slice_price); | ||||||
|  |     println!("Storage Slices: {}", bid.storage_slices_nr); | ||||||
|  |     println!("Storage Slice Price: ${:.2}", bid.storage_slice_price); | ||||||
|  |     println!("Status: {:?}", bid.status); | ||||||
|  |     println!("Obligation: {}", bid.obligation); | ||||||
|  |     println!("Start Date: {}", bid.start_date); | ||||||
|  |     println!("End Date: {}", bid.end_date); | ||||||
|  |     println!("Billing Period: {:?}", bid.billing_period); | ||||||
|  |     println!("Signature User: {}", bid.signature_user); | ||||||
|  |     println!("Created At: {}", bid.base_data.created_at); | ||||||
|  |     println!("Modified At: {}", bid.base_data.modified_at); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | fn main() { | ||||||
|  |     // Create a new DB instance in /tmp/grid4_db, and reset before every run | ||||||
|  |     let db = heromodels::db::hero::OurDB::new("/tmp/grid4_db", true).expect("Can create DB"); | ||||||
|  |  | ||||||
|  |     println!("Grid4 Bid Models - Basic Usage Example"); | ||||||
|  |     println!("====================================="); | ||||||
|  |  | ||||||
|  |     // Create bids with different configurations | ||||||
|  |  | ||||||
|  |     // Bid 1 - Small compute request | ||||||
|  |     let bid1 = Bid::new() | ||||||
|  |         .customer_id(101) | ||||||
|  |         .compute_slices_nr(4) | ||||||
|  |         .compute_slice_price(0.05) | ||||||
|  |         .storage_slices_nr(10) | ||||||
|  |         .storage_slice_price(0.02) | ||||||
|  |         .status(BidStatus::Pending) | ||||||
|  |         .obligation(false) | ||||||
|  |         .start_date(1640995200) // 2022-01-01 | ||||||
|  |         .end_date(1672531200)   // 2023-01-01 | ||||||
|  |         .billing_period(BillingPeriod::Monthly) | ||||||
|  |         .signature_user("sig_user_101_abc123".to_string()); | ||||||
|  |  | ||||||
|  |     // Bid 2 - Large compute request with obligation | ||||||
|  |     let bid2 = Bid::new() | ||||||
|  |         .customer_id(102) | ||||||
|  |         .compute_slices_nr(16) | ||||||
|  |         .compute_slice_price(0.04) | ||||||
|  |         .storage_slices_nr(50) | ||||||
|  |         .storage_slice_price(0.015) | ||||||
|  |         .status(BidStatus::Confirmed) | ||||||
|  |         .obligation(true) | ||||||
|  |         .start_date(1640995200) | ||||||
|  |         .end_date(1704067200)   // 2024-01-01 | ||||||
|  |         .billing_period(BillingPeriod::Yearly) | ||||||
|  |         .signature_user("sig_user_102_def456".to_string()); | ||||||
|  |  | ||||||
|  |     // Bid 3 - Storage-heavy request | ||||||
|  |     let bid3 = Bid::new() | ||||||
|  |         .customer_id(103) | ||||||
|  |         .compute_slices_nr(2) | ||||||
|  |         .compute_slice_price(0.06) | ||||||
|  |         .storage_slices_nr(100) | ||||||
|  |         .storage_slice_price(0.01) | ||||||
|  |         .status(BidStatus::Assigned) | ||||||
|  |         .obligation(true) | ||||||
|  |         .start_date(1640995200) | ||||||
|  |         .end_date(1672531200) | ||||||
|  |         .billing_period(BillingPeriod::Hourly) | ||||||
|  |         .signature_user("sig_user_103_ghi789".to_string()); | ||||||
|  |  | ||||||
|  |     // Bid 4 - Cancelled bid | ||||||
|  |     let bid4 = Bid::new() | ||||||
|  |         .customer_id(104) | ||||||
|  |         .compute_slices_nr(8) | ||||||
|  |         .compute_slice_price(0.055) | ||||||
|  |         .storage_slices_nr(25) | ||||||
|  |         .storage_slice_price(0.018) | ||||||
|  |         .status(BidStatus::Cancelled) | ||||||
|  |         .obligation(false) | ||||||
|  |         .start_date(1640995200) | ||||||
|  |         .end_date(1672531200) | ||||||
|  |         .billing_period(BillingPeriod::Monthly) | ||||||
|  |         .signature_user("sig_user_104_jkl012".to_string()); | ||||||
|  |  | ||||||
|  |     // Save all bids to database and get their assigned IDs and updated models | ||||||
|  |     let (bid1_id, db_bid1) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .set(&bid1) | ||||||
|  |         .expect("can set bid"); | ||||||
|  |     let (bid2_id, db_bid2) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .set(&bid2) | ||||||
|  |         .expect("can set bid"); | ||||||
|  |     let (bid3_id, db_bid3) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .set(&bid3) | ||||||
|  |         .expect("can set bid"); | ||||||
|  |     let (bid4_id, db_bid4) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .set(&bid4) | ||||||
|  |         .expect("can set bid"); | ||||||
|  |  | ||||||
|  |     println!("Bid 1 assigned ID: {}", bid1_id); | ||||||
|  |     println!("Bid 2 assigned ID: {}", bid2_id); | ||||||
|  |     println!("Bid 3 assigned ID: {}", bid3_id); | ||||||
|  |     println!("Bid 4 assigned ID: {}", bid4_id); | ||||||
|  |  | ||||||
|  |     // Print all bids retrieved from database | ||||||
|  |     println!("\n--- Bids Retrieved from Database ---"); | ||||||
|  |     println!("\n1. Small compute bid:"); | ||||||
|  |     print_bid_details(&db_bid1); | ||||||
|  |  | ||||||
|  |     println!("\n2. Large compute bid with obligation:"); | ||||||
|  |     print_bid_details(&db_bid2); | ||||||
|  |  | ||||||
|  |     println!("\n3. Storage-heavy bid:"); | ||||||
|  |     print_bid_details(&db_bid3); | ||||||
|  |  | ||||||
|  |     println!("\n4. Cancelled bid:"); | ||||||
|  |     print_bid_details(&db_bid4); | ||||||
|  |  | ||||||
|  |     // Demonstrate different ways to retrieve bids from the database | ||||||
|  |     println!("\n--- Retrieving Bids by Different Methods ---"); | ||||||
|  |     println!("\n1. By Customer ID Index (Customer 102):"); | ||||||
|  |      | ||||||
|  |     let customer_bids = db | ||||||
|  |         .collection::<Bid>() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .get::<customer_id, _>(&102u32) | ||||||
|  |         .expect("can load bids by customer"); | ||||||
|  |  | ||||||
|  |     assert_eq!(customer_bids.len(), 1); | ||||||
|  |     print_bid_details(&customer_bids[0]); | ||||||
|  |  | ||||||
|  |     println!("\n2. Updating Bid Status:"); | ||||||
|  |     let mut updated_bid = db_bid1.clone(); | ||||||
|  |     updated_bid.status = BidStatus::Confirmed; | ||||||
|  |  | ||||||
|  |     let (_, confirmed_bid) = db | ||||||
|  |         .collection::<Bid>() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .set(&updated_bid) | ||||||
|  |         .expect("can update bid"); | ||||||
|  |  | ||||||
|  |     println!("Updated bid status to Confirmed:"); | ||||||
|  |     print_bid_details(&confirmed_bid); | ||||||
|  |  | ||||||
|  |     // 3. Delete a bid and show the updated results | ||||||
|  |     println!("\n3. After Deleting a Bid:"); | ||||||
|  |     println!("Deleting bid with ID: {}", bid4_id); | ||||||
|  |     db.collection::<Bid>() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .delete_by_id(bid4_id) | ||||||
|  |         .expect("can delete existing bid"); | ||||||
|  |  | ||||||
|  |     // Show remaining bids | ||||||
|  |     let all_bids = db | ||||||
|  |         .collection::<Bid>() | ||||||
|  |         .expect("can open bid collection") | ||||||
|  |         .get_all() | ||||||
|  |         .expect("can load all bids"); | ||||||
|  |  | ||||||
|  |     println!("Remaining bids count: {}", all_bids.len()); | ||||||
|  |     assert_eq!(all_bids.len(), 3); | ||||||
|  |  | ||||||
|  |     // Calculate total compute and storage requested | ||||||
|  |     println!("\n--- Bid Analytics ---"); | ||||||
|  |     let total_compute_slices: i32 = all_bids.iter().map(|b| b.compute_slices_nr).sum(); | ||||||
|  |     let total_storage_slices: i32 = all_bids.iter().map(|b| b.storage_slices_nr).sum(); | ||||||
|  |     let avg_compute_price: f64 = all_bids.iter().map(|b| b.compute_slice_price).sum::<f64>() / all_bids.len() as f64; | ||||||
|  |     let avg_storage_price: f64 = all_bids.iter().map(|b| b.storage_slice_price).sum::<f64>() / all_bids.len() as f64; | ||||||
|  |  | ||||||
|  |     println!("Total Compute Slices Requested: {}", total_compute_slices); | ||||||
|  |     println!("Total Storage Slices Requested: {}", total_storage_slices); | ||||||
|  |     println!("Average Compute Price: ${:.3}", avg_compute_price); | ||||||
|  |     println!("Average Storage Price: ${:.3}", avg_storage_price); | ||||||
|  |  | ||||||
|  |     // Count bids by status | ||||||
|  |     let confirmed_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Confirmed)).count(); | ||||||
|  |     let assigned_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Assigned)).count(); | ||||||
|  |     let pending_count = all_bids.iter().filter(|b| matches!(b.status, BidStatus::Pending)).count(); | ||||||
|  |  | ||||||
|  |     println!("\nBids by Status:"); | ||||||
|  |     println!("  Confirmed: {}", confirmed_count); | ||||||
|  |     println!("  Assigned: {}", assigned_count); | ||||||
|  |     println!("  Pending: {}", pending_count); | ||||||
|  |  | ||||||
|  |     println!("\n--- Model Information ---"); | ||||||
|  |     println!("Bid DB Prefix: {}", Bid::db_prefix()); | ||||||
|  | } | ||||||
							
								
								
									
301 heromodels/examples/grid4_contract_example.rs Normal file
							| @@ -0,0 +1,301 @@ | |||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::{BillingPeriod, ComputeSliceProvisioned, Contract, ContractStatus, StorageSliceProvisioned}; // slice/billing types assumed re-exported from grid4, as used below | ||||||
|  | use heromodels::models::grid4::contract::contract_index::customer_id; | ||||||
|  | use heromodels_core::Model; | ||||||
|  |  | ||||||
|  | // Helper function to print contract details | ||||||
|  | fn print_contract_details(contract: &Contract) { | ||||||
|  |     println!("\n--- Contract Details ---"); | ||||||
|  |     println!("ID: {}", contract.get_id()); | ||||||
|  |     println!("Customer ID: {}", contract.customer_id); | ||||||
|  |     println!("Compute Slices: {}", contract.compute_slices.len()); | ||||||
|  |     println!("Storage Slices: {}", contract.storage_slices.len()); | ||||||
|  |     println!("Compute Slice Price: ${:.2}", contract.compute_slice_price); | ||||||
|  |     println!("Storage Slice Price: ${:.2}", contract.storage_slice_price); | ||||||
|  |     println!("Network Slice Price: ${:.2}", contract.network_slice_price); | ||||||
|  |     println!("Status: {:?}", contract.status); | ||||||
|  |     println!("Start Date: {}", contract.start_date); | ||||||
|  |     println!("End Date: {}", contract.end_date); | ||||||
|  |     println!("Billing Period: {:?}", contract.billing_period); | ||||||
|  |     println!("Signature User: {}", contract.signature_user); | ||||||
|  |     println!("Signature Hoster: {}", contract.signature_hoster); | ||||||
|  |     println!("Created At: {}", contract.base_data.created_at); | ||||||
|  |     println!("Modified At: {}", contract.base_data.modified_at); | ||||||
|  |      | ||||||
|  |     // Print compute slices details | ||||||
|  |     if !contract.compute_slices.is_empty() { | ||||||
|  |         println!("  Compute Slices:"); | ||||||
|  |         for (i, slice) in contract.compute_slices.iter().enumerate() { | ||||||
|  |             println!("    {}. Node: {}, ID: {}, Memory: {:.1}GB, Storage: {:.1}GB, Passmark: {}, vCores: {}",  | ||||||
|  |                 i + 1, slice.node_id, slice.id, slice.mem_gb, slice.storage_gb, slice.passmark, slice.vcores); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     // Print storage slices details | ||||||
|  |     if !contract.storage_slices.is_empty() { | ||||||
|  |         println!("  Storage Slices:"); | ||||||
|  |         for (i, slice) in contract.storage_slices.iter().enumerate() { | ||||||
|  |             println!("    {}. Node: {}, ID: {}, Size: {}GB",  | ||||||
|  |                 i + 1, slice.node_id, slice.id, slice.storage_size_gb); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | fn main() { | ||||||
|  |     // Create a new DB instance in /tmp/grid4_contracts_db, and reset before every run | ||||||
|  |     let db = heromodels::db::hero::OurDB::new("/tmp/grid4_contracts_db", true).expect("Can create DB"); | ||||||
|  |  | ||||||
|  |     println!("Grid4 Contract Models - Basic Usage Example"); | ||||||
|  |     println!("=========================================="); | ||||||
|  |  | ||||||
|  |     // Create compute slices for contracts | ||||||
|  |     let compute_slice1 = ComputeSliceProvisioned::new() | ||||||
|  |         .node_id(1001) | ||||||
|  |         .id(1) | ||||||
|  |         .mem_gb(2.0) | ||||||
|  |         .storage_gb(20.0) | ||||||
|  |         .passmark(2500) | ||||||
|  |         .vcores(2) | ||||||
|  |         .cpu_oversubscription(150) | ||||||
|  |         .tags("web-server,production".to_string()); | ||||||
|  |  | ||||||
|  |     let compute_slice2 = ComputeSliceProvisioned::new() | ||||||
|  |         .node_id(1002) | ||||||
|  |         .id(2) | ||||||
|  |         .mem_gb(4.0) | ||||||
|  |         .storage_gb(40.0) | ||||||
|  |         .passmark(5000) | ||||||
|  |         .vcores(4) | ||||||
|  |         .cpu_oversubscription(120) | ||||||
|  |         .tags("database,high-performance".to_string()); | ||||||
|  |  | ||||||
|  |     let compute_slice3 = ComputeSliceProvisioned::new() | ||||||
|  |         .node_id(1003) | ||||||
|  |         .id(1) | ||||||
|  |         .mem_gb(8.0) | ||||||
|  |         .storage_gb(80.0) | ||||||
|  |         .passmark(10000) | ||||||
|  |         .vcores(8) | ||||||
|  |         .cpu_oversubscription(100) | ||||||
|  |         .tags("ml-training,gpu-enabled".to_string()); | ||||||
|  |  | ||||||
|  |     // Create storage slices for contracts | ||||||
|  |     let storage_slice1 = StorageSliceProvisioned::new() | ||||||
|  |         .node_id(2001) | ||||||
|  |         .id(1) | ||||||
|  |         .storage_size_gb(100) | ||||||
|  |         .tags("backup,cold-storage".to_string()); | ||||||
|  |  | ||||||
|  |     let storage_slice2 = StorageSliceProvisioned::new() | ||||||
|  |         .node_id(2002) | ||||||
|  |         .id(2) | ||||||
|  |         .storage_size_gb(500) | ||||||
|  |         .tags("data-lake,analytics".to_string()); | ||||||
|  |  | ||||||
|  |     let storage_slice3 = StorageSliceProvisioned::new() | ||||||
|  |         .node_id(2003) | ||||||
|  |         .id(1) | ||||||
|  |         .storage_size_gb(1000) | ||||||
|  |         .tags("archive,long-term".to_string()); | ||||||
|  |  | ||||||
|  |     // Create contracts with different configurations | ||||||
|  |  | ||||||
|  |     // Contract 1 - Small web hosting contract | ||||||
|  |     let contract1 = Contract::new() | ||||||
|  |         .customer_id(201) | ||||||
|  |         .add_compute_slice(compute_slice1.clone()) | ||||||
|  |         .add_storage_slice(storage_slice1.clone()) | ||||||
|  |         .compute_slice_price(0.05) | ||||||
|  |         .storage_slice_price(0.02) | ||||||
|  |         .network_slice_price(0.01) | ||||||
|  |         .status(ContractStatus::Active) | ||||||
|  |         .start_date(1640995200) // 2022-01-01 | ||||||
|  |         .end_date(1672531200)   // 2023-01-01 | ||||||
|  |         .billing_period(BillingPeriod::Monthly) | ||||||
|  |         .signature_user("contract_user_201_abc123".to_string()) | ||||||
|  |         .signature_hoster("hoster_node1001_xyz789".to_string()); | ||||||
|  |  | ||||||
|  |     // Contract 2 - Database hosting contract | ||||||
|  |     let contract2 = Contract::new() | ||||||
|  |         .customer_id(202) | ||||||
|  |         .add_compute_slice(compute_slice2.clone()) | ||||||
|  |         .add_storage_slice(storage_slice2.clone()) | ||||||
|  |         .compute_slice_price(0.04) | ||||||
|  |         .storage_slice_price(0.015) | ||||||
|  |         .network_slice_price(0.008) | ||||||
|  |         .status(ContractStatus::Active) | ||||||
|  |         .start_date(1640995200) | ||||||
|  |         .end_date(1704067200)   // 2024-01-01 | ||||||
|  |         .billing_period(BillingPeriod::Yearly) | ||||||
|  |         .signature_user("contract_user_202_def456".to_string()) | ||||||
|  |         .signature_hoster("hoster_node1002_uvw123".to_string()); | ||||||
|  |  | ||||||
|  |     // Contract 3 - ML training contract (paused) | ||||||
|  |     let contract3 = Contract::new() | ||||||
|  |         .customer_id(203) | ||||||
|  |         .add_compute_slice(compute_slice3.clone()) | ||||||
|  |         .add_storage_slice(storage_slice3.clone()) | ||||||
|  |         .compute_slice_price(0.08) | ||||||
|  |         .storage_slice_price(0.01) | ||||||
|  |         .network_slice_price(0.015) | ||||||
|  |         .status(ContractStatus::Paused) | ||||||
|  |         .start_date(1640995200) | ||||||
|  |         .end_date(1672531200) | ||||||
|  |         .billing_period(BillingPeriod::Hourly) | ||||||
|  |         .signature_user("contract_user_203_ghi789".to_string()) | ||||||
|  |         .signature_hoster("hoster_node1003_rst456".to_string()); | ||||||
|  |  | ||||||
|  |     // Contract 4 - Multi-slice enterprise contract | ||||||
|  |     let contract4 = Contract::new() | ||||||
|  |         .customer_id(204) | ||||||
|  |         .add_compute_slice(compute_slice1.clone()) | ||||||
|  |         .add_compute_slice(compute_slice2.clone()) | ||||||
|  |         .add_storage_slice(storage_slice1.clone()) | ||||||
|  |         .add_storage_slice(storage_slice2.clone()) | ||||||
|  |         .compute_slice_price(0.045) | ||||||
|  |         .storage_slice_price(0.018) | ||||||
|  |         .network_slice_price(0.012) | ||||||
|  |         .status(ContractStatus::Active) | ||||||
|  |         .start_date(1640995200) | ||||||
|  |         .end_date(1735689600)   // 2025-01-01 | ||||||
|  |         .billing_period(BillingPeriod::Monthly) | ||||||
|  |         .signature_user("contract_user_204_jkl012".to_string()) | ||||||
|  |         .signature_hoster("hoster_enterprise_mno345".to_string()); | ||||||
|  |  | ||||||
|  |     // Save all contracts to database and get their assigned IDs and updated models | ||||||
|  |     let (contract1_id, db_contract1) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .set(&contract1) | ||||||
|  |         .expect("can set contract"); | ||||||
|  |     let (contract2_id, db_contract2) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .set(&contract2) | ||||||
|  |         .expect("can set contract"); | ||||||
|  |     let (contract3_id, db_contract3) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .set(&contract3) | ||||||
|  |         .expect("can set contract"); | ||||||
|  |     let (contract4_id, db_contract4) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .set(&contract4) | ||||||
|  |         .expect("can set contract"); | ||||||
|  |  | ||||||
|  |     println!("Contract 1 assigned ID: {}", contract1_id); | ||||||
|  |     println!("Contract 2 assigned ID: {}", contract2_id); | ||||||
|  |     println!("Contract 3 assigned ID: {}", contract3_id); | ||||||
|  |     println!("Contract 4 assigned ID: {}", contract4_id); | ||||||
|  |  | ||||||
|  |     // Print all contracts retrieved from database | ||||||
|  |     println!("\n--- Contracts Retrieved from Database ---"); | ||||||
|  |     println!("\n1. Web hosting contract:"); | ||||||
|  |     print_contract_details(&db_contract1); | ||||||
|  |  | ||||||
|  |     println!("\n2. Database hosting contract:"); | ||||||
|  |     print_contract_details(&db_contract2); | ||||||
|  |  | ||||||
|  |     println!("\n3. ML training contract (paused):"); | ||||||
|  |     print_contract_details(&db_contract3); | ||||||
|  |  | ||||||
|  |     println!("\n4. Enterprise multi-slice contract:"); | ||||||
|  |     print_contract_details(&db_contract4); | ||||||
|  |  | ||||||
|  |     // Demonstrate different ways to retrieve contracts from the database | ||||||
|  |  | ||||||
|  |     // 1. Retrieve by customer ID index | ||||||
|  |     println!("\n--- Retrieving Contracts by Different Methods ---"); | ||||||
|  |     println!("\n1. By Customer ID Index (Customer 202):"); | ||||||
|  |     let customer_contracts = db | ||||||
|  |         .collection::<Contract>() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .get::<customer_id, _>(&202u32) | ||||||
|  |         .expect("can load contracts by customer"); | ||||||
|  |  | ||||||
|  |     assert_eq!(customer_contracts.len(), 1); | ||||||
|  |     print_contract_details(&customer_contracts[0]); | ||||||
|  |  | ||||||
|  |     // 2. Update contract status | ||||||
|  |     println!("\n2. Resuming Paused Contract:"); | ||||||
|  |     let mut updated_contract = db_contract3.clone(); | ||||||
|  |     updated_contract.status = ContractStatus::Active; | ||||||
|  |  | ||||||
|  |     let (_, resumed_contract) = db | ||||||
|  |         .collection::<Contract>() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .set(&updated_contract) | ||||||
|  |         .expect("can update contract"); | ||||||
|  |  | ||||||
|  |     println!("Updated contract status to Active:"); | ||||||
|  |     print_contract_details(&resumed_contract); | ||||||
|  |  | ||||||
|  |     // 3. Cancel a contract | ||||||
|  |     println!("\n3. Cancelling a Contract:"); | ||||||
|  |     let mut cancelled_contract = db_contract1.clone(); | ||||||
|  |     cancelled_contract.status = ContractStatus::Cancelled; | ||||||
|  |  | ||||||
|  |     let (_, final_contract) = db | ||||||
|  |         .collection::<Contract>() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .set(&cancelled_contract) | ||||||
|  |         .expect("can update contract"); | ||||||
|  |  | ||||||
|  |     println!("Cancelled contract:"); | ||||||
|  |     print_contract_details(&final_contract); | ||||||
|  |  | ||||||
|  |     // Show remaining active contracts | ||||||
|  |     let all_contracts = db | ||||||
|  |         .collection::<Contract>() | ||||||
|  |         .expect("can open contract collection") | ||||||
|  |         .get_all() | ||||||
|  |         .expect("can load all contracts"); | ||||||
|  |  | ||||||
|  |     println!("\n--- Contract Analytics ---"); | ||||||
|  |     let active_contracts: Vec<_> = all_contracts.iter() | ||||||
|  |         .filter(|c| matches!(c.status, ContractStatus::Active)) | ||||||
|  |         .collect(); | ||||||
|  |     let paused_contracts: Vec<_> = all_contracts.iter() | ||||||
|  |         .filter(|c| matches!(c.status, ContractStatus::Paused)) | ||||||
|  |         .collect(); | ||||||
|  |     let cancelled_contracts: Vec<_> = all_contracts.iter() | ||||||
|  |         .filter(|c| matches!(c.status, ContractStatus::Cancelled)) | ||||||
|  |         .collect(); | ||||||
|  |  | ||||||
|  |     println!("Total Contracts: {}", all_contracts.len()); | ||||||
|  |     println!("Active Contracts: {}", active_contracts.len()); | ||||||
|  |     println!("Paused Contracts: {}", paused_contracts.len()); | ||||||
|  |     println!("Cancelled Contracts: {}", cancelled_contracts.len()); | ||||||
|  |  | ||||||
|  |     // Calculate total provisioned resources | ||||||
|  |     let total_compute_slices: usize = all_contracts.iter().map(|c| c.compute_slices.len()).sum(); | ||||||
|  |     let total_storage_slices: usize = all_contracts.iter().map(|c| c.storage_slices.len()).sum(); | ||||||
|  |     let total_memory_gb: f64 = all_contracts.iter() | ||||||
|  |         .flat_map(|c| &c.compute_slices) | ||||||
|  |         .map(|s| s.mem_gb) | ||||||
|  |         .sum(); | ||||||
|  |     let total_storage_gb: i32 = all_contracts.iter() | ||||||
|  |         .flat_map(|c| &c.storage_slices) | ||||||
|  |         .map(|s| s.storage_size_gb) | ||||||
|  |         .sum(); | ||||||
|  |  | ||||||
|  |     println!("\nProvisioned Resources:"); | ||||||
|  |     println!("  Total Compute Slices: {}", total_compute_slices); | ||||||
|  |     println!("  Total Storage Slices: {}", total_storage_slices); | ||||||
|  |     println!("  Total Memory: {:.1} GB", total_memory_gb); | ||||||
|  |     println!("  Total Storage: {} GB", total_storage_gb); | ||||||
|  |  | ||||||
|  |     // Calculate average pricing | ||||||
|  |     let avg_compute_price: f64 = all_contracts.iter().map(|c| c.compute_slice_price).sum::<f64>() / all_contracts.len() as f64; | ||||||
|  |     let avg_storage_price: f64 = all_contracts.iter().map(|c| c.storage_slice_price).sum::<f64>() / all_contracts.len() as f64; | ||||||
|  |     let avg_network_price: f64 = all_contracts.iter().map(|c| c.network_slice_price).sum::<f64>() / all_contracts.len() as f64; | ||||||
|  |  | ||||||
|  |     println!("\nAverage Pricing:"); | ||||||
|  |     println!("  Compute: ${:.3} per slice", avg_compute_price); | ||||||
|  |     println!("  Storage: ${:.3} per slice", avg_storage_price); | ||||||
|  |     println!("  Network: ${:.3} per slice", avg_network_price); | ||||||
|  |  | ||||||
|  |     println!("\n--- Model Information ---"); | ||||||
|  |     println!("Contract DB Prefix: {}", Contract::db_prefix()); | ||||||
|  | } | ||||||
							
								
								
									
12 heromodels/examples/grid4_example/README.md Normal file
							| @@ -0,0 +1,12 @@ | |||||||
|  | # Grid4 Node Example (OurDB) | ||||||
|  |  | ||||||
|  | This example demonstrates how to use the Grid4 `Node` model against the embedded OurDB backend. | ||||||
|  |  | ||||||
|  | - Creates a fresh on-disk OurDB instance under a unique path in `/tmp` (wiped at the start of each run). | ||||||
|  | - Demonstrates CRUD and simple index lookups on `country`, `nodegroupid`, and `pubkey`. | ||||||
|  |  | ||||||
|  | Run it: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | cargo run -p heromodels --example grid4_example | ||||||
|  | ``` | ||||||
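The README above describes CRUD and index lookups against OurDB. Before the full example file that follows, here is a minimal sketch of just the index-lookup pattern, assuming the same API surface that example uses (`OurDB::new`, `Db::collection::<Node>()`, `Collection::get::<country, _>()`); the helper name `nodes_in_country` is illustrative only.

```rust
// Minimal sketch of the index lookup the README describes (not the full example).
// Assumes the imports/re-exports used by heromodels/examples/grid4_example/example.rs.
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::grid4::node::node_index::country;
use heromodels::models::grid4::node::Node;

// Illustrative helper: fetch every stored Node whose `country` index matches the given code.
fn nodes_in_country(db: &OurDB, code: &str) -> Vec<Node> {
    db.collection::<Node>()
        .expect("open node collection")
        .get::<country, _>(code)
        .expect("query nodes by country index")
}
```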
							
								
								
									
66 heromodels/examples/grid4_example/example.rs Normal file
							| @@ -0,0 +1,66 @@ | |||||||
|  | use heromodels::db::hero::OurDB; | ||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey}; | ||||||
|  | use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node}; | ||||||
|  | use std::sync::Arc; | ||||||
|  |  | ||||||
|  | fn main() { | ||||||
|  |     // Create a temp OurDB | ||||||
|  |     let ts = std::time::SystemTime::now() | ||||||
|  |         .duration_since(std::time::UNIX_EPOCH) | ||||||
|  |         .unwrap() | ||||||
|  |         .as_nanos(); | ||||||
|  |     let path = format!("/tmp/grid4_example_{}", ts); | ||||||
|  |     let _ = std::fs::remove_dir_all(&path); | ||||||
|  |     let db = Arc::new(OurDB::new(&path, true).expect("create OurDB")); | ||||||
|  |  | ||||||
|  |     let nodes = db.collection::<Node>().expect("open node collection"); | ||||||
|  |  | ||||||
|  |     // Build a node | ||||||
|  |     let cs = ComputeSlice::new() | ||||||
|  |         .nodeid(1) | ||||||
|  |         .slice_id(1) | ||||||
|  |         .mem_gb(64.0) | ||||||
|  |         .storage_gb(1024.0) | ||||||
|  |         .passmark(8000) | ||||||
|  |         .vcores(24) | ||||||
|  |         .gpus(2) | ||||||
|  |         .price_cc(0.5); | ||||||
|  |  | ||||||
|  |     let dev = DeviceInfo { | ||||||
|  |         vendor: "ACME".into(), | ||||||
|  |         ..Default::default() | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     let n = Node::new() | ||||||
|  |         .nodegroupid(7) | ||||||
|  |         .uptime(98) | ||||||
|  |         .add_compute_slice(cs) | ||||||
|  |         .devices(dev) | ||||||
|  |         .country("BE") | ||||||
|  |         .pubkey("PUB_NODE_X") | ||||||
|  |         .build(); | ||||||
|  |  | ||||||
|  |     // Store | ||||||
|  |     let (id, stored) = nodes.set(&n).expect("store node"); | ||||||
|  |     println!("Stored node id={id} pubkey={} country={}", stored.pubkey, stored.country); | ||||||
|  |  | ||||||
|  |     // Query by indexes | ||||||
|  |     let by_country = nodes.get::<country, _>("BE").expect("query country"); | ||||||
|  |     println!("Found {} nodes in country=BE", by_country.len()); | ||||||
|  |  | ||||||
|  |     let by_group = nodes.get::<nodegroupid, _>(&7).expect("query group"); | ||||||
|  |     println!("Found {} nodes in group=7", by_group.len()); | ||||||
|  |  | ||||||
|  |     let by_key = nodes.get::<pubkey, _>("PUB_NODE_X").expect("query pubkey"); | ||||||
|  |     println!("Found {} with pubkey PUB_NODE_X", by_key.len()); | ||||||
|  |  | ||||||
|  |     // Update | ||||||
|  |     let updated = stored.clone().country("NL"); | ||||||
|  |     let (_, back) = nodes.set(&updated).expect("update node"); | ||||||
|  |     println!("Updated node country={}", back.country); | ||||||
|  |  | ||||||
|  |     // Delete | ||||||
|  |     nodes.delete_by_id(id).expect("delete node"); | ||||||
|  |     println!("Deleted node id={id}"); | ||||||
|  | } | ||||||
							
								
								
									
390 heromodels/examples/grid4_node_example.rs Normal file
							| @@ -0,0 +1,390 @@ | |||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::{CPUDevice, ComputeSlice, DeviceInfo, GPUDevice, MemoryDevice, NetworkDevice, Node, NodeCapacity, StorageDevice, StorageSlice}; // device/capacity types assumed re-exported from grid4, as used below | ||||||
|  | use heromodels::models::grid4::node::node_index::{nodegroupid, country}; | ||||||
|  | use heromodels_core::Model; | ||||||
|  |  | ||||||
|  | // Helper function to print node details | ||||||
|  | fn print_node_details(node: &Node) { | ||||||
|  |     println!("\n--- Node Details ---"); | ||||||
|  |     println!("ID: {}", node.get_id()); | ||||||
|  |     println!("NodeGroup ID: {}", node.nodegroupid); | ||||||
|  |     println!("Uptime: {}%", node.uptime); | ||||||
|  |     println!("Country: {}", node.country); | ||||||
|  |     println!("Birth Time: {}", node.birthtime); | ||||||
|  |     println!("Public Key: {}", node.pubkey); | ||||||
|  |     println!("Compute Slices: {}", node.computeslices.len()); | ||||||
|  |     println!("Storage Slices: {}", node.storageslices.len()); | ||||||
|  |     println!("Created At: {}", node.base_data.created_at); | ||||||
|  |     println!("Modified At: {}", node.base_data.modified_at); | ||||||
|  |      | ||||||
|  |     // Print capacity details | ||||||
|  |     println!("  Capacity:"); | ||||||
|  |     println!("    Storage: {:.1} GB", node.capacity.storage_gb); | ||||||
|  |     println!("    Memory: {:.1} GB", node.capacity.mem_gb); | ||||||
|  |     println!("    GPU Memory: {:.1} GB", node.capacity.mem_gb_gpu); | ||||||
|  |     println!("    Passmark: {}", node.capacity.passmark); | ||||||
|  |     println!("    vCores: {}", node.capacity.vcores); | ||||||
|  |      | ||||||
|  |     // Print device info | ||||||
|  |     println!("  Devices:"); | ||||||
|  |     println!("    Vendor: {}", node.devices.vendor); | ||||||
|  |     println!("    CPUs: {}", node.devices.cpu.len()); | ||||||
|  |     println!("    GPUs: {}", node.devices.gpu.len()); | ||||||
|  |     println!("    Memory: {}", node.devices.memory.len()); | ||||||
|  |     println!("    Storage: {}", node.devices.storage.len()); | ||||||
|  |     println!("    Network: {}", node.devices.network.len()); | ||||||
|  |      | ||||||
|  |     // Print compute slices | ||||||
|  |     if !node.computeslices.is_empty() { | ||||||
|  |         println!("  Compute Slices:"); | ||||||
|  |         for (i, slice) in node.computeslices.iter().enumerate() { | ||||||
|  |             println!("    {}. ID: {}, Memory: {:.1}GB, Storage: {:.1}GB, vCores: {}, GPUs: {}",  | ||||||
|  |                 i + 1, slice.id, slice.mem_gb, slice.storage_gb, slice.vcores, slice.gpus); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     // Print storage slices | ||||||
|  |     if !node.storageslices.is_empty() { | ||||||
|  |         println!("  Storage Slices:"); | ||||||
|  |         for (i, slice) in node.storageslices.iter().enumerate() { | ||||||
|  |             println!("    {}. ID: {}", i + 1, slice.id); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | fn main() { | ||||||
|  |     // Create a new DB instance in /tmp/grid4_nodes_db, and reset before every run | ||||||
|  |     let db = heromodels::db::hero::OurDB::new("/tmp/grid4_nodes_db", true).expect("Can create DB"); | ||||||
|  |  | ||||||
|  |     println!("Grid4 Node Models - Basic Usage Example"); | ||||||
|  |     println!("======================================"); | ||||||
|  |  | ||||||
|  |     // Create device components for nodes | ||||||
|  |      | ||||||
|  |     // CPU devices | ||||||
|  |     let cpu1 = CPUDevice { | ||||||
|  |         id: "cpu_intel_i7_12700k".to_string(), | ||||||
|  |         cores: 12, | ||||||
|  |         passmark: 28500, | ||||||
|  |         description: "Intel Core i7-12700K".to_string(), | ||||||
|  |         cpu_brand: "Intel".to_string(), | ||||||
|  |         cpu_version: "12th Gen".to_string(), | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let cpu2 = CPUDevice { | ||||||
|  |         id: "cpu_amd_ryzen_9_5900x".to_string(), | ||||||
|  |         cores: 12, | ||||||
|  |         passmark: 32000, | ||||||
|  |         description: "AMD Ryzen 9 5900X".to_string(), | ||||||
|  |         cpu_brand: "AMD".to_string(), | ||||||
|  |         cpu_version: "Zen 3".to_string(), | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // GPU devices | ||||||
|  |     let gpu1 = GPUDevice { | ||||||
|  |         id: "gpu_rtx_3080".to_string(), | ||||||
|  |         cores: 8704, | ||||||
|  |         memory_gb: 10.0, | ||||||
|  |         description: "NVIDIA GeForce RTX 3080".to_string(), | ||||||
|  |         gpu_brand: "NVIDIA".to_string(), | ||||||
|  |         gpu_version: "RTX 30 Series".to_string(), | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let gpu2 = GPUDevice { | ||||||
|  |         id: "gpu_rtx_4090".to_string(), | ||||||
|  |         cores: 16384, | ||||||
|  |         memory_gb: 24.0, | ||||||
|  |         description: "NVIDIA GeForce RTX 4090".to_string(), | ||||||
|  |         gpu_brand: "NVIDIA".to_string(), | ||||||
|  |         gpu_version: "RTX 40 Series".to_string(), | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Memory devices | ||||||
|  |     let memory1 = MemoryDevice { | ||||||
|  |         id: "mem_ddr4_32gb".to_string(), | ||||||
|  |         size_gb: 32.0, | ||||||
|  |         description: "DDR4-3200 32GB Kit".to_string(), | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let memory2 = MemoryDevice { | ||||||
|  |         id: "mem_ddr5_64gb".to_string(), | ||||||
|  |         size_gb: 64.0, | ||||||
|  |         description: "DDR5-5600 64GB Kit".to_string(), | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Storage devices | ||||||
|  |     let storage1 = StorageDevice { | ||||||
|  |         id: "ssd_nvme_1tb".to_string(), | ||||||
|  |         size_gb: 1000.0, | ||||||
|  |         description: "NVMe SSD 1TB".to_string(), | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let storage2 = StorageDevice { | ||||||
|  |         id: "hdd_sata_4tb".to_string(), | ||||||
|  |         size_gb: 4000.0, | ||||||
|  |         description: "SATA HDD 4TB".to_string(), | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Network devices | ||||||
|  |     let network1 = NetworkDevice { | ||||||
|  |         id: "eth_1gbit".to_string(), | ||||||
|  |         speed_mbps: 1000, | ||||||
|  |         description: "Gigabit Ethernet".to_string(), | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let network2 = NetworkDevice { | ||||||
|  |         id: "eth_10gbit".to_string(), | ||||||
|  |         speed_mbps: 10000, | ||||||
|  |         description: "10 Gigabit Ethernet".to_string(), | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Create device info configurations | ||||||
|  |     let devices1 = DeviceInfo { | ||||||
|  |         vendor: "Dell".to_string(), | ||||||
|  |         cpu: vec![cpu1.clone()], | ||||||
|  |         gpu: vec![gpu1.clone()], | ||||||
|  |         memory: vec![memory1.clone()], | ||||||
|  |         storage: vec![storage1.clone(), storage2.clone()], | ||||||
|  |         network: vec![network1.clone()], | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let devices2 = DeviceInfo { | ||||||
|  |         vendor: "HP".to_string(), | ||||||
|  |         cpu: vec![cpu2.clone()], | ||||||
|  |         gpu: vec![gpu2.clone()], | ||||||
|  |         memory: vec![memory2.clone()], | ||||||
|  |         storage: vec![storage1.clone()], | ||||||
|  |         network: vec![network2.clone()], | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Create node capacities | ||||||
|  |     let capacity1 = NodeCapacity { | ||||||
|  |         storage_gb: 5000.0, | ||||||
|  |         mem_gb: 32.0, | ||||||
|  |         mem_gb_gpu: 10.0, | ||||||
|  |         passmark: 28500, | ||||||
|  |         vcores: 24, | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let capacity2 = NodeCapacity { | ||||||
|  |         storage_gb: 1000.0, | ||||||
|  |         mem_gb: 64.0, | ||||||
|  |         mem_gb_gpu: 24.0, | ||||||
|  |         passmark: 32000, | ||||||
|  |         vcores: 24, | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Create compute slices | ||||||
|  |     let compute_slice1 = ComputeSlice::new() | ||||||
|  |         .id(1) | ||||||
|  |         .mem_gb(4.0) | ||||||
|  |         .storage_gb(100.0) | ||||||
|  |         .passmark(3000) | ||||||
|  |         .vcores(2) | ||||||
|  |         .cpu_oversubscription(150) | ||||||
|  |         .storage_oversubscription(120) | ||||||
|  |         .gpus(0); | ||||||
|  |      | ||||||
|  |     let compute_slice2 = ComputeSlice::new() | ||||||
|  |         .id(2) | ||||||
|  |         .mem_gb(8.0) | ||||||
|  |         .storage_gb(200.0) | ||||||
|  |         .passmark(6000) | ||||||
|  |         .vcores(4) | ||||||
|  |         .cpu_oversubscription(130) | ||||||
|  |         .storage_oversubscription(110) | ||||||
|  |         .gpus(1); | ||||||
|  |      | ||||||
|  |     let compute_slice3 = ComputeSlice::new() | ||||||
|  |         .id(1) | ||||||
|  |         .mem_gb(16.0) | ||||||
|  |         .storage_gb(400.0) | ||||||
|  |         .passmark(12000) | ||||||
|  |         .vcores(8) | ||||||
|  |         .cpu_oversubscription(110) | ||||||
|  |         .storage_oversubscription(100) | ||||||
|  |         .gpus(1); | ||||||
|  |  | ||||||
|  |     // Create storage slices | ||||||
|  |     let storage_slice1 = StorageSlice::new().id(1); | ||||||
|  |     let storage_slice2 = StorageSlice::new().id(2); | ||||||
|  |     let storage_slice3 = StorageSlice::new().id(3); | ||||||
|  |  | ||||||
|  |     // Create nodes with different configurations | ||||||
|  |  | ||||||
|  |     // Node 1 - Web hosting node | ||||||
|  |     let node1 = Node::new() | ||||||
|  |         .nodegroupid(1001) | ||||||
|  |         .uptime(98) | ||||||
|  |         .add_compute_slice(compute_slice1.clone()) | ||||||
|  |         .add_compute_slice(compute_slice2.clone()) | ||||||
|  |         .add_storage_slice(storage_slice1.clone()) | ||||||
|  |         .add_storage_slice(storage_slice2.clone()) | ||||||
|  |         .devices(devices1.clone()) | ||||||
|  |         .country("US".to_string()) | ||||||
|  |         .capacity(capacity1.clone()) | ||||||
|  |         .birthtime(1640995200) // 2022-01-01 | ||||||
|  |         .pubkey("node1_pubkey_abc123xyz789".to_string()) | ||||||
|  |         .signature_node("node1_signature_def456".to_string()) | ||||||
|  |         .signature_farmer("farmer1_signature_ghi789".to_string()); | ||||||
|  |  | ||||||
|  |     // Node 2 - High-performance computing node | ||||||
|  |     let node2 = Node::new() | ||||||
|  |         .nodegroupid(1002) | ||||||
|  |         .uptime(99) | ||||||
|  |         .add_compute_slice(compute_slice3.clone()) | ||||||
|  |         .add_storage_slice(storage_slice3.clone()) | ||||||
|  |         .devices(devices2.clone()) | ||||||
|  |         .country("DE".to_string()) | ||||||
|  |         .capacity(capacity2.clone()) | ||||||
|  |         .birthtime(1672531200) // 2023-01-01 | ||||||
|  |         .pubkey("node2_pubkey_jkl012mno345".to_string()) | ||||||
|  |         .signature_node("node2_signature_pqr678".to_string()) | ||||||
|  |         .signature_farmer("farmer2_signature_stu901".to_string()); | ||||||
|  |  | ||||||
|  |     // Node 3 - Storage-focused node | ||||||
|  |     let node3 = Node::new() | ||||||
|  |         .nodegroupid(1001) | ||||||
|  |         .uptime(95) | ||||||
|  |         .add_storage_slice(storage_slice1.clone()) | ||||||
|  |         .add_storage_slice(storage_slice2.clone()) | ||||||
|  |         .add_storage_slice(storage_slice3.clone()) | ||||||
|  |         .devices(devices1.clone()) | ||||||
|  |         .country("NL".to_string()) | ||||||
|  |         .capacity(capacity1.clone()) | ||||||
|  |         .birthtime(1704067200) // 2024-01-01 | ||||||
|  |         .pubkey("node3_pubkey_vwx234yzab567".to_string()) | ||||||
|  |         .signature_node("node3_signature_cde890".to_string()) | ||||||
|  |         .signature_farmer("farmer1_signature_fgh123".to_string()); | ||||||
|  |  | ||||||
|  |     // Save all nodes to database and get their assigned IDs and updated models | ||||||
|  |     let (node1_id, db_node1) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .set(&node1) | ||||||
|  |         .expect("can set node"); | ||||||
|  |     let (node2_id, db_node2) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .set(&node2) | ||||||
|  |         .expect("can set node"); | ||||||
|  |     let (node3_id, db_node3) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .set(&node3) | ||||||
|  |         .expect("can set node"); | ||||||
|  |  | ||||||
|  |     println!("Node 1 assigned ID: {}", node1_id); | ||||||
|  |     println!("Node 2 assigned ID: {}", node2_id); | ||||||
|  |     println!("Node 3 assigned ID: {}", node3_id); | ||||||
|  |  | ||||||
|  |     // Print all nodes retrieved from database | ||||||
|  |     println!("\n--- Nodes Retrieved from Database ---"); | ||||||
|  |     println!("\n1. Web hosting node:"); | ||||||
|  |     print_node_details(&db_node1); | ||||||
|  |  | ||||||
|  |     println!("\n2. High-performance computing node:"); | ||||||
|  |     print_node_details(&db_node2); | ||||||
|  |  | ||||||
|  |     println!("\n3. Storage-focused node:"); | ||||||
|  |     print_node_details(&db_node3); | ||||||
|  |  | ||||||
|  |     // Demonstrate different ways to retrieve nodes from the database | ||||||
|  |  | ||||||
|  |     // 1. Retrieve by nodegroup ID index | ||||||
|  |     println!("\n--- Retrieving Nodes by Different Methods ---"); | ||||||
|  |     println!("\n1. By NodeGroup ID Index (NodeGroup 1001):"); | ||||||
|  |     let nodegroup_nodes = db | ||||||
|  |         .collection::<Node>() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .get::<nodegroupid, _>(&1001i32) | ||||||
|  |         .expect("can load nodes by nodegroup"); | ||||||
|  |  | ||||||
|  |     assert_eq!(nodegroup_nodes.len(), 2); | ||||||
|  |     for (i, node) in nodegroup_nodes.iter().enumerate() { | ||||||
|  |         println!("  Node {}: ID {}, Country: {}, Uptime: {}%",  | ||||||
|  |             i + 1, node.get_id(), node.country, node.uptime); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // 2. Retrieve by country index | ||||||
|  |     println!("\n2. By Country Index (Germany - DE):"); | ||||||
|  |     let country_nodes = db | ||||||
|  |         .collection::<Node>() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .get::<country, _>("DE") | ||||||
|  |         .expect("can load nodes by country"); | ||||||
|  |  | ||||||
|  |     assert_eq!(country_nodes.len(), 1); | ||||||
|  |     print_node_details(&country_nodes[0]); | ||||||
|  |  | ||||||
|  |     // 3. Update node uptime | ||||||
|  |     println!("\n3. Updating Node Uptime:"); | ||||||
|  |     let mut updated_node = db_node1.clone(); | ||||||
|  |     updated_node.uptime = 99; | ||||||
|  |  | ||||||
|  |     let (_, uptime_updated_node) = db | ||||||
|  |         .collection::<Node>() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .set(&updated_node) | ||||||
|  |         .expect("can update node"); | ||||||
|  |  | ||||||
|  |     println!("Updated node uptime to 99%:"); | ||||||
|  |     println!("  Node ID: {}, New Uptime: {}%", uptime_updated_node.get_id(), uptime_updated_node.uptime); | ||||||
|  |  | ||||||
|  |     // Show all nodes and calculate analytics | ||||||
|  |     let all_nodes = db | ||||||
|  |         .collection::<Node>() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .get_all() | ||||||
|  |         .expect("can load all nodes"); | ||||||
|  |  | ||||||
|  |     println!("\n--- Node Analytics ---"); | ||||||
|  |     println!("Total Nodes: {}", all_nodes.len()); | ||||||
|  |  | ||||||
|  |     // Calculate total capacity | ||||||
|  |     let total_storage_gb: f64 = all_nodes.iter().map(|n| n.capacity.storage_gb).sum(); | ||||||
|  |     let total_memory_gb: f64 = all_nodes.iter().map(|n| n.capacity.mem_gb).sum(); | ||||||
|  |     let total_gpu_memory_gb: f64 = all_nodes.iter().map(|n| n.capacity.mem_gb_gpu).sum(); | ||||||
|  |     let total_vcores: i32 = all_nodes.iter().map(|n| n.capacity.vcores).sum(); | ||||||
|  |     let avg_uptime: f64 = all_nodes.iter().map(|n| n.uptime as f64).sum::<f64>() / all_nodes.len() as f64; | ||||||
|  |  | ||||||
|  |     println!("Total Capacity:"); | ||||||
|  |     println!("  Storage: {:.1} GB", total_storage_gb); | ||||||
|  |     println!("  Memory: {:.1} GB", total_memory_gb); | ||||||
|  |     println!("  GPU Memory: {:.1} GB", total_gpu_memory_gb); | ||||||
|  |     println!("  vCores: {}", total_vcores); | ||||||
|  |     println!("  Average Uptime: {:.1}%", avg_uptime); | ||||||
|  |  | ||||||
|  |     // Count nodes by country | ||||||
|  |     let mut country_counts = std::collections::HashMap::new(); | ||||||
|  |     for node in &all_nodes { | ||||||
|  |         *country_counts.entry(&node.country).or_insert(0) += 1; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\nNodes by Country:"); | ||||||
|  |     for (country, count) in country_counts { | ||||||
|  |         println!("  {}: {}", country, count); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Count total slices | ||||||
|  |     let total_compute_slices: usize = all_nodes.iter().map(|n| n.computeslices.len()).sum(); | ||||||
|  |     let total_storage_slices: usize = all_nodes.iter().map(|n| n.storageslices.len()).sum(); | ||||||
|  |  | ||||||
|  |     println!("\nTotal Slices:"); | ||||||
|  |     println!("  Compute Slices: {}", total_compute_slices); | ||||||
|  |     println!("  Storage Slices: {}", total_storage_slices); | ||||||
|  |  | ||||||
|  |     // Vendor distribution | ||||||
|  |     let mut vendor_counts = std::collections::HashMap::new(); | ||||||
|  |     for node in &all_nodes { | ||||||
|  |         *vendor_counts.entry(&node.devices.vendor).or_insert(0) += 1; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\nNodes by Vendor:"); | ||||||
|  |     for (vendor, count) in vendor_counts { | ||||||
|  |         println!("  {}: {}", vendor, count); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\n--- Model Information ---"); | ||||||
|  |     println!("Node DB Prefix: {}", Node::db_prefix()); | ||||||
|  | } | ||||||
							
								
								
									
284  heromodels/examples/grid4_nodegroup_example.rs  Normal file
							| @@ -0,0 +1,284 @@ | |||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::{NodeGroup, PricingPolicy, SLAPolicy}; | ||||||
|  | use heromodels_core::Model; | ||||||
|  |  | ||||||
|  | // Helper function to print nodegroup details | ||||||
|  | fn print_nodegroup_details(nodegroup: &NodeGroup) { | ||||||
|  |     println!("\n--- NodeGroup Details ---"); | ||||||
|  |     println!("ID: {}", nodegroup.get_id()); | ||||||
|  |     println!("Farmer ID: {}", nodegroup.farmerid); | ||||||
|  |     println!("Description: {}", nodegroup.description); | ||||||
|  |     println!("Secret: {}", nodegroup.secret); | ||||||
|  |     println!("Compute Slice Pricing (CC): {:.4}", nodegroup.compute_slice_normalized_pricing_cc); | ||||||
|  |     println!("Storage Slice Pricing (CC): {:.4}", nodegroup.storage_slice_normalized_pricing_cc); | ||||||
|  |     println!("Signature Farmer: {}", nodegroup.signature_farmer); | ||||||
|  |     println!("Created At: {}", nodegroup.base_data.created_at); | ||||||
|  |     println!("Modified At: {}", nodegroup.base_data.modified_at); | ||||||
|  |      | ||||||
|  |     // Print SLA Policy details | ||||||
|  |     println!("  SLA Policy:"); | ||||||
|  |     println!("    Uptime: {}%", nodegroup.slapolicy.sla_uptime); | ||||||
|  |     println!("    Bandwidth: {} Mbit/s", nodegroup.slapolicy.sla_bandwidth_mbit); | ||||||
|  |     println!("    Penalty: {}%", nodegroup.slapolicy.sla_penalty); | ||||||
|  |      | ||||||
|  |     // Print Pricing Policy details | ||||||
|  |     println!("  Pricing Policy:"); | ||||||
|  |     println!("    Marketplace Year Discounts: {:?}%", nodegroup.pricingpolicy.marketplace_year_discounts); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | fn main() { | ||||||
|  |     // Create a new DB instance in /tmp/grid4_nodegroups_db, and reset before every run | ||||||
|  |     let db = heromodels::db::hero::OurDB::new("/tmp/grid4_nodegroups_db", true).expect("Can create DB"); | ||||||
|  |  | ||||||
|  |     println!("Grid4 NodeGroup Models - Basic Usage Example"); | ||||||
|  |     println!("==========================================="); | ||||||
|  |  | ||||||
|  |     // Create SLA policies | ||||||
|  |     let sla_policy_premium = SLAPolicy { | ||||||
|  |         sla_uptime: 99, | ||||||
|  |         sla_bandwidth_mbit: 1000, | ||||||
|  |         sla_penalty: 200, | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let sla_policy_standard = SLAPolicy { | ||||||
|  |         sla_uptime: 95, | ||||||
|  |         sla_bandwidth_mbit: 100, | ||||||
|  |         sla_penalty: 100, | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let sla_policy_basic = SLAPolicy { | ||||||
|  |         sla_uptime: 90, | ||||||
|  |         sla_bandwidth_mbit: 50, | ||||||
|  |         sla_penalty: 50, | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Create pricing policies | ||||||
|  |     let pricing_policy_aggressive = PricingPolicy { | ||||||
|  |         marketplace_year_discounts: vec![40, 50, 60], | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let pricing_policy_standard = PricingPolicy { | ||||||
|  |         marketplace_year_discounts: vec![30, 40, 50], | ||||||
|  |     }; | ||||||
|  |      | ||||||
|  |     let pricing_policy_conservative = PricingPolicy { | ||||||
|  |         marketplace_year_discounts: vec![20, 30, 40], | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     // Create nodegroups with different configurations | ||||||
|  |  | ||||||
|  |     // NodeGroup 1 - Premium hosting provider | ||||||
|  |     let nodegroup1 = NodeGroup::new() | ||||||
|  |         .farmerid(501) | ||||||
|  |         .secret("encrypted_boot_secret_premium_abc123".to_string()) | ||||||
|  |         .description("Premium hosting with 99% uptime SLA and high-speed connectivity".to_string()) | ||||||
|  |         .slapolicy(sla_policy_premium.clone()) | ||||||
|  |         .pricingpolicy(pricing_policy_aggressive.clone()) | ||||||
|  |         .compute_slice_normalized_pricing_cc(0.0450) | ||||||
|  |         .storage_slice_normalized_pricing_cc(0.0180) | ||||||
|  |         .signature_farmer("farmer_501_premium_signature_xyz789".to_string()); | ||||||
|  |  | ||||||
|  |     // NodeGroup 2 - Standard business provider | ||||||
|  |     let nodegroup2 = NodeGroup::new() | ||||||
|  |         .farmerid(502) | ||||||
|  |         .secret("encrypted_boot_secret_standard_def456".to_string()) | ||||||
|  |         .description("Standard business hosting with reliable performance".to_string()) | ||||||
|  |         .slapolicy(sla_policy_standard.clone()) | ||||||
|  |         .pricingpolicy(pricing_policy_standard.clone()) | ||||||
|  |         .compute_slice_normalized_pricing_cc(0.0350) | ||||||
|  |         .storage_slice_normalized_pricing_cc(0.0150) | ||||||
|  |         .signature_farmer("farmer_502_standard_signature_uvw012".to_string()); | ||||||
|  |  | ||||||
|  |     // NodeGroup 3 - Budget-friendly provider | ||||||
|  |     let nodegroup3 = NodeGroup::new() | ||||||
|  |         .farmerid(503) | ||||||
|  |         .secret("encrypted_boot_secret_budget_ghi789".to_string()) | ||||||
|  |         .description("Cost-effective hosting for development and testing".to_string()) | ||||||
|  |         .slapolicy(sla_policy_basic.clone()) | ||||||
|  |         .pricingpolicy(pricing_policy_conservative.clone()) | ||||||
|  |         .compute_slice_normalized_pricing_cc(0.0250) | ||||||
|  |         .storage_slice_normalized_pricing_cc(0.0120) | ||||||
|  |         .signature_farmer("farmer_503_budget_signature_rst345".to_string()); | ||||||
|  |  | ||||||
|  |     // NodeGroup 4 - Enterprise provider | ||||||
|  |     let nodegroup4 = NodeGroup::new() | ||||||
|  |         .farmerid(504) | ||||||
|  |         .secret("encrypted_boot_secret_enterprise_jkl012".to_string()) | ||||||
|  |         .description("Enterprise-grade infrastructure with maximum reliability".to_string()) | ||||||
|  |         .slapolicy(sla_policy_premium.clone()) | ||||||
|  |         .pricingpolicy(pricing_policy_standard.clone()) | ||||||
|  |         .compute_slice_normalized_pricing_cc(0.0500) | ||||||
|  |         .storage_slice_normalized_pricing_cc(0.0200) | ||||||
|  |         .signature_farmer("farmer_504_enterprise_signature_mno678".to_string()); | ||||||
|  |  | ||||||
|  |     // Save all nodegroups to database and get their assigned IDs and updated models | ||||||
|  |     let (nodegroup1_id, db_nodegroup1) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .set(&nodegroup1) | ||||||
|  |         .expect("can set nodegroup"); | ||||||
|  |     let (nodegroup2_id, db_nodegroup2) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .set(&nodegroup2) | ||||||
|  |         .expect("can set nodegroup"); | ||||||
|  |     let (nodegroup3_id, db_nodegroup3) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .set(&nodegroup3) | ||||||
|  |         .expect("can set nodegroup"); | ||||||
|  |     let (nodegroup4_id, db_nodegroup4) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .set(&nodegroup4) | ||||||
|  |         .expect("can set nodegroup"); | ||||||
|  |  | ||||||
|  |     println!("NodeGroup 1 assigned ID: {}", nodegroup1_id); | ||||||
|  |     println!("NodeGroup 2 assigned ID: {}", nodegroup2_id); | ||||||
|  |     println!("NodeGroup 3 assigned ID: {}", nodegroup3_id); | ||||||
|  |     println!("NodeGroup 4 assigned ID: {}", nodegroup4_id); | ||||||
|  |  | ||||||
|  |     // Print all nodegroups retrieved from database | ||||||
|  |     println!("\n--- NodeGroups Retrieved from Database ---"); | ||||||
|  |     println!("\n1. Premium hosting provider:"); | ||||||
|  |     print_nodegroup_details(&db_nodegroup1); | ||||||
|  |  | ||||||
|  |     println!("\n2. Standard business provider:"); | ||||||
|  |     print_nodegroup_details(&db_nodegroup2); | ||||||
|  |  | ||||||
|  |     println!("\n3. Budget-friendly provider:"); | ||||||
|  |     print_nodegroup_details(&db_nodegroup3); | ||||||
|  |  | ||||||
|  |     println!("\n4. Enterprise provider:"); | ||||||
|  |     print_nodegroup_details(&db_nodegroup4); | ||||||
|  |  | ||||||
|  |     // Demonstrate different ways to retrieve nodegroups from the database | ||||||
|  |  | ||||||
|  |     // 1. Retrieve by farmer ID index | ||||||
|  |     println!("\n--- Retrieving NodeGroups by Different Methods ---"); | ||||||
|  |     println!("\n1. By Farmer ID Index (Farmer 502):"); | ||||||
|  |     let farmer_nodegroups = db | ||||||
|  |         .collection::<NodeGroup>() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .get_by_index("farmerid", &502u32) | ||||||
|  |         .expect("can load nodegroups by farmer"); | ||||||
|  |  | ||||||
|  |     assert_eq!(farmer_nodegroups.len(), 1); | ||||||
|  |     print_nodegroup_details(&farmer_nodegroups[0]); | ||||||
|  |  | ||||||
|  |     // 2. Update nodegroup pricing | ||||||
|  |     println!("\n2. Updating NodeGroup Pricing:"); | ||||||
|  |     let mut updated_nodegroup = db_nodegroup3.clone(); | ||||||
|  |     updated_nodegroup.compute_slice_normalized_pricing_cc = 0.0280; | ||||||
|  |     updated_nodegroup.storage_slice_normalized_pricing_cc = 0.0130; | ||||||
|  |  | ||||||
|  |     let (_, price_updated_nodegroup) = db | ||||||
|  |         .collection::<NodeGroup>() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .set(&updated_nodegroup) | ||||||
|  |         .expect("can update nodegroup"); | ||||||
|  |  | ||||||
|  |     println!("Updated pricing for budget provider:"); | ||||||
|  |     println!("  Compute: {:.4} CC", price_updated_nodegroup.compute_slice_normalized_pricing_cc); | ||||||
|  |     println!("  Storage: {:.4} CC", price_updated_nodegroup.storage_slice_normalized_pricing_cc); | ||||||
|  |  | ||||||
|  |     // 3. Update SLA policy | ||||||
|  |     println!("\n3. Updating SLA Policy:"); | ||||||
|  |     let mut sla_updated_nodegroup = db_nodegroup2.clone(); | ||||||
|  |     sla_updated_nodegroup.slapolicy.sla_uptime = 98; | ||||||
|  |     sla_updated_nodegroup.slapolicy.sla_bandwidth_mbit = 500; | ||||||
|  |  | ||||||
|  |     let (_, sla_updated_nodegroup) = db | ||||||
|  |         .collection::<NodeGroup>() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .set(&sla_updated_nodegroup) | ||||||
|  |         .expect("can update nodegroup"); | ||||||
|  |  | ||||||
|  |     println!("Updated SLA policy for standard provider:"); | ||||||
|  |     println!("  Uptime: {}%", sla_updated_nodegroup.slapolicy.sla_uptime); | ||||||
|  |     println!("  Bandwidth: {} Mbit/s", sla_updated_nodegroup.slapolicy.sla_bandwidth_mbit); | ||||||
|  |  | ||||||
|  |     // Show all nodegroups and calculate analytics | ||||||
|  |     let all_nodegroups = db | ||||||
|  |         .collection::<NodeGroup>() | ||||||
|  |         .expect("can open nodegroup collection") | ||||||
|  |         .get_all() | ||||||
|  |         .expect("can load all nodegroups"); | ||||||
|  |  | ||||||
|  |     println!("\n--- NodeGroup Analytics ---"); | ||||||
|  |     println!("Total NodeGroups: {}", all_nodegroups.len()); | ||||||
|  |  | ||||||
|  |     // Calculate pricing statistics | ||||||
|  |     let avg_compute_price: f64 = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.compute_slice_normalized_pricing_cc) | ||||||
|  |         .sum::<f64>() / all_nodegroups.len() as f64; | ||||||
|  |     let avg_storage_price: f64 = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.storage_slice_normalized_pricing_cc) | ||||||
|  |         .sum::<f64>() / all_nodegroups.len() as f64; | ||||||
|  |  | ||||||
|  |     let min_compute_price = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.compute_slice_normalized_pricing_cc) | ||||||
|  |         .fold(f64::INFINITY, f64::min); | ||||||
|  |     let max_compute_price = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.compute_slice_normalized_pricing_cc) | ||||||
|  |         .fold(f64::NEG_INFINITY, f64::max); | ||||||
|  |  | ||||||
|  |     println!("Pricing Statistics:"); | ||||||
|  |     println!("  Average Compute Price: {:.4} CC", avg_compute_price); | ||||||
|  |     println!("  Average Storage Price: {:.4} CC", avg_storage_price); | ||||||
|  |     println!("  Compute Price Range: {:.4} - {:.4} CC", min_compute_price, max_compute_price); | ||||||
|  |  | ||||||
|  |     // Calculate SLA statistics | ||||||
|  |     let avg_uptime: f64 = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.slapolicy.sla_uptime as f64) | ||||||
|  |         .sum::<f64>() / all_nodegroups.len() as f64; | ||||||
|  |     let avg_bandwidth: f64 = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.slapolicy.sla_bandwidth_mbit as f64) | ||||||
|  |         .sum::<f64>() / all_nodegroups.len() as f64; | ||||||
|  |     let avg_penalty: f64 = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.slapolicy.sla_penalty as f64) | ||||||
|  |         .sum::<f64>() / all_nodegroups.len() as f64; | ||||||
|  |  | ||||||
|  |     println!("\nSLA Statistics:"); | ||||||
|  |     println!("  Average Uptime Guarantee: {:.1}%", avg_uptime); | ||||||
|  |     println!("  Average Bandwidth Guarantee: {:.0} Mbit/s", avg_bandwidth); | ||||||
|  |     println!("  Average Penalty Rate: {:.0}%", avg_penalty); | ||||||
|  |  | ||||||
|  |     // Count farmers | ||||||
|  |     let unique_farmers: std::collections::HashSet<_> = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.farmerid) | ||||||
|  |         .collect(); | ||||||
|  |  | ||||||
|  |     println!("\nFarmer Statistics:"); | ||||||
|  |     println!("  Unique Farmers: {}", unique_farmers.len()); | ||||||
|  |     println!("  NodeGroups per Farmer: {:.1}", all_nodegroups.len() as f64 / unique_farmers.len() as f64); | ||||||
|  |  | ||||||
|  |     // Analyze discount policies | ||||||
|  |     let total_discount_tiers: usize = all_nodegroups.iter() | ||||||
|  |         .map(|ng| ng.pricingpolicy.marketplace_year_discounts.len()) | ||||||
|  |         .sum(); | ||||||
|  |     let avg_discount_tiers: f64 = total_discount_tiers as f64 / all_nodegroups.len() as f64; | ||||||
|  |  | ||||||
|  |     println!("\nDiscount Policy Statistics:"); | ||||||
|  |     println!("  Average Discount Tiers: {:.1}", avg_discount_tiers); | ||||||
|  |  | ||||||
|  |     // Find best value providers (high SLA, low price) | ||||||
|  |     println!("\n--- Provider Rankings ---"); | ||||||
|  |     let mut providers_with_scores: Vec<_> = all_nodegroups.iter() | ||||||
|  |         .map(|ng| { | ||||||
|  |             let value_score = (ng.slapolicy.sla_uptime as f64) / ng.compute_slice_normalized_pricing_cc; | ||||||
|  |             (ng, value_score) | ||||||
|  |         }) | ||||||
|  |         .collect(); | ||||||
|  |      | ||||||
|  |     providers_with_scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); | ||||||
|  |  | ||||||
|  |     println!("Best Value Providers (Uptime/Price ratio):"); | ||||||
|  |     for (i, (ng, score)) in providers_with_scores.iter().enumerate() { | ||||||
|  |         println!("  {}. Farmer {}: {:.0} ({}% uptime, {:.4} CC)",  | ||||||
|  |             i + 1, ng.farmerid, score, ng.slapolicy.sla_uptime, ng.compute_slice_normalized_pricing_cc); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\n--- Model Information ---"); | ||||||
|  |     println!("NodeGroup DB Prefix: {}", NodeGroup::db_prefix()); | ||||||
|  | } | ||||||
							
								
								
									
311  heromodels/examples/grid4_reputation_example.rs  Normal file
							| @@ -0,0 +1,311 @@ | |||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::{NodeGroupReputation, NodeReputation}; | ||||||
|  | use heromodels_core::Model; | ||||||
|  |  | ||||||
|  | // Helper function to print nodegroup reputation details | ||||||
|  | fn print_nodegroup_reputation_details(reputation: &NodeGroupReputation) { | ||||||
|  |     println!("\n--- NodeGroup Reputation Details ---"); | ||||||
|  |     println!("ID: {}", reputation.get_id()); | ||||||
|  |     println!("NodeGroup ID: {}", reputation.nodegroup_id); | ||||||
|  |     println!("Reputation Score: {}/100", reputation.reputation); | ||||||
|  |     println!("Uptime: {}%", reputation.uptime); | ||||||
|  |     println!("Node Count: {}", reputation.nodes.len()); | ||||||
|  |     println!("Created At: {}", reputation.base_data.created_at); | ||||||
|  |     println!("Modified At: {}", reputation.base_data.modified_at); | ||||||
|  |      | ||||||
|  |     // Print individual node reputations | ||||||
|  |     if !reputation.nodes.is_empty() { | ||||||
|  |         println!("  Individual Node Reputations:"); | ||||||
|  |         for (i, node_rep) in reputation.nodes.iter().enumerate() { | ||||||
|  |             println!("    {}. Node {}: Reputation {}/100, Uptime {}%",  | ||||||
|  |                 i + 1, node_rep.node_id, node_rep.reputation, node_rep.uptime); | ||||||
|  |         } | ||||||
|  |          | ||||||
|  |         // Calculate average node reputation and uptime | ||||||
|  |         let avg_node_reputation: f64 = reputation.nodes.iter() | ||||||
|  |             .map(|n| n.reputation as f64) | ||||||
|  |             .sum::<f64>() / reputation.nodes.len() as f64; | ||||||
|  |         let avg_node_uptime: f64 = reputation.nodes.iter() | ||||||
|  |             .map(|n| n.uptime as f64) | ||||||
|  |             .sum::<f64>() / reputation.nodes.len() as f64; | ||||||
|  |          | ||||||
|  |         println!("    Average Node Reputation: {:.1}/100", avg_node_reputation); | ||||||
|  |         println!("    Average Node Uptime: {:.1}%", avg_node_uptime); | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | fn main() { | ||||||
|  |     // Create a new DB instance in /tmp/grid4_reputation_db, and reset before every run | ||||||
|  |     let db = heromodels::db::hero::OurDB::new("/tmp/grid4_reputation_db", true).expect("Can create DB"); | ||||||
|  |  | ||||||
|  |     println!("Grid4 Reputation Models - Basic Usage Example"); | ||||||
|  |     println!("============================================"); | ||||||
|  |  | ||||||
|  |     // Create individual node reputations | ||||||
|  |      | ||||||
|  |     // High-performing nodes | ||||||
|  |     let node_rep1 = NodeReputation::new() | ||||||
|  |         .node_id(1001) | ||||||
|  |         .reputation(85) | ||||||
|  |         .uptime(99); | ||||||
|  |      | ||||||
|  |     let node_rep2 = NodeReputation::new() | ||||||
|  |         .node_id(1002) | ||||||
|  |         .reputation(92) | ||||||
|  |         .uptime(98); | ||||||
|  |      | ||||||
|  |     let node_rep3 = NodeReputation::new() | ||||||
|  |         .node_id(1003) | ||||||
|  |         .reputation(78) | ||||||
|  |         .uptime(97); | ||||||
|  |  | ||||||
|  |     // Medium-performing nodes | ||||||
|  |     let node_rep4 = NodeReputation::new() | ||||||
|  |         .node_id(2001) | ||||||
|  |         .reputation(65) | ||||||
|  |         .uptime(94); | ||||||
|  |      | ||||||
|  |     let node_rep5 = NodeReputation::new() | ||||||
|  |         .node_id(2002) | ||||||
|  |         .reputation(72) | ||||||
|  |         .uptime(96); | ||||||
|  |  | ||||||
|  |     // Lower-performing nodes | ||||||
|  |     let node_rep6 = NodeReputation::new() | ||||||
|  |         .node_id(3001) | ||||||
|  |         .reputation(45) | ||||||
|  |         .uptime(88); | ||||||
|  |      | ||||||
|  |     let node_rep7 = NodeReputation::new() | ||||||
|  |         .node_id(3002) | ||||||
|  |         .reputation(38) | ||||||
|  |         .uptime(85); | ||||||
|  |  | ||||||
|  |     // New nodes with default reputation | ||||||
|  |     let node_rep8 = NodeReputation::new() | ||||||
|  |         .node_id(4001) | ||||||
|  |         .reputation(50) // default | ||||||
|  |         .uptime(0);     // just started | ||||||
|  |      | ||||||
|  |     let node_rep9 = NodeReputation::new() | ||||||
|  |         .node_id(4002) | ||||||
|  |         .reputation(50) // default | ||||||
|  |         .uptime(0);     // just started | ||||||
|  |  | ||||||
|  |     // Create nodegroup reputations with different performance profiles | ||||||
|  |  | ||||||
|  |     // NodeGroup 1 - High-performance provider | ||||||
|  |     let nodegroup_rep1 = NodeGroupReputation::new() | ||||||
|  |         .nodegroup_id(1001) | ||||||
|  |         .reputation(85) // high reputation earned over time | ||||||
|  |         .uptime(98)     // excellent uptime | ||||||
|  |         .add_node_reputation(node_rep1.clone()) | ||||||
|  |         .add_node_reputation(node_rep2.clone()) | ||||||
|  |         .add_node_reputation(node_rep3.clone()); | ||||||
|  |  | ||||||
|  |     // NodeGroup 2 - Medium-performance provider | ||||||
|  |     let nodegroup_rep2 = NodeGroupReputation::new() | ||||||
|  |         .nodegroup_id(1002) | ||||||
|  |         .reputation(68) // decent reputation | ||||||
|  |         .uptime(95)     // good uptime | ||||||
|  |         .add_node_reputation(node_rep4.clone()) | ||||||
|  |         .add_node_reputation(node_rep5.clone()); | ||||||
|  |  | ||||||
|  |     // NodeGroup 3 - Struggling provider | ||||||
|  |     let nodegroup_rep3 = NodeGroupReputation::new() | ||||||
|  |         .nodegroup_id(1003) | ||||||
|  |         .reputation(42) // below average reputation | ||||||
|  |         .uptime(87)     // poor uptime | ||||||
|  |         .add_node_reputation(node_rep6.clone()) | ||||||
|  |         .add_node_reputation(node_rep7.clone()); | ||||||
|  |  | ||||||
|  |     // NodeGroup 4 - New provider (default reputation) | ||||||
|  |     let nodegroup_rep4 = NodeGroupReputation::new() | ||||||
|  |         .nodegroup_id(1004) | ||||||
|  |         .reputation(50) // default starting reputation | ||||||
|  |         .uptime(0)      // no history yet | ||||||
|  |         .add_node_reputation(node_rep8.clone()) | ||||||
|  |         .add_node_reputation(node_rep9.clone()); | ||||||
|  |  | ||||||
|  |     // Save all nodegroup reputations to database and get their assigned IDs and updated models | ||||||
|  |     let (rep1_id, db_rep1) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .set(&nodegroup_rep1) | ||||||
|  |         .expect("can set reputation"); | ||||||
|  |     let (rep2_id, db_rep2) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .set(&nodegroup_rep2) | ||||||
|  |         .expect("can set reputation"); | ||||||
|  |     let (rep3_id, db_rep3) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .set(&nodegroup_rep3) | ||||||
|  |         .expect("can set reputation"); | ||||||
|  |     let (rep4_id, db_rep4) = db | ||||||
|  |         .collection() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .set(&nodegroup_rep4) | ||||||
|  |         .expect("can set reputation"); | ||||||
|  |  | ||||||
|  |     println!("NodeGroup Reputation 1 assigned ID: {}", rep1_id); | ||||||
|  |     println!("NodeGroup Reputation 2 assigned ID: {}", rep2_id); | ||||||
|  |     println!("NodeGroup Reputation 3 assigned ID: {}", rep3_id); | ||||||
|  |     println!("NodeGroup Reputation 4 assigned ID: {}", rep4_id); | ||||||
|  |  | ||||||
|  |     // Print all reputation records retrieved from database | ||||||
|  |     println!("\n--- Reputation Records Retrieved from Database ---"); | ||||||
|  |     println!("\n1. High-performance provider:"); | ||||||
|  |     print_nodegroup_reputation_details(&db_rep1); | ||||||
|  |  | ||||||
|  |     println!("\n2. Medium-performance provider:"); | ||||||
|  |     print_nodegroup_reputation_details(&db_rep2); | ||||||
|  |  | ||||||
|  |     println!("\n3. Struggling provider:"); | ||||||
|  |     print_nodegroup_reputation_details(&db_rep3); | ||||||
|  |  | ||||||
|  |     println!("\n4. New provider:"); | ||||||
|  |     print_nodegroup_reputation_details(&db_rep4); | ||||||
|  |  | ||||||
|  |     // Demonstrate different ways to retrieve reputation records from the database | ||||||
|  |  | ||||||
|  |     // 1. Retrieve by nodegroup ID index | ||||||
|  |     println!("\n--- Retrieving Reputation by Different Methods ---"); | ||||||
|  |     println!("\n1. By NodeGroup ID Index (NodeGroup 1002):"); | ||||||
|  |     let nodegroup_reps = db | ||||||
|  |         .collection::<NodeGroupReputation>() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .get_by_index("nodegroup_id", &1002u32) | ||||||
|  |         .expect("can load reputation by nodegroup"); | ||||||
|  |  | ||||||
|  |     assert_eq!(nodegroup_reps.len(), 1); | ||||||
|  |     print_nodegroup_reputation_details(&nodegroup_reps[0]); | ||||||
|  |  | ||||||
|  |     // 2. Update reputation scores (simulate performance improvement) | ||||||
|  |     println!("\n2. Updating Reputation Scores (Performance Improvement):"); | ||||||
|  |     let mut improved_rep = db_rep3.clone(); | ||||||
|  |     improved_rep.reputation = 55; // improved from 42 | ||||||
|  |     improved_rep.uptime = 92;     // improved from 87 | ||||||
|  |      | ||||||
|  |     // Also improve individual node reputations | ||||||
|  |     for node_rep in &mut improved_rep.nodes { | ||||||
|  |         node_rep.reputation += 10; // boost each node's reputation | ||||||
|  |         node_rep.uptime += 5;      // improve uptime | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     let (_, updated_rep) = db | ||||||
|  |         .collection::<NodeGroupReputation>() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .set(&improved_rep) | ||||||
|  |         .expect("can update reputation"); | ||||||
|  |  | ||||||
|  |     println!("Updated reputation for struggling provider:"); | ||||||
|  |     print_nodegroup_reputation_details(&updated_rep); | ||||||
|  |  | ||||||
|  |     // 3. Add new node to existing nodegroup reputation | ||||||
|  |     println!("\n3. Adding New Node to Existing NodeGroup:"); | ||||||
|  |     let new_node_rep = NodeReputation::new() | ||||||
|  |         .node_id(1004) | ||||||
|  |         .reputation(88) | ||||||
|  |         .uptime(99); | ||||||
|  |  | ||||||
|  |     let mut expanded_rep = db_rep1.clone(); | ||||||
|  |     expanded_rep.add_node_reputation(new_node_rep); | ||||||
|  |     // Recalculate nodegroup reputation based on node average | ||||||
|  |     let total_node_rep: i32 = expanded_rep.nodes.iter().map(|n| n.reputation).sum(); | ||||||
|  |     expanded_rep.reputation = total_node_rep / expanded_rep.nodes.len() as i32; | ||||||
|  |  | ||||||
|  |     let (_, expanded_rep) = db | ||||||
|  |         .collection::<NodeGroupReputation>() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .set(&expanded_rep) | ||||||
|  |         .expect("can update reputation"); | ||||||
|  |  | ||||||
|  |     println!("Added new high-performing node to top provider:"); | ||||||
|  |     print_nodegroup_reputation_details(&expanded_rep); | ||||||
|  |  | ||||||
|  |     // Show all reputation records and calculate analytics | ||||||
|  |     let all_reps = db | ||||||
|  |         .collection::<NodeGroupReputation>() | ||||||
|  |         .expect("can open reputation collection") | ||||||
|  |         .get_all() | ||||||
|  |         .expect("can load all reputations"); | ||||||
|  |  | ||||||
|  |     println!("\n--- Reputation Analytics ---"); | ||||||
|  |     println!("Total NodeGroup Reputations: {}", all_reps.len()); | ||||||
|  |  | ||||||
|  |     // Calculate overall statistics | ||||||
|  |     let avg_nodegroup_reputation: f64 = all_reps.iter() | ||||||
|  |         .map(|r| r.reputation as f64) | ||||||
|  |         .sum::<f64>() / all_reps.len() as f64; | ||||||
|  |     let avg_nodegroup_uptime: f64 = all_reps.iter() | ||||||
|  |         .filter(|r| r.uptime > 0) // exclude new providers with 0 uptime | ||||||
|  |         .map(|r| r.uptime as f64) | ||||||
|  |         .sum::<f64>() / all_reps.iter().filter(|r| r.uptime > 0).count() as f64; | ||||||
|  |  | ||||||
|  |     println!("Overall Statistics:"); | ||||||
|  |     println!("  Average NodeGroup Reputation: {:.1}/100", avg_nodegroup_reputation); | ||||||
|  |     println!("  Average NodeGroup Uptime: {:.1}%", avg_nodegroup_uptime); | ||||||
|  |  | ||||||
|  |     // Count reputation tiers | ||||||
|  |     let excellent_reps = all_reps.iter().filter(|r| r.reputation >= 80).count(); | ||||||
|  |     let good_reps = all_reps.iter().filter(|r| r.reputation >= 60 && r.reputation < 80).count(); | ||||||
|  |     let average_reps = all_reps.iter().filter(|r| r.reputation >= 40 && r.reputation < 60).count(); | ||||||
|  |     let poor_reps = all_reps.iter().filter(|r| r.reputation < 40).count(); | ||||||
|  |  | ||||||
|  |     println!("\nReputation Distribution:"); | ||||||
|  |     println!("  Excellent (80-100): {}", excellent_reps); | ||||||
|  |     println!("  Good (60-79): {}", good_reps); | ||||||
|  |     println!("  Average (40-59): {}", average_reps); | ||||||
|  |     println!("  Poor (0-39): {}", poor_reps); | ||||||
|  |  | ||||||
|  |     // Calculate total nodes and their statistics | ||||||
|  |     let total_nodes: usize = all_reps.iter().map(|r| r.nodes.len()).sum(); | ||||||
|  |     let all_node_reps: Vec<i32> = all_reps.iter() | ||||||
|  |         .flat_map(|r| &r.nodes) | ||||||
|  |         .map(|n| n.reputation) | ||||||
|  |         .collect(); | ||||||
|  |     let all_node_uptimes: Vec<i32> = all_reps.iter() | ||||||
|  |         .flat_map(|r| &r.nodes) | ||||||
|  |         .filter(|n| n.uptime > 0) | ||||||
|  |         .map(|n| n.uptime) | ||||||
|  |         .collect(); | ||||||
|  |  | ||||||
|  |     let avg_node_reputation: f64 = all_node_reps.iter().sum::<i32>() as f64 / all_node_reps.len() as f64; | ||||||
|  |     let avg_node_uptime: f64 = all_node_uptimes.iter().sum::<i32>() as f64 / all_node_uptimes.len() as f64; | ||||||
|  |  | ||||||
|  |     println!("\nNode-Level Statistics:"); | ||||||
|  |     println!("  Total Nodes: {}", total_nodes); | ||||||
|  |     println!("  Average Node Reputation: {:.1}/100", avg_node_reputation); | ||||||
|  |     println!("  Average Node Uptime: {:.1}%", avg_node_uptime); | ||||||
|  |  | ||||||
|  |     // Find best and worst performing nodegroups | ||||||
|  |     let best_nodegroup = all_reps.iter().max_by_key(|r| r.reputation).unwrap(); | ||||||
|  |     let worst_nodegroup = all_reps.iter().min_by_key(|r| r.reputation).unwrap(); | ||||||
|  |  | ||||||
|  |     println!("\nPerformance Leaders:"); | ||||||
|  |     println!("  Best NodeGroup: {} (Reputation: {}, Uptime: {}%)",  | ||||||
|  |         best_nodegroup.nodegroup_id, best_nodegroup.reputation, best_nodegroup.uptime); | ||||||
|  |     println!("  Worst NodeGroup: {} (Reputation: {}, Uptime: {}%)",  | ||||||
|  |         worst_nodegroup.nodegroup_id, worst_nodegroup.reputation, worst_nodegroup.uptime); | ||||||
|  |  | ||||||
|  |     // Rank nodegroups by reputation | ||||||
|  |     let mut ranked_nodegroups: Vec<_> = all_reps.iter().collect(); | ||||||
|  |     ranked_nodegroups.sort_by(|a, b| b.reputation.cmp(&a.reputation)); | ||||||
|  |  | ||||||
|  |     println!("\nNodeGroup Rankings (by Reputation):"); | ||||||
|  |     for (i, rep) in ranked_nodegroups.iter().enumerate() { | ||||||
|  |         let status = match rep.reputation { | ||||||
|  |             80..=100 => "Excellent", | ||||||
|  |             60..=79 => "Good", | ||||||
|  |             40..=59 => "Average", | ||||||
|  |             _ => "Poor", | ||||||
|  |         }; | ||||||
|  |         println!("  {}. NodeGroup {}: {} ({}/100, {}% uptime)",  | ||||||
|  |             i + 1, rep.nodegroup_id, status, rep.reputation, rep.uptime); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     println!("\n--- Model Information ---"); | ||||||
|  |     println!("NodeGroupReputation DB Prefix: {}", NodeGroupReputation::db_prefix()); | ||||||
|  | } | ||||||
| @@ -1,6 +1,6 @@ | |||||||
|  | use heromodels::models::heroledger::rhai::register_heroledger_rhai_modules; | ||||||
| use heromodels_core::db::hero::OurDB; | use heromodels_core::db::hero::OurDB; | ||||||
| use rhai::{Dynamic, Engine}; | use rhai::{Dynamic, Engine}; | ||||||
| use heromodels::models::heroledger::rhai::register_heroledger_rhai_modules; |  | ||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
| use std::{fs, path::Path}; | use std::{fs, path::Path}; | ||||||
|  |  | ||||||
|   | |||||||
							
								
								
									
15  heromodels/examples/heroledger_example/README.md  Normal file
							| @@ -0,0 +1,15 @@ | |||||||
|  | # Heroledger Postgres Example | ||||||
|  |  | ||||||
|  | This example demonstrates how to use the Heroledger `User` model against Postgres using the `heromodels::db::postgres` backend. | ||||||
|  |  | ||||||
|  | - Connects to Postgres with user `postgres` and password `test123` on `localhost:5432`. | ||||||
|  | - Creates the table and indexes automatically on first use. | ||||||
|  | - Shows basic CRUD and an index lookup on `username`. | ||||||
|  |  | ||||||
|  | Run it: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | cargo run -p heromodels --example heroledger_example | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | Make sure Postgres is running locally and accessible with the credentials above. | ||||||
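|  |  | ||||||
|  | If you don't have a local server handy, a throwaway container is the quickest way to get one with matching credentials. This is only a sketch and assumes Docker is available; the image tag is an arbitrary choice, not something the example requires: | ||||||
|  |  | ||||||
|  | ```bash | ||||||
|  | # Start a disposable Postgres with the credentials the example expects | ||||||
|  | docker run --rm -d --name heroledger-pg \ | ||||||
|  |   -e POSTGRES_PASSWORD=test123 \ | ||||||
|  |   -p 5432:5432 \ | ||||||
|  |   postgres:16 | ||||||
|  | ``` | ||||||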
							
								
								
									
54  heromodels/examples/heroledger_example/example.rs  Normal file
							| @@ -0,0 +1,54 @@ | |||||||
|  | use heromodels::db::postgres::{Config, Postgres}; | ||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::heroledger::user::user_index::username; | ||||||
|  | use heromodels::models::heroledger::user::{SecretBox, User}; | ||||||
|  |  | ||||||
|  | fn main() { | ||||||
|  |     let db = Postgres::new( | ||||||
|  |         Config::new() | ||||||
|  |             .user(Some("postgres".into())) | ||||||
|  |             .password(Some("test123".into())) | ||||||
|  |             .host(Some("localhost".into())) | ||||||
|  |             .port(Some(5432)), | ||||||
|  |     ) | ||||||
|  |     .expect("Can connect to Postgres"); | ||||||
|  |  | ||||||
|  |     println!("Heroledger User - Postgres Example"); | ||||||
|  |     println!("=================================="); | ||||||
|  |  | ||||||
|  |     let users = db.collection::<User>().expect("open user collection"); | ||||||
|  |  | ||||||
|  |     // Clean up any users left over from previous runs so the example starts fresh | ||||||
|  |     if let Ok(existing) = users.get_all() { | ||||||
|  |         for u in existing { | ||||||
|  |             let _ = users.delete_by_id(u.get_id()); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     let sb = SecretBox::new().data(vec![1, 2, 3]).nonce(vec![9, 9, 9]).build(); | ||||||
|  |  | ||||||
|  |     let u = User::new(0) | ||||||
|  |         .username("alice") | ||||||
|  |         .pubkey("PUBKEY_A") | ||||||
|  |         .add_email("alice@example.com") | ||||||
|  |         .add_userprofile(sb) | ||||||
|  |         .build(); | ||||||
|  |  | ||||||
|  |     let (id, stored) = users.set(&u).expect("store user"); | ||||||
|  |     println!("Stored user id={id} username={} pubkey={}", stored.username, stored.pubkey); | ||||||
|  |  | ||||||
|  |     let by_idx = users.get::<username, _>("alice").expect("by username"); | ||||||
|  |     println!("Found {} user(s) with username=alice", by_idx.len()); | ||||||
|  |  | ||||||
|  |     let fetched = users.get_by_id(id).expect("get by id").expect("exists"); | ||||||
|  |     println!("Fetched by id={} username={} emails={:?}", id, fetched.username, fetched.email); | ||||||
|  |  | ||||||
|  |     // Update | ||||||
|  |     let updated = fetched.clone().add_email("work@alice.example"); | ||||||
|  |     let (_, back) = users.set(&updated).expect("update user"); | ||||||
|  |     println!("Updated emails = {:?}", back.email); | ||||||
|  |  | ||||||
|  |     // Delete | ||||||
|  |     users.delete_by_id(id).expect("delete user"); | ||||||
|  |     println!("Deleted user id={id}"); | ||||||
|  | } | ||||||
| @@ -1,8 +1,11 @@ | |||||||
| use heromodels::db::postgres::Config; | use heromodels::db::postgres::Config; | ||||||
| use heromodels::db::{Collection, Db}; | use heromodels::db::{Collection, Db}; | ||||||
| use heromodels::models::userexample::user::user_index::{is_active, username}; | use heromodels::models::userexample::user::user_index::{email, username}; | ||||||
| use heromodels::models::{Comment, User}; | use heromodels::models::{Comment, User}; | ||||||
| use heromodels_core::Model; | use heromodels_core::Model; | ||||||
|  | // For demonstrating embedded/nested indexes | ||||||
|  | use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node}; | ||||||
|  | use heromodels::models::grid4::node::node_index::{country as node_country, pubkey as node_pubkey}; | ||||||
|  |  | ||||||
| // Helper function to print user details | // Helper function to print user details | ||||||
| fn print_user_details(user: &User) { | fn print_user_details(user: &User) { | ||||||
| @@ -37,14 +40,31 @@ fn main() { | |||||||
|     ) |     ) | ||||||
|     .expect("Can connect to postgress"); |     .expect("Can connect to postgress"); | ||||||
|  |  | ||||||
|  |     // Unique suffix to avoid collisions with legacy rows from prior runs | ||||||
|  |     use std::time::{SystemTime, UNIX_EPOCH}; | ||||||
|  |     let ts = SystemTime::now() | ||||||
|  |         .duration_since(UNIX_EPOCH) | ||||||
|  |         .unwrap() | ||||||
|  |         .as_secs(); | ||||||
|  |     let user1_name = format!("johndoe_{}", ts); | ||||||
|  |     let user2_name = format!("janesmith_{}", ts); | ||||||
|  |     let user3_name = format!("willism_{}", ts); | ||||||
|  |     let user4_name = format!("carrols_{}", ts); | ||||||
|  |     let user1_email = format!("john.doe+{}@example.com", ts); | ||||||
|  |     let user2_email = format!("jane.smith+{}@example.com", ts); | ||||||
|  |     let user3_email = format!("willis.masters+{}@example.com", ts); | ||||||
|  |     let user4_email = format!("carrol.smith+{}@example.com", ts); | ||||||
|  |  | ||||||
|     println!("Hero Models - Basic Usage Example"); |     println!("Hero Models - Basic Usage Example"); | ||||||
|     println!("================================"); |     println!("================================"); | ||||||
|  |  | ||||||
|     // Clean up any existing data to ensure consistent results |     // Clean up any existing data to ensure consistent results | ||||||
|     println!("Cleaning up existing data..."); |     println!("Cleaning up existing data..."); | ||||||
|     let user_collection = db.collection::<User>().expect("can open user collection"); |     let user_collection = db.collection::<User>().expect("can open user collection"); | ||||||
|     let comment_collection = db.collection::<Comment>().expect("can open comment collection"); |     let comment_collection = db | ||||||
|      |         .collection::<Comment>() | ||||||
|  |         .expect("can open comment collection"); | ||||||
|  |  | ||||||
|     // Clear all existing users and comments |     // Clear all existing users and comments | ||||||
|     if let Ok(existing_users) = user_collection.get_all() { |     if let Ok(existing_users) = user_collection.get_all() { | ||||||
|         for user in existing_users { |         for user in existing_users { | ||||||
| @@ -62,32 +82,32 @@ fn main() { | |||||||
|  |  | ||||||
|     // User 1 |     // User 1 | ||||||
|     let user1 = User::new() |     let user1 = User::new() | ||||||
|         .username("johndoe") |         .username(&user1_name) | ||||||
|         .email("john.doe@example.com") |         .email(&user1_email) | ||||||
|         .full_name("John Doe") |         .full_name("John Doe") | ||||||
|         .is_active(false) |         .is_active(false) | ||||||
|         .build(); |         .build(); | ||||||
|  |  | ||||||
|     // User 2 |     // User 2 | ||||||
|     let user2 = User::new() |     let user2 = User::new() | ||||||
|         .username("janesmith") |         .username(&user2_name) | ||||||
|         .email("jane.smith@example.com") |         .email(&user2_email) | ||||||
|         .full_name("Jane Smith") |         .full_name("Jane Smith") | ||||||
|         .is_active(true) |         .is_active(true) | ||||||
|         .build(); |         .build(); | ||||||
|  |  | ||||||
|     // User 3 |     // User 3 | ||||||
|     let user3 = User::new() |     let user3 = User::new() | ||||||
|         .username("willism") |         .username(&user3_name) | ||||||
|         .email("willis.masters@example.com") |         .email(&user3_email) | ||||||
|         .full_name("Willis Masters") |         .full_name("Willis Masters") | ||||||
|         .is_active(true) |         .is_active(true) | ||||||
|         .build(); |         .build(); | ||||||
|  |  | ||||||
|     // User 4 |     // User 4 | ||||||
|     let user4 = User::new() |     let user4 = User::new() | ||||||
|         .username("carrols") |         .username(&user4_name) | ||||||
|         .email("carrol.smith@example.com") |         .email(&user4_email) | ||||||
|         .full_name("Carrol Smith") |         .full_name("Carrol Smith") | ||||||
|         .is_active(false) |         .is_active(false) | ||||||
|         .build(); |         .build(); | ||||||
| @@ -143,66 +163,95 @@ fn main() { | |||||||
|     let stored_users = db |     let stored_users = db | ||||||
|         .collection::<User>() |         .collection::<User>() | ||||||
|         .expect("can open user collection") |         .expect("can open user collection") | ||||||
|         .get::<username, _>("johndoe") |         .get::<username, _>(&user1_name) | ||||||
|         .expect("can load stored user"); |         .expect("can load stored user"); | ||||||
|  |  | ||||||
|     assert_eq!(stored_users.len(), 1); |     assert_eq!(stored_users.len(), 1); | ||||||
|     print_user_details(&stored_users[0]); |     print_user_details(&stored_users[0]); | ||||||
|  |  | ||||||
|     // 2. Retrieve by active status |     // 2. Retrieve by email index | ||||||
|     println!("\n2. By Active Status (Active = true):"); |     println!("\n2. By Email Index:"); | ||||||
|     let active_users = db |     let by_email = db | ||||||
|         .collection::<User>() |         .collection::<User>() | ||||||
|         .expect("can open user collection") |         .expect("can open user collection") | ||||||
|         .get::<is_active, _>(&true) |         .get::<email, _>(&user2_email) | ||||||
|         .expect("can load stored users"); |         .expect("can load stored user by email"); | ||||||
|  |     assert_eq!(by_email.len(), 1); | ||||||
|     assert_eq!(active_users.len(), 2); |     print_user_details(&by_email[0]); | ||||||
|     for active_user in active_users.iter() { |  | ||||||
|         print_user_details(active_user); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     // 3. Delete a user and show the updated results |     // 3. Delete a user and show the updated results | ||||||
|     println!("\n3. After Deleting a User:"); |     println!("\n3. After Deleting a User:"); | ||||||
|     let user_to_delete_id = active_users[0].get_id(); |     let user_to_delete_id = stored_users[0].get_id(); | ||||||
|     println!("Deleting user with ID: {user_to_delete_id}"); |     println!("Deleting user with ID: {user_to_delete_id}"); | ||||||
|     db.collection::<User>() |     db.collection::<User>() | ||||||
|         .expect("can open user collection") |         .expect("can open user collection") | ||||||
|         .delete_by_id(user_to_delete_id) |         .delete_by_id(user_to_delete_id) | ||||||
|         .expect("can delete existing user"); |         .expect("can delete existing user"); | ||||||
|  |  | ||||||
|     // Show remaining active users |     // Verify deletion by querying the same username again | ||||||
|     let active_users = db |     let should_be_empty = db | ||||||
|         .collection::<User>() |         .collection::<User>() | ||||||
|         .expect("can open user collection") |         .expect("can open user collection") | ||||||
|         .get::<is_active, _>(&true) |         .get::<username, _>(&user1_name) | ||||||
|         .expect("can load stored users"); |         .expect("can query by username after delete"); | ||||||
|  |     println!("   a. Query by username '{}' after delete -> {} results", user1_name, should_be_empty.len()); | ||||||
|     println!("   a. Remaining Active Users:"); |     assert_eq!(should_be_empty.len(), 0); | ||||||
|     assert_eq!(active_users.len(), 1); |  | ||||||
|     for active_user in active_users.iter() { |  | ||||||
|         print_user_details(active_user); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     // Show inactive users |  | ||||||
|     let inactive_users = db |  | ||||||
|         .collection::<User>() |  | ||||||
|         .expect("can open user collection") |  | ||||||
|         .get::<is_active, _>(&false) |  | ||||||
|         .expect("can load stored users"); |  | ||||||
|  |  | ||||||
|     println!("   b. Inactive Users:"); |  | ||||||
|     assert_eq!(inactive_users.len(), 2); |  | ||||||
|     for inactive_user in inactive_users.iter() { |  | ||||||
|         print_user_details(inactive_user); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     // Delete a user based on an index for good measure |     // Delete a user based on an index for good measure | ||||||
|     db.collection::<User>() |     db.collection::<User>() | ||||||
|         .expect("can open user collection") |         .expect("can open user collection") | ||||||
|         .delete::<username, _>("janesmith") |         .delete::<username, _>(&user4_name) | ||||||
|         .expect("can delete existing user"); |         .expect("can delete existing user"); | ||||||
|  |  | ||||||
|  |     // Demonstrate embedded/nested indexes with Grid4 Node | ||||||
|  |     println!("\n--- Demonstrating Embedded/Nested Indexes (Grid4::Node) ---"); | ||||||
|  |     println!("Node indexed fields: {:?}", Node::indexed_fields()); | ||||||
|  |  | ||||||
|  |     // Build a minimal node with nested data and persist it | ||||||
|  |     let cs = ComputeSlice::new() | ||||||
|  |         .nodeid(42) | ||||||
|  |         .slice_id(1) | ||||||
|  |         .mem_gb(32.0) | ||||||
|  |         .storage_gb(512.0) | ||||||
|  |         .passmark(6000) | ||||||
|  |         .vcores(16) | ||||||
|  |         .gpus(1) | ||||||
|  |         .price_cc(0.33); | ||||||
|  |     let dev = DeviceInfo { vendor: "ACME".into(), ..Default::default() }; | ||||||
|  |     let node = Node::new() | ||||||
|  |         .nodegroupid(101) | ||||||
|  |         .uptime(99) | ||||||
|  |         .add_compute_slice(cs) | ||||||
|  |         .devices(dev) | ||||||
|  |         .country("BE") | ||||||
|  |         .pubkey("EX_NODE_PK_1") | ||||||
|  |         .build(); | ||||||
|  |     let (node_id, _stored_node) = db | ||||||
|  |         .collection::<Node>() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .set(&node) | ||||||
|  |         .expect("can set node"); | ||||||
|  |     println!("Stored node id: {}", node_id); | ||||||
|  |  | ||||||
|  |     // Query by top-level indexes | ||||||
|  |     let be_nodes = db | ||||||
|  |         .collection::<Node>() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .get::<node_country, _>("BE") | ||||||
|  |         .expect("can query nodes by country"); | ||||||
|  |     println!("Nodes in BE (count may include legacy rows): {}", be_nodes.len()); | ||||||
|  |  | ||||||
|  |     let by_pk = db | ||||||
|  |         .collection::<Node>() | ||||||
|  |         .expect("can open node collection") | ||||||
|  |         .get::<node_pubkey, _>("EX_NODE_PK_1") | ||||||
|  |         .expect("can query node by pubkey"); | ||||||
|  |     assert!(by_pk.iter().any(|n| n.get_id() == node_id)); | ||||||
|  |  | ||||||
|  |     // Note: Nested path indexes (e.g., devices.vendor, computeslices.passmark) are created and used | ||||||
|  |     // for DB-side indexing, but are not yet exposed as typed Index keys in the API. They appear in | ||||||
|  |     // Node::indexed_fields() and contribute to Model::db_keys(), enabling performant JSONB GIN indexes. | ||||||
|  |  | ||||||
|     println!("\n--- User Model Information ---"); |     println!("\n--- User Model Information ---"); | ||||||
|     println!("User DB Prefix: {}", User::db_prefix()); |     println!("User DB Prefix: {}", User::db_prefix()); | ||||||
|  |  | ||||||
| @@ -212,7 +261,7 @@ fn main() { | |||||||
|     // 1. Create and save a comment |     // 1. Create and save a comment | ||||||
|     println!("\n1. Creating a Comment:"); |     println!("\n1. Creating a Comment:"); | ||||||
|     let comment = Comment::new() |     let comment = Comment::new() | ||||||
|         .user_id(db_user1.get_id()) // commenter's user ID |         .user_id(db_user2.get_id()) // commenter's user ID (use an existing user) | ||||||
|         .content("This is a comment on the user") |         .content("This is a comment on the user") | ||||||
|         .build(); |         .build(); | ||||||
|  |  | ||||||
| @@ -230,7 +279,7 @@ fn main() { | |||||||
|  |  | ||||||
|     // 3. Associate the comment with a user |     // 3. Associate the comment with a user | ||||||
|     println!("\n2. Associating Comment with User:"); |     println!("\n2. Associating Comment with User:"); | ||||||
|     let mut updated_user = db_user1.clone(); |     let mut updated_user = db_user2.clone(); | ||||||
|     updated_user.base_data.add_comment(db_comment.get_id()); |     updated_user.base_data.add_comment(db_comment.get_id()); | ||||||
|  |  | ||||||
|     // Save the updated user and get the new version |     // Save the updated user and get the new version | ||||||
|   | |||||||
| @@ -8,8 +8,8 @@ use std::{ | |||||||
|     collections::HashSet, |     collections::HashSet, | ||||||
|     path::PathBuf, |     path::PathBuf, | ||||||
|     sync::{ |     sync::{ | ||||||
|         atomic::{AtomicU32, Ordering}, |  | ||||||
|         Arc, Mutex, |         Arc, Mutex, | ||||||
|  |         atomic::{AtomicU32, Ordering}, | ||||||
|     }, |     }, | ||||||
| }; | }; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -119,4 +119,4 @@ impl Circle { | |||||||
| /// Creates a new circle builder | /// Creates a new circle builder | ||||||
| pub fn new_circle() -> Circle { | pub fn new_circle() -> Circle { | ||||||
|     Circle::new() |     Circle::new() | ||||||
| } | } | ||||||
|   | |||||||
| @@ -1,16 +1,17 @@ | |||||||
| use crate::db::Db; | use crate::db::Db; | ||||||
| use rhailib_macros::{ |  | ||||||
|     register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn, register_authorized_get_by_id_fn, |  | ||||||
| }; |  | ||||||
| use rhai::plugin::*; | use rhai::plugin::*; | ||||||
| use rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module}; | use rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module}; | ||||||
|  | use rhailib_macros::{ | ||||||
|  |     register_authorized_create_by_id_fn, register_authorized_delete_by_id_fn, | ||||||
|  |     register_authorized_get_by_id_fn, | ||||||
|  | }; | ||||||
| use std::collections::HashMap; | use std::collections::HashMap; | ||||||
| use std::sync::Arc; | use std::sync::Arc; | ||||||
|  |  | ||||||
| use crate::models::circle::Circle; | use crate::models::circle::Circle; | ||||||
| type RhaiCircle = Circle; | type RhaiCircle = Circle; | ||||||
| use crate::db::hero::OurDB; |  | ||||||
| use crate::db::Collection; | use crate::db::Collection; | ||||||
|  | use crate::db::hero::OurDB; | ||||||
| use crate::models::circle::ThemeData; | use crate::models::circle::ThemeData; | ||||||
|  |  | ||||||
| #[export_module] | #[export_module] | ||||||
|   | |||||||
							
								
								
									
heromodels/src/models/grid4/bid.rs (new file, 128 lines)
| @@ -0,0 +1,128 @@ | |||||||
|  | use heromodels_core::BaseModelData; | ||||||
|  | use heromodels_derive::model; | ||||||
|  | use rhai::{CustomType, TypeBuilder}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
|  | /// Bid status enumeration | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
|  | pub enum BidStatus { | ||||||
|  |     #[default] | ||||||
|  |     Pending, | ||||||
|  |     Confirmed, | ||||||
|  |     Assigned, | ||||||
|  |     Cancelled, | ||||||
|  |     Done, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// Billing period enumeration | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
|  | pub enum BillingPeriod { | ||||||
|  |     #[default] | ||||||
|  |     Hourly, | ||||||
|  |     Monthly, | ||||||
|  |     Yearly, | ||||||
|  |     Biannually, | ||||||
|  |     Triannually, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// I can bid for infra, and optionally get accepted | ||||||
|  | #[model] | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct Bid { | ||||||
|  |     pub base_data: BaseModelData, | ||||||
|  |     /// links back to customer for this capacity (user on ledger) | ||||||
|  |     #[index] | ||||||
|  |     pub customer_id: u32, | ||||||
|  |     /// nr of slices I need in 1 machine | ||||||
|  |     pub compute_slices_nr: i32, | ||||||
|  |     /// price per 1 GB slice I want to accept | ||||||
|  |     pub compute_slice_price: f64, | ||||||
|  |     /// nr of storage slices needed | ||||||
|  |     pub storage_slices_nr: i32, | ||||||
|  |     /// price per 1 GB storage slice I want to accept | ||||||
|  |     pub storage_slice_price: f64, | ||||||
|  |     pub status: BidStatus, | ||||||
|  |     /// if obligation then will be charged and money needs to be in escrow, otherwise it's an intent | ||||||
|  |     pub obligation: bool, | ||||||
|  |     /// epoch timestamp | ||||||
|  |     pub start_date: u32, | ||||||
|  |     /// epoch timestamp | ||||||
|  |     pub end_date: u32, | ||||||
|  |     /// signature as done by a user/consumer to validate their identity and intent | ||||||
|  |     pub signature_user: String, | ||||||
|  |     pub billing_period: BillingPeriod, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl Bid { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self { | ||||||
|  |             base_data: BaseModelData::new(), | ||||||
|  |             customer_id: 0, | ||||||
|  |             compute_slices_nr: 0, | ||||||
|  |             compute_slice_price: 0.0, | ||||||
|  |             storage_slices_nr: 0, | ||||||
|  |             storage_slice_price: 0.0, | ||||||
|  |             status: BidStatus::default(), | ||||||
|  |             obligation: false, | ||||||
|  |             start_date: 0, | ||||||
|  |             end_date: 0, | ||||||
|  |             signature_user: String::new(), | ||||||
|  |             billing_period: BillingPeriod::default(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn customer_id(mut self, v: u32) -> Self { | ||||||
|  |         self.customer_id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn compute_slices_nr(mut self, v: i32) -> Self { | ||||||
|  |         self.compute_slices_nr = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn compute_slice_price(mut self, v: f64) -> Self { | ||||||
|  |         self.compute_slice_price = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn storage_slices_nr(mut self, v: i32) -> Self { | ||||||
|  |         self.storage_slices_nr = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn storage_slice_price(mut self, v: f64) -> Self { | ||||||
|  |         self.storage_slice_price = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn status(mut self, v: BidStatus) -> Self { | ||||||
|  |         self.status = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn obligation(mut self, v: bool) -> Self { | ||||||
|  |         self.obligation = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn start_date(mut self, v: u32) -> Self { | ||||||
|  |         self.start_date = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn end_date(mut self, v: u32) -> Self { | ||||||
|  |         self.end_date = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn signature_user(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.signature_user = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn billing_period(mut self, v: BillingPeriod) -> Self { | ||||||
|  |         self.billing_period = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | } | ||||||
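
A minimal usage sketch for the new `Bid` builder above. It only uses the setters defined in this file; the import path assumes the `grid4` re-exports added in `mod.rs` later in this diff, and every id, price, and date is a made-up placeholder.

```rust
use heromodels::models::grid4::{Bid, BidStatus, BillingPeriod};

fn example_bid() -> Bid {
    // An intent (obligation = false) to rent 2 compute slices and 10 storage slices.
    Bid::new()
        .customer_id(7)                     // user on the ledger placing the bid
        .compute_slices_nr(2)
        .compute_slice_price(0.25)          // CC per 1 GB compute slice
        .storage_slices_nr(10)
        .storage_slice_price(0.01)          // CC per 1 GB storage slice
        .status(BidStatus::Pending)
        .obligation(false)
        .start_date(1_735_689_600)          // epoch seconds, placeholder
        .end_date(1_767_225_600)
        .signature_user("EX_USER_SIG")      // placeholder signature
        .billing_period(BillingPeriod::Monthly)
}
```

Since `Bid` is a `#[model]` type, persisting it should follow the same `db.collection::<Bid>()...set(&bid)` pattern the example program uses for `User` and `Node`.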
							
								
								
									
heromodels/src/models/grid4/common.rs (new file, 39 lines)
| @@ -0,0 +1,39 @@ | |||||||
|  | use rhai::{CustomType, TypeBuilder}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
|  | /// SLA policy matching the V spec `SLAPolicy` | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct SLAPolicy { | ||||||
|  |     /// should be 90+ | ||||||
|  |     pub sla_uptime: i32, | ||||||
|  |     /// minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee | ||||||
|  |     pub sla_bandwidth_mbit: i32, | ||||||
|  |     /// 0-100, percent of money given back in relation to month if sla breached, | ||||||
|  |     /// e.g. 200 means we return 2 months worth of rev if sla missed | ||||||
|  |     pub sla_penalty: i32, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl SLAPolicy { | ||||||
|  |     pub fn new() -> Self { Self::default() } | ||||||
|  |     pub fn sla_uptime(mut self, v: i32) -> Self { self.sla_uptime = v; self } | ||||||
|  |     pub fn sla_bandwidth_mbit(mut self, v: i32) -> Self { self.sla_bandwidth_mbit = v; self } | ||||||
|  |     pub fn sla_penalty(mut self, v: i32) -> Self { self.sla_penalty = v; self } | ||||||
|  |     pub fn build(self) -> Self { self } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// Pricing policy matching the V spec `PricingPolicy` | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct PricingPolicy { | ||||||
|  |     /// e.g. 30,40,50 means if user has more CC in wallet than 1 year utilization | ||||||
|  |     /// then this provider gives 30%, 2Y 40%, ... | ||||||
|  |     pub marketplace_year_discounts: Vec<i32>, | ||||||
|  |     /// e.g. 10,20,30 | ||||||
|  |     pub volume_discounts: Vec<i32>, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl PricingPolicy { | ||||||
|  |     pub fn new() -> Self { Self { marketplace_year_discounts: vec![30, 40, 50], volume_discounts: vec![10, 20, 30] } } | ||||||
|  |     pub fn marketplace_year_discounts(mut self, v: Vec<i32>) -> Self { self.marketplace_year_discounts = v; self } | ||||||
|  |     pub fn volume_discounts(mut self, v: Vec<i32>) -> Self { self.volume_discounts = v; self } | ||||||
|  |     pub fn build(self) -> Self { self } | ||||||
|  | } | ||||||
							
								
								
									
heromodels/src/models/grid4/contract.rs (new file, 219 lines)
| @@ -0,0 +1,219 @@ | |||||||
|  | use heromodels_core::BaseModelData; | ||||||
|  | use heromodels_derive::model; | ||||||
|  | use rhai::{CustomType, TypeBuilder}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  | use super::bid::BillingPeriod; | ||||||
|  |  | ||||||
|  | /// Contract status enumeration | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
|  | pub enum ContractStatus { | ||||||
|  |     #[default] | ||||||
|  |     Active, | ||||||
|  |     Cancelled, | ||||||
|  |     Error, | ||||||
|  |     Paused, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// Compute slice provisioned for a contract | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct ComputeSliceProvisioned { | ||||||
|  |     pub node_id: u32, | ||||||
|  |     /// the id of the slice in the node | ||||||
|  |     pub id: u16, | ||||||
|  |     pub mem_gb: f64, | ||||||
|  |     pub storage_gb: f64, | ||||||
|  |     pub passmark: i32, | ||||||
|  |     pub vcores: i32, | ||||||
|  |     pub cpu_oversubscription: i32, | ||||||
|  |     pub tags: String, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// Storage slice provisioned for a contract | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct StorageSliceProvisioned { | ||||||
|  |     pub node_id: u32, | ||||||
|  |     /// the id of the slice in the node, are tracked in the node itself | ||||||
|  |     pub id: u16, | ||||||
|  |     pub storage_size_gb: i32, | ||||||
|  |     pub tags: String, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// Contract for provisioned infrastructure | ||||||
|  | #[model] | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct Contract { | ||||||
|  |     pub base_data: BaseModelData, | ||||||
|  |     /// links back to customer for this capacity (user on ledger) | ||||||
|  |     #[index] | ||||||
|  |     pub customer_id: u32, | ||||||
|  |     pub compute_slices: Vec<ComputeSliceProvisioned>, | ||||||
|  |     pub storage_slices: Vec<StorageSliceProvisioned>, | ||||||
|  |     /// price per 1 GB agreed upon | ||||||
|  |     pub compute_slice_price: f64, | ||||||
|  |     /// price per 1 GB agreed upon | ||||||
|  |     pub storage_slice_price: f64, | ||||||
|  |     /// price per 1 GB agreed upon (transfer) | ||||||
|  |     pub network_slice_price: f64, | ||||||
|  |     pub status: ContractStatus, | ||||||
|  |     /// epoch timestamp | ||||||
|  |     pub start_date: u32, | ||||||
|  |     /// epoch timestamp | ||||||
|  |     pub end_date: u32, | ||||||
|  |     /// signature as done by a user/consumer to validate their identity and intent | ||||||
|  |     pub signature_user: String, | ||||||
|  |     /// signature as done by the hoster | ||||||
|  |     pub signature_hoster: String, | ||||||
|  |     pub billing_period: BillingPeriod, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl Contract { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self { | ||||||
|  |             base_data: BaseModelData::new(), | ||||||
|  |             customer_id: 0, | ||||||
|  |             compute_slices: Vec::new(), | ||||||
|  |             storage_slices: Vec::new(), | ||||||
|  |             compute_slice_price: 0.0, | ||||||
|  |             storage_slice_price: 0.0, | ||||||
|  |             network_slice_price: 0.0, | ||||||
|  |             status: ContractStatus::default(), | ||||||
|  |             start_date: 0, | ||||||
|  |             end_date: 0, | ||||||
|  |             signature_user: String::new(), | ||||||
|  |             signature_hoster: String::new(), | ||||||
|  |             billing_period: BillingPeriod::default(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn customer_id(mut self, v: u32) -> Self { | ||||||
|  |         self.customer_id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn add_compute_slice(mut self, slice: ComputeSliceProvisioned) -> Self { | ||||||
|  |         self.compute_slices.push(slice); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn add_storage_slice(mut self, slice: StorageSliceProvisioned) -> Self { | ||||||
|  |         self.storage_slices.push(slice); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn compute_slice_price(mut self, v: f64) -> Self { | ||||||
|  |         self.compute_slice_price = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn storage_slice_price(mut self, v: f64) -> Self { | ||||||
|  |         self.storage_slice_price = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn network_slice_price(mut self, v: f64) -> Self { | ||||||
|  |         self.network_slice_price = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn status(mut self, v: ContractStatus) -> Self { | ||||||
|  |         self.status = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn start_date(mut self, v: u32) -> Self { | ||||||
|  |         self.start_date = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn end_date(mut self, v: u32) -> Self { | ||||||
|  |         self.end_date = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn signature_user(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.signature_user = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn signature_hoster(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.signature_hoster = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn billing_period(mut self, v: BillingPeriod) -> Self { | ||||||
|  |         self.billing_period = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl ComputeSliceProvisioned { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self::default() | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn node_id(mut self, v: u32) -> Self { | ||||||
|  |         self.node_id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn id(mut self, v: u16) -> Self { | ||||||
|  |         self.id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn mem_gb(mut self, v: f64) -> Self { | ||||||
|  |         self.mem_gb = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn storage_gb(mut self, v: f64) -> Self { | ||||||
|  |         self.storage_gb = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn passmark(mut self, v: i32) -> Self { | ||||||
|  |         self.passmark = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn vcores(mut self, v: i32) -> Self { | ||||||
|  |         self.vcores = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn cpu_oversubscription(mut self, v: i32) -> Self { | ||||||
|  |         self.cpu_oversubscription = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn tags(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.tags = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl StorageSliceProvisioned { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self::default() | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn node_id(mut self, v: u32) -> Self { | ||||||
|  |         self.node_id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn id(mut self, v: u16) -> Self { | ||||||
|  |         self.id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn storage_size_gb(mut self, v: i32) -> Self { | ||||||
|  |         self.storage_size_gb = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn tags(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.tags = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | } | ||||||
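
The provisioned-slice structs and `Contract` compose the same way. A sketch under the same assumptions (re-exports from `mod.rs`, placeholder values throughout):

```rust
use heromodels::models::grid4::{
    BillingPeriod, ComputeSliceProvisioned, Contract, ContractStatus, StorageSliceProvisioned,
};

fn example_contract() -> Contract {
    // One provisioned compute slice and one storage slice on node 42.
    let cs = ComputeSliceProvisioned::new()
        .node_id(42)
        .id(1)
        .mem_gb(2.0)
        .storage_gb(20.0)
        .passmark(1500)
        .vcores(2)
        .cpu_oversubscription(2)
        .tags("general");
    let ss = StorageSliceProvisioned::new()
        .node_id(42)
        .id(7)
        .storage_size_gb(1)
        .tags("hdd");

    Contract::new()
        .customer_id(7)
        .add_compute_slice(cs)
        .add_storage_slice(ss)
        .compute_slice_price(0.33)          // CC per 1 GB, as agreed
        .storage_slice_price(0.01)
        .network_slice_price(0.002)         // CC per 1 GB transferred
        .status(ContractStatus::Active)
        .start_date(1_735_689_600)
        .end_date(1_767_225_600)
        .signature_user("EX_USER_SIG")
        .signature_hoster("EX_HOSTER_SIG")
        .billing_period(BillingPeriod::Monthly)
}
```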
| @@ -1,16 +1,18 @@ | |||||||
|  | pub mod bid; | ||||||
|  | pub mod common; | ||||||
|  | pub mod contract; | ||||||
| pub mod node; | pub mod node; | ||||||
|  | pub mod nodegroup; | ||||||
|  | pub mod reputation; | ||||||
|  | pub mod reservation; | ||||||
|  |  | ||||||
|  | pub use bid::{Bid, BidStatus, BillingPeriod}; | ||||||
|  | pub use common::{PricingPolicy, SLAPolicy}; | ||||||
|  | pub use contract::{Contract, ContractStatus, ComputeSliceProvisioned, StorageSliceProvisioned}; | ||||||
| pub use node::{ | pub use node::{ | ||||||
|     Node, |     CPUDevice, ComputeSlice, DeviceInfo, GPUDevice, MemoryDevice, NetworkDevice, Node, | ||||||
|     DeviceInfo, |     NodeCapacity, StorageDevice, StorageSlice, | ||||||
|     StorageDevice, | }; | ||||||
|     MemoryDevice, | pub use nodegroup::NodeGroup; | ||||||
|     CPUDevice, | pub use reputation::{NodeGroupReputation, NodeReputation}; | ||||||
|     GPUDevice, | pub use reservation::{Reservation, ReservationStatus}; | ||||||
|     NetworkDevice, |  | ||||||
|     NodeCapacity, |  | ||||||
|     ComputeSlice, |  | ||||||
|     StorageSlice, |  | ||||||
|     PricingPolicy, |  | ||||||
|     SLAPolicy, |  | ||||||
| }; |  | ||||||
|   | |||||||
| @@ -1,7 +1,8 @@ | |||||||
| use heromodels_core::BaseModelData; | use heromodels_core::BaseModelData; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use rhai::CustomType; | use rhai::{CustomType, TypeBuilder}; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
|  | use super::common::{PricingPolicy, SLAPolicy}; | ||||||
|  |  | ||||||
| /// Storage device information | /// Storage device information | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
| @@ -94,57 +95,26 @@ pub struct NodeCapacity { | |||||||
|     pub vcores: i32, |     pub vcores: i32, | ||||||
| } | } | ||||||
|  |  | ||||||
| /// Pricing policy for slices (minimal version until full spec available) | // PricingPolicy and SLAPolicy moved to `common.rs` to be shared across models. | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] |  | ||||||
| pub struct PricingPolicy { |  | ||||||
|     /// Human friendly policy name (e.g. "fixed", "market") |  | ||||||
|     pub name: String, |  | ||||||
|     /// Optional free-form details as JSON-encoded string |  | ||||||
|     pub details: Option<String>, |  | ||||||
| } |  | ||||||
|  |  | ||||||
| /// SLA policy for slices (minimal version until full spec available) |  | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] |  | ||||||
| pub struct SLAPolicy { |  | ||||||
|     /// Uptime in percentage (0..100) |  | ||||||
|     pub uptime: f32, |  | ||||||
|     /// Max response time in ms |  | ||||||
|     pub max_response_time_ms: u32, |  | ||||||
| } |  | ||||||
|  |  | ||||||
| /// Compute slice (typically represents a base unit of compute) | /// Compute slice (typically represents a base unit of compute) | ||||||
| #[model] |  | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
| pub struct ComputeSlice { | pub struct ComputeSlice { | ||||||
|     pub base_data: BaseModelData, |  | ||||||
|     /// the node in the grid, there is an object describing the node |  | ||||||
|     #[index] |  | ||||||
|     pub nodeid: u32, |  | ||||||
|     /// the id of the slice in the node |     /// the id of the slice in the node | ||||||
|     #[index] |     pub id: u16, | ||||||
|     pub id: i32, |  | ||||||
|     pub mem_gb: f64, |     pub mem_gb: f64, | ||||||
|     pub storage_gb: f64, |     pub storage_gb: f64, | ||||||
|     pub passmark: i32, |     pub passmark: i32, | ||||||
|     pub vcores: i32, |     pub vcores: i32, | ||||||
|     pub cpu_oversubscription: i32, |     pub cpu_oversubscription: i32, | ||||||
|     pub storage_oversubscription: i32, |     pub storage_oversubscription: i32, | ||||||
|     /// Min/max allowed price range for validation |  | ||||||
|     #[serde(default)] |  | ||||||
|     pub price_range: Vec<f64>, |  | ||||||
|     /// nr of GPU's see node to know what GPU's are |     /// nr of GPU's see node to know what GPU's are | ||||||
|     pub gpus: u8, |     pub gpus: u8, | ||||||
|     /// price per slice (even if the grouped one) |  | ||||||
|     pub price_cc: f64, |  | ||||||
|     pub pricing_policy: PricingPolicy, |  | ||||||
|     pub sla_policy: SLAPolicy, |  | ||||||
| } | } | ||||||
|  |  | ||||||
| impl ComputeSlice { | impl ComputeSlice { | ||||||
|     pub fn new() -> Self { |     pub fn new() -> Self { | ||||||
|         Self { |         Self { | ||||||
|             base_data: BaseModelData::new(), |  | ||||||
|             nodeid: 0, |  | ||||||
|             id: 0, |             id: 0, | ||||||
|             mem_gb: 0.0, |             mem_gb: 0.0, | ||||||
|             storage_gb: 0.0, |             storage_gb: 0.0, | ||||||
| @@ -152,63 +122,62 @@ impl ComputeSlice { | |||||||
|             vcores: 0, |             vcores: 0, | ||||||
|             cpu_oversubscription: 0, |             cpu_oversubscription: 0, | ||||||
|             storage_oversubscription: 0, |             storage_oversubscription: 0, | ||||||
|             price_range: vec![0.0, 0.0], |  | ||||||
|             gpus: 0, |             gpus: 0, | ||||||
|             price_cc: 0.0, |  | ||||||
|             pricing_policy: PricingPolicy::default(), |  | ||||||
|             sla_policy: SLAPolicy::default(), |  | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self } |     pub fn id(mut self, id: u16) -> Self { | ||||||
|     pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self } |         self.id = id; | ||||||
|     pub fn mem_gb(mut self, v: f64) -> Self { self.mem_gb = v; self } |         self | ||||||
|     pub fn storage_gb(mut self, v: f64) -> Self { self.storage_gb = v; self } |     } | ||||||
|     pub fn passmark(mut self, v: i32) -> Self { self.passmark = v; self } |     pub fn mem_gb(mut self, v: f64) -> Self { | ||||||
|     pub fn vcores(mut self, v: i32) -> Self { self.vcores = v; self } |         self.mem_gb = v; | ||||||
|     pub fn cpu_oversubscription(mut self, v: i32) -> Self { self.cpu_oversubscription = v; self } |         self | ||||||
|     pub fn storage_oversubscription(mut self, v: i32) -> Self { self.storage_oversubscription = v; self } |     } | ||||||
|     pub fn price_range(mut self, min_max: Vec<f64>) -> Self { self.price_range = min_max; self } |     pub fn storage_gb(mut self, v: f64) -> Self { | ||||||
|     pub fn gpus(mut self, v: u8) -> Self { self.gpus = v; self } |         self.storage_gb = v; | ||||||
|     pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self } |         self | ||||||
|     pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self } |     } | ||||||
|     pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self } |     pub fn passmark(mut self, v: i32) -> Self { | ||||||
|  |         self.passmark = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn vcores(mut self, v: i32) -> Self { | ||||||
|  |         self.vcores = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn cpu_oversubscription(mut self, v: i32) -> Self { | ||||||
|  |         self.cpu_oversubscription = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn storage_oversubscription(mut self, v: i32) -> Self { | ||||||
|  |         self.storage_oversubscription = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn gpus(mut self, v: u8) -> Self { | ||||||
|  |         self.gpus = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
| } | } | ||||||
|  |  | ||||||
| /// Storage slice (typically 1GB of storage) | /// Storage slice (typically 1GB of storage) | ||||||
| #[model] |  | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
| pub struct StorageSlice { | pub struct StorageSlice { | ||||||
|     pub base_data: BaseModelData, |  | ||||||
|     /// the node in the grid |  | ||||||
|     #[index] |  | ||||||
|     pub nodeid: u32, |  | ||||||
|     /// the id of the slice in the node, are tracked in the node itself |     /// the id of the slice in the node, are tracked in the node itself | ||||||
|     #[index] |     pub id: u16, | ||||||
|     pub id: i32, |  | ||||||
|     /// price per slice (even if the grouped one) |  | ||||||
|     pub price_cc: f64, |  | ||||||
|     pub pricing_policy: PricingPolicy, |  | ||||||
|     pub sla_policy: SLAPolicy, |  | ||||||
| } | } | ||||||
|  |  | ||||||
| impl StorageSlice { | impl StorageSlice { | ||||||
|     pub fn new() -> Self { |     pub fn new() -> Self { | ||||||
|         Self { |         Self { | ||||||
|             base_data: BaseModelData::new(), |  | ||||||
|             nodeid: 0, |  | ||||||
|             id: 0, |             id: 0, | ||||||
|             price_cc: 0.0, |  | ||||||
|             pricing_policy: PricingPolicy::default(), |  | ||||||
|             sla_policy: SLAPolicy::default(), |  | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn nodeid(mut self, nodeid: u32) -> Self { self.nodeid = nodeid; self } |     pub fn id(mut self, id: u16) -> Self { | ||||||
|     pub fn slice_id(mut self, id: i32) -> Self { self.id = id; self } |         self.id = id; | ||||||
|     pub fn price_cc(mut self, v: f64) -> Self { self.price_cc = v; self } |         self | ||||||
|     pub fn pricing_policy(mut self, p: PricingPolicy) -> Self { self.pricing_policy = p; self } |     } | ||||||
|     pub fn sla_policy(mut self, p: SLAPolicy) -> Self { self.sla_policy = p; self } |  | ||||||
| } | } | ||||||
|  |  | ||||||
| /// Grid4 Node model | /// Grid4 Node model | ||||||
| @@ -224,13 +193,20 @@ pub struct Node { | |||||||
|     pub computeslices: Vec<ComputeSlice>, |     pub computeslices: Vec<ComputeSlice>, | ||||||
|     pub storageslices: Vec<StorageSlice>, |     pub storageslices: Vec<StorageSlice>, | ||||||
|     pub devices: DeviceInfo, |     pub devices: DeviceInfo, | ||||||
|     /// 2 letter code |     /// 2 letter code as specified in lib/data/countries/data/countryInfo.txt | ||||||
|     #[index] |     #[index] | ||||||
|     pub country: String, |     pub country: String, | ||||||
|     /// Hardware capacity details |     /// Hardware capacity details | ||||||
|     pub capacity: NodeCapacity, |     pub capacity: NodeCapacity, | ||||||
|     /// lets keep it simple and compatible |     /// first time node was active | ||||||
|     pub provisiontime: u32, |     pub birthtime: u32, | ||||||
|  |     /// node public key | ||||||
|  |     #[index] | ||||||
|  |     pub pubkey: String, | ||||||
|  |     /// signature done on node to validate pubkey with privkey | ||||||
|  |     pub signature_node: String, | ||||||
|  |     /// signature as done by farmers to validate their identity | ||||||
|  |     pub signature_farmer: String, | ||||||
| } | } | ||||||
|  |  | ||||||
| impl Node { | impl Node { | ||||||
| @@ -244,21 +220,61 @@ impl Node { | |||||||
|             devices: DeviceInfo::default(), |             devices: DeviceInfo::default(), | ||||||
|             country: String::new(), |             country: String::new(), | ||||||
|             capacity: NodeCapacity::default(), |             capacity: NodeCapacity::default(), | ||||||
|             provisiontime: 0, |             birthtime: 0, | ||||||
|  |             pubkey: String::new(), | ||||||
|  |             signature_node: String::new(), | ||||||
|  |             signature_farmer: String::new(), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn nodegroupid(mut self, v: i32) -> Self { self.nodegroupid = v; self } |     pub fn nodegroupid(mut self, v: i32) -> Self { | ||||||
|     pub fn uptime(mut self, v: i32) -> Self { self.uptime = v; self } |         self.nodegroupid = v; | ||||||
|     pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self { self.computeslices.push(s); self } |         self | ||||||
|     pub fn add_storage_slice(mut self, s: StorageSlice) -> Self { self.storageslices.push(s); self } |     } | ||||||
|     pub fn devices(mut self, d: DeviceInfo) -> Self { self.devices = d; self } |     pub fn uptime(mut self, v: i32) -> Self { | ||||||
|     pub fn country(mut self, c: impl ToString) -> Self { self.country = c.to_string(); self } |         self.uptime = v; | ||||||
|     pub fn capacity(mut self, c: NodeCapacity) -> Self { self.capacity = c; self } |         self | ||||||
|     pub fn provisiontime(mut self, t: u32) -> Self { self.provisiontime = t; self } |     } | ||||||
|  |     pub fn add_compute_slice(mut self, s: ComputeSlice) -> Self { | ||||||
|  |         self.computeslices.push(s); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn add_storage_slice(mut self, s: StorageSlice) -> Self { | ||||||
|  |         self.storageslices.push(s); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn devices(mut self, d: DeviceInfo) -> Self { | ||||||
|  |         self.devices = d; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn country(mut self, c: impl ToString) -> Self { | ||||||
|  |         self.country = c.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn capacity(mut self, c: NodeCapacity) -> Self { | ||||||
|  |         self.capacity = c; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn birthtime(mut self, t: u32) -> Self { | ||||||
|  |         self.birthtime = t; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn pubkey(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.pubkey = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn signature_node(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.signature_node = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |     pub fn signature_farmer(mut self, v: impl ToString) -> Self { | ||||||
|  |         self.signature_farmer = v.to_string(); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |  | ||||||
|     /// Placeholder for capacity recalculation out of the devices on the Node |     /// Placeholder for capacity recalculation out of the devices on the Node | ||||||
|     pub fn recalc_capacity(mut self) -> Self { |     pub fn check(self) -> Self { | ||||||
|         // TODO: calculate NodeCapacity out of the devices on the Node |         // TODO: calculate NodeCapacity out of the devices on the Node | ||||||
|         self |         self | ||||||
|     } |     } | ||||||
|   | |||||||
							
								
								
									
heromodels/src/models/grid4/nodegroup.rs (new file, 52 lines)
| @@ -0,0 +1,52 @@ | |||||||
|  | use heromodels_core::BaseModelData; | ||||||
|  | use heromodels_derive::model; | ||||||
|  | use rhai::{CustomType, TypeBuilder}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
|  | use super::common::{PricingPolicy, SLAPolicy}; | ||||||
|  |  | ||||||
|  | /// Grid4 NodeGroup model (root object for farmer configuration) | ||||||
|  | #[model] | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct NodeGroup { | ||||||
|  |     pub base_data: BaseModelData, | ||||||
|  |     /// link back to farmer who owns the nodegroup, is a user? | ||||||
|  |     #[index] | ||||||
|  |     pub farmerid: u32, | ||||||
|  |     /// only visible by farmer, in future encrypted, used to boot a node | ||||||
|  |     pub secret: String, | ||||||
|  |     pub description: String, | ||||||
|  |     pub slapolicy: SLAPolicy, | ||||||
|  |     pub pricingpolicy: PricingPolicy, | ||||||
|  |     /// pricing in CC - cloud credit, per 2GB node slice | ||||||
|  |     pub compute_slice_normalized_pricing_cc: f64, | ||||||
|  |     /// pricing in CC - cloud credit, per 1GB storage slice | ||||||
|  |     pub storage_slice_normalized_pricing_cc: f64, | ||||||
|  |     /// signature as done by farmers to validate that they created this group | ||||||
|  |     pub signature_farmer: String, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl NodeGroup { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self { | ||||||
|  |             base_data: BaseModelData::new(), | ||||||
|  |             farmerid: 0, | ||||||
|  |             secret: String::new(), | ||||||
|  |             description: String::new(), | ||||||
|  |             slapolicy: SLAPolicy::default(), | ||||||
|  |             pricingpolicy: PricingPolicy::new(), | ||||||
|  |             compute_slice_normalized_pricing_cc: 0.0, | ||||||
|  |             storage_slice_normalized_pricing_cc: 0.0, | ||||||
|  |             signature_farmer: String::new(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn farmerid(mut self, v: u32) -> Self { self.farmerid = v; self } | ||||||
|  |     pub fn secret(mut self, v: impl ToString) -> Self { self.secret = v.to_string(); self } | ||||||
|  |     pub fn description(mut self, v: impl ToString) -> Self { self.description = v.to_string(); self } | ||||||
|  |     pub fn slapolicy(mut self, v: SLAPolicy) -> Self { self.slapolicy = v; self } | ||||||
|  |     pub fn pricingpolicy(mut self, v: PricingPolicy) -> Self { self.pricingpolicy = v; self } | ||||||
|  |     pub fn compute_slice_normalized_pricing_cc(mut self, v: f64) -> Self { self.compute_slice_normalized_pricing_cc = v; self } | ||||||
|  |     pub fn storage_slice_normalized_pricing_cc(mut self, v: f64) -> Self { self.storage_slice_normalized_pricing_cc = v; self } | ||||||
|  |     pub fn signature_farmer(mut self, v: impl ToString) -> Self { self.signature_farmer = v.to_string(); self } | ||||||
|  | } | ||||||
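
A sketch of a farmer-side `NodeGroup` built from the shared `SLAPolicy` and `PricingPolicy` in `common.rs`; the policy numbers and prices below are illustrative only.

```rust
use heromodels::models::grid4::{NodeGroup, PricingPolicy, SLAPolicy};

fn example_nodegroup() -> NodeGroup {
    // 95% uptime target, 100 Mbit guaranteed, 1 month of revenue back on breach.
    let sla = SLAPolicy::new()
        .sla_uptime(95)
        .sla_bandwidth_mbit(100)
        .sla_penalty(100)
        .build();

    // Defaults are [30, 40, 50] and [10, 20, 30]; set explicitly here for clarity.
    let pricing = PricingPolicy::new()
        .marketplace_year_discounts(vec![30, 40, 50])
        .volume_discounts(vec![10, 20, 30]);

    NodeGroup::new()
        .farmerid(12)
        .secret("BOOT_SECRET_PLACEHOLDER")
        .description("EU nodes, SSD only")
        .slapolicy(sla)
        .pricingpolicy(pricing)
        .compute_slice_normalized_pricing_cc(0.4)   // CC per 2 GB compute slice
        .storage_slice_normalized_pricing_cc(0.02)  // CC per 1 GB storage slice
        .signature_farmer("EX_FARMER_SIG")
}
```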
							
								
								
									
heromodels/src/models/grid4/reputation.rs (new file, 85 lines)
| @@ -0,0 +1,85 @@ | |||||||
|  | use heromodels_core::BaseModelData; | ||||||
|  | use heromodels_derive::model; | ||||||
|  | use rhai::{CustomType, TypeBuilder}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
|  | /// Node reputation information | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct NodeReputation { | ||||||
|  |     pub node_id: u32, | ||||||
|  |     /// between 0 and 100, earned over time | ||||||
|  |     pub reputation: i32, | ||||||
|  |     /// between 0 and 100, set by system, farmer has no ability to set this | ||||||
|  |     pub uptime: i32, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// NodeGroup reputation model | ||||||
|  | #[model] | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct NodeGroupReputation { | ||||||
|  |     pub base_data: BaseModelData, | ||||||
|  |     #[index] | ||||||
|  |     pub nodegroup_id: u32, | ||||||
|  |     /// between 0 and 100, earned over time | ||||||
|  |     pub reputation: i32, | ||||||
|  |     /// between 0 and 100, set by system, farmer has no ability to set this | ||||||
|  |     pub uptime: i32, | ||||||
|  |     pub nodes: Vec<NodeReputation>, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl NodeGroupReputation { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self { | ||||||
|  |             base_data: BaseModelData::new(), | ||||||
|  |             nodegroup_id: 0, | ||||||
|  |             reputation: 50, // default as per spec | ||||||
|  |             uptime: 0, | ||||||
|  |             nodes: Vec::new(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn nodegroup_id(mut self, v: u32) -> Self { | ||||||
|  |         self.nodegroup_id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn reputation(mut self, v: i32) -> Self { | ||||||
|  |         self.reputation = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn uptime(mut self, v: i32) -> Self { | ||||||
|  |         self.uptime = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn add_node_reputation(mut self, node_rep: NodeReputation) -> Self { | ||||||
|  |         self.nodes.push(node_rep); | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl NodeReputation { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self { | ||||||
|  |             node_id: 0, | ||||||
|  |             reputation: 50, // default as per spec | ||||||
|  |             uptime: 0, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn node_id(mut self, v: u32) -> Self { | ||||||
|  |         self.node_id = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn reputation(mut self, v: i32) -> Self { | ||||||
|  |         self.reputation = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     pub fn uptime(mut self, v: i32) -> Self { | ||||||
|  |         self.uptime = v; | ||||||
|  |         self | ||||||
|  |     } | ||||||
|  | } | ||||||
							
								
								
									
heromodels/src/models/grid4/reservation.rs (new file, 58 lines)
| @@ -0,0 +1,58 @@ | |||||||
|  | use heromodels_core::BaseModelData; | ||||||
|  | use heromodels_derive::model; | ||||||
|  | use rhai::{CustomType, TypeBuilder}; | ||||||
|  | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
|  | /// Reservation status as per V spec | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
|  | pub enum ReservationStatus { | ||||||
|  |     #[default] | ||||||
|  |     Pending, | ||||||
|  |     Confirmed, | ||||||
|  |     Assigned, | ||||||
|  |     Cancelled, | ||||||
|  |     Done, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | /// Grid4 Reservation model | ||||||
|  | #[model] | ||||||
|  | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, CustomType)] | ||||||
|  | pub struct Reservation { | ||||||
|  |     pub base_data: BaseModelData, | ||||||
|  |     /// links back to customer for this capacity | ||||||
|  |     #[index] | ||||||
|  |     pub customer_id: u32, | ||||||
|  |     pub compute_slices: Vec<u32>, | ||||||
|  |     pub storage_slices: Vec<u32>, | ||||||
|  |     pub status: ReservationStatus, | ||||||
|  |     /// if obligation then will be charged and money needs to be in escrow, otherwise it's an intent | ||||||
|  |     pub obligation: bool, | ||||||
|  |     /// epoch | ||||||
|  |     pub start_date: u32, | ||||||
|  |     pub end_date: u32, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | impl Reservation { | ||||||
|  |     pub fn new() -> Self { | ||||||
|  |         Self { | ||||||
|  |             base_data: BaseModelData::new(), | ||||||
|  |             customer_id: 0, | ||||||
|  |             compute_slices: Vec::new(), | ||||||
|  |             storage_slices: Vec::new(), | ||||||
|  |             status: ReservationStatus::Pending, | ||||||
|  |             obligation: false, | ||||||
|  |             start_date: 0, | ||||||
|  |             end_date: 0, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     pub fn customer_id(mut self, v: u32) -> Self { self.customer_id = v; self } | ||||||
|  |     pub fn add_compute_slice(mut self, id: u32) -> Self { self.compute_slices.push(id); self } | ||||||
|  |     pub fn compute_slices(mut self, v: Vec<u32>) -> Self { self.compute_slices = v; self } | ||||||
|  |     pub fn add_storage_slice(mut self, id: u32) -> Self { self.storage_slices.push(id); self } | ||||||
|  |     pub fn storage_slices(mut self, v: Vec<u32>) -> Self { self.storage_slices = v; self } | ||||||
|  |     pub fn status(mut self, v: ReservationStatus) -> Self { self.status = v; self } | ||||||
|  |     pub fn obligation(mut self, v: bool) -> Self { self.obligation = v; self } | ||||||
|  |     pub fn start_date(mut self, v: u32) -> Self { self.start_date = v; self } | ||||||
|  |     pub fn end_date(mut self, v: u32) -> Self { self.end_date = v; self } | ||||||
|  | } | ||||||
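
`Reservation` references already-provisioned slice ids rather than embedding slice structs. A small sketch with placeholder ids and dates:

```rust
use heromodels::models::grid4::{Reservation, ReservationStatus};

fn example_reservation() -> Reservation {
    // Reserve two compute slices and one storage slice as an intent (no escrow yet).
    Reservation::new()
        .customer_id(7)
        .add_compute_slice(1)
        .add_compute_slice(2)
        .add_storage_slice(9)
        .status(ReservationStatus::Pending)
        .obligation(false)
        .start_date(1_735_689_600)
        .end_date(1_738_368_000)
}
```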
							
								
								
									
heromodels/src/models/grid4/specs/README.md (new file, 194 lines)
| @@ -0,0 +1,194 @@ | |||||||
|  |  | ||||||
|  | # Grid4 Data Model | ||||||
|  |  | ||||||
|  | This module defines data models for nodes, groups, and slices in a cloud/grid infrastructure. Each root object is marked with `@[heap]` and can be indexed for efficient querying. | ||||||
|  |  | ||||||
|  | ## Root Objects Overview | ||||||
|  |  | ||||||
|  | | Object      | Description                                   | Index Fields                   | | ||||||
|  | | ----------- | --------------------------------------------- | ------------------------------ | | ||||||
|  | | `Node`      | Represents a single node in the grid          | `id`, `nodegroupid`, `country` | | ||||||
|  | | `NodeGroup` | Represents a group of nodes owned by a farmer | `id`, `farmerid`               | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## Node | ||||||
|  |  | ||||||
|  | Represents a single node in the grid with slices, devices, and capacity. | ||||||
|  |  | ||||||
|  | | Field           | Type             | Description                                  | Indexed | | ||||||
|  | | --------------- | ---------------- | -------------------------------------------- | ------- | | ||||||
|  | | `id`            | `int`            | Unique node ID                               | ✅       | | ||||||
|  | | `nodegroupid`   | `int`            | ID of the owning node group                  | ✅       | | ||||||
|  | | `uptime`        | `int`            | Uptime percentage (0-100)                    | ✅       | | ||||||
|  | | `computeslices` | `[]ComputeSlice` | List of compute slices                       | ❌       | | ||||||
|  | | `storageslices` | `[]StorageSlice` | List of storage slices                       | ❌       | | ||||||
|  | | `devices`       | `DeviceInfo`     | Hardware device info (storage, memory, etc.) | ❌       | | ||||||
|  | | `country`       | `string`         | 2-letter country code                        | ✅       | | ||||||
|  | | `capacity`      | `NodeCapacity`   | Aggregated hardware capacity                 | ❌       | | ||||||
|  | | `provisiontime` | `u32`            | Provisioning time (simple/compatible format) | ✅       | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## NodeGroup | ||||||
|  |  | ||||||
|  | Represents a group of nodes owned by a farmer, with policies. | ||||||
|  |  | ||||||
|  | | Field                                 | Type            | Description                                    | Indexed | | ||||||
|  | | ------------------------------------- | --------------- | ---------------------------------------------- | ------- | | ||||||
|  | | `id`                                  | `u32`           | Unique group ID                                | ✅       | | ||||||
|  | | `farmerid`                            | `u32`           | Farmer/user ID                                 | ✅       | | ||||||
|  | | `secret`                              | `string`        | Encrypted secret for booting nodes             | ❌       | | ||||||
|  | | `description`                         | `string`        | Group description                              | ❌       | | ||||||
|  | | `slapolicy`                           | `SLAPolicy`     | SLA policy details                             | ❌       | | ||||||
|  | | `pricingpolicy`                       | `PricingPolicy` | Pricing policy details                         | ❌       | | ||||||
|  | | `compute_slice_normalized_pricing_cc` | `f64`           | Pricing per 2GB compute slice in cloud credits | ❌       | | ||||||
|  | | `storage_slice_normalized_pricing_cc` | `f64`           | Pricing per 1GB storage slice in cloud credits | ❌       | | ||||||
|  | | `reputation`                          | `int`           | Reputation (0-100)                             | ✅       | | ||||||
|  | | `uptime`                              | `int`           | Uptime (0-100)                                 | ✅       | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## ComputeSlice | ||||||
|  |  | ||||||
|  | Represents a compute slice (e.g., 1GB memory unit). | ||||||
|  |  | ||||||
|  | | Field                      | Type            | Description                      | | ||||||
|  | | -------------------------- | --------------- | -------------------------------- | | ||||||
|  | | `nodeid`                   | `u32`           | Owning node ID                   | | ||||||
|  | | `id`                       | `int`           | Slice ID in node                 | | ||||||
|  | | `mem_gb`                   | `f64`           | Memory in GB                     | | ||||||
|  | | `storage_gb`               | `f64`           | Storage in GB                    | | ||||||
|  | | `passmark`                 | `int`           | Passmark score                   | | ||||||
|  | | `vcores`                   | `int`           | Virtual cores                    | | ||||||
|  | | `cpu_oversubscription`     | `int`           | CPU oversubscription ratio       | | ||||||
|  | | `storage_oversubscription` | `int`           | Storage oversubscription ratio   | | ||||||
|  | | `price_range`              | `[]f64`         | Price range [min, max]           | | ||||||
|  | | `gpus`                     | `u8`            | Number of GPUs                   | | ||||||
|  | | `price_cc`                 | `f64`           | Price per slice in cloud credits | | ||||||
|  | | `pricing_policy`           | `PricingPolicy` | Pricing policy                   | | ||||||
|  | | `sla_policy`               | `SLAPolicy`     | SLA policy                       | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## StorageSlice | ||||||
|  |  | ||||||
|  | Represents a 1GB storage slice. | ||||||
|  |  | ||||||
|  | | Field            | Type            | Description                      | | ||||||
|  | | ---------------- | --------------- | -------------------------------- | | ||||||
|  | | `nodeid`         | `u32`           | Owning node ID                   | | ||||||
|  | | `id`             | `int`           | Slice ID in node                 | | ||||||
|  | | `price_cc`       | `f64`           | Price per slice in cloud credits | | ||||||
|  | | `pricing_policy` | `PricingPolicy` | Pricing policy                   | | ||||||
|  | | `sla_policy`     | `SLAPolicy`     | SLA policy                       | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## DeviceInfo | ||||||
|  |  | ||||||
|  | Hardware device information for a node. | ||||||
|  |  | ||||||
|  | | Field     | Type              | Description             | | ||||||
|  | | --------- | ----------------- | ----------------------- | | ||||||
|  | | `vendor`  | `string`          | Vendor of the node      | | ||||||
|  | | `storage` | `[]StorageDevice` | List of storage devices | | ||||||
|  | | `memory`  | `[]MemoryDevice`  | List of memory devices  | | ||||||
|  | | `cpu`     | `[]CPUDevice`     | List of CPU devices     | | ||||||
|  | | `gpu`     | `[]GPUDevice`     | List of GPU devices     | | ||||||
|  | | `network` | `[]NetworkDevice` | List of network devices | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## StorageDevice | ||||||
|  |  | ||||||
|  | | Field         | Type     | Description           | | ||||||
|  | | ------------- | -------- | --------------------- | | ||||||
|  | | `id`          | `string` | Unique ID for device  | | ||||||
|  | | `size_gb`     | `f64`    | Size in GB            | | ||||||
|  | | `description` | `string` | Description of device | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## MemoryDevice | ||||||
|  |  | ||||||
|  | | Field         | Type     | Description           | | ||||||
|  | | ------------- | -------- | --------------------- | | ||||||
|  | | `id`          | `string` | Unique ID for device  | | ||||||
|  | | `size_gb`     | `f64`    | Size in GB            | | ||||||
|  | | `description` | `string` | Description of device | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## CPUDevice | ||||||
|  |  | ||||||
|  | | Field         | Type     | Description              | | ||||||
|  | | ------------- | -------- | ------------------------ | | ||||||
|  | | `id`          | `string` | Unique ID for device     | | ||||||
|  | | `cores`       | `int`    | Number of CPU cores      | | ||||||
|  | | `passmark`    | `int`    | Passmark benchmark score | | ||||||
|  | | `description` | `string` | Description of device    | | ||||||
|  | | `cpu_brand`   | `string` | Brand of the CPU         | | ||||||
|  | | `cpu_version` | `string` | Version of the CPU       | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## GPUDevice | ||||||
|  |  | ||||||
|  | | Field         | Type     | Description           | | ||||||
|  | | ------------- | -------- | --------------------- | | ||||||
|  | | `id`          | `string` | Unique ID for device  | | ||||||
|  | | `cores`       | `int`    | Number of GPU cores   | | ||||||
|  | | `memory_gb`   | `f64`    | GPU memory in GB      | | ||||||
|  | | `description` | `string` | Description of device | | ||||||
|  | | `gpu_brand`   | `string` | Brand of the GPU      | | ||||||
|  | | `gpu_version` | `string` | Version of the GPU    | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## NetworkDevice | ||||||
|  |  | ||||||
|  | | Field         | Type     | Description           | | ||||||
|  | | ------------- | -------- | --------------------- | | ||||||
|  | | `id`          | `string` | Unique ID for device  | | ||||||
|  | | `speed_mbps`  | `int`    | Network speed in Mbps | | ||||||
|  | | `description` | `string` | Description of device | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## NodeCapacity | ||||||
|  |  | ||||||
|  | Aggregated hardware capacity for a node. | ||||||
|  |  | ||||||
|  | | Field        | Type  | Description            | | ||||||
|  | | ------------ | ----- | ---------------------- | | ||||||
|  | | `storage_gb` | `f64` | Total storage in GB    | | ||||||
|  | | `mem_gb`     | `f64` | Total memory in GB     | | ||||||
|  | | `mem_gb_gpu` | `f64` | Total GPU memory in GB | | ||||||
|  | | `passmark`   | `int` | Total passmark score   | | ||||||
|  | | `vcores`     | `int` | Total virtual cores    | | ||||||
|  |  | ||||||
|  | --- | ||||||
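The new Rust test `heromodels/tests/grid4_models.rs` further down in this diff fills these two structs in as plain literals. A condensed sketch of that usage (the field values are arbitrary examples):

```rust
use heromodels::models::grid4::{DeviceInfo, NodeCapacity, StorageDevice};

fn example_hardware() -> (DeviceInfo, NodeCapacity) {
    // Hardware inventory for a node; only the storage list is populated here.
    let devices = DeviceInfo {
        vendor: "AcmeVendor".into(),
        storage: vec![StorageDevice {
            id: "sda".into(),
            size_gb: 512.0,
            description: "NVMe".into(),
        }],
        memory: vec![],
        cpu: vec![],
        gpu: vec![],
        network: vec![],
    };

    // Aggregated capacity advertised for the node.
    let capacity = NodeCapacity {
        storage_gb: 2048.0,
        mem_gb: 128.0,
        mem_gb_gpu: 24.0,
        passmark: 12000,
        vcores: 32,
    };

    (devices, capacity)
}
```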
|  |  | ||||||
|  | ## SLAPolicy | ||||||
|  |  | ||||||
|  | Service Level Agreement policy for slices or node groups. | ||||||
|  |  | ||||||
|  | | Field                | Type  | Description                             | | ||||||
|  | | -------------------- | ----- | --------------------------------------- | | ||||||
|  | | `sla_uptime`         | `int` | Required uptime % (e.g., 90)            | | ||||||
|  | | `sla_bandwidth_mbit` | `int` | Guaranteed bandwidth in Mbps (0 = none) | | ||||||
|  | | `sla_penalty`        | `int` | Penalty % if SLA is breached (0-100)    | | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## PricingPolicy | ||||||
|  |  | ||||||
|  | Pricing policy for slices or node groups. | ||||||
|  |  | ||||||
|  | | Field                        | Type    | Description                                               | | ||||||
|  | | ---------------------------- | ------- | --------------------------------------------------------- | | ||||||
|  | | `marketplace_year_discounts` | `[]int` | Discounts for 1Y, 2Y, 3Y prepaid usage (e.g. [30,40,50])  | | ||||||
|  | | `volume_discounts`           | `[]int` | Volume discounts based on purchase size (e.g. [10,20,30]) | | ||||||
|  |  | ||||||
|  |  | ||||||
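On the Rust side, the new test `heromodels/tests/grid4_models.rs` (added later in this diff) wires the pricing and SLA policies into slices through fluent builders. A condensed sketch, assuming the same builder API the test uses and arbitrary example values:

```rust
use heromodels::models::grid4::{ComputeSlice, PricingPolicy, SLAPolicy, StorageSlice};

fn example_slices() -> (ComputeSlice, StorageSlice) {
    // Prepaid-year and volume discounts, in percent.
    let pricing = PricingPolicy::new()
        .marketplace_year_discounts(vec![30, 40, 50])
        .volume_discounts(vec![10, 20, 30])
        .build();

    // 99% uptime, 1000 Mbps guaranteed, 100% penalty on breach.
    let sla = SLAPolicy::new()
        .sla_uptime(99)
        .sla_bandwidth_mbit(1000)
        .sla_penalty(100)
        .build();

    // A compute slice and a storage slice on node 42, sharing the same policies.
    let compute = ComputeSlice::new()
        .nodeid(42)
        .slice_id(1)
        .mem_gb(16.0)
        .storage_gb(200.0)
        .passmark(5000)
        .vcores(8)
        .price_cc(1.25)
        .pricing_policy(pricing.clone())
        .sla_policy(sla.clone());

    let storage = StorageSlice::new()
        .nodeid(42)
        .slice_id(2)
        .price_cc(0.15)
        .pricing_policy(pricing)
        .sla_policy(sla);

    (compute, storage)
}
```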
							
								
								
									
heromodels/src/models/grid4/specs/model_bid.v (new file, 37 lines)
							| @@ -0,0 +1,37 @@ | |||||||
|  | module datamodel | ||||||
|  |  | ||||||
|  | // I can bid for infra, and optionally get accepted | ||||||
|  | @[heap] | ||||||
|  | pub struct Bid { | ||||||
|  | pub mut: | ||||||
|  | 	id                u32 | ||||||
|  | 	customer_id       u32 // links back to customer for this capacity (user on ledger) | ||||||
|  | 	compute_slices_nr int // nr of slices I need in 1 machine | ||||||
|  | 	compute_slice_price     f64 // price per 1 GB slice I want to accept | ||||||
|  | 	storage_slices_nr int | ||||||
|  | 	storage_slice_price     f64 // price per 1 GB storage slice I want to accept | ||||||
|  | 	status            BidStatus | ||||||
|  | 	obligation        bool // if obligation then will be charged and money needs to be in escrow, otherwise its an intent | ||||||
|  | 	start_date        u32  // epoch | ||||||
|  | 	end_date          u32 | ||||||
|  | 	signature_user    string // signature as done by a user/consumer to validate their identity and intent | ||||||
|  | 	billing_period    BillingPeriod | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub enum BidStatus { | ||||||
|  | 	pending | ||||||
|  | 	confirmed | ||||||
|  | 	assigned | ||||||
|  | 	cancelled | ||||||
|  | 	done | ||||||
|  | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  | pub enum BillingPeriod { | ||||||
|  | 	hourly | ||||||
|  | 	monthly | ||||||
|  | 	yearly | ||||||
|  | 	biannually | ||||||
|  | 	triannually  | ||||||
|  | } | ||||||
							
								
								
									
heromodels/src/models/grid4/specs/model_contract.v (new file, 52 lines)
							| @@ -0,0 +1,52 @@ | |||||||
|  | module datamodel | ||||||
|  |  | ||||||
|  | // the agreement for infra between a customer and a hoster, created once a bid is accepted | ||||||
|  | @[heap] | ||||||
|  | pub struct Contract { | ||||||
|  | pub mut: | ||||||
|  | 	id                u32 | ||||||
|  | 	customer_id       u32 // links back to customer for this capacity (user on ledger) | ||||||
|  | 	compute_slices     []ComputeSliceProvisioned | ||||||
|  | 	storage_slices     []StorageSliceProvisioned | ||||||
|  | 	compute_slice_price     f64 // price per 1 GB agreed upon | ||||||
|  | 	storage_slice_price     f64 // price per 1 GB agreed upon | ||||||
|  | 	network_slice_price     f64 // price per 1 GB agreed upon (transfer) | ||||||
|  | 	status            ContractStatus | ||||||
|  | 	start_date        u32  // epoch | ||||||
|  | 	end_date          u32 | ||||||
|  | 	signature_user    string // signature as done by a user/consumer to validate their identity and intent | ||||||
|  | 	signature_hoster  string // signature as done by the hoster | ||||||
|  | 	billing_period    BillingPeriod | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub enum ContractStatus { | ||||||
|  | 	active | ||||||
|  | 	cancelled | ||||||
|  | 	error | ||||||
|  | 	paused | ||||||
|  | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  | // typically 1GB of memory, but can be adjusted based on the size of the machine | ||||||
|  | pub struct ComputeSliceProvisioned { | ||||||
|  | pub mut: | ||||||
|  | 	node_id					 u32 | ||||||
|  | 	id                       u16 // the id of the slice in the node | ||||||
|  | 	mem_gb                   f64 | ||||||
|  | 	storage_gb               f64 | ||||||
|  | 	passmark                 int | ||||||
|  | 	vcores                   int | ||||||
|  | 	cpu_oversubscription     int | ||||||
|  | 	tags string | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // 1GB of storage | ||||||
|  | pub struct StorageSliceProvisioned { | ||||||
|  | pub mut: | ||||||
|  | 	node_id		   u32 | ||||||
|  | 	id             u16 // the id of the slice in the node, are tracked in the node itself | ||||||
|  | 	storage_size_gb int | ||||||
|  | 	tags string | ||||||
|  | } | ||||||
|  |  | ||||||
|  |  | ||||||
							
								
								
									
heromodels/src/models/grid4/specs/model_node.v (new file, 104 lines)
							| @@ -0,0 +1,104 @@ | |||||||
|  | module datamodel | ||||||
|  |  | ||||||
|  | //ACCESS ONLY TF | ||||||
|  |  | ||||||
|  | @[heap] | ||||||
|  | pub struct Node { | ||||||
|  | pub mut: | ||||||
|  | 	id               int | ||||||
|  | 	nodegroupid      int | ||||||
|  | 	uptime           int // 0..100 | ||||||
|  | 	computeslices    []ComputeSlice | ||||||
|  | 	storageslices    []StorageSlice | ||||||
|  | 	devices          DeviceInfo | ||||||
|  | 	country          string       // 2 letter code as specified in lib/data/countries/data/countryInfo.txt, use that library for validation | ||||||
|  | 	capacity         NodeCapacity // Hardware capacity details | ||||||
|  | 	birthtime    	 u32          // first time node was active | ||||||
|  | 	pubkey           string | ||||||
|  | 	signature_node   string // signature done on node to validate pubkey with privkey | ||||||
|  | 	signature_farmer string // signature as done by farmers to validate their identity | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct DeviceInfo { | ||||||
|  | pub mut: | ||||||
|  | 	vendor  string | ||||||
|  | 	storage []StorageDevice | ||||||
|  | 	memory  []MemoryDevice | ||||||
|  | 	cpu     []CPUDevice | ||||||
|  | 	gpu     []GPUDevice | ||||||
|  | 	network []NetworkDevice | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct StorageDevice { | ||||||
|  | pub mut: | ||||||
|  | 	id          string // can be used in node | ||||||
|  | 	size_gb     f64    // Size of the storage device in gigabytes | ||||||
|  | 	description string // Description of the storage device | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct MemoryDevice { | ||||||
|  | pub mut: | ||||||
|  | 	id          string // can be used in node | ||||||
|  | 	size_gb     f64    // Size of the memory device in gigabytes | ||||||
|  | 	description string // Description of the memory device | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct CPUDevice { | ||||||
|  | pub mut: | ||||||
|  | 	id          string // can be used in node | ||||||
|  | 	cores       int    // Number of CPU cores | ||||||
|  | 	passmark    int | ||||||
|  | 	description string // Description of the CPU | ||||||
|  | 	cpu_brand   string // Brand of the CPU | ||||||
|  | 	cpu_version string // Version of the CPU | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct GPUDevice { | ||||||
|  | pub mut: | ||||||
|  | 	id          string // can be used in node | ||||||
|  | 	cores       int    // Number of GPU cores | ||||||
|  | 	memory_gb   f64    // Size of the GPU memory in gigabytes | ||||||
|  | 	description string // Description of the GPU | ||||||
|  | 	gpu_brand   string | ||||||
|  | 	gpu_version string | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct NetworkDevice { | ||||||
|  | pub mut: | ||||||
|  | 	id          string // can be used in node | ||||||
|  | 	speed_mbps  int    // Network speed in Mbps | ||||||
|  | 	description string // Description of the network device | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // NodeCapacity represents the hardware capacity details of a node. | ||||||
|  | pub struct NodeCapacity { | ||||||
|  | pub mut: | ||||||
|  | 	storage_gb f64 // Total storage in gigabytes | ||||||
|  | 	mem_gb     f64 // Total memory in gigabytes | ||||||
|  | 	mem_gb_gpu f64 // Total GPU memory in gigabytes | ||||||
|  | 	passmark   int // Passmark score for the node | ||||||
|  | 	vcores     int // Total virtual cores | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // typically 1GB of memory, but can be adjusted based on the size of the machine | ||||||
|  | pub struct ComputeSlice { | ||||||
|  | pub mut: | ||||||
|  | 	id                       u16 // the id of the slice in the node | ||||||
|  | 	mem_gb                   f64 | ||||||
|  | 	storage_gb               f64 | ||||||
|  | 	passmark                 int | ||||||
|  | 	vcores                   int | ||||||
|  | 	cpu_oversubscription     int | ||||||
|  | 	storage_oversubscription int | ||||||
|  | 	gpus                     u8  // nr of GPU's see node to know what GPU's are | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // 1GB of storage | ||||||
|  | pub struct StorageSlice { | ||||||
|  | pub mut: | ||||||
|  | 	id              u16 // the id of the slice in the node, tracked in the node itself | ||||||
|  | } | ||||||
|  |  | ||||||
|  | fn (mut n Node) check() ! { | ||||||
|  | 	// todo calculate NodeCapacity out of the devices on the Node | ||||||
|  | } | ||||||
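The `check()` hook above leaves the capacity aggregation as a TODO. One plausible way to derive `NodeCapacity` from `DeviceInfo`, sketched against the Rust grid4 types used by the tests in this diff and assuming the Rust device structs mirror the fields documented above (oversubscription rules are not specified here, so `vcores` is just the raw core count):

```rust
use heromodels::models::grid4::{DeviceInfo, NodeCapacity};

// Illustrative sketch only: sum the per-device figures into a NodeCapacity.
fn capacity_from_devices(devices: &DeviceInfo) -> NodeCapacity {
    NodeCapacity {
        storage_gb: devices.storage.iter().map(|d| d.size_gb).sum(),
        mem_gb: devices.memory.iter().map(|m| m.size_gb).sum(),
        mem_gb_gpu: devices.gpu.iter().map(|g| g.memory_gb).sum(),
        passmark: devices.cpu.iter().map(|c| c.passmark).sum(),
        vcores: devices.cpu.iter().map(|c| c.cores).sum(),
    }
}
```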
							
								
								
									
heromodels/src/models/grid4/specs/model_nodegroup.v (new file, 33 lines)
							| @@ -0,0 +1,33 @@ | |||||||
|  | module datamodel | ||||||
|  |  | ||||||
|  | // root object; the only object a farmer needs to configure in the UI, it defines how slices will be created | ||||||
|  | @[heap] | ||||||
|  | pub struct NodeGroup { | ||||||
|  | pub mut: | ||||||
|  | 	id                                  u32 | ||||||
|  | 	farmerid                            u32    // link back to farmer who owns the nodegroup, is a user? | ||||||
|  | 	secret                              string // only visible by farmer, in future encrypted, used to boot a node | ||||||
|  | 	description                         string | ||||||
|  | 	slapolicy                           SLAPolicy | ||||||
|  | 	pricingpolicy                       PricingPolicy | ||||||
|  | 	compute_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 2GB node slice | ||||||
|  | 	storage_slice_normalized_pricing_cc f64 // pricing in CC - cloud credit, per 1GB storage slice | ||||||
|  | 	signature_farmer string // signature as done by farmers to validate that they created this group | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct SLAPolicy { | ||||||
|  | pub mut: | ||||||
|  | 	sla_uptime         int // should be 90 or higher | ||||||
|  | 	sla_bandwidth_mbit int // minimal mbits we can expect avg over 1h per node, 0 means we don't guarantee | ||||||
|  | 	sla_penalty        int // 0-100, percent of a month's revenue returned if the SLA is breached, e.g. 100 means a full month is refunded | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct PricingPolicy { | ||||||
|  | pub mut: | ||||||
|  | 	marketplace_year_discounts []int = [30, 40, 50] // e.g. 30,40,50 means: if the user has more CC in their wallet than 1 year of utilization across all their purchases, this provider gives 30%; 2Y 40%, ... | ||||||
|  | 	// volume_discounts           []int = [10, 20, 30] // e.g. 10,20,30 | ||||||
|  | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
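To make the discount comment concrete: with `marketplace_year_discounts = [30, 40, 50]`, a buyer whose wallet covers at least one year of utilization pays 70% of the normalized slice price, two years 60%, three or more years 50%. A small hypothetical helper (not part of this diff) that applies the tier:

```rust
// Hypothetical helper, not part of this diff: apply the prepaid-year
// discount tier to a normalized slice price expressed in cloud credits.
fn discounted_price_cc(base_price_cc: f64, year_discounts: &[i32], prepaid_years: usize) -> f64 {
    let discount_pct = if prepaid_years == 0 || year_discounts.is_empty() {
        0
    } else {
        // year_discounts[0] covers 1 prepaid year, [1] two years, [2] three or more.
        let idx = (prepaid_years - 1).min(year_discounts.len() - 1);
        year_discounts[idx]
    };
    base_price_cc * (1.0 - f64::from(discount_pct) / 100.0)
}

// discounted_price_cc(1.0, &[30, 40, 50], 1) == 0.70
// discounted_price_cc(1.0, &[30, 40, 50], 3) == 0.50
```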
							
								
								
									
heromodels/src/models/grid4/specs/model_reputation.v (new file, 19 lines)
							| @@ -0,0 +1,19 @@ | |||||||
|  | module datamodel | ||||||
|  | @[heap] | ||||||
|  | pub struct NodeGroupReputation { | ||||||
|  | pub mut: | ||||||
|  | 	nodegroup_id 	u32 | ||||||
|  | 	reputation                          int = 50 // between 0 and 100, earned over time | ||||||
|  | 	uptime                              int // between 0 and 100, set by system, farmer has no ability to set this | ||||||
|  | 	nodes                               []NodeReputation | ||||||
|  | } | ||||||
|  |  | ||||||
|  | pub struct NodeReputation { | ||||||
|  | pub mut: | ||||||
|  | 	node_id 							u32 | ||||||
|  | 	reputation                          int = 50 // between 0 and 100, earned over time | ||||||
|  | 	uptime                              int // between 0 and 100, set by system, farmer has no ability to set this | ||||||
|  | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -1,4 +1,4 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
| use std::collections::HashMap; | use std::collections::HashMap; | ||||||
| @@ -209,10 +209,13 @@ pub struct DNSZone { | |||||||
|     pub base_data: BaseModelData, |     pub base_data: BaseModelData, | ||||||
|     #[index] |     #[index] | ||||||
|     pub domain: String, |     pub domain: String, | ||||||
|  |     #[index(path = "subdomain")] | ||||||
|  |     #[index(path = "record_type")] | ||||||
|     pub dnsrecords: Vec<DNSRecord>, |     pub dnsrecords: Vec<DNSRecord>, | ||||||
|     pub administrators: Vec<u32>, |     pub administrators: Vec<u32>, | ||||||
|     pub status: DNSZoneStatus, |     pub status: DNSZoneStatus, | ||||||
|     pub metadata: HashMap<String, String>, |     pub metadata: HashMap<String, String>, | ||||||
|  |     #[index(path = "primary_ns")] | ||||||
|     pub soarecord: Vec<SOARecord>, |     pub soarecord: Vec<SOARecord>, | ||||||
| } | } | ||||||
|  |  | ||||||
| @@ -297,5 +300,3 @@ impl DNSZone { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
| @@ -184,8 +184,6 @@ impl Group { | |||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| /// Represents the membership relationship between users and groups | /// Represents the membership relationship between users and groups | ||||||
| #[model] | #[model] | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
| @@ -232,5 +230,3 @@ impl UserGroupMembership { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
| @@ -111,5 +111,3 @@ impl Member { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,20 +1,10 @@ | |||||||
| // Export all heroledger model modules | // Export all heroledger model modules | ||||||
| pub mod user; |  | ||||||
| pub mod group; |  | ||||||
| pub mod money; |  | ||||||
| pub mod membership; |  | ||||||
| pub mod dnsrecord; | pub mod dnsrecord; | ||||||
|  | pub mod group; | ||||||
|  | pub mod membership; | ||||||
|  | pub mod money; | ||||||
|  | pub mod rhai; | ||||||
| pub mod secretbox; | pub mod secretbox; | ||||||
| pub mod signature; | pub mod signature; | ||||||
|  | pub mod user; | ||||||
| pub mod user_kvs; | pub mod user_kvs; | ||||||
| pub mod rhai; |  | ||||||
|  |  | ||||||
| // Re-export key types for convenience |  | ||||||
| pub use user::{User, UserStatus, UserProfile, KYCInfo, KYCStatus, SecretBox}; |  | ||||||
| pub use group::{Group, UserGroupMembership, GroupStatus, Visibility, GroupConfig}; |  | ||||||
| pub use money::{Account, Asset, AccountPolicy, AccountPolicyItem, Transaction, AccountStatus, TransactionType, Signature as TransactionSignature}; |  | ||||||
| pub use membership::{Member, MemberRole, MemberStatus}; |  | ||||||
| pub use dnsrecord::{DNSZone, DNSRecord, SOARecord, NameType, NameCat, DNSZoneStatus}; |  | ||||||
| pub use secretbox::{Notary, NotaryStatus, SecretBoxCategory}; |  | ||||||
| pub use signature::{Signature, SignatureStatus, ObjectType}; |  | ||||||
| pub use user_kvs::{UserKVS, UserKVSItem}; |  | ||||||
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
| use std::collections::HashMap; | use std::collections::HashMap; | ||||||
| @@ -223,8 +223,6 @@ impl Account { | |||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| /// Represents an asset in the financial system | /// Represents an asset in the financial system | ||||||
| #[model] | #[model] | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
| @@ -342,8 +340,6 @@ impl Asset { | |||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| /// Represents account policies for various operations | /// Represents account policies for various operations | ||||||
| #[model] | #[model] | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
| @@ -400,8 +396,6 @@ impl AccountPolicy { | |||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| /// Represents a financial transaction | /// Represents a financial transaction | ||||||
| #[model] | #[model] | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
| @@ -511,5 +505,3 @@ impl Transaction { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,8 +1,13 @@ | |||||||
| use ::rhai::plugin::*; | use ::rhai::plugin::*; | ||||||
| use ::rhai::{Array, Dynamic, Engine, EvalAltResult, Map, Module}; | use ::rhai::{Dynamic, Engine, EvalAltResult, Module}; | ||||||
| use std::mem; | use std::mem; | ||||||
|  |  | ||||||
| use crate::models::heroledger::*; | use crate::models::heroledger::{ | ||||||
|  |     dnsrecord::DNSZone, | ||||||
|  |     group::{Group, Visibility}, | ||||||
|  |     money::Account, | ||||||
|  |     user::{User, UserStatus}, | ||||||
|  | }; | ||||||
|  |  | ||||||
| // ============================================================================ | // ============================================================================ | ||||||
| // User Module | // User Module | ||||||
| @@ -12,6 +17,8 @@ type RhaiUser = User; | |||||||
|  |  | ||||||
| #[export_module] | #[export_module] | ||||||
| mod rhai_user_module { | mod rhai_user_module { | ||||||
|  |     use crate::models::heroledger::user::User; | ||||||
|  |  | ||||||
|     use super::RhaiUser; |     use super::RhaiUser; | ||||||
|  |  | ||||||
|     #[rhai_fn(name = "new_user", return_raw)] |     #[rhai_fn(name = "new_user", return_raw)] | ||||||
| @@ -30,30 +37,21 @@ mod rhai_user_module { | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[rhai_fn(name = "add_email", return_raw)] |     #[rhai_fn(name = "add_email", return_raw)] | ||||||
|     pub fn add_email( |     pub fn add_email(user: &mut RhaiUser, email: String) -> Result<RhaiUser, Box<EvalAltResult>> { | ||||||
|         user: &mut RhaiUser, |  | ||||||
|         email: String, |  | ||||||
|     ) -> Result<RhaiUser, Box<EvalAltResult>> { |  | ||||||
|         let owned = std::mem::take(user); |         let owned = std::mem::take(user); | ||||||
|         *user = owned.add_email(email); |         *user = owned.add_email(email); | ||||||
|         Ok(user.clone()) |         Ok(user.clone()) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[rhai_fn(name = "pubkey", return_raw)] |     #[rhai_fn(name = "pubkey", return_raw)] | ||||||
|     pub fn set_pubkey( |     pub fn set_pubkey(user: &mut RhaiUser, pubkey: String) -> Result<RhaiUser, Box<EvalAltResult>> { | ||||||
|         user: &mut RhaiUser, |  | ||||||
|         pubkey: String, |  | ||||||
|     ) -> Result<RhaiUser, Box<EvalAltResult>> { |  | ||||||
|         let owned = std::mem::take(user); |         let owned = std::mem::take(user); | ||||||
|         *user = owned.pubkey(pubkey); |         *user = owned.pubkey(pubkey); | ||||||
|         Ok(user.clone()) |         Ok(user.clone()) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[rhai_fn(name = "status", return_raw)] |     #[rhai_fn(name = "status", return_raw)] | ||||||
|     pub fn set_status( |     pub fn set_status(user: &mut RhaiUser, status: String) -> Result<RhaiUser, Box<EvalAltResult>> { | ||||||
|         user: &mut RhaiUser, |  | ||||||
|         status: String, |  | ||||||
|     ) -> Result<RhaiUser, Box<EvalAltResult>> { |  | ||||||
|         let status_enum = match status.as_str() { |         let status_enum = match status.as_str() { | ||||||
|             "Active" => UserStatus::Active, |             "Active" => UserStatus::Active, | ||||||
|             "Inactive" => UserStatus::Inactive, |             "Inactive" => UserStatus::Inactive, | ||||||
| @@ -115,10 +113,7 @@ mod rhai_group_module { | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[rhai_fn(name = "name", return_raw)] |     #[rhai_fn(name = "name", return_raw)] | ||||||
|     pub fn set_name( |     pub fn set_name(group: &mut RhaiGroup, name: String) -> Result<RhaiGroup, Box<EvalAltResult>> { | ||||||
|         group: &mut RhaiGroup, |  | ||||||
|         name: String, |  | ||||||
|     ) -> Result<RhaiGroup, Box<EvalAltResult>> { |  | ||||||
|         let owned = std::mem::take(group); |         let owned = std::mem::take(group); | ||||||
|         *group = owned.name(name); |         *group = owned.name(name); | ||||||
|         Ok(group.clone()) |         Ok(group.clone()) | ||||||
| @@ -263,15 +258,11 @@ mod rhai_dns_zone_module { | |||||||
|         Ok(zone.clone()) |         Ok(zone.clone()) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     #[rhai_fn(name = "save_dns_zone", return_raw)] |     #[rhai_fn(name = "save_dns_zone", return_raw)] | ||||||
|     pub fn save_dns_zone(zone: &mut RhaiDNSZone) -> Result<RhaiDNSZone, Box<EvalAltResult>> { |     pub fn save_dns_zone(zone: &mut RhaiDNSZone) -> Result<RhaiDNSZone, Box<EvalAltResult>> { | ||||||
|         Ok(zone.clone()) |         Ok(zone.clone()) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     // Getters |     // Getters | ||||||
|     #[rhai_fn(name = "get_id")] |     #[rhai_fn(name = "get_id")] | ||||||
|     pub fn get_id(zone: &mut RhaiDNSZone) -> i64 { |     pub fn get_id(zone: &mut RhaiDNSZone) -> i64 { | ||||||
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
| @@ -138,5 +138,3 @@ impl Notary { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
|  |  | ||||||
| @@ -116,5 +116,3 @@ impl Signature { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
| use std::collections::HashMap; | use std::collections::HashMap; | ||||||
| @@ -366,5 +366,3 @@ impl User { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,7 +1,7 @@ | |||||||
| use heromodels_core::{Model, BaseModelData, IndexKey}; | use super::secretbox::SecretBox; | ||||||
|  | use heromodels_core::{BaseModelData, IndexKey, Model}; | ||||||
| use heromodels_derive::model; | use heromodels_derive::model; | ||||||
| use serde::{Deserialize, Serialize}; | use serde::{Deserialize, Serialize}; | ||||||
| use super::secretbox::SecretBox; |  | ||||||
|  |  | ||||||
| /// Represents a per-user key-value store | /// Represents a per-user key-value store | ||||||
| #[model] | #[model] | ||||||
| @@ -44,8 +44,6 @@ impl UserKVS { | |||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| /// Represents an item in a user's key-value store | /// Represents an item in a user's key-value store | ||||||
| #[model] | #[model] | ||||||
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] | ||||||
| @@ -116,5 +114,3 @@ impl UserKVSItem { | |||||||
|         self |         self | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -46,4 +46,4 @@ pub struct IdenfyVerificationData { | |||||||
|     pub doc_issuing_country: Option<String>, |     pub doc_issuing_country: Option<String>, | ||||||
|     #[serde(rename = "manuallyDataChanged")] |     #[serde(rename = "manuallyDataChanged")] | ||||||
|     pub manually_data_changed: Option<bool>, |     pub manually_data_changed: Option<bool>, | ||||||
| } | } | ||||||
|   | |||||||
| @@ -2,4 +2,4 @@ | |||||||
|  |  | ||||||
| pub mod kyc; | pub mod kyc; | ||||||
|  |  | ||||||
| pub use kyc::*; | pub use kyc::*; | ||||||
|   | |||||||
| @@ -8,4 +8,4 @@ pub struct Address { | |||||||
|     pub postal_code: String, |     pub postal_code: String, | ||||||
|     pub country: String, |     pub country: String, | ||||||
|     pub company: Option<String>, |     pub company: Option<String>, | ||||||
| } | } | ||||||
|   | |||||||
| @@ -10,16 +10,16 @@ pub mod contact; | |||||||
| pub mod finance; | pub mod finance; | ||||||
| pub mod flow; | pub mod flow; | ||||||
| pub mod governance; | pub mod governance; | ||||||
|  | pub mod grid4; | ||||||
| pub mod heroledger; | pub mod heroledger; | ||||||
|  | pub mod identity; | ||||||
| pub mod legal; | pub mod legal; | ||||||
| pub mod library; | pub mod library; | ||||||
| pub mod location; | pub mod location; | ||||||
| pub mod object; | pub mod object; | ||||||
| pub mod projects; |  | ||||||
| pub mod payment; | pub mod payment; | ||||||
| pub mod identity; | pub mod projects; | ||||||
| pub mod tfmarketplace; | // pub mod tfmarketplace; | ||||||
| pub mod grid4; |  | ||||||
|  |  | ||||||
| // Re-export key types for convenience | // Re-export key types for convenience | ||||||
| pub use core::Comment; | pub use core::Comment; | ||||||
| @@ -39,3 +39,4 @@ pub use legal::{Contract, ContractRevision, ContractSigner, ContractStatus, Sign | |||||||
| pub use library::collection::Collection; | pub use library::collection::Collection; | ||||||
| pub use library::items::{Image, Markdown, Pdf}; | pub use library::items::{Image, Markdown, Pdf}; | ||||||
| pub use projects::{Project, Status}; | pub use projects::{Project, Status}; | ||||||
|  | pub use heroledger::*; | ||||||
| @@ -1,6 +1,6 @@ | |||||||
|  | use super::Object; | ||||||
| use rhai::plugin::*; | use rhai::plugin::*; | ||||||
| use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module}; | use rhai::{CustomType, Dynamic, Engine, EvalAltResult, Module}; | ||||||
| use super::Object; |  | ||||||
|  |  | ||||||
| type RhaiObject = Object; | type RhaiObject = Object; | ||||||
|  |  | ||||||
| @@ -16,10 +16,7 @@ pub mod generated_rhai_module { | |||||||
|  |  | ||||||
|     /// Set the title of an Object |     /// Set the title of an Object | ||||||
|     #[rhai_fn(name = "object_title")] |     #[rhai_fn(name = "object_title")] | ||||||
|     pub fn object_title( |     pub fn object_title(object: &mut RhaiObject, title: String) -> RhaiObject { | ||||||
|         object: &mut RhaiObject, |  | ||||||
|         title: String, |  | ||||||
|     ) -> RhaiObject { |  | ||||||
|         let mut result = object.clone(); |         let mut result = object.clone(); | ||||||
|         result.title = title; |         result.title = title; | ||||||
|         result |         result | ||||||
| @@ -27,10 +24,7 @@ pub mod generated_rhai_module { | |||||||
|  |  | ||||||
|     /// Set the description of an Object |     /// Set the description of an Object | ||||||
|     #[rhai_fn(name = "object_description")] |     #[rhai_fn(name = "object_description")] | ||||||
|     pub fn object_description( |     pub fn object_description(object: &mut RhaiObject, description: String) -> RhaiObject { | ||||||
|         object: &mut RhaiObject, |  | ||||||
|         description: String, |  | ||||||
|     ) -> RhaiObject { |  | ||||||
|         let mut result = object.clone(); |         let mut result = object.clone(); | ||||||
|         result.description = description; |         result.description = description; | ||||||
|         result |         result | ||||||
|   | |||||||
| @@ -2,4 +2,4 @@ | |||||||
|  |  | ||||||
| pub mod stripe; | pub mod stripe; | ||||||
|  |  | ||||||
| pub use stripe::*; | pub use stripe::*; | ||||||
|   | |||||||
| @@ -27,4 +27,4 @@ pub struct StripeEventData { | |||||||
| pub struct StripeEventRequest { | pub struct StripeEventRequest { | ||||||
|     pub id: Option<String>, |     pub id: Option<String>, | ||||||
|     pub idempotency_key: Option<String>, |     pub idempotency_key: Option<String>, | ||||||
| } | } | ||||||
|   | |||||||
							
								
								
									
heromodels/test.sh (new executable file, 43 lines)
							| @@ -0,0 +1,43 @@ | |||||||
|  | #!/usr/bin/env bash | ||||||
|  | set -euo pipefail | ||||||
|  |  | ||||||
|  | # Config matches examples/tests | ||||||
|  | PGHOST=${PGHOST:-localhost} | ||||||
|  | PGPORT=${PGPORT:-5432} | ||||||
|  | PGUSER=${PGUSER:-postgres} | ||||||
|  | PGPASSWORD=${PGPASSWORD:-test123} | ||||||
|  | export PGPASSWORD | ||||||
|  |  | ||||||
|  | echo "[test.sh] Checking Postgres at ${PGHOST}:${PGPORT} (user=${PGUSER})..." | ||||||
|  |  | ||||||
|  | # Require pg_isready | ||||||
|  | if ! command -v pg_isready >/dev/null 2>&1; then | ||||||
|  |   echo "[test.sh] ERROR: pg_isready not found. Install PostgreSQL client tools (e.g., brew install libpq && brew link --force libpq)." >&2 | ||||||
|  |   exit 1 | ||||||
|  | fi | ||||||
|  |  | ||||||
|  | # Wait for Postgres to be ready (30s timeout) | ||||||
|  | ATTEMPTS=30 | ||||||
|  | until pg_isready -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" >/dev/null 2>&1; do | ||||||
|  |   ((ATTEMPTS--)) || { | ||||||
|  |     echo "[test.sh] ERROR: Postgres not ready after 30s. Ensure it's running with user=$PGUSER password=$PGPASSWORD host=$PGHOST port=$PGPORT." >&2 | ||||||
|  |     exit 1 | ||||||
|  |   } | ||||||
|  |   sleep 1 | ||||||
|  |   echo "[test.sh] Waiting for Postgres..." | ||||||
|  | done | ||||||
|  |  | ||||||
|  | echo "[test.sh] Postgres is ready. Running tests..." | ||||||
|  |  | ||||||
|  | # Run fast OurDB test first (no Postgres dependency) | ||||||
|  | echo "[test.sh] Running OurDB test: grid4_ourdb" | ||||||
|  | cargo test -p heromodels --test grid4_ourdb | ||||||
|  |  | ||||||
|  | # Run Postgres-backed tests (marked ignored) | ||||||
|  | echo "[test.sh] Running Postgres test: heroledger_postgres (ignored)" | ||||||
|  | cargo test -p heromodels --test heroledger_postgres -- --ignored | ||||||
|  |  | ||||||
|  | echo "[test.sh] Running Postgres test: grid4_postgres (ignored)" | ||||||
|  | cargo test -p heromodels --test grid4_postgres -- --ignored | ||||||
|  |  | ||||||
|  | echo "[test.sh] Done." | ||||||
							
								
								
									
heromodels/tests/grid4_models.rs (new file, 117 lines)
							| @@ -0,0 +1,117 @@ | |||||||
|  | use serde_json; | ||||||
|  |  | ||||||
|  | use heromodels::models::grid4::{ | ||||||
|  |     ComputeSlice, DeviceInfo, Node, NodeCapacity, PricingPolicy, Reservation, ReservationStatus, | ||||||
|  |     SLAPolicy, StorageDevice, StorageSlice, | ||||||
|  | }; | ||||||
|  |  | ||||||
|  | #[test] | ||||||
|  | fn build_and_serde_roundtrip_compute_storage_slices() { | ||||||
|  |     let pricing = PricingPolicy::new() | ||||||
|  |         .marketplace_year_discounts(vec![20, 30, 40]) | ||||||
|  |         .volume_discounts(vec![5, 10, 15]) | ||||||
|  |         .build(); | ||||||
|  |  | ||||||
|  |     let sla = SLAPolicy::new() | ||||||
|  |         .sla_uptime(99) | ||||||
|  |         .sla_bandwidth_mbit(1000) | ||||||
|  |         .sla_penalty(150) | ||||||
|  |         .build(); | ||||||
|  |  | ||||||
|  |     let cs = ComputeSlice::new() | ||||||
|  |         .nodeid(42) | ||||||
|  |         .slice_id(1) | ||||||
|  |         .mem_gb(16.0) | ||||||
|  |         .storage_gb(200.0) | ||||||
|  |         .passmark(5000) | ||||||
|  |         .vcores(8) | ||||||
|  |         .cpu_oversubscription(2) | ||||||
|  |         .storage_oversubscription(1) | ||||||
|  |         .price_range(vec![0.5, 2.0]) | ||||||
|  |         .gpus(1) | ||||||
|  |         .price_cc(1.25) | ||||||
|  |         .pricing_policy(pricing.clone()) | ||||||
|  |         .sla_policy(sla.clone()); | ||||||
|  |  | ||||||
|  |     let ss = StorageSlice::new() | ||||||
|  |         .nodeid(42) | ||||||
|  |         .slice_id(2) | ||||||
|  |         .price_cc(0.15) | ||||||
|  |         .pricing_policy(pricing) | ||||||
|  |         .sla_policy(sla); | ||||||
|  |  | ||||||
|  |     // serde roundtrip compute slice | ||||||
|  |     let s = serde_json::to_string(&cs).expect("serialize compute slice"); | ||||||
|  |     let cs2: ComputeSlice = serde_json::from_str(&s).expect("deserialize compute slice"); | ||||||
|  |     assert_eq!(cs, cs2); | ||||||
|  |  | ||||||
|  |     // serde roundtrip storage slice | ||||||
|  |     let s2 = serde_json::to_string(&ss).expect("serialize storage slice"); | ||||||
|  |     let ss2: StorageSlice = serde_json::from_str(&s2).expect("deserialize storage slice"); | ||||||
|  |     assert_eq!(ss, ss2); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[test] | ||||||
|  | fn build_and_serde_roundtrip_node() { | ||||||
|  |     let dev = DeviceInfo { | ||||||
|  |         vendor: "AcmeVendor".into(), | ||||||
|  |         storage: vec![StorageDevice { id: "sda".into(), size_gb: 512.0, description: "NVMe".into() }], | ||||||
|  |         memory: vec![], | ||||||
|  |         cpu: vec![], | ||||||
|  |         gpu: vec![], | ||||||
|  |         network: vec![], | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     let cap = NodeCapacity { storage_gb: 2048.0, mem_gb: 128.0, mem_gb_gpu: 24.0, passmark: 12000, vcores: 32 }; | ||||||
|  |  | ||||||
|  |     let cs = ComputeSlice::new().nodeid(1).slice_id(1).mem_gb(8.0).storage_gb(100.0).passmark(2500).vcores(4); | ||||||
|  |     let ss = StorageSlice::new().nodeid(1).slice_id(2).price_cc(0.2); | ||||||
|  |  | ||||||
|  |     let node = Node::new() | ||||||
|  |         .nodegroupid(7) | ||||||
|  |         .uptime(99) | ||||||
|  |         .add_compute_slice(cs) | ||||||
|  |         .add_storage_slice(ss) | ||||||
|  |         .devices(dev) | ||||||
|  |         .country("NL") | ||||||
|  |         .capacity(cap) | ||||||
|  |         .provisiontime(1710000000) | ||||||
|  |         .pubkey("node_pubkey") | ||||||
|  |         .signature_node("sig_node") | ||||||
|  |         .signature_farmer("sig_farmer"); | ||||||
|  |  | ||||||
|  |     let s = serde_json::to_string(&node).expect("serialize node"); | ||||||
|  |     let node2: Node = serde_json::from_str(&s).expect("deserialize node"); | ||||||
|  |  | ||||||
|  |     assert_eq!(node.nodegroupid, node2.nodegroupid); | ||||||
|  |     assert_eq!(node.uptime, node2.uptime); | ||||||
|  |     assert_eq!(node.country, node2.country); | ||||||
|  |     assert_eq!(node.pubkey, node2.pubkey); | ||||||
|  |     assert_eq!(node.signature_node, node2.signature_node); | ||||||
|  |     assert_eq!(node.signature_farmer, node2.signature_farmer); | ||||||
|  |     assert_eq!(node.computeslices.len(), node2.computeslices.len()); | ||||||
|  |     assert_eq!(node.storageslices.len(), node2.storageslices.len()); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[test] | ||||||
|  | fn build_and_serde_roundtrip_reservation() { | ||||||
|  |     let reservation = Reservation::new() | ||||||
|  |         .customer_id(1234) | ||||||
|  |         .add_compute_slice(11) | ||||||
|  |         .add_storage_slice(22) | ||||||
|  |         .status(ReservationStatus::Confirmed) | ||||||
|  |         .obligation(true) | ||||||
|  |         .start_date(1_710_000_000) | ||||||
|  |         .end_date(1_720_000_000); | ||||||
|  |  | ||||||
|  |     let s = serde_json::to_string(&reservation).expect("serialize reservation"); | ||||||
|  |     let reservation2: Reservation = serde_json::from_str(&s).expect("deserialize reservation"); | ||||||
|  |  | ||||||
|  |     assert_eq!(reservation.customer_id, reservation2.customer_id); | ||||||
|  |     assert_eq!(reservation.status, reservation2.status); | ||||||
|  |     assert_eq!(reservation.obligation, reservation2.obligation); | ||||||
|  |     assert_eq!(reservation.start_date, reservation2.start_date); | ||||||
|  |     assert_eq!(reservation.end_date, reservation2.end_date); | ||||||
|  |     assert_eq!(reservation.compute_slices, reservation2.compute_slices); | ||||||
|  |     assert_eq!(reservation.storage_slices, reservation2.storage_slices); | ||||||
|  | } | ||||||
							
								
								
									
heromodels/tests/grid4_ourdb.rs (new file, 82 lines)
							| @@ -0,0 +1,82 @@ | |||||||
|  | use heromodels::db::hero::OurDB; | ||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey}; | ||||||
|  | use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node}; | ||||||
|  | use heromodels_core::Model; | ||||||
|  | use std::sync::Arc; | ||||||
|  |  | ||||||
|  | fn create_test_db() -> Arc<OurDB> { | ||||||
|  |     let ts = std::time::SystemTime::now() | ||||||
|  |         .duration_since(std::time::UNIX_EPOCH) | ||||||
|  |         .unwrap() | ||||||
|  |         .as_nanos(); | ||||||
|  |     let path = format!("/tmp/grid4_node_test_{}", ts); | ||||||
|  |     let _ = std::fs::remove_dir_all(&path); | ||||||
|  |     Arc::new(OurDB::new(path, true).expect("create OurDB")) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | #[test] | ||||||
|  | fn grid4_node_basic_roundtrip_and_indexes() { | ||||||
|  |     let db = create_test_db(); | ||||||
|  |     let nodes = db.collection::<Node>().expect("open node collection"); | ||||||
|  |  | ||||||
|  |     // Clean any leftover | ||||||
|  |     if let Ok(existing) = nodes.get_all() { | ||||||
|  |         for n in existing { | ||||||
|  |             let _ = nodes.delete_by_id(n.get_id()); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Build a node with some compute slices and device info | ||||||
|  |     let cs = ComputeSlice::new() | ||||||
|  |         .nodeid(1) | ||||||
|  |         .slice_id(1) | ||||||
|  |         .mem_gb(32.0) | ||||||
|  |         .storage_gb(512.0) | ||||||
|  |         .passmark(5000) | ||||||
|  |         .vcores(16) | ||||||
|  |         .gpus(1) | ||||||
|  |         .price_cc(0.25); | ||||||
|  |  | ||||||
|  |     let dev = DeviceInfo { | ||||||
|  |         vendor: "ACME".into(), | ||||||
|  |         ..Default::default() | ||||||
|  |     }; | ||||||
|  |  | ||||||
|  |     let n = Node::new() | ||||||
|  |         .nodegroupid(42) | ||||||
|  |         .uptime(99) | ||||||
|  |         .add_compute_slice(cs) | ||||||
|  |         .devices(dev) | ||||||
|  |         .country("BE") | ||||||
|  |         .pubkey("PUB_NODE_1") | ||||||
|  |         .build(); | ||||||
|  |  | ||||||
|  |     let (id, stored) = nodes.set(&n).expect("store node"); | ||||||
|  |     assert!(id > 0); | ||||||
|  |     assert_eq!(stored.country, "BE"); | ||||||
|  |  | ||||||
|  |     // get by id | ||||||
|  |     let fetched = nodes.get_by_id(id).expect("get by id").expect("exists"); | ||||||
|  |     assert_eq!(fetched.pubkey, "PUB_NODE_1"); | ||||||
|  |  | ||||||
|  |     // query by top-level indexes | ||||||
|  |     let by_country = nodes.get::<country, _>("BE").expect("query country"); | ||||||
|  |     assert_eq!(by_country.len(), 1); | ||||||
|  |     assert_eq!(by_country[0].get_id(), id); | ||||||
|  |  | ||||||
|  |     let by_group = nodes.get::<nodegroupid, _>(&42).expect("query group"); | ||||||
|  |     assert_eq!(by_group.len(), 1); | ||||||
|  |  | ||||||
|  |     let by_pubkey = nodes.get::<pubkey, _>("PUB_NODE_1").expect("query pubkey"); | ||||||
|  |     assert_eq!(by_pubkey.len(), 1); | ||||||
|  |  | ||||||
|  |     // update | ||||||
|  |     let updated = fetched.clone().country("NL"); | ||||||
|  |     let (_, back) = nodes.set(&updated).expect("update node"); | ||||||
|  |     assert_eq!(back.country, "NL"); | ||||||
|  |  | ||||||
|  |     // delete | ||||||
|  |     nodes.delete_by_id(id).expect("delete"); | ||||||
|  |     assert!(nodes.get_by_id(id).expect("get after delete").is_none()); | ||||||
|  | } | ||||||
							
								
								
									
heromodels/tests/grid4_postgres.rs (new file, 125 lines)
							| @@ -0,0 +1,125 @@ | |||||||
|  | use heromodels::db::postgres::{Config, Postgres}; | ||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::grid4::node::node_index::{country, nodegroupid, pubkey}; | ||||||
|  | use heromodels::models::grid4::node::{ComputeSlice, DeviceInfo, Node}; | ||||||
|  | use heromodels_core::Model; | ||||||
|  |  | ||||||
|  | // Requires local Postgres (user=postgres password=test123 host=localhost port=5432) | ||||||
|  | // Run with: cargo test -p heromodels --test grid4_postgres -- --ignored | ||||||
|  | #[test] | ||||||
|  | #[ignore] | ||||||
|  | fn grid4_node_postgres_roundtrip_like_example() { | ||||||
|  |     let db = Postgres::new( | ||||||
|  |         Config::new() | ||||||
|  |             .user(Some("postgres".into())) | ||||||
|  |             .password(Some("test123".into())) | ||||||
|  |             .host(Some("localhost".into())) | ||||||
|  |             .port(Some(5432)), | ||||||
|  |     ) | ||||||
|  |     .expect("can connect to Postgres"); | ||||||
|  |  | ||||||
|  |     let nodes = db.collection::<Node>().expect("open node collection"); | ||||||
|  |  | ||||||
|  |     // Clean existing | ||||||
|  |     if let Ok(existing) = nodes.get_all() { | ||||||
|  |         for n in existing { | ||||||
|  |             let _ = nodes.delete_by_id(n.get_id()); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Build and store multiple nodes via builder and then persist via collection.set(), like examples | ||||||
|  |     let cs1 = ComputeSlice::new() | ||||||
|  |         .nodeid(10) | ||||||
|  |         .slice_id(1) | ||||||
|  |         .mem_gb(32.0) | ||||||
|  |         .storage_gb(512.0) | ||||||
|  |         .passmark(5000) | ||||||
|  |         .vcores(16) | ||||||
|  |         .gpus(1) | ||||||
|  |         .price_cc(0.25); | ||||||
|  |     let cs2 = ComputeSlice::new() | ||||||
|  |         .nodeid(10) | ||||||
|  |         .slice_id(2) | ||||||
|  |         .mem_gb(64.0) | ||||||
|  |         .storage_gb(2048.0) | ||||||
|  |         .passmark(7000) | ||||||
|  |         .vcores(24) | ||||||
|  |         .gpus(2) | ||||||
|  |         .price_cc(0.50); | ||||||
|  |     let cs3 = ComputeSlice::new() | ||||||
|  |         .nodeid(11) | ||||||
|  |         .slice_id(1) | ||||||
|  |         .mem_gb(16.0) | ||||||
|  |         .storage_gb(256.0) | ||||||
|  |         .passmark(3000) | ||||||
|  |         .vcores(8) | ||||||
|  |         .gpus(0) | ||||||
|  |         .price_cc(0.10); | ||||||
|  |  | ||||||
|  |     let dev = DeviceInfo { vendor: "ACME".into(), ..Default::default() }; | ||||||
|  |  | ||||||
|  |     let n1 = Node::new() | ||||||
|  |         .nodegroupid(99) | ||||||
|  |         .uptime(97) | ||||||
|  |         .add_compute_slice(cs1) | ||||||
|  |         .devices(dev.clone()) | ||||||
|  |         .country("BE") | ||||||
|  |         .pubkey("PG_NODE_1") | ||||||
|  |         .build(); | ||||||
|  |     let n2 = Node::new() | ||||||
|  |         .nodegroupid(99) | ||||||
|  |         .uptime(96) | ||||||
|  |         .add_compute_slice(cs2) | ||||||
|  |         .devices(dev.clone()) | ||||||
|  |         .country("NL") | ||||||
|  |         .pubkey("PG_NODE_2") | ||||||
|  |         .build(); | ||||||
|  |     let n3 = Node::new() | ||||||
|  |         .nodegroupid(7) | ||||||
|  |         .uptime(95) | ||||||
|  |         .add_compute_slice(cs3) | ||||||
|  |         .devices(dev) | ||||||
|  |         .country("BE") | ||||||
|  |         .pubkey("PG_NODE_3") | ||||||
|  |         .build(); | ||||||
|  |  | ||||||
|  |     let (id1, s1) = nodes.set(&n1).expect("store n1"); | ||||||
|  |     let (id2, s2) = nodes.set(&n2).expect("store n2"); | ||||||
|  |     let (id3, s3) = nodes.set(&n3).expect("store n3"); | ||||||
|  |     assert!(id1 > 0 && id2 > 0 && id3 > 0); | ||||||
|  |  | ||||||
|  |     // Query by top-level indexes similar to the example style | ||||||
|  |     let be_nodes = nodes.get::<country, _>("BE").expect("by country"); | ||||||
|  |     assert_eq!(be_nodes.len(), 2); | ||||||
|  |  | ||||||
|  |     let grp_99 = nodes.get::<nodegroupid, _>(&99).expect("by group"); | ||||||
|  |     assert_eq!(grp_99.len(), 2); | ||||||
|  |  | ||||||
|  |     let by_key = nodes.get::<pubkey, _>("PG_NODE_2").expect("by pubkey"); | ||||||
|  |     assert_eq!(by_key.len(), 1); | ||||||
|  |     assert_eq!(by_key[0].get_id(), id2); | ||||||
|  |  | ||||||
|  |     // Update: change country of n1 | ||||||
|  |     let updated = s1.clone().country("DE"); | ||||||
|  |     let (_, back) = nodes.set(&updated).expect("update n1"); | ||||||
|  |     assert_eq!(back.country, "DE"); | ||||||
|  |  | ||||||
|  |     // Cardinality after update | ||||||
|  |     let de_nodes = nodes.get::<country, _>("DE").expect("by country DE"); | ||||||
|  |     assert_eq!(de_nodes.len(), 1); | ||||||
|  |  | ||||||
|  |     // Delete by id and by index | ||||||
|  |     nodes.delete_by_id(id2).expect("delete n2 by id"); | ||||||
|  |     assert!(nodes.get_by_id(id2).unwrap().is_none()); | ||||||
|  |  | ||||||
|  |     nodes.delete::<pubkey, _>("PG_NODE_3").expect("delete n3 by pubkey"); | ||||||
|  |     assert!(nodes.get_by_id(id3).unwrap().is_none()); | ||||||
|  |  | ||||||
|  |     // Remaining should be updated n1 only; verify via targeted queries | ||||||
|  |     let de_nodes = nodes.get::<country, _>("DE").expect("country DE after deletes"); | ||||||
|  |     assert_eq!(de_nodes.len(), 1); | ||||||
|  |     assert_eq!(de_nodes[0].get_id(), id1); | ||||||
|  |     let by_key = nodes.get::<pubkey, _>("PG_NODE_1").expect("by pubkey PG_NODE_1"); | ||||||
|  |     assert_eq!(by_key.len(), 1); | ||||||
|  |     assert_eq!(by_key[0].get_id(), id1); | ||||||
|  | } | ||||||
							
								
								
									
heromodels/tests/heroledger_postgres.rs (new file, 97 lines)
							| @@ -0,0 +1,97 @@ | |||||||
|  | use heromodels::db::postgres::{Config, Postgres}; | ||||||
|  | use heromodels::db::{Collection, Db}; | ||||||
|  | use heromodels::models::heroledger::user::user_index::username; | ||||||
|  | use heromodels::models::heroledger::user::User; | ||||||
|  | use heromodels_core::Model; | ||||||
|  |  | ||||||
|  | // NOTE: Requires a local Postgres running with user=postgres password=test123 host=localhost port=5432 | ||||||
|  | // Marked ignored by default. Run with: cargo test -p heromodels --test heroledger_postgres -- --ignored | ||||||
|  | #[test] | ||||||
|  | #[ignore] | ||||||
|  | fn heroledger_user_postgres_roundtrip() { | ||||||
|  |     // Connect | ||||||
|  |     let db = Postgres::new( | ||||||
|  |         Config::new() | ||||||
|  |             .user(Some("postgres".into())) | ||||||
|  |             .password(Some("test123".into())) | ||||||
|  |             .host(Some("localhost".into())) | ||||||
|  |             .port(Some(5432)), | ||||||
|  |     ) | ||||||
|  |     .expect("can connect to Postgres"); | ||||||
|  |  | ||||||
|  |     // Open collection (will create table and indexes for top-level fields) | ||||||
|  |     let users = db.collection::<User>().expect("can open user collection"); | ||||||
|  |  | ||||||
|  |     // Clean slate | ||||||
|  |     if let Ok(existing) = users.get_all() { | ||||||
|  |         for u in existing { | ||||||
|  |             let _ = users.delete_by_id(u.get_id()); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     // Unique suffix to avoid collisions with any pre-existing rows | ||||||
|  |     let uniq = format!("{}", std::time::SystemTime::now() | ||||||
|  |         .duration_since(std::time::UNIX_EPOCH) | ||||||
|  |         .unwrap() | ||||||
|  |         .as_nanos()); | ||||||
|  |     let alice = format!("alice_{}", uniq); | ||||||
|  |     let bob = format!("bob_{}", uniq); | ||||||
|  |     let carol = format!("carol_{}", uniq); | ||||||
|  |  | ||||||
|  |     // Build and store multiple users | ||||||
|  |     let u1 = User::new(0) | ||||||
|  |         .username(&alice) | ||||||
|  |         .pubkey("PUBKEY_A") | ||||||
|  |         .add_email("alice@example.com") | ||||||
|  |         .build(); | ||||||
|  |     let u2 = User::new(0) | ||||||
|  |         .username(&bob) | ||||||
|  |         .pubkey("PUBKEY_B") | ||||||
|  |         .add_email("bob@example.com") | ||||||
|  |         .build(); | ||||||
|  |     let u3 = User::new(0) | ||||||
|  |         .username(&carol) | ||||||
|  |         .pubkey("PUBKEY_C") | ||||||
|  |         .add_email("carol@example.com") | ||||||
|  |         .build(); | ||||||
|  |  | ||||||
|  |     let (id1, db_u1) = users.set(&u1).expect("store u1"); | ||||||
|  |     let (id2, db_u2) = users.set(&u2).expect("store u2"); | ||||||
|  |     let (id3, db_u3) = users.set(&u3).expect("store u3"); | ||||||
|  |     assert!(id1 > 0 && id2 > 0 && id3 > 0); | ||||||
|  |  | ||||||
|  |     // Fetch by id | ||||||
|  |     assert_eq!(users.get_by_id(id1).unwrap().unwrap().username, alice); | ||||||
|  |     assert_eq!(users.get_by_id(id2).unwrap().unwrap().username, bob); | ||||||
|  |     assert_eq!(users.get_by_id(id3).unwrap().unwrap().username, carol); | ||||||
|  |  | ||||||
|  |     // Fetch by index (top-level username) | ||||||
|  |     let by_username = users.get::<username, _>(&alice).expect("by username"); | ||||||
|  |     assert_eq!(by_username.len(), 1); | ||||||
|  |     assert_eq!(by_username[0].get_id(), id1); | ||||||
|  |  | ||||||
|  |     // Update one | ||||||
|  |     let updated = db_u1.clone().add_email("work@alice.example"); | ||||||
|  |     let (id1b, updated_back) = users.set(&updated).expect("update alice"); | ||||||
|  |     assert_eq!(id1b, id1); | ||||||
|  |     assert!(updated_back.email.len() >= 2); | ||||||
|  |  | ||||||
|  |     // Targeted queries to avoid legacy rows in the same table | ||||||
|  |     // Verify three users exist via index queries | ||||||
|  |     assert_eq!(users.get::<username, _>(&alice).unwrap().len(), 1); | ||||||
|  |     assert_eq!(users.get::<username, _>(&bob).unwrap().len(), 1); | ||||||
|  |     assert_eq!(users.get::<username, _>(&carol).unwrap().len(), 1); | ||||||
|  |  | ||||||
|  |     // Delete by id | ||||||
|  |     users.delete_by_id(id2).expect("delete bob by id"); | ||||||
|  |     assert!(users.get_by_id(id2).unwrap().is_none()); | ||||||
|  |  | ||||||
|  |     // Delete by index (username) | ||||||
|  |     users.delete::<username, _>(&carol).expect("delete carol by username"); | ||||||
|  |     assert!(users.get_by_id(id3).unwrap().is_none()); | ||||||
|  |  | ||||||
|  |     // Remaining should be just alice; verify via index | ||||||
|  |     let remain = users.get::<username, _>(&alice).expect("get alice after delete"); | ||||||
|  |     assert_eq!(remain.len(), 1); | ||||||
|  |     assert_eq!(remain[0].get_id(), id1); | ||||||
|  | } | ||||||
| @@ -1,4 +1,5 @@ | |||||||
| use heromodels::db::Collection; | use heromodels::db::Collection; | ||||||
|  | use heromodels::db::Db; | ||||||
| use heromodels::db::hero::OurDB; | use heromodels::db::hero::OurDB; | ||||||
| use heromodels::models::biz::{BusinessType, Company, CompanyStatus, Payment, PaymentStatus}; | use heromodels::models::biz::{BusinessType, Company, CompanyStatus, Payment, PaymentStatus}; | ||||||
| use heromodels_core::Model; | use heromodels_core::Model; | ||||||
| @@ -197,12 +198,18 @@ fn test_payment_database_persistence() { | |||||||
|     ); |     ); | ||||||
|  |  | ||||||
|     // Save payment |     // Save payment | ||||||
|     let (payment_id, saved_payment) = db.set(&payment).expect("Failed to save payment"); |     let (payment_id, saved_payment) = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|  |         .set(&payment) | ||||||
|  |         .expect("Failed to save payment"); | ||||||
|     assert!(payment_id > 0); |     assert!(payment_id > 0); | ||||||
|     assert_eq!(saved_payment.payment_intent_id, "pi_db_test"); |     assert_eq!(saved_payment.payment_intent_id, "pi_db_test"); | ||||||
|  |  | ||||||
|     // Retrieve payment |     // Retrieve payment | ||||||
|     let retrieved_payment: Payment = db |     let retrieved_payment: Payment = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|         .get_by_id(payment_id) |         .get_by_id(payment_id) | ||||||
|         .expect("Failed to get payment") |         .expect("Failed to get payment") | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
| @@ -224,20 +231,34 @@ fn test_payment_status_transitions() { | |||||||
|         1360.0, |         1360.0, | ||||||
|     ); |     ); | ||||||
|  |  | ||||||
|     let (payment_id, mut payment) = db.set(&payment).expect("Failed to save payment"); |     let (payment_id, mut payment) = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|  |         .set(&payment) | ||||||
|  |         .expect("Failed to save payment"); | ||||||
|  |  | ||||||
|     // Test pending -> completed |     // Test pending -> completed | ||||||
|     payment = payment.complete_payment(Some("cus_transition_test".to_string())); |     payment = payment.complete_payment(Some("cus_transition_test".to_string())); | ||||||
|     let (_, mut payment) = db.set(&payment).expect("Failed to update payment"); |     let (_, mut payment) = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|  |         .set(&payment) | ||||||
|  |         .expect("Failed to update payment"); | ||||||
|     assert!(payment.is_completed()); |     assert!(payment.is_completed()); | ||||||
|  |  | ||||||
|     // Test completed -> refunded |     // Test completed -> refunded | ||||||
|     payment = payment.refund_payment(); |     payment = payment.refund_payment(); | ||||||
|     let (_, payment) = db.set(&payment).expect("Failed to update payment"); |     let (_, payment) = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|  |         .set(&payment) | ||||||
|  |         .expect("Failed to update payment"); | ||||||
|     assert!(payment.is_refunded()); |     assert!(payment.is_refunded()); | ||||||
|  |  | ||||||
|     // Verify final state in database |     // Verify final state in database | ||||||
|     let final_payment: Payment = db |     let final_payment: Payment = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|         .get_by_id(payment_id) |         .get_by_id(payment_id) | ||||||
|         .expect("Failed to get payment") |         .expect("Failed to get payment") | ||||||
|         .unwrap(); |         .unwrap(); | ||||||
| @@ -270,15 +291,18 @@ fn test_company_payment_integration() { | |||||||
|     let db = create_test_db(); |     let db = create_test_db(); | ||||||
|  |  | ||||||
|     // Create company with default PendingPayment status |     // Create company with default PendingPayment status | ||||||
|     let company = Company::new( |     let company = Company::new() | ||||||
|         "Integration Test Corp".to_string(), |         .name("Integration Test Corp") | ||||||
|         "ITC-001".to_string(), |         .registration_number("ITC-001") | ||||||
|         chrono::Utc::now().timestamp(), |         .incorporation_date(chrono::Utc::now().timestamp()) | ||||||
|     ) |         .email("test@integration.com") | ||||||
|     .email("test@integration.com".to_string()) |         .business_type(BusinessType::Starter); | ||||||
|     .business_type(BusinessType::Starter); |  | ||||||
|  |  | ||||||
|     let (company_id, company) = db.set(&company).expect("Failed to save company"); |     let (company_id, company) = db | ||||||
|  |         .collection::<Company>() | ||||||
|  |         .expect("open company collection") | ||||||
|  |         .set(&company) | ||||||
|  |         .expect("Failed to save company"); | ||||||
|     assert_eq!(company.status, CompanyStatus::PendingPayment); |     assert_eq!(company.status, CompanyStatus::PendingPayment); | ||||||
|  |  | ||||||
|     // Create payment for the company |     // Create payment for the company | ||||||
| @@ -291,18 +315,28 @@ fn test_company_payment_integration() { | |||||||
|         305.0, |         305.0, | ||||||
|     ); |     ); | ||||||
|  |  | ||||||
|     let (_payment_id, payment) = db.set(&payment).expect("Failed to save payment"); |     let (_payment_id, payment) = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|  |         .set(&payment) | ||||||
|  |         .expect("Failed to save payment"); | ||||||
|     assert_eq!(payment.company_id, company_id); |     assert_eq!(payment.company_id, company_id); | ||||||
|  |  | ||||||
|     // Complete payment |     // Complete payment | ||||||
|     let completed_payment = payment.complete_payment(Some("cus_integration_test".to_string())); |     let completed_payment = payment.complete_payment(Some("cus_integration_test".to_string())); | ||||||
|     let (_, completed_payment) = db |     let (_, completed_payment) = db | ||||||
|  |         .collection::<Payment>() | ||||||
|  |         .expect("open payment collection") | ||||||
|         .set(&completed_payment) |         .set(&completed_payment) | ||||||
|         .expect("Failed to update payment"); |         .expect("Failed to update payment"); | ||||||
|  |  | ||||||
|     // Update company status to Active |     // Update company status to Active | ||||||
|     let active_company = company.status(CompanyStatus::Active); |     let active_company = company.status(CompanyStatus::Active); | ||||||
|     let (_, active_company) = db.set(&active_company).expect("Failed to update company"); |     let (_, active_company) = db | ||||||
|  |         .collection::<Company>() | ||||||
|  |         .expect("open company collection") | ||||||
|  |         .set(&active_company) | ||||||
|  |         .expect("Failed to update company"); | ||||||
|  |  | ||||||
|     // Verify final states |     // Verify final states | ||||||
|     assert!(completed_payment.is_completed()); |     assert!(completed_payment.is_completed()); | ||||||
|   | |||||||
							
								
								
									
specs/billingmanager_research/billingmanager.md (new file, 345 lines)
							| @@ -0,0 +1,345 @@ | |||||||
|  |  | ||||||
|  | ### 2.1 Accounts | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity (non-negative), unique account id | ||||||
|  | * **pubkey**: `BYTEA` unique public key for signing/encryption | ||||||
|  | * **display\_name**: `TEXT` (optional) | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | ### 2.2 Currencies | ||||||
|  |  | ||||||
|  | * **asset\_code**: `TEXT` PK (e.g., `USDC-ETH`, `EUR`, `LND`) | ||||||
|  | * **name**: `TEXT` | ||||||
|  | * **symbol**: `TEXT` | ||||||
|  | * **decimals**: `INT` (default 2) | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 3) Services & Groups | ||||||
|  |  | ||||||
|  | ### 3.1 Services | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity | ||||||
|  | * **name**: `TEXT` unique | ||||||
|  | * **description**: `TEXT` | ||||||
|  | * **default\_billing\_mode**: `ENUM('per_second','per_request')` | ||||||
|  | * **default\_price**: `NUMERIC(38,18)` (≥0) | ||||||
|  | * **default\_currency**: FK → `currencies(asset_code)` | ||||||
|  | * **max\_request\_seconds**: `INT` (>0 or `NULL`) | ||||||
|  | * **schema\_heroscript**: `TEXT` | ||||||
|  | * **schema\_json**: `JSONB` | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | #### Accepted Currencies (per service) | ||||||
|  |  | ||||||
|  | * **service\_id**: FK → `services(id)` | ||||||
|  | * **asset\_code**: FK → `currencies(asset_code)` | ||||||
|  | * **price\_override**: `NUMERIC(38,18)` (optional) | ||||||
|  | * **billing\_mode\_override**: `ENUM` (optional) | ||||||
|  |   Primary key: `(service_id, asset_code)` | ||||||
|  |  | ||||||
|  | ### 3.2 Service Groups | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity | ||||||
|  | * **name**: `TEXT` unique | ||||||
|  | * **description**: `TEXT` | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | #### Group Memberships | ||||||
|  |  | ||||||
|  | * **group\_id**: FK → `service_groups(id)` | ||||||
|  | * **service\_id**: FK → `services(id)` | ||||||
|  |   Primary key: `(group_id, service_id)` | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 4) Providers & Runners | ||||||
|  |  | ||||||
|  | ### 4.1 Service Providers | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity | ||||||
|  | * **account\_id**: FK → `accounts(id)` (the owning account) | ||||||
|  | * **name**: `TEXT` unique | ||||||
|  | * **description**: `TEXT` | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | #### Providers Offer Groups | ||||||
|  |  | ||||||
|  | * **provider\_id**: FK → `service_providers(id)` | ||||||
|  | * **group\_id**: FK → `service_groups(id)` | ||||||
|  |   Primary key: `(provider_id, group_id)` | ||||||
|  |  | ||||||
|  | #### Provider Pricing Overrides (optional) | ||||||
|  |  | ||||||
|  | * **provider\_id**: FK → `service_providers(id)` | ||||||
|  | * **service\_id**: FK → `services(id)` | ||||||
|  | * **asset\_code**: FK → `currencies(asset_code)` (nullable for currency-agnostic override) | ||||||
|  | * **price\_override**: `NUMERIC(38,18)` (optional) | ||||||
|  | * **billing\_mode\_override**: `ENUM` (optional) | ||||||
|  | * **max\_request\_seconds\_override**: `INT` (optional) | ||||||
|  |   Primary key: `(provider_id, service_id, asset_code)` | ||||||
|  |  | ||||||
|  | ### 4.2 Runners | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity | ||||||
|  | * **address**: `INET` (must be IPv6) | ||||||
|  | * **name**: `TEXT` | ||||||
|  | * **description**: `TEXT` | ||||||
|  | * **pubkey**: `BYTEA` (optional) | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | #### Runner Ownership (many-to-many) | ||||||
|  |  | ||||||
|  | * **runner\_id**: FK → `runners(id)` | ||||||
|  | * **provider\_id**: FK → `service_providers(id)` | ||||||
|  |   Primary key: `(runner_id, provider_id)` | ||||||
|  |  | ||||||
|  | #### Routing (provider → service/service\_group → runners) | ||||||
|  |  | ||||||
|  | * **provider\_service\_runners**: `(provider_id, service_id, runner_id)` PK | ||||||
|  | * **provider\_service\_group\_runners**: `(provider_id, group_id, runner_id)` PK | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 5) Subscriptions & Spend Control | ||||||
|  |  | ||||||
|  | A subscription authorizes an **account** to use either a **service** **or** a **service group**, with optional spend limits and allowed providers. | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity | ||||||
|  | * **account\_id**: FK → `accounts(id)` | ||||||
|  | * **service\_id** *xor* **group\_id**: FK (exactly one must be set) | ||||||
|  | * **secret**: `BYTEA` (random, provided by subscriber; recommend storing a hash) | ||||||
|  | * **subscription\_data**: `JSONB` (free-form) | ||||||
|  | * **limit\_amount**: `NUMERIC(38,18)` (optional) | ||||||
|  | * **limit\_currency**: FK → `currencies(asset_code)` (optional) | ||||||
|  | * **limit\_period**: `ENUM('hour','day','month')` (optional) | ||||||
|  | * **active**: `BOOLEAN` default `TRUE` | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | #### Allowed Providers per Subscription | ||||||
|  |  | ||||||
|  | * **subscription\_id**: FK → `subscriptions(id)` | ||||||
|  | * **provider\_id**: FK → `service_providers(id)` | ||||||
|  |   Primary key: `(subscription_id, provider_id)` | ||||||
|  |  | ||||||
|  | **Intended Use:** | ||||||
|  |  | ||||||
|  | * Subscribers cap spending by amount/currency/period. | ||||||
|  | * The merchant (provider) can claim charges for requests fulfilled under an active subscription, within limits, and only if listed in `subscription_providers` (see the authorization sketch below). | ||||||
|  |  | ||||||
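|  | A minimal authorization sketch, assuming the table and column names from `schema.sql`, treating an empty allowlist in `subscription_providers` as "any provider", with `:sub`, `:acc`, `:sid`, `:pid` as bind parameters supplied by the broker: | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Is provider :pid allowed to serve service :sid for account :acc under subscription :sub? | ||||||
|  | SELECT EXISTS ( | ||||||
|  |   SELECT 1 | ||||||
|  |   FROM subscriptions s | ||||||
|  |   LEFT JOIN service_group_members m ON m.group_id = s.group_id | ||||||
|  |   WHERE s.id = :sub | ||||||
|  |     AND s.account_id = :acc | ||||||
|  |     AND s.active | ||||||
|  |     AND (s.service_id = :sid OR m.service_id = :sid) | ||||||
|  |     AND ( | ||||||
|  |       NOT EXISTS (SELECT 1 FROM subscription_providers sp WHERE sp.subscription_id = s.id) | ||||||
|  |       OR EXISTS (SELECT 1 FROM subscription_providers sp | ||||||
|  |                  WHERE sp.subscription_id = s.id AND sp.provider_id = :pid) | ||||||
|  |     ) | ||||||
|  | ) AS authorized; | ||||||
|  | ``` | ||||||
|  |  | ||||||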
|  | --- | ||||||
|  |  | ||||||
|  | ## 6) Requests & Billing | ||||||
|  |  | ||||||
|  | ### 6.1 Request Lifecycle | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity | ||||||
|  | * **account\_id**: FK → `accounts(id)` | ||||||
|  | * **subscription\_id**: FK → `subscriptions(id)` | ||||||
|  | * **provider\_id**: FK → `service_providers(id)` | ||||||
|  | * **service\_id**: FK → `services(id)` | ||||||
|  | * **runner\_id**: FK → `runners(id)` (nullable) | ||||||
|  | * **request\_schema**: `JSONB` (payload matching `schema_json`/`schema_heroscript`) | ||||||
|  | * **started\_at**, **ended\_at**: `TIMESTAMPTZ` | ||||||
|  | * **status**: `ENUM('pending','running','succeeded','failed','canceled')` | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | ### 6.2 Billing Ledger (append-only) | ||||||
|  |  | ||||||
|  | * **id**: `BIGINT` identity | ||||||
|  | * **account\_id**: FK → `accounts(id)` | ||||||
|  | * **provider\_id**: FK → `service_providers(id)` (nullable) | ||||||
|  | * **service\_id**: FK → `services(id)` (nullable) | ||||||
|  | * **request\_id**: FK → `requests(id)` (nullable) | ||||||
|  | * **amount**: `NUMERIC(38,18)` (debit = positive, credit/refund = negative) | ||||||
|  | * **asset\_code**: FK → `currencies(asset_code)` | ||||||
|  | * **entry\_type**: `ENUM('debit','credit','adjustment')` | ||||||
|  | * **description**: `TEXT` | ||||||
|  | * **created\_at**: `TIMESTAMPTZ` | ||||||
|  |  | ||||||
|  | **Balances View (example):** | ||||||
|  |  | ||||||
|  | * `account_balances(account_id, asset_code, balance)` as a view over `billing_ledger`. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 7) Pricing Precedence | ||||||
|  |  | ||||||
|  | When computing the **effective** pricing, billing mode, and max duration for a `(provider, service, currency)`: | ||||||
|  |  | ||||||
|  | 1. **Provider override for (service, asset\_code)** — if present, use it. | ||||||
|  | 2. **Service accepted currency override** — if present, use it. | ||||||
|  | 3. **Service defaults** — fallback. | ||||||
|  |  | ||||||
|  | If `billing_mode` or `max_request_seconds` is not overridden at step (1) or (2), it is inherited from the next step down. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 8) Key Constraints & Validations | ||||||
|  |  | ||||||
|  | * All identity ids are non-negative (`CHECK (id >= 0)`). | ||||||
|  | * Runner IPv6 enforcement: `CHECK (family(address) = 6)`. | ||||||
|  | * Subscriptions must point to **exactly one** of `service_id` or `group_id`. | ||||||
|  | * Prices and limits must be non-negative if set. | ||||||
|  | * Unique natural keys where appropriate: service names, provider names, currency asset codes, account pubkeys. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 9) Mermaid Diagrams | ||||||
|  |  | ||||||
|  | ### 9.1 Entity–Relationship Overview | ||||||
|  |  | ||||||
|  | ```mermaid | ||||||
|  | erDiagram | ||||||
|  |     ACCOUNTS ||--o{ SERVICE_PROVIDERS : "owns via account_id" | ||||||
|  |     ACCOUNTS ||--o{ SUBSCRIPTIONS : has | ||||||
|  |     CURRENCIES ||--o{ SERVICES : "default_currency" | ||||||
|  |     CURRENCIES ||--o{ SERVICE_ACCEPTED_CURRENCIES : "asset_code" | ||||||
|  |     CURRENCIES ||--o{ PROVIDER_SERVICE_OVERRIDES : "asset_code" | ||||||
|  |     CURRENCIES ||--o{ BILLING_LEDGER : "asset_code" | ||||||
|  |  | ||||||
|  |     SERVICES ||--o{ SERVICE_ACCEPTED_CURRENCIES : has | ||||||
|  |     SERVICES ||--o{ SERVICE_GROUP_MEMBERS : member_of | ||||||
|  |     SERVICE_GROUPS ||--o{ SERVICE_GROUP_MEMBERS : contains | ||||||
|  |  | ||||||
|  |     SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_GROUPS : offers | ||||||
|  |     SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_OVERRIDES : sets | ||||||
|  |     SERVICE_PROVIDERS ||--o{ RUNNER_OWNERS : owns | ||||||
|  |     SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_RUNNERS : routes | ||||||
|  |     SERVICE_PROVIDERS ||--o{ PROVIDER_SERVICE_GROUP_RUNNERS : routes | ||||||
|  |  | ||||||
|  |     RUNNERS ||--o{ RUNNER_OWNERS : owned_by | ||||||
|  |     RUNNERS ||--o{ PROVIDER_SERVICE_RUNNERS : executes | ||||||
|  |     RUNNERS ||--o{ PROVIDER_SERVICE_GROUP_RUNNERS : executes | ||||||
|  |  | ||||||
|  |     SUBSCRIPTIONS ||--o{ SUBSCRIPTION_PROVIDERS : allow | ||||||
|  |     SERVICE_PROVIDERS ||--o{ SUBSCRIPTION_PROVIDERS : allowed | ||||||
|  |  | ||||||
|  |     REQUESTS }o--|| ACCOUNTS : by | ||||||
|  |     REQUESTS }o--|| SUBSCRIPTIONS : under | ||||||
|  |     REQUESTS }o--|| SERVICE_PROVIDERS : via | ||||||
|  |     REQUESTS }o--|| SERVICES : for | ||||||
|  |     REQUESTS }o--o| RUNNERS : executed_by | ||||||
|  |  | ||||||
|  |     BILLING_LEDGER }o--|| ACCOUNTS : charges | ||||||
|  |     BILLING_LEDGER }o--o| SERVICES : reference | ||||||
|  |     BILLING_LEDGER }o--o| SERVICE_PROVIDERS : reference | ||||||
|  |     BILLING_LEDGER }o--o| REQUESTS : reference | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### 9.2 Request Flow (Happy Path) | ||||||
|  |  | ||||||
|  | ```mermaid | ||||||
|  | sequenceDiagram | ||||||
|  |     autonumber | ||||||
|  |     participant AC as Account | ||||||
|  |     participant API as Broker/API | ||||||
|  |     participant PR as Provider | ||||||
|  |     participant RU as Runner | ||||||
|  |     participant DB as PostgreSQL | ||||||
|  |  | ||||||
|  |     AC->>API: Submit request (subscription_id, service_id, payload, secret) | ||||||
|  |     API->>DB: Validate subscription (active, provider allowed, spend limits) | ||||||
|  |     DB-->>API: OK + effective pricing (resolve precedence) | ||||||
|  |     API->>PR: Dispatch request (service, payload) | ||||||
|  |     PR->>DB: Select runner (provider_service_runners / group runners) | ||||||
|  |     PR->>RU: Start job (payload) | ||||||
|  |     RU-->>PR: Job started (started_at) | ||||||
|  |     PR->>DB: Update REQUESTS (status=running, started_at) | ||||||
|  |     RU-->>PR: Job finished (duration, result) | ||||||
|  |     PR->>DB: Update REQUESTS (status=succeeded, ended_at) | ||||||
|  |     API->>DB: Insert BILLING_LEDGER (debit per effective price) | ||||||
|  |     DB-->>API: Ledger entry id | ||||||
|  |     API-->>AC: Return result + charge info | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### 9.3 Pricing Resolution | ||||||
|  |  | ||||||
|  | ```mermaid | ||||||
|  | flowchart TD | ||||||
|  |     A[Input: provider_id, service_id, asset_code] --> B{"Provider override exists for (service, asset_code)?"} | ||||||
|  |     B -- Yes --> P1[Use provider price/mode/max] | ||||||
|  |     B -- No --> C{Service accepted currency override exists?} | ||||||
|  |     C -- Yes --> P2[Use service currency price/mode] | ||||||
|  |     C -- No --> P3[Use service defaults] | ||||||
|  |     P1 --> OUT[Effective pricing] | ||||||
|  |     P2 --> OUT | ||||||
|  |     P3 --> OUT | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 10) Operational Notes | ||||||
|  |  | ||||||
|  | * **Secrets:** store a hash (e.g., `digest(secret,'sha256')`) rather than raw `secret`. Keep the original only client-side. | ||||||
|  | * **Limits enforcement:** before inserting a debit ledger entry, compute the period window (hour/day/month, UTC or tenant TZ) and enforce `SUM(amount) + new_amount ≤ limit_amount` (see the sketch after this list). | ||||||
|  | * **Durations:** enforce `max_request_seconds` (effective) at orchestration and/or via DB trigger on `REQUESTS` when transitioning to `running/succeeded`. | ||||||
|  | * **Routing:** prefer `provider_service_runners` when a request targets a service directly; otherwise use the union of runners from `provider_service_group_runners` for the group. | ||||||
|  | * **Balances:** serve balance queries via the `account_balances` view or a materialized cache updated by triggers/jobs. | ||||||
|  |  | ||||||
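|  | A minimal sketch of the period-spend check, assuming the `billing_ledger` columns from `schema.sql` and that `:period` is one of `'hour'`, `'day'`, `'month'`; `date_trunc` truncates in the session time zone, so run it with the tenant TZ (or UTC) configured: | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Spend accumulated so far in the current period for one account/currency. | ||||||
|  | SELECT COALESCE(SUM(amount), 0) AS current_spend | ||||||
|  | FROM billing_ledger | ||||||
|  | WHERE account_id = :account_id | ||||||
|  |   AND asset_code = :asset_code | ||||||
|  |   AND created_at >= date_trunc(:period, now()); | ||||||
|  | ``` | ||||||
|  |  | ||||||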
|  | --- | ||||||
|  |  | ||||||
|  | ## 11) Example Effective Pricing Query (sketch) | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Inputs: :provider_id, :service_id, :asset_code | ||||||
|  | WITH p AS ( | ||||||
|  |   SELECT price_override, billing_mode_override, max_request_seconds_override | ||||||
|  |   FROM provider_service_overrides | ||||||
|  |   WHERE provider_id = :provider_id | ||||||
|  |     AND service_id  = :service_id | ||||||
|  |     AND (asset_code = :asset_code) | ||||||
|  | ), | ||||||
|  | sac AS ( | ||||||
|  |   SELECT price_override, billing_mode_override | ||||||
|  |   FROM service_accepted_currencies | ||||||
|  |   WHERE service_id = :service_id AND asset_code = :asset_code | ||||||
|  | ), | ||||||
|  | svc AS ( | ||||||
|  |   SELECT default_price AS price, default_billing_mode AS mode, max_request_seconds | ||||||
|  |   FROM services WHERE id = :service_id | ||||||
|  | ) | ||||||
|  | SELECT | ||||||
|  |   COALESCE(p.price_override, sac.price_override, svc.price)                AS effective_price, | ||||||
|  |   COALESCE(p.billing_mode_override, sac.billing_mode_override, svc.mode)   AS effective_mode, | ||||||
|  |   COALESCE(p.max_request_seconds_override, svc.max_request_seconds)        AS effective_max_seconds; | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 12) Indices (non-exhaustive) | ||||||
|  |  | ||||||
|  | * `services(default_currency)` | ||||||
|  | * `service_accepted_currencies(service_id)` | ||||||
|  | * `provider_service_overrides(service_id, provider_id)` | ||||||
|  | * `requests(account_id)`, `requests(provider_id)`, `requests(service_id)` | ||||||
|  | * `billing_ledger(account_id, asset_code)` | ||||||
|  | * `subscriptions(account_id) WHERE active` | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 13) Migration & Compatibility | ||||||
|  |  | ||||||
|  | * Prefer additive migrations (new columns/tables) to avoid downtime. | ||||||
|  | * Use `ENUM` via `CREATE TYPE`; when extending, plan for `ALTER TYPE ... ADD VALUE`. | ||||||
|  | * For high-write ledgers, consider partitioning `billing_ledger` by `created_at` (monthly) and indexing partitions; a sketch follows this list. | ||||||
|  |  | ||||||
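|  | A minimal partitioning sketch for a fresh deployment (converting the existing table needs a copy-and-cutover migration); the column list is abbreviated and the foreign keys from `schema.sql` are omitted for brevity: | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | CREATE TABLE billing_ledger_partitioned ( | ||||||
|  |   id          BIGSERIAL,                 -- stands in for the identity column | ||||||
|  |   account_id  BIGINT NOT NULL, | ||||||
|  |   amount      NUMERIC(38, 18) NOT NULL, | ||||||
|  |   asset_code  TEXT NOT NULL, | ||||||
|  |   entry_type  ledger_entry_type NOT NULL, | ||||||
|  |   description TEXT, | ||||||
|  |   created_at  TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   PRIMARY KEY (id, created_at)           -- partition key must be part of the PK | ||||||
|  | ) PARTITION BY RANGE (created_at); | ||||||
|  |  | ||||||
|  | -- One partition per month, created ahead of time by a maintenance job. | ||||||
|  | CREATE TABLE billing_ledger_2025_01 PARTITION OF billing_ledger_partitioned | ||||||
|  |   FOR VALUES FROM ('2025-01-01') TO ('2025-02-01'); | ||||||
|  | ``` | ||||||
|  |  | ||||||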
|  | --- | ||||||
|  |  | ||||||
|  | ## 14) Non-Goals | ||||||
|  |  | ||||||
|  | * Wallet custody and on-chain settlement are out of scope. | ||||||
|  | * SLA tracking and detailed observability (metrics/log schema) are not part of this spec. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 15) Acceptance Criteria | ||||||
|  |  | ||||||
|  | * Can represent services, groups, and providers with currency-specific pricing. | ||||||
|  | * Can route requests to runners by service or group. | ||||||
|  | * Can authorize usage via subscriptions, enforce spend limits, and record charges. | ||||||
|  | * Can reconstruct balances and audit via append-only ledger. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | **End of Spec** | ||||||
							
								
								
									
specs/billingmanager_research/conceptnote.md (new file, 225 lines)
							| @@ -0,0 +1,225 @@ | |||||||
|  |  | ||||||
|  | # Concept Note: Generic Billing & Tracking Framework | ||||||
|  |  | ||||||
|  | ## 1) Purpose | ||||||
|  |  | ||||||
|  | The model is designed to support a **flexible, generic, and auditable** billing environment that can be applied across diverse services and providers — from compute time billing to per-request API usage, across multiple currencies, with dynamic provider-specific overrides. | ||||||
|  |  | ||||||
|  | It is **not tied to a single business domain** — the same framework can be used for: | ||||||
|  |  | ||||||
|  | * Cloud compute time (per second) | ||||||
|  | * API transactions (per request) | ||||||
|  | * Data transfer charges | ||||||
|  | * Managed service subscriptions | ||||||
|  | * Brokered third-party service reselling | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 2) Key Concepts | ||||||
|  |  | ||||||
|  | ### 2.1 Accounts | ||||||
|  |  | ||||||
|  | An **account** represents an economic actor in the system — typically a customer or a service provider. | ||||||
|  |  | ||||||
|  | * Identified by a **public key** (for authentication & cryptographic signing). | ||||||
|  | * Every billing action traces back to an account. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 2.2 Currencies & Asset Codes | ||||||
|  |  | ||||||
|  | The system supports **multiple currencies** (crypto or fiat) via **asset codes**. | ||||||
|  |  | ||||||
|  | * Asset codes identify the unit of billing (e.g. `USDC-ETH`, `EUR`, `LND`). | ||||||
|  | * Currencies are **decoupled from services** so you can add or remove supported assets at any time. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 2.3 Services & Groups | ||||||
|  |  | ||||||
|  | * **Service** = a billable offering (e.g., "Speech-to-Text", "VM Hosting"). | ||||||
|  |  | ||||||
|  |   * Has a **billing mode** (`per_second` or `per_request`). | ||||||
|  |   * Has a **default price** and **default currency**. | ||||||
|  |   * Supports **multiple accepted currencies** with optional per-currency pricing overrides. | ||||||
|  |   * Has execution constraints (e.g. `max_request_seconds`). | ||||||
|  |   * Includes structured schemas for request payloads. | ||||||
|  |  | ||||||
|  | * **Service Group** = a logical grouping of services. | ||||||
|  |  | ||||||
|  |   * Groups make it easy to **bundle related services** and manage them together. | ||||||
|  |   * Providers can offer entire groups rather than individual services. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 2.4 Service Providers | ||||||
|  |  | ||||||
|  | A **service provider** is an **account** that offers services or service groups. | ||||||
|  | They can: | ||||||
|  |  | ||||||
|  | * Override **pricing** for their offered services (per currency). | ||||||
|  | * Route requests to their own **runners** (execution agents). | ||||||
|  | * Manage multiple **service groups** under one provider identity. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 2.5 Runners | ||||||
|  |  | ||||||
|  | A **runner** is an execution agent — a node, VM, or service endpoint that can fulfill requests. | ||||||
|  |  | ||||||
|  | * Identified by an **IPv6 address** (supports Mycelium or other overlay networks). | ||||||
|  | * Can be owned by one or multiple providers. | ||||||
|  | * Providers map **services/groups → runners** to define routing. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 2.6 Subscriptions | ||||||
|  |  | ||||||
|  | A **subscription** is **the authorization mechanism** for usage and spending control: | ||||||
|  |  | ||||||
|  | * Links an **account** to a **service** or **service group**. | ||||||
|  | * Defines **spending limits** (amount, currency, period: hour/day/month). | ||||||
|  | * Restricts which **providers** are allowed to serve the subscription. | ||||||
|  | * Uses a **secret** chosen by the subscriber — providers use this to claim charges. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 2.7 Requests | ||||||
|  |  | ||||||
|  | A **request** represents a single execution under a subscription: | ||||||
|  |  | ||||||
|  | * Tied to **account**, **subscription**, **provider**, **service**, and optionally **runner**. | ||||||
|  | * Has **status** (`pending`, `running`, `succeeded`, `failed`, `canceled`). | ||||||
|  | * Records start/end times for duration-based billing. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 2.8 Billing Ledger | ||||||
|  |  | ||||||
|  | The **ledger** is **append-only** — the source of truth for all charges and credits. | ||||||
|  |  | ||||||
|  | * Each entry records: | ||||||
|  |  | ||||||
|  |   * `amount` (positive = debit, negative = credit/refund) | ||||||
|  |   * `asset_code` | ||||||
|  |   * Links to `account`, `provider`, `service`, and/or `request` | ||||||
|  | * From the ledger, **balances** can be reconstructed at any time (see the sketch below). | ||||||
|  |  | ||||||
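|  | A minimal reconstruction sketch, assuming the `billing_ledger` table from `schema.sql` and a caller-supplied `:as_of` timestamp: | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Net position of one account in one currency as of a point in time, | ||||||
|  | -- computed purely from the append-only ledger (debits positive, credits negative). | ||||||
|  | SELECT COALESCE(SUM(amount), 0) AS balance | ||||||
|  | FROM billing_ledger | ||||||
|  | WHERE account_id = :account_id | ||||||
|  |   AND asset_code = :asset_code | ||||||
|  |   AND created_at <= :as_of; | ||||||
|  | ``` | ||||||
|  |  | ||||||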
|  | --- | ||||||
|  |  | ||||||
|  | ## 3) How Billing Works — Step by Step | ||||||
|  |  | ||||||
|  | ### 3.1 Setup | ||||||
|  |  | ||||||
|  | 1. **Define services** with default pricing & schemas. | ||||||
|  | 2. **Define currencies** and accepted currencies for services. | ||||||
|  | 3. **Group services** into service groups. | ||||||
|  | 4. **Onboard providers** (accounts) and associate them with service groups. | ||||||
|  | 5. **Assign runners** to services or groups for execution routing. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 3.2 Subscription Creation | ||||||
|  |  | ||||||
|  | 1. Customer **creates a subscription**: | ||||||
|  |  | ||||||
|  |    * Chooses service or service group. | ||||||
|  |    * Sets **spending limit** (amount, currency, period). | ||||||
|  |    * Chooses **secret**. | ||||||
|  |    * Selects **allowed providers**. | ||||||
|  | 2. Subscription is stored in DB. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 3.3 Request Execution | ||||||
|  |  | ||||||
|  | 1. Customer sends a request to broker/API with: | ||||||
|  |  | ||||||
|  |    * `subscription_id` | ||||||
|  |    * Target `service_id` | ||||||
|  |    * Payload + signature using account pubkey. | ||||||
|  | 2. Broker: | ||||||
|  |  | ||||||
|  |    * Validates **subscription active**. | ||||||
|  |    * Validates **provider allowed**. | ||||||
|  |    * Checks **spend limit** hasn’t been exceeded for current period. | ||||||
|  |    * Resolves **effective price** via: | ||||||
|  |  | ||||||
|  |      1. Provider override (currency-specific) | ||||||
|  |      2. Service accepted currency override | ||||||
|  |      3. Service default | ||||||
|  | 3. Broker selects **runner** from provider’s routing tables. | ||||||
|  | 4. Runner executes request and returns result. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ### 3.4 Billing Entry | ||||||
|  |  | ||||||
|  | 1. When the request completes: | ||||||
|  |  | ||||||
|  |    * If `per_second` mode → calculate `duration × rate`. | ||||||
|  |    * If `per_request` mode → apply flat rate. | ||||||
|  | 2. Broker **inserts ledger entry**: | ||||||
|  |  | ||||||
|  |    * Debit from customer account. | ||||||
|  |    * Credit to provider account (can be separate entries or aggregated); see the sketch after this list. | ||||||
|  | 3. Ledger is append-only — historical billing cannot be altered. | ||||||
|  |  | ||||||
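|  | A minimal sketch of recording that pair in one transaction, assuming both the customer and the provider are rows in `accounts`, and that provider earnings are tracked as a negative (credit) entry on the provider's own account per the sign convention above: | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | BEGIN; | ||||||
|  |  | ||||||
|  | -- Debit the customer for the completed request. | ||||||
|  | INSERT INTO billing_ledger (account_id, provider_id, service_id, request_id, | ||||||
|  |                             amount, asset_code, entry_type, description) | ||||||
|  | VALUES (:customer_account_id, :provider_id, :service_id, :request_id, | ||||||
|  |         :charge, :asset_code, 'debit', 'usage charge'); | ||||||
|  |  | ||||||
|  | -- Credit the provider's account with the same amount (negative per convention). | ||||||
|  | INSERT INTO billing_ledger (account_id, provider_id, service_id, request_id, | ||||||
|  |                             amount, asset_code, entry_type, description) | ||||||
|  | VALUES (:provider_account_id, :provider_id, :service_id, :request_id, | ||||||
|  |         -(:charge), :asset_code, 'credit', 'provider earnings'); | ||||||
|  |  | ||||||
|  | COMMIT; | ||||||
|  | ``` | ||||||
|  |  | ||||||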
|  | --- | ||||||
|  |  | ||||||
|  | ### 3.5 Balance & Tracking | ||||||
|  |  | ||||||
|  | * **Current balances** are a sum of all ledger entries per account+currency. | ||||||
|  | * Spend limits are enforced by **querying the ledger** for the current period before each charge. | ||||||
|  | * Audit trails are guaranteed via immutable ledger entries. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 4) Why This is Generic & Reusable | ||||||
|  |  | ||||||
|  | This design **decouples**: | ||||||
|  |  | ||||||
|  | * **Service definition** from **provider pricing** → multiple providers can sell the same service at different rates. | ||||||
|  | * **Execution agents** (runners) from **service definitions** → easy scaling or outsourcing of execution. | ||||||
|  | * **Billing rules** (per-second vs per-request) from **subscription limits** → same service can be sold in different billing modes. | ||||||
|  | * **Currencies** from the service → enabling multi-asset billing without changing the service definition. | ||||||
|  |  | ||||||
|  | Because of these separations, you can: | ||||||
|  |  | ||||||
|  | * Reuse the model for **compute**, **APIs**, **storage**, **SaaS features**, etc. | ||||||
|  | * Plug in different **payment backends** (on-chain, centralized payment processor, prepaid balance). | ||||||
|  | * Use the same model for **internal cost allocation** or **external customer billing**. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 5) Potential Extensions | ||||||
|  |  | ||||||
|  | * **Prepaid model**: enforce that ledger debits can’t exceed balance. | ||||||
|  | * **On-chain settlement**: periodically export ledger entries to blockchain transactions. | ||||||
|  | * **Discount models**: percentage or fixed-amount discounts per subscription. | ||||||
|  | * **Usage analytics**: aggregate requests/billing by time period, provider, or service. | ||||||
|  | * **SLAs**: link billing adjustments to performance metrics in requests. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 6) Conceptual Diagram — Billing Flow | ||||||
|  |  | ||||||
|  | ```mermaid | ||||||
|  | sequenceDiagram | ||||||
|  |     participant C as Customer Account | ||||||
|  |     participant B as Broker/API | ||||||
|  |     participant P as Provider | ||||||
|  |     participant R as Runner | ||||||
|  |     participant DB as Ledger DB | ||||||
|  |  | ||||||
|  |     C->>B: Request(service, subscription, payload, secret) | ||||||
|  |     B->>DB: Validate subscription & spend limit | ||||||
|  |     DB-->>B: OK + effective pricing | ||||||
|  |     B->>P: Forward request | ||||||
|  |     P->>R: Execute request | ||||||
|  |     R-->>P: Result + execution time | ||||||
|  |     P->>B: Return result | ||||||
|  |     B->>DB: Insert debit (customer) + credit (provider) | ||||||
|  |     DB-->>B: Ledger updated | ||||||
|  |     B-->>C: Return result + charge info | ||||||
|  | ``` | ||||||
							
								
								
									
specs/billingmanager_research/schema.sql (new file, 234 lines)
							| @@ -0,0 +1,234 @@ | |||||||
|  | -- Enable useful extensions (optional) | ||||||
|  | CREATE EXTENSION IF NOT EXISTS pgcrypto;   -- for digests/hashes if you want | ||||||
|  | CREATE EXTENSION IF NOT EXISTS btree_gist; -- for exclusion constraints, if needed | ||||||
|  |  | ||||||
|  | -- ========================= | ||||||
|  | -- Core: Accounts & Currency | ||||||
|  | -- ========================= | ||||||
|  |  | ||||||
|  | CREATE TABLE accounts ( | ||||||
|  |   id               BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   pubkey           BYTEA NOT NULL UNIQUE, | ||||||
|  |   display_name     TEXT, | ||||||
|  |   created_at       TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   CHECK (id >= 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | CREATE TABLE currencies ( | ||||||
|  |   asset_code       TEXT PRIMARY KEY,               -- e.g. "USDC-ETH", "EUR", "LND" | ||||||
|  |   name             TEXT NOT NULL, | ||||||
|  |   symbol           TEXT,                           -- e.g. "$", "€" | ||||||
|  |   decimals         INT  NOT NULL DEFAULT 2,        -- how many decimal places | ||||||
|  |   UNIQUE (name) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- ========================= | ||||||
|  | -- Services & Groups | ||||||
|  | -- ========================= | ||||||
|  |  | ||||||
|  | CREATE TYPE billing_mode AS ENUM ('per_second', 'per_request'); | ||||||
|  |  | ||||||
|  | CREATE TABLE services ( | ||||||
|  |   id                   BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   name                 TEXT NOT NULL UNIQUE, | ||||||
|  |   description          TEXT, | ||||||
|  |   default_billing_mode billing_mode NOT NULL, | ||||||
|  |   default_price        NUMERIC(38, 18) NOT NULL,   -- default price in "unit currency" (see accepted currencies) | ||||||
|  |   default_currency     TEXT NOT NULL REFERENCES currencies(asset_code) ON UPDATE CASCADE, | ||||||
|  |   max_request_seconds  INTEGER,                    -- nullable means no cap | ||||||
|  |   schema_heroscript    TEXT, | ||||||
|  |   schema_json          JSONB, | ||||||
|  |   created_at           TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   CHECK (id >= 0), | ||||||
|  |   CHECK (default_price >= 0), | ||||||
|  |   CHECK (max_request_seconds IS NULL OR max_request_seconds > 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Accepted currencies for a service (subset + optional specific price per currency) | ||||||
|  | CREATE TABLE service_accepted_currencies ( | ||||||
|  |   service_id      BIGINT NOT NULL REFERENCES services(id) ON DELETE CASCADE, | ||||||
|  |   asset_code      TEXT   NOT NULL REFERENCES currencies(asset_code) ON UPDATE CASCADE, | ||||||
|  |   price_override  NUMERIC(38, 18),                 -- if set, overrides default_price for this currency | ||||||
|  |   billing_mode_override billing_mode,              -- if set, overrides default_billing_mode | ||||||
|  |   PRIMARY KEY (service_id, asset_code), | ||||||
|  |   CHECK (price_override IS NULL OR price_override >= 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | CREATE TABLE service_groups ( | ||||||
|  |   id               BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   name             TEXT NOT NULL UNIQUE, | ||||||
|  |   description      TEXT, | ||||||
|  |   created_at       TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   CHECK (id >= 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | CREATE TABLE service_group_members ( | ||||||
|  |   group_id   BIGINT NOT NULL REFERENCES service_groups(id) ON DELETE CASCADE, | ||||||
|  |   service_id BIGINT NOT NULL REFERENCES services(id)       ON DELETE RESTRICT, | ||||||
|  |   PRIMARY KEY (group_id, service_id) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- ========================= | ||||||
|  | -- Providers, Runners, Routing | ||||||
|  | -- ========================= | ||||||
|  |  | ||||||
|  | CREATE TABLE service_providers ( | ||||||
|  |   id               BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   account_id       BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE, -- provider is an account | ||||||
|  |   name             TEXT NOT NULL, | ||||||
|  |   description      TEXT, | ||||||
|  |   created_at       TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   UNIQUE (name), | ||||||
|  |   CHECK (id >= 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Providers can offer groups (which imply their services) | ||||||
|  | CREATE TABLE provider_service_groups ( | ||||||
|  |   provider_id BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE, | ||||||
|  |   group_id    BIGINT NOT NULL REFERENCES service_groups(id)    ON DELETE CASCADE, | ||||||
|  |   PRIMARY KEY (provider_id, group_id) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Providers may set per-service overrides (price/mode/max seconds) (optionally per currency) | ||||||
|  | CREATE TABLE provider_service_overrides ( | ||||||
|  |   provider_id     BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE, | ||||||
|  |   service_id      BIGINT NOT NULL REFERENCES services(id)          ON DELETE CASCADE, | ||||||
|  |   asset_code      TEXT   REFERENCES currencies(asset_code) ON UPDATE CASCADE,  -- in the PK below, so effectively NOT NULL; a currency-agnostic override needs a sentinel or a partial unique index | ||||||
|  |   price_override  NUMERIC(38, 18), | ||||||
|  |   billing_mode_override billing_mode, | ||||||
|  |   max_request_seconds_override INTEGER, | ||||||
|  |   PRIMARY KEY (provider_id, service_id, asset_code), | ||||||
|  |   CHECK (price_override IS NULL OR price_override >= 0), | ||||||
|  |   CHECK (max_request_seconds_override IS NULL OR max_request_seconds_override > 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Runners | ||||||
|  | CREATE TABLE runners ( | ||||||
|  |   id           BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   address      INET NOT NULL,            -- IPv6 (INET supports both IPv4/IPv6; require v6 via CHECK below if you like) | ||||||
|  |   name         TEXT NOT NULL, | ||||||
|  |   description  TEXT, | ||||||
|  |   pubkey       BYTEA,                    -- optional | ||||||
|  |   created_at   TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   UNIQUE (address), | ||||||
|  |   CHECK (id >= 0), | ||||||
|  |   CHECK (family(address) = 6)            -- ensure IPv6 | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Runner ownership: a runner can be owned by multiple providers | ||||||
|  | CREATE TABLE runner_owners ( | ||||||
|  |   runner_id    BIGINT NOT NULL REFERENCES runners(id)           ON DELETE CASCADE, | ||||||
|  |   provider_id  BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE, | ||||||
|  |   PRIMARY KEY (runner_id, provider_id) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Routing: link providers' services to specific runners | ||||||
|  | CREATE TABLE provider_service_runners ( | ||||||
|  |   provider_id  BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE, | ||||||
|  |   service_id   BIGINT NOT NULL REFERENCES services(id)          ON DELETE CASCADE, | ||||||
|  |   runner_id    BIGINT NOT NULL REFERENCES runners(id)           ON DELETE CASCADE, | ||||||
|  |   PRIMARY KEY (provider_id, service_id, runner_id) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Routing: link providers' service groups to runners | ||||||
|  | CREATE TABLE provider_service_group_runners ( | ||||||
|  |   provider_id  BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE, | ||||||
|  |   group_id     BIGINT NOT NULL REFERENCES service_groups(id)    ON DELETE CASCADE, | ||||||
|  |   runner_id    BIGINT NOT NULL REFERENCES runners(id)           ON DELETE CASCADE, | ||||||
|  |   PRIMARY KEY (provider_id, group_id, runner_id) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- ========================= | ||||||
|  | -- Subscriptions & Spend Control | ||||||
|  | -- ========================= | ||||||
|  |  | ||||||
|  | CREATE TYPE spend_period AS ENUM ('hour', 'day', 'month'); | ||||||
|  |  | ||||||
|  | -- A subscription ties an account to a specific service OR a service group, with spend limits and allowed providers | ||||||
|  | CREATE TABLE subscriptions ( | ||||||
|  |   id                   BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   account_id           BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE, | ||||||
|  |   service_id           BIGINT REFERENCES services(id) ON DELETE CASCADE, | ||||||
|  |   group_id             BIGINT REFERENCES service_groups(id) ON DELETE CASCADE, | ||||||
|  |   secret               BYTEA NOT NULL,             -- caller-chosen secret (consider storing a hash instead) | ||||||
|  |   subscription_data    JSONB,                      -- arbitrary client-supplied info | ||||||
|  |   limit_amount         NUMERIC(38, 18),            -- allowed spend in the selected currency per period | ||||||
|  |   limit_currency       TEXT REFERENCES currencies(asset_code) ON UPDATE CASCADE, | ||||||
|  |   limit_period         spend_period,               -- period for the limit | ||||||
|  |   active               BOOLEAN NOT NULL DEFAULT TRUE, | ||||||
|  |   created_at           TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   -- Ensure exactly one of service_id or group_id | ||||||
|  |   CHECK ( (service_id IS NOT NULL) <> (group_id IS NOT NULL) ), | ||||||
|  |   CHECK (limit_amount IS NULL OR limit_amount >= 0), | ||||||
|  |   CHECK (id >= 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Providers that are allowed to serve under a subscription | ||||||
|  | CREATE TABLE subscription_providers ( | ||||||
|  |   subscription_id BIGINT NOT NULL REFERENCES subscriptions(id)    ON DELETE CASCADE, | ||||||
|  |   provider_id     BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE CASCADE, | ||||||
|  |   PRIMARY KEY (subscription_id, provider_id) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- ========================= | ||||||
|  | -- Usage, Requests & Billing | ||||||
|  | -- ========================= | ||||||
|  |  | ||||||
|  | -- A request lifecycle record (optional but useful for auditing and max duration enforcement) | ||||||
|  | CREATE TYPE request_status AS ENUM ('pending', 'running', 'succeeded', 'failed', 'canceled'); | ||||||
|  |  | ||||||
|  | CREATE TABLE requests ( | ||||||
|  |   id               BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   account_id       BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE, | ||||||
|  |   subscription_id  BIGINT NOT NULL REFERENCES subscriptions(id) ON DELETE RESTRICT, | ||||||
|  |   provider_id      BIGINT NOT NULL REFERENCES service_providers(id) ON DELETE RESTRICT, | ||||||
|  |   service_id       BIGINT NOT NULL REFERENCES services(id) ON DELETE RESTRICT, | ||||||
|  |   runner_id        BIGINT REFERENCES runners(id) ON DELETE SET NULL, | ||||||
|  |   request_schema   JSONB,                         -- concrete task payload (conforms to schema_json/heroscript) | ||||||
|  |   started_at       TIMESTAMPTZ, | ||||||
|  |   ended_at         TIMESTAMPTZ, | ||||||
|  |   status           request_status NOT NULL DEFAULT 'pending', | ||||||
|  |   created_at       TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   CHECK (id >= 0), | ||||||
|  |   CHECK (ended_at IS NULL OR started_at IS NULL OR ended_at >= started_at) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Billing ledger (debits/credits). Positive amount = debit to account (charge). Negative = credit/refund. | ||||||
|  | CREATE TYPE ledger_entry_type AS ENUM ('debit', 'credit', 'adjustment'); | ||||||
|  |  | ||||||
|  | CREATE TABLE billing_ledger ( | ||||||
|  |   id             BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, | ||||||
|  |   account_id     BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE, | ||||||
|  |   provider_id    BIGINT REFERENCES service_providers(id) ON DELETE SET NULL, | ||||||
|  |   service_id     BIGINT REFERENCES services(id)          ON DELETE SET NULL, | ||||||
|  |   request_id     BIGINT REFERENCES requests(id)          ON DELETE SET NULL, | ||||||
|  |   amount         NUMERIC(38, 18) NOT NULL,               -- positive for debit, negative for credit | ||||||
|  |   asset_code     TEXT NOT NULL REFERENCES currencies(asset_code) ON UPDATE CASCADE, | ||||||
|  |   entry_type     ledger_entry_type NOT NULL, | ||||||
|  |   description    TEXT, | ||||||
|  |   created_at     TIMESTAMPTZ NOT NULL DEFAULT now(), | ||||||
|  |   CHECK (id >= 0) | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | -- Optional: running balances per account/currency (materialized view or real-time view) | ||||||
|  | -- This is a plain view; for performance, you might maintain a cached table. | ||||||
|  | CREATE VIEW account_balances AS | ||||||
|  | SELECT | ||||||
|  |   account_id, | ||||||
|  |   asset_code, | ||||||
|  |   SUM(amount) AS balance | ||||||
|  | FROM billing_ledger | ||||||
|  | GROUP BY account_id, asset_code; | ||||||
|  |  | ||||||
|  | -- ========================= | ||||||
|  | -- Helpful Indexes | ||||||
|  | -- ========================= | ||||||
|  |  | ||||||
|  | CREATE INDEX idx_services_default_currency ON services(default_currency); | ||||||
|  | CREATE INDEX idx_service_accepted_currencies_service ON service_accepted_currencies(service_id); | ||||||
|  | CREATE INDEX idx_provider_overrides_service ON provider_service_overrides(service_id); | ||||||
|  | CREATE INDEX idx_requests_account ON requests(account_id); | ||||||
|  | CREATE INDEX idx_requests_provider ON requests(provider_id); | ||||||
|  | CREATE INDEX idx_requests_service ON requests(service_id); | ||||||
|  | CREATE INDEX idx_billing_account_currency ON billing_ledger(account_id, asset_code); | ||||||
|  | CREATE INDEX idx_subscriptions_account_active ON subscriptions(account_id) WHERE active; | ||||||
							
								
								
									
specs/billingmanager_research/summary.md (new file, 266 lines)
							| @@ -0,0 +1,266 @@ | |||||||
|  | # Billing Logic — Whiteboard Version (for Devs) | ||||||
|  |  | ||||||
|  | ## 1) Inputs You Always Need | ||||||
|  |  | ||||||
|  | * `account_id`, `subscription_id` | ||||||
|  | * `service_id` (or group → resolved to a service at dispatch) | ||||||
|  | * `provider_id`, `asset_code` | ||||||
|  | * `payload` (validated against service schema) | ||||||
|  | * (Optional) `runner_id` | ||||||
|  | * Idempotency key for the request (client-provided) | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 2) Gatekeeping (Hard Checks) | ||||||
|  |  | ||||||
|  | 1. **Subscription** | ||||||
|  |  | ||||||
|  | * Must be `active`. | ||||||
|  | * Must target **exactly one** of {service, group}. | ||||||
|  | * If group: ensure `service_id` is a member. | ||||||
|  |  | ||||||
|  | 2. **Provider Allowlist** | ||||||
|  |  | ||||||
|  | * If `subscription_providers` exists → `provider_id` must be listed. | ||||||
|  |  | ||||||
|  | 3. **Spend Limit** (if set) | ||||||
|  |  | ||||||
|  | * Compute window by `limit_period` (`hour`/`day`/`month`, UTC unless tenant TZ). | ||||||
|  | * Current period spend = `SUM(ledger.amount WHERE account & currency & period)`. | ||||||
|  | * `current_spend + estimated_charge ≤ limit_amount`. | ||||||
|  |  | ||||||
|  | 4. **Max Duration** (effective; see §3): | ||||||
|  |  | ||||||
|  | * If billing mode is `per_second`, reject if requested/max exceeds effective cap. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 3) Effective Pricing (Single Resolution Function) | ||||||
|  |  | ||||||
|  | Inputs: `provider_id`, `service_id`, `asset_code` | ||||||
|  |  | ||||||
|  | Precedence: | ||||||
|  |  | ||||||
|  | 1. `provider_service_overrides` for `(service_id, asset_code)` | ||||||
|  | 2. `service_accepted_currencies` for `(service_id, asset_code)` | ||||||
|  | 3. `services` defaults | ||||||
|  |  | ||||||
|  | Outputs: | ||||||
|  |  | ||||||
|  | * `effective_billing_mode ∈ {per_request, per_second}` | ||||||
|  | * `effective_price` (NUMERIC) | ||||||
|  | * `effective_max_request_seconds` (nullable) | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 4) Request Lifecycle (States) | ||||||
|  |  | ||||||
|  | * `pending` → `running` → (`succeeded` | `failed` | `canceled`) | ||||||
|  | * Timestamps: set `started_at` on `running`, `ended_at` on terminal states. | ||||||
|  | * Enforce `ended_at ≥ started_at` and `duration ≤ effective_max_request_seconds` (if set); a trigger sketch follows this list. | ||||||
|  |  | ||||||
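|  | A minimal trigger sketch for the duration cap, assuming the `requests` and `services` tables from `schema.sql`; it checks only the service-level `max_request_seconds` (resolving a provider-level override would reuse the pricing resolver from §11): | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | CREATE OR REPLACE FUNCTION enforce_max_duration() RETURNS trigger AS $$ | ||||||
|  | DECLARE | ||||||
|  |   max_sec INTEGER; | ||||||
|  | BEGIN | ||||||
|  |   SELECT max_request_seconds INTO max_sec FROM services WHERE id = NEW.service_id; | ||||||
|  |   IF max_sec IS NOT NULL | ||||||
|  |      AND NEW.started_at IS NOT NULL AND NEW.ended_at IS NOT NULL | ||||||
|  |      AND EXTRACT(EPOCH FROM (NEW.ended_at - NEW.started_at)) > max_sec THEN | ||||||
|  |     RAISE EXCEPTION 'request % exceeds max_request_seconds (%)', NEW.id, max_sec; | ||||||
|  |   END IF; | ||||||
|  |   RETURN NEW; | ||||||
|  | END; | ||||||
|  | $$ LANGUAGE plpgsql; | ||||||
|  |  | ||||||
|  | CREATE TRIGGER trg_requests_max_duration | ||||||
|  |   BEFORE UPDATE OF status, ended_at ON requests | ||||||
|  |   FOR EACH ROW EXECUTE FUNCTION enforce_max_duration(); | ||||||
|  | ``` | ||||||
|  |  | ||||||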
|  | --- | ||||||
|  |  | ||||||
|  | ## 5) Charging Rules | ||||||
|  |  | ||||||
|  | ### A) Per Request | ||||||
|  |  | ||||||
|  | ``` | ||||||
|  | charge = effective_price | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### B) Per Second | ||||||
|  |  | ||||||
|  | ``` | ||||||
|  | duration_seconds = ceil(extract(epoch from (ended_at - started_at))) | ||||||
|  | charge = duration_seconds * effective_price | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | * Cap with `effective_max_request_seconds` if present. | ||||||
|  | * If the request ends early or fails before `started_at` is set: charge = 0. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 6) Idempotency & Atomicity | ||||||
|  |  | ||||||
|  | * **Idempotency key** per `(account_id, subscription_id, provider_id, service_id, request_external_id)`; store it on `requests` and enforce a unique index (DDL sketch after this list). | ||||||
|  | * **Single transaction** to: | ||||||
|  |  | ||||||
|  |   1. finalize `REQUESTS` status + timestamps, | ||||||
|  |   2. insert **one** debit entry into `billing_ledger`. | ||||||
|  | * Never mutate ledger entries; use compensating **credit** entries for adjustments/refunds. | ||||||
|  |  | ||||||
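|  | A minimal DDL sketch for that key; this is an assumption, since `schema.sql` does not define the column yet, and the single `idempotency_key` column is what the `ON CONFLICT` insert in §8 relies on: | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Hypothetical addition to requests needed for the §8 insert to work as written. | ||||||
|  | ALTER TABLE requests | ||||||
|  |   ADD COLUMN idempotency_key TEXT, | ||||||
|  |   ADD CONSTRAINT uq_requests_idempotency UNIQUE (idempotency_key); | ||||||
|  | ``` | ||||||
|  |  | ||||||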
|  | --- | ||||||
|  |  | ||||||
|  | ## 7) Spend-Limit Enforcement (Before Charging) | ||||||
|  |  | ||||||
|  | Pseudocode (SQL-ish): | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | WITH spend_window AS ( | ||||||
|  |   SELECT tstzrange(period_start(:limit_period), period_end(:limit_period)) AS w | ||||||
|  | ), | ||||||
|  | spent AS ( | ||||||
|  |   SELECT COALESCE(SUM(amount), 0) AS total | ||||||
|  |   FROM billing_ledger, spend_window | ||||||
|  |   WHERE account_id = :account_id | ||||||
|  |     AND asset_code = :asset_code | ||||||
|  |     AND created_at <@ spend_window.w | ||||||
|  | ), | ||||||
|  | limit_check AS ( | ||||||
|  |   SELECT (spent.total + :estimated_charge) <= :limit_amount AS ok FROM spent | ||||||
|  | ) | ||||||
|  | SELECT ok FROM limit_check; | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | * If not ok → reject before dispatch, or allow but **set hard cap** on max seconds and auto-stop at limit. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 8) Suggested DB Operations (Happy Path) | ||||||
|  |  | ||||||
|  | 1. **Create request** | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | INSERT INTO requests (...) | ||||||
|  | VALUES (...) | ||||||
|  | ON CONFLICT (idempotency_key) DO NOTHING | ||||||
|  | RETURNING id; | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | 2. **Start execution** | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | UPDATE requests | ||||||
|  | SET status='running', started_at=now() | ||||||
|  | WHERE id=:id AND status='pending'; | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | 3. **Finish & bill** (single transaction) | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | BEGIN; | ||||||
|  |  | ||||||
|  | -- the UPDATE takes a row lock, and the status='running' guard prevents double-billing | ||||||
|  | UPDATE requests | ||||||
|  | SET status=:final_status, ended_at=now() | ||||||
|  | WHERE id=:id AND status='running' | ||||||
|  | RETURNING started_at, ended_at; | ||||||
|  |  | ||||||
|  | -- if no row was returned, the request was already finalized: skip the ledger insert | ||||||
|  | -- compute the charge in the app (see §5) and re-check the spend window here | ||||||
|  |  | ||||||
|  | INSERT INTO billing_ledger ( | ||||||
|  |   account_id, provider_id, service_id, request_id, | ||||||
|  |   amount, asset_code, entry_type, description | ||||||
|  | ) VALUES ( | ||||||
|  |   :account_id, :provider_id, :service_id, :id, | ||||||
|  |   :charge, :asset_code, 'debit', :desc | ||||||
|  | ); | ||||||
|  |  | ||||||
|  | COMMIT; | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 9) Balances & Reporting | ||||||
|  |  | ||||||
|  | * **Current balance** = `SUM(billing_ledger.amount) GROUP BY account_id, asset_code`. | ||||||
|  | * Keep a **view** or **materialized view** for balances (sketch below); refresh asynchronously if needed. | ||||||
|  | * Never rely on a cached balance for hard checks; re-check within the billing transaction if **prepaid** semantics are required. | ||||||
|  |  | ||||||
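|  | A minimal balances view sketch, assuming the sign convention of debits positive and credits negative (flip it if the ledger uses the opposite): | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Net position per account and asset, derived from the append-only ledger. | ||||||
|  | CREATE OR REPLACE VIEW account_balances AS | ||||||
|  | SELECT account_id, | ||||||
|  |        asset_code, | ||||||
|  |        SUM(amount) AS balance | ||||||
|  | FROM billing_ledger | ||||||
|  | GROUP BY account_id, asset_code; | ||||||
|  |  | ||||||
|  | -- Materialized variant for large ledgers (CONCURRENTLY needs a unique index): | ||||||
|  | -- CREATE MATERIALIZED VIEW account_balances_mat AS SELECT * FROM account_balances; | ||||||
|  | -- REFRESH MATERIALIZED VIEW CONCURRENTLY account_balances_mat; | ||||||
|  | ``` | ||||||
|  |  | ||||||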
|  | --- | ||||||
|  |  | ||||||
|  | ## 10) Error & Edge Rules | ||||||
|  |  | ||||||
|  | * If the runner fails before reaching `running` → no charge. | ||||||
|  | * If the runner starts and then fails: | ||||||
|  |  | ||||||
|  |   * `per_second`: bill the actual seconds used (can be 0). | ||||||
|  |   * `per_request`: the default is **no charge** unless policy says otherwise; if partial charges apply, document them. | ||||||
|  | * Partial refunds/adjustments → insert **negative** ledger entries (type `credit`/`adjustment`) tied to the original `request_id`; a sketch follows below. | ||||||
|  |  | ||||||
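|  | A sketch of a compensating credit entry, mirroring the happy-path insert in §8 and assuming negative amounts represent credits (`:refund_amount` is a placeholder bound by the app): | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Refund part of an earlier charge without mutating the original debit row. | ||||||
|  | INSERT INTO billing_ledger ( | ||||||
|  |   account_id, provider_id, service_id, request_id, | ||||||
|  |   amount, asset_code, entry_type, description | ||||||
|  | ) VALUES ( | ||||||
|  |   :account_id, :provider_id, :service_id, :request_id, | ||||||
|  |   -:refund_amount, :asset_code, 'credit', 'partial refund for request' | ||||||
|  | ); | ||||||
|  | ``` | ||||||
|  |  | ||||||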
|  | --- | ||||||
|  |  | ||||||
|  | ## 11) Minimal Pricing Resolver (Sketch) | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | WITH p AS ( | ||||||
|  |   SELECT price_override AS price, | ||||||
|  |          billing_mode_override AS mode, | ||||||
|  |          max_request_seconds_override AS maxsec | ||||||
|  |   FROM provider_service_overrides | ||||||
|  |   WHERE provider_id = :pid AND service_id = :sid AND asset_code = :asset | ||||||
|  |   LIMIT 1 | ||||||
|  | ), | ||||||
|  | sac AS ( | ||||||
|  |   SELECT price_override AS price, | ||||||
|  |          billing_mode_override AS mode | ||||||
|  |   FROM service_accepted_currencies | ||||||
|  |   WHERE service_id = :sid AND asset_code = :asset | ||||||
|  |   LIMIT 1 | ||||||
|  | ), | ||||||
|  | svc AS ( | ||||||
|  |   SELECT default_price AS price, | ||||||
|  |          default_billing_mode AS mode, | ||||||
|  |          max_request_seconds AS maxsec | ||||||
|  |   FROM services WHERE id = :sid | ||||||
|  | ) | ||||||
|  | SELECT | ||||||
|  |   -- scalar subqueries fall through to the next level when an override row is missing | ||||||
|  |   COALESCE((SELECT price FROM p), (SELECT price FROM sac), svc.price) AS price, | ||||||
|  |   COALESCE((SELECT mode  FROM p), (SELECT mode  FROM sac), svc.mode)  AS mode, | ||||||
|  |   COALESCE((SELECT maxsec FROM p), svc.maxsec)                        AS max_seconds | ||||||
|  | FROM svc; | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 12) Mermaid — Decision Trees | ||||||
|  |  | ||||||
|  | ### Pricing & Duration | ||||||
|  |  | ||||||
|  | ```mermaid | ||||||
|  | flowchart TD | ||||||
|  |     A[provider_id, service_id, asset_code] --> B{Provider override exists?} | ||||||
|  |     B -- yes --> P[Use provider price/mode/max] | ||||||
|  |     B -- no --> C{Service currency override?} | ||||||
|  |     C -- yes --> S[Use service currency price/mode] | ||||||
|  |     C -- no --> D[Use service defaults] | ||||||
|  |     P --> OUT[effective price/mode/max] | ||||||
|  |     S --> OUT | ||||||
|  |     D --> OUT | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | ### Spend Check & Charge | ||||||
|  |  | ||||||
|  | ```mermaid | ||||||
|  | flowchart TD | ||||||
|  |     S[Has subscription limit?] -->|No| D1[Dispatch] | ||||||
|  |     S -->|Yes| C{current_spend + est_charge <= limit?} | ||||||
|  |     C -->|No| REJ[Reject or cap duration] | ||||||
|  |     C -->|Yes| D1[Dispatch] | ||||||
|  |     D1 --> RUN[Run request] | ||||||
|  |     RUN --> DONE[Finalize + insert ledger] | ||||||
|  | ``` | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | ## 13) Security Posture | ||||||
|  |  | ||||||
|  | * Store a **hash of the subscription secret**; compare hashes on use (sketch below). | ||||||
|  | * Have clients sign requests with their account key; verify the signature against the **account pubkey** before dispatch. | ||||||
|  | * Limit **request schema** to validated fields; reject unknowns. | ||||||
|  | * Enforce **IPv6** for runners where required. | ||||||
|  |  | ||||||
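|  | A sketch of the secret-hash comparison using the pgcrypto extension; `subscriptions.secret_hash` is an assumed column, and a keyed or salted hash (or hashing in the application layer) would be stronger in practice: | ||||||
|  |  | ||||||
|  | ```sql | ||||||
|  | -- Assumes: CREATE EXTENSION IF NOT EXISTS pgcrypto; | ||||||
|  | -- Store only the hash at subscription creation time. | ||||||
|  | UPDATE subscriptions | ||||||
|  | SET secret_hash = encode(digest(:secret, 'sha256'), 'hex') | ||||||
|  | WHERE id = :subscription_id; | ||||||
|  |  | ||||||
|  | -- On use, compare hashes instead of raw secrets. | ||||||
|  | SELECT EXISTS ( | ||||||
|  |   SELECT 1 FROM subscriptions | ||||||
|  |   WHERE id = :subscription_id | ||||||
|  |     AND secret_hash = encode(digest(:presented_secret, 'sha256'), 'hex') | ||||||
|  | ) AS secret_ok; | ||||||
|  | ``` | ||||||
|  |  | ||||||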
|  | --- | ||||||
|  |  | ||||||
|  | ## 14) What To Implement First | ||||||
|  |  | ||||||
|  | 1. Pricing resolver (single function). | ||||||
|  | 2. Spend-window checker (single query). | ||||||
|  | 3. Request lifecycle + idempotency. | ||||||
|  | 4. Ledger write (append-only) + balances view. | ||||||
|  |  | ||||||
|  | Everything else layers on top. | ||||||
|  |  | ||||||
|  | --- | ||||||
|  |  | ||||||
| @@ -24,16 +24,6 @@ pub enum CurrencyType { | |||||||
| 	custom | 	custom | ||||||
| } | } | ||||||
|  |  | ||||||
| pub struct Price { |  | ||||||
| pub mut: |  | ||||||
| 	base_amount          f64 // Using f64 for Decimal |  | ||||||
| 	base_currency        string |  | ||||||
| 	display_currency     string |  | ||||||
| 	display_amount       f64 // Using f64 for Decimal |  | ||||||
| 	formatted_display    string |  | ||||||
| 	conversion_rate      f64 // Using f64 for Decimal |  | ||||||
| 	conversion_timestamp u64 // Unix timestamp |  | ||||||
| } |  | ||||||
|  |  | ||||||
| pub struct MarketplaceCurrencyConfig { | pub struct MarketplaceCurrencyConfig { | ||||||
| pub mut: | pub mut: | ||||||
|   | |||||||