This commit is contained in:
2025-08-30 11:26:36 +02:00
parent 6daffaeb94
commit 79d2bb49f9
27 changed files with 2602 additions and 266 deletions

View File

@@ -0,0 +1,73 @@
## `crypto.blake3` Module
```v
fn sum256(data []u8) []u8
```
Returns the Blake3 256-bit hash of the provided data.
```v
fn sum_derive_key256(context []u8, key_material []u8) []u8
```
Computes the Blake3 256-bit derived-key hash based on the context and key material.
```v
fn sum_keyed256(data []u8, key []u8) []u8
```
Returns the Blake3 256-bit keyed hash of the data using the specified key.
---
### Digest-Based API
```v
fn Digest.new_derive_key_hash(context []u8) !Digest
```
Initializes a `Digest` struct for creating a Blake3 derived-key hash, using the provided context.
```v
fn Digest.new_hash() !Digest
```
Initializes a `Digest` struct for a standard (unkeyed) Blake3 hash.
```v
fn Digest.new_keyed_hash(key []u8) !Digest
```
Initializes a `Digest` struct for a keyed Blake3 hash, with the given key.
---
### `Digest` Methods
```v
fn (mut d Digest) write(data []u8) !
```
Feeds additional data bytes into the ongoing hash computation.
```v
fn (mut d Digest) checksum(size u64) []u8
```
Finalizes the hash and returns the resulting output.
* The `size` parameter specifies the number of output bytes—commonly `32` for a 256-bit digest, but can be up to `2**64`.
---
### Recommended Usage (in V)
```v
import crypto.blake3
mut hasher := crypto.blake3.Digest.new_hash() or { panic(err) }
hasher.write(data) or { panic(err) }
digest := hasher.checksum(24) // returns a []u8 of length 24 (192 bits)
```

View File

@@ -1,228 +0,0 @@
module hero_db
import json
import freeflowuniverse.herolib.clients.postgresql_client
import db.pg
import freeflowuniverse.herolib.core.texttools
// Generic database interface for Hero root objects
// HeroDB[T] persists objects of type T as jsonb rows in Postgres, with one
// extra varchar column per field marked @[index] so those fields can be
// queried directly in SQL.
pub struct HeroDB[T] {
pub mut:
// open Postgres connection (obtained via postgresql_client in new[T]())
db pg.DB
// target table, derived from T's type/module name in new[T]()
table_name string
}
// new creates a new HeroDB instance for a specific type T
// The table name defaults to the pluralized snake_case of T's name; when the
// type name is module-qualified ('mod.sub.Type'), it is prefixed with the
// parent module segment instead.
pub fn new[T]() !HeroDB[T] {
mut table_name := '${texttools.snake_case(T.name)}s'
// Map dirname from module path
module_path := T.name.split('.')
if module_path.len >= 2 {
dirname := texttools.snake_case(module_path[module_path.len - 2])
// NOTE(review): snake_case(T.name) here still covers the full dotted name;
// presumably only the last segment was intended — verify against callers.
table_name = '${dirname}_${texttools.snake_case(T.name)}'
}
// Connect through the shared postgresql client factory.
mut dbclient := postgresql_client.get()!
mut dbcl := dbclient.db() or { return error('Failed to connect to database') }
return HeroDB[T]{
db: dbcl
table_name: table_name
}
}
// ensure_table creates the database table with proper schema for type T
// Builds (but currently does NOT execute — the exec calls are commented out)
// the CREATE TABLE / CREATE INDEX statements: one varchar(255) column per
// @[index] field, plus a jsonb `data` column and created_at/updated_at
// timestamps.
pub fn (mut self HeroDB[T]) ensure_table() ! {
// Get index fields from struct reflection
index_fields := self.get_index_fields()
// Build index column definitions
mut index_cols := []string{}
for field in index_fields {
index_cols << '${field} varchar(255)'
}
// Create table with JSON storage
create_sql := '
CREATE TABLE IF NOT EXISTS ${self.table_name} (
id serial PRIMARY KEY,
${index_cols.join(', ')},
data jsonb NOT NULL,
created_at timestamp DEFAULT CURRENT_TIMESTAMP,
updated_at timestamp DEFAULT CURRENT_TIMESTAMP
)
' // self.db.exec(create_sql)!
// Create indexes on index fields
// NOTE(review): index_sql is built but never executed (exec commented out);
// V flags unused variables — confirm this scaffolding is intentional.
for field in index_fields {
index_sql := 'CREATE INDEX IF NOT EXISTS idx_${self.table_name}_${field} ON ${self.table_name}(${field})'
// self.db.exec(index_sql)!
}
}
// Get index fields marked with @[index] from struct
// Uses V compile-time reflection ($for) to collect the snake_case names of
// all fields of T carrying the @[index] attribute.
fn (self HeroDB[T]) get_index_fields() []string {
mut fields := []string{}
$for field in T.fields {
if field.attrs.contains('index') {
fields << texttools.snake_case(field.name)
}
}
return fields
}
// save stores the object T in the database, updating if it already exists
// Serializes obj to JSON and upserts it keyed by its @[index] field values.
// NOTE(review): WORK IN PROGRESS — the update path panics ('sd111') and both
// UPDATE/INSERT exec calls are commented out, so nothing is persisted yet.
// SECURITY: the SELECT/INSERT SQL is built by string interpolation of field
// values; switch to parameterized queries (exec_param) before exposing this
// to untrusted input.
pub fn (mut self HeroDB[T]) save(obj T) ! {
// Get index values from object
index_data := self.extract_index_values(obj)
// Serialize to JSON
json_data := json.encode_pretty(obj)
// Check if object already exists
mut query := 'SELECT id FROM ${self.table_name} WHERE '
mut params := []string{}
// Build WHERE clause for unique lookup
for key, value in index_data {
params << '${key} = \'${value}\''
}
query += params.join(' AND ')
existing := self.db.exec(query)!
if existing.len > 0 {
// Update existing record
id_val := existing[0].vals[0] or { return error('no id') }
// id := id_val.int()
println('Updating existing record with ID: ${id_val}')
// Deliberate dead-stop while the update path is unfinished.
if true {
panic('sd111')
}
// update_sql := '
// UPDATE ${self.table_name}
// SET data = \$1, updated_at = CURRENT_TIMESTAMP
// WHERE id = \$2
// '
// self.db_client.db()!.exec_param(update_sql, [json_data, id.str()])!
} else {
// Insert new record
mut columns := []string{}
mut values := []string{}
// Add index columns
for key, value in index_data {
columns << key
values << "'${value}'"
}
// Add JSON data
columns << 'data'
values << "'${json_data}'"
insert_sql := '
INSERT INTO ${self.table_name} (${columns.join(', ')})
VALUES (${values.join(', ')})
' // self.db.exec(insert_sql)!
}
}
// get_by_index retrieves an object T by its index values
// Looks up a single row whose @[index] columns match ALL of index_values and
// (eventually) decodes its jsonb `data` column into T.
// NOTE(review): WORK IN PROGRESS — after printing the raw JSON it panics
// ('sd2221'); the json.decode path is commented out, so `return T{}` is
// unreachable scaffolding.
pub fn (mut self HeroDB[T]) get_by_index(index_values map[string]string) !T {
mut query := 'SELECT data FROM ${self.table_name} WHERE '
mut params := []string{}
// WHERE col = 'value' per index pair (interpolated, not parameterized).
for key, value in index_values {
params << '${key} = \'${value}\''
}
query += params.join(' AND ')
rows := self.db.exec(query)!
if rows.len == 0 {
return error('${T.name} not found with index values: ${index_values}')
}
json_data_val := rows[0].vals[0] or { return error('no data') }
println('json_data_val: ${json_data_val}')
if true {
panic('sd2221')
}
// mut obj := json.decode(T, json_data_val) or {
// return error('Failed to decode JSON: ${err}')
// }
// return &obj
return T{}
}
// // get_all retrieves all objects T from the database
// pub fn (mut self HeroDB[T]) get_all() ![]T {
// query := 'SELECT data FROM ${self.table_name} ORDER BY id DESC'
// rows := self.db_client.db()!.exec(query)!
// mut results := []T{}
// for row in rows {
// json_data_val := row.vals[0] or { continue }
// json_data := json_data_val.str()
// mut obj := json.decode(T, json_data) or {
// // e.g. an error could be given here
// continue // Skip invalid JSON
// }
// results << &obj
// }
// return results
// }
// // search_by_index searches for objects T by a specific index field
// pub fn (mut self HeroDB[T]) search_by_index(field_name string, value string) ![]T {
// query := 'SELECT data FROM ${self.table_name} WHERE ${field_name} = \'${value}\' ORDER BY id DESC'
// rows := self.db_client.db()!.exec(query)!
// mut results := []T{}
// for row in rows {
// json_data_val := row.vals[0] or { continue }
// json_data := json_data_val.str()
// mut obj := json.decode(T, json_data) or {
// continue
// }
// results << &obj
// }
// return results
// }
// // delete_by_index removes objects T matching the given index values
// pub fn (mut self HeroDB[T]) delete_by_index(index_values map[string]string) ! {
// mut query := 'DELETE FROM ${self.table_name} WHERE '
// mut params := []string{}
// for key, value in index_values {
// params << '${key} = \'${value}\''
// }
// query += params.join(' AND ')
// self.db_client.db()!.exec(query)!
// }
// Helper to extract index values from object
// extract_index_values should map each @[index] field of obj to its string
// value for use in WHERE clauses.
// NOTE(review): the entire compile-time body is commented out, so this
// currently always returns an empty map — save() and get_by_index() will
// therefore build queries with an empty WHERE clause.
fn (self HeroDB[T]) extract_index_values(obj T) map[string]string {
mut index_data := map[string]string{}
$for field in T.fields {
// $if field.attrs.contains('index') {
// field_name := texttools.snake_case(field.name)
// $if field.typ is string {
// value := obj.$(field.name)
// index_data[field_name] = value
// } $else $if field.typ is int {
// value := obj.$(field.name).str()
// index_data[field_name] = value
// } $else {
// value := obj.$(field.name).str()
// index_data[field_name] = value
// }
// }
}
return index_data
}

View File

@@ -1,37 +0,0 @@
## hero db - OSIS in vlang
```v
// Example usage:
// Initialize database client
mut db_client := postgresql_client.get(name: "default")!
// Create HeroDB for Circle type
mut circle_db := hero_db.new[circle.Circle](db_client)!
circle_db.ensure_table()!
// Create and save a circle
mut my_circle := circle.Circle{
name: "Tech Community"
description: "A community for tech enthusiasts"
domain: "tech.example.com"
config: circle.CircleConfig{
max_members: 1000
allow_guests: true
auto_approve: false
theme: "modern"
}
status: circle.CircleStatus.active
}
circle_db.save(&my_circle)!
// Retrieve the circle
retrieved_circle := circle_db.get_by_index({
"domain": "tech.example.com"
})!
// Search circles by status
active_circles := circle_db.search_by_index("status", "active")!
```

View File

@@ -0,0 +1,94 @@
The main data is in a key-value store:
- each object has u32 id
- each object has u16 version (version of same data)
- each object has u16 schemaid (if schema changes)
- each object has tags u32 (to tag table)
- each object has a created_at timestamp
- each object has a updated_at timestamp
- each object has binary content (the data)
- each object has link to who can read/write/delete (lists of u32 per read/write/delete to group or user), link to security policy u32
- each object has a signature of the data by the user who created/updated it
- there are users & groups
- groups can have other groups and users inside
- users & groups are unique u32 as well in the DB, so no collision
This database does not know what the data is about; it is agnostic to the schema.
Now define the structs which represent the above:
- data
- user
- group ([]u32) each links to user or group, name, description
- tags ([]string which gets a unique id, so its shorter to link to data object)
- securitypolicy (see below)
and encoding scheme using lib/data/encoder, we need encode/decode on the structs, so we have densest possible encoding
now we need the implementation details for each struct, including the fields and their types, as well as the encoding/decoding logic.
the outside is a server over openrpc which has
- set (userid:u32, id:u32, data: Data, signature: string, tags:[]string) -> u32. (id can be 0 then its new, if existing we need to check if user can do it), tags will be recalculated based on []string (lower case, sorted list then md5 -> u32)
- get (userid:u32, id: u32, signedid: string) -> Data,Tags as []string
- exist (userid:u32, id: u32) -> bool //this we allow without signature
- delete (userid:u32, id: u32, signedid: string) -> bool
- list (userid:u32, signature: string, based on tags, schemaid, from creation/update and to creation/update), returns max 200 items -> u32
the interface is stateless, no previous connection known, based on signature the server can verify the user is allowed to perform the action
the backend database is redis (hsets and sets)
## signing implementation
The signing is implemented in the same Redis server, so there is no need to use V for that.
```bash
# Generate an ephemeral signing keypair
redis-cli -p $PORT AGE GENSIGN
# Example output:
# 1) "<verify_pub_b64>"
# 2) "<sign_secret_b64>"
# Sign a message with the secret
redis-cli -p $PORT AGE SIGN "<sign_secret_b64>" "msg"
# → returns "<signature_b64>"
# Verify with the public key
redis-cli -p $PORT AGE VERIFY "<verify_pub_b64>" "msg" "<signature_b64>"
# → 1 (valid) or 0 (invalid)
```
Versioning: when storing, we don't need to worry about the version — the database will check whether the object exists, find the newest version, and then update.
## some of the base objects
```v
@[heap]
pub struct SecurityPolicy {
pub mut:
id u32
read []u32 //links to users & groups
write []u32 //links to users & groups
delete []u32 //links to users & groups
public bool
}
@[heap]
pub struct Tags {
pub mut:
id u32
names []string //unique per id
md5 string //of sorted names, to make easy to find unique id
}
```

View File

@@ -0,0 +1,67 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
// Entry script for one herocluster node: parses <node_id> <status> from argv,
// generates demo keys, connects to 4 local redis servers, then runs the
// health monitor in the background and the heartbeat loop in the foreground.
// NOTE(review): Node/NodeStatus/Election/redis/ed25519/os are defined or
// imported outside this visible chunk.
if os.args.len < 3 {
eprintln('Usage: ./prog <node_id> <status>')
eprintln(' status: active|buffer')
return
}
node_id := os.args[1]
status_str := os.args[2]
// Map the CLI string onto the NodeStatus enum; reject anything else.
status := match status_str {
'active' { NodeStatus.active }
'buffer' { NodeStatus.buffer }
else {
eprintln('Invalid status. Use: active|buffer')
return
}
}
// --- Generate ephemeral keys for demo ---
// In real use: load from PEM files
// NOTE(review): current V stdlib ed25519.generate_key() takes no arguments
// and returns (pub, priv) — this Go-style call needs confirming.
priv, pub := ed25519.generate_key(rand.reader) or { panic(err) }
mut pubkeys := map[string]ed25519.PublicKey{}
pubkeys[node_id] = pub
// TODO: load all pubkeys from config file so every node knows others
// Initialize all nodes (in real scenario, load from config)
mut all_nodes := map[string]Node{}
all_nodes['node1'] = Node{id: 'node1', status: .active}
all_nodes['node2'] = Node{id: 'node2', status: .active}
all_nodes['node3'] = Node{id: 'node3', status: .active}
all_nodes['node4'] = Node{id: 'node4', status: .buffer}
// Set current node status
all_nodes[node_id].status = status
// One redis connection per server; all writes are fanned out to all of them.
servers := ['127.0.0.1:6379', '127.0.0.1:6380', '127.0.0.1:6381', '127.0.0.1:6382']
mut conns := []redis.Connection{}
for s in servers {
mut c := redis.connect(redis.Options{ server: s }) or {
panic('could not connect to redis $s: $err')
}
conns << c
}
mut election := Election{
clients: conns
pubkeys: pubkeys
self: Node{
id: node_id
term: 0
leader: false
status: status
}
keys: Keys{ priv: priv, pub: pub }
all_nodes: all_nodes
buffer_nodes: ['node4'] // Initially node4 is buffer
}
println('[$node_id] started as $status_str, connected to 4 redis servers.')
// Start health monitoring in background
go election.health_monitor_loop()
// Start main heartbeat loop
election.heartbeat_loop()

View File

@@ -0,0 +1,378 @@
module herocluster
import db.redis
import crypto.ed25519
import crypto.rand
import encoding.hex
import os
import time
// Election/health-check tuning constants (all intervals in milliseconds).
const election_timeout_ms = 3000
const heartbeat_interval_ms = 1000
const node_unavailable_threshold_ms = 24 * 60 * 60 * 1000 // 1 day in milliseconds
const health_check_interval_ms = 30000 // 30 seconds
// --- Crypto helpers ---
// Keys holds this node's ed25519 signing keypair.
struct Keys {
priv ed25519.PrivateKey
pub ed25519.PublicKey
}
// sign a message
// Returns the hex-encoded ed25519 signature of msg using k.priv.
// NOTE(review): recent V stdlib declares ed25519.sign as returning a Result
// (![]u8); this call site does no error handling — confirm against the V
// version in use.
fn (k Keys) sign(msg string) string {
sig := ed25519.sign(k.priv, msg.bytes())
return hex.encode(sig)
}
// verify signature
// Reports whether sig_hex is a valid ed25519 signature of msg under pub.
// A malformed hex string yields false rather than an error.
fn verify(pub ed25519.PublicKey, msg string, sig_hex string) bool {
sig := hex.decode(sig_hex) or { return false }
return ed25519.verify(pub, msg.bytes(), sig)
}
// --- Node & Election ---
// NodeStatus is a node's cluster role/health: active nodes vote and lead,
// buffer nodes stand by for promotion, unavailable nodes are retired.
enum NodeStatus {
active
buffer
unavailable
}
// Node is the local view of one cluster member.
struct Node {
id string
mut:
term int // last election term this node participated in
leader bool // true while this node believes it is the leader
voted_for string
status NodeStatus
last_seen i64 // timestamp
}
// HealthReport mirrors the fields stored under health:<reporter>:<target>.
struct HealthReport {
reporter_id string
target_id string
status string // "available" or "unavailable"
timestamp i64
signature string
}
// Election carries all mutable election state for this node: the fan-out
// redis connections, peers' verification keys, own identity/keys, and the
// current view of the cluster membership.
struct Election {
mut:
clients []redis.Connection
pubkeys map[string]ed25519.PublicKey
self Node
keys Keys
all_nodes map[string]Node
buffer_nodes []string
}
// --- Redis key builders ---

// vote_key is the hash key holding node_id's signed vote for a given term.
fn vote_key(term int, node_id string) string {
	return 'vote:' + term.str() + ':' + node_id
}

// health_key is the hash key for reporter_id's health report about target_id.
fn health_key(reporter_id string, target_id string) string {
	return 'health:' + reporter_id + ':' + target_id
}

// node_status_key is the hash key holding the published status of node_id.
fn node_status_key(node_id string) string {
	return 'node_status:' + node_id
}
// Write vote (signed) to ALL redis servers
// Records this node's vote for `candidate` in the current term on every
// redis server, stored as a hash {candidate, sig} under vote:<term>:<self_id>
// with a 5-second TTL so votes only count within the round that cast them.
fn (mut e Election) vote_for(candidate string) {
// The signature covers '<term>:<candidate>' so peers can verify authorship.
msg := '${e.self.term}:${candidate}'
sig_hex := e.keys.sign(msg)
for mut c in e.clients {
k := vote_key(e.self.term, e.self.id)
c.hset(k, 'candidate', candidate) or {}
c.hset(k, 'sig', sig_hex) or {}
c.expire(k, 5) or {}
}
println('[${e.self.id}] voted for $candidate (term=${e.self.term})')
}
// Report node health status
// report_node_health publishes a signed health verdict about target_id to
// every redis server so peers can tally consensus in
// check_node_availability(). `status` is "available" or "unavailable"; the
// signature covers '<target>:<status>:<timestamp>' so a report cannot be
// forged or replayed with altered content.
//
// Fix: the original built an unused local `report := HealthReport{...}`;
// V rejects unused variables, and the struct value had no effect — removed.
fn (mut e Election) report_node_health(target_id string, status string) {
	now := time.now().unix_time()
	msg := '${target_id}:${status}:${now}'
	sig_hex := e.keys.sign(msg)
	// Fan the signed report out to all redis servers; the key auto-expires
	// after 24h so stale reports age out on their own.
	for mut c in e.clients {
		k := health_key(e.self.id, target_id)
		c.hset(k, 'status', status) or {}
		c.hset(k, 'timestamp', now.str()) or {}
		c.hset(k, 'signature', sig_hex) or {}
		c.expire(k, 86400) or {} // expire after 24 hours
	}
	println('[${e.self.id}] reported $target_id as $status')
}
// Collect health reports and check for consensus on unavailable nodes
// Scans every redis server for 'health:*' reports, keeps only
// signature-verified reports from known reporters claiming a target has been
// 'unavailable' for at least the 1-day threshold, and — once two or more
// distinct reporters agree on the same still-active target — triggers
// buffer-node promotion for that target.
fn (mut e Election) check_node_availability() {
now := time.now().unix_time()
mut unavailable_reports := map[string]map[string]i64{} // target_id -> reporter_id -> timestamp
for mut c in e.clients {
keys := c.keys('health:*') or { continue }
for k in keys {
// Key layout: health:<reporter_id>:<target_id>
parts := k.split(':')
if parts.len != 3 { continue }
reporter_id := parts[1]
target_id := parts[2]
vals := c.hgetall(k) or { continue }
status := vals['status']
timestamp_str := vals['timestamp']
sig_hex := vals['signature']
// Unknown reporter => no public key to verify with; skip.
if reporter_id !in e.pubkeys { continue }
timestamp := timestamp_str.i64()
// Recompute the signed message and verify authorship.
msg := '${target_id}:${status}:${timestamp}'
if verify(e.pubkeys[reporter_id], msg, sig_hex) {
// Threshold constant is in ms, timestamps in seconds — hence / 1000.
if status == 'unavailable' && (now - timestamp) >= (node_unavailable_threshold_ms / 1000) {
if target_id !in unavailable_reports {
unavailable_reports[target_id] = map[string]i64{}
}
unavailable_reports[target_id][reporter_id] = timestamp
}
}
}
}
// Check for consensus (2 out of 3 active nodes agree)
for target_id, reports in unavailable_reports {
if reports.len >= 2 && target_id in e.all_nodes {
if e.all_nodes[target_id].status == .active {
println('[${e.self.id}] Consensus reached: $target_id is unavailable for >1 day')
e.promote_buffer_node(target_id)
}
}
}
}
// Promote a buffer node to active status
// Replaces failed_node_id with the first node from the buffer list: statuses
// are updated in the local view, the buffer list shrinks, and the change is
// announced on every redis server under node_status:<id>.
fn (mut e Election) promote_buffer_node(failed_node_id string) {
if e.buffer_nodes.len == 0 {
println('[${e.self.id}] No buffer nodes available for promotion')
return
}
// Select first available buffer node
buffer_id := e.buffer_nodes[0]
// Update node statuses
// NOTE(review): assigning a struct field of a map value in place may be
// rejected by the V checker — confirm this compiles on the V version in use.
if failed_node_id in e.all_nodes {
e.all_nodes[failed_node_id].status = .unavailable
}
if buffer_id in e.all_nodes {
e.all_nodes[buffer_id].status = .active
}
// Remove from buffer list
e.buffer_nodes = e.buffer_nodes.filter(it != buffer_id)
// Announce the promotion
for mut c in e.clients {
k := node_status_key(buffer_id)
c.hset(k, 'status', 'active') or {}
c.hset(k, 'promoted_at', time.now().unix_time().str()) or {}
c.hset(k, 'replaced_node', failed_node_id) or {}
// Mark failed node as unavailable
failed_k := node_status_key(failed_node_id)
c.hset(failed_k, 'status', 'unavailable') or {}
c.hset(failed_k, 'failed_at', time.now().unix_time().str()) or {}
}
println('[${e.self.id}] Promoted buffer node $buffer_id to replace failed node $failed_node_id')
}
// Collect votes from ALL redis servers, verify signatures (only from active nodes)
// Tallies vote:<term>:* entries across every redis server into a
// candidate -> count map. Each voter key is counted once even if replicated
// on several servers, and a vote only counts when the voter is known, active,
// and its ed25519 signature over '<term>:<candidate>' verifies.
fn (mut e Election) collect_votes(term int) map[string]int {
mut counts := map[string]int{}
mut seen := map[string]bool{} // avoid double-counting same vote from multiple servers
for mut c in e.clients {
keys := c.keys('vote:${term}:*') or { continue }
for k in keys {
if seen[k] { continue }
seen[k] = true
vals := c.hgetall(k) or { continue }
candidate := vals['candidate']
sig_hex := vals['sig']
// Key layout: vote:<term>:<voter_id>
voter_id := k.split(':')[2]
// Only count votes from active nodes
if voter_id !in e.pubkeys || voter_id !in e.all_nodes { continue }
if e.all_nodes[voter_id].status != .active { continue }
msg := '${term}:${candidate}'
if verify(e.pubkeys[voter_id], msg, sig_hex) {
counts[candidate]++
} else {
println('[${e.self.id}] invalid signature from $voter_id')
}
}
}
return counts
}
// Run election (only active nodes participate)
// Starts a new term, votes for self, waits 500ms for peers to vote, then
// tallies. A candidate reaching a strict majority of currently-active nodes
// becomes leader; this node records whether the winner is itself.
fn (mut e Election) run_election() {
if e.self.status != .active {
return // Buffer nodes don't participate in elections
}
e.self.term++
e.vote_for(e.self.id)
// wait a bit for other nodes to also vote
time.sleep(500 * time.millisecond)
votes := e.collect_votes(e.self.term)
// Majority is computed over nodes currently marked active, not the full set.
active_node_count := e.all_nodes.values().filter(it.status == .active).len
majority_threshold := (active_node_count / 2) + 1
for cand, cnt in votes {
if cnt >= majority_threshold {
if cand == e.self.id {
println('[${e.self.id}] I AM LEADER (term=${e.self.term}, votes=$cnt, active_nodes=$active_node_count)')
e.self.leader = true
} else {
println('[${e.self.id}] sees LEADER = $cand (term=${e.self.term}, votes=$cnt, active_nodes=$active_node_count)')
e.self.leader = false
}
}
}
}
// Health monitoring loop (runs in background)
// Runs forever: every health_check_interval_ms an active node probes each
// peer's heartbeat key (fresh within 60s on any server => available),
// publishes a signed health report, then checks whether consensus demands a
// buffer-node promotion. Buffer nodes only sleep.
fn (mut e Election) health_monitor_loop() {
for {
if e.self.status == .active {
// Check health of other nodes
// NOTE(review): the `node` loop variable is unused; V may flag it —
// consider `for node_id, _ in`.
for node_id, node in e.all_nodes {
if node_id == e.self.id { continue }
// Simple health check: try to read a heartbeat key
mut is_available := false
for mut c in e.clients {
heartbeat_key := 'heartbeat:${node_id}'
val := c.get(heartbeat_key) or { continue }
last_heartbeat := val.i64()
if (time.now().unix_time() - last_heartbeat) < 60 { // 60 seconds threshold
is_available = true
break
}
}
status := if is_available { 'available' } else { 'unavailable' }
e.report_node_health(node_id, status)
}
// Check for consensus on failed nodes
e.check_node_availability()
}
time.sleep(health_check_interval_ms * time.millisecond)
}
}
// Heartbeat loop
// Main foreground loop: every heartbeat_interval_ms the node refreshes its
// own heartbeat key (2-minute TTL) on every redis server, then — if active —
// either prints a leader heartbeat or runs a new election; buffer nodes just
// log that they are monitoring.
fn (mut e Election) heartbeat_loop() {
for {
// Update own heartbeat
now := time.now().unix_time()
for mut c in e.clients {
heartbeat_key := 'heartbeat:${e.self.id}'
c.set(heartbeat_key, now.str()) or {}
c.expire(heartbeat_key, 120) or {} // expire after 2 minutes
}
if e.self.status == .active {
if e.self.leader {
println('[${e.self.id}] Heartbeat term=${e.self.term} (LEADER)')
} else {
e.run_election()
}
} else if e.self.status == .buffer {
println('[${e.self.id}] Buffer node monitoring cluster')
}
time.sleep(heartbeat_interval_ms * time.millisecond)
}
}
// --- MAIN ---
// main: parse <node_id> <status> from argv, generate demo keys, build the
// static 4-node cluster view (node1..3 active, node4 buffer), connect to the
// 4 local redis servers, then run the health monitor in the background and
// the heartbeat loop in the foreground.
fn main() {
if os.args.len < 3 {
eprintln('Usage: ./prog <node_id> <status>')
eprintln(' status: active|buffer')
return
}
node_id := os.args[1]
status_str := os.args[2]
// Map the CLI string onto the NodeStatus enum; reject anything else.
status := match status_str {
'active' { NodeStatus.active }
'buffer' { NodeStatus.buffer }
else {
eprintln('Invalid status. Use: active|buffer')
return
}
}
// --- Generate ephemeral keys for demo ---
// In real use: load from PEM files
// NOTE(review): current V stdlib ed25519.generate_key() takes no arguments
// and returns (pub, priv) — this Go-style call needs confirming.
priv, pub := ed25519.generate_key(rand.reader) or { panic(err) }
mut pubkeys := map[string]ed25519.PublicKey{}
pubkeys[node_id] = pub
// TODO: load all pubkeys from config file so every node knows others
// Initialize all nodes (in real scenario, load from config)
mut all_nodes := map[string]Node{}
all_nodes['node1'] = Node{id: 'node1', status: .active}
all_nodes['node2'] = Node{id: 'node2', status: .active}
all_nodes['node3'] = Node{id: 'node3', status: .active}
all_nodes['node4'] = Node{id: 'node4', status: .buffer}
// Set current node status
// NOTE(review): in-place field assignment on a map value may be rejected by
// the V checker — confirm this compiles.
all_nodes[node_id].status = status
// One connection per redis server; all writes are fanned out to all of them.
servers := ['127.0.0.1:6379', '127.0.0.1:6380', '127.0.0.1:6381', '127.0.0.1:6382']
mut conns := []redis.Connection{}
for s in servers {
mut c := redis.connect(redis.Options{ server: s }) or {
panic('could not connect to redis $s: $err')
}
conns << c
}
mut election := Election{
clients: conns
pubkeys: pubkeys
self: Node{
id: node_id
term: 0
leader: false
status: status
}
keys: Keys{ priv: priv, pub: pub }
all_nodes: all_nodes
buffer_nodes: ['node4'] // Initially node4 is buffer
}
println('[$node_id] started as $status_str, connected to 4 redis servers.')
// Start health monitoring in background
go election.health_monitor_loop()
// Start main heartbeat loop
election.heartbeat_loop()
}

View File

@@ -0,0 +1,202 @@
Let's extend the **Redis + ed25519 leader election** so that:
* We have **3 Redis servers** (`:6379`, `:6380`, `:6381`).
* Each node writes its **signed vote** to **all 3 servers**.
* Each node reads all votes from all servers, verifies them with the **known public keys**, and tallies majority (≥2/3 = 2 votes).
* Leader is declared if majority agrees.
---
## Full V Implementation
```v
import db.redis
import crypto.ed25519
import crypto.rand
import encoding.hex
import os
import time
const election_timeout_ms = 3000
const heartbeat_interval_ms = 1000
// --- Crypto helpers ---
struct Keys {
priv ed25519.PrivateKey
pub ed25519.PublicKey
}
// sign a message
fn (k Keys) sign(msg string) string {
sig := ed25519.sign(k.priv, msg.bytes())
return hex.encode(sig)
}
// verify signature
fn verify(pub ed25519.PublicKey, msg string, sig_hex string) bool {
sig := hex.decode(sig_hex) or { return false }
return ed25519.verify(pub, msg.bytes(), sig)
}
// --- Node & Election ---
struct Node {
id string
mut:
term int
leader bool
voted_for string
}
struct Election {
mut:
clients []redis.Connection
pubkeys map[string]ed25519.PublicKey
self Node
keys Keys
}
// Redis keys
fn vote_key(term int, node_id string) string { return 'vote:${term}:${node_id}' }
// Write vote (signed) to ALL redis servers
fn (mut e Election) vote_for(candidate string) {
msg := '${e.self.term}:${candidate}'
sig_hex := e.keys.sign(msg)
for mut c in e.clients {
k := vote_key(e.self.term, e.self.id)
c.hset(k, 'candidate', candidate) or {}
c.hset(k, 'sig', sig_hex) or {}
c.expire(k, 5) or {}
}
println('[${e.self.id}] voted for $candidate (term=${e.self.term})')
}
// Collect votes from ALL redis servers, verify signatures
fn (mut e Election) collect_votes(term int) map[string]int {
mut counts := map[string]int{}
mut seen := map[string]bool{} // avoid double-counting same vote from multiple servers
for mut c in e.clients {
keys := c.keys('vote:${term}:*') or { continue }
for k in keys {
if seen[k] { continue }
seen[k] = true
vals := c.hgetall(k) or { continue }
candidate := vals['candidate']
sig_hex := vals['sig']
voter_id := k.split(':')[2]
if voter_id !in e.pubkeys {
println('[${e.self.id}] unknown voter $voter_id')
continue
}
msg := '${term}:${candidate}'
if verify(e.pubkeys[voter_id], msg, sig_hex) {
counts[candidate]++
} else {
println('[${e.self.id}] invalid signature from $voter_id')
}
}
}
return counts
}
// Run election
fn (mut e Election) run_election() {
e.self.term++
e.vote_for(e.self.id)
// wait a bit for other nodes to also vote
time.sleep(500 * time.millisecond)
votes := e.collect_votes(e.self.term)
for cand, cnt in votes {
if cnt >= 2 { // majority of 3
if cand == e.self.id {
println('[${e.self.id}] I AM LEADER (term=${e.self.term}, votes=$cnt)')
e.self.leader = true
} else {
println('[${e.self.id}] sees LEADER = $cand (term=${e.self.term}, votes=$cnt)')
e.self.leader = false
}
}
}
}
// Heartbeat loop
fn (mut e Election) heartbeat_loop() {
for {
if e.self.leader {
println('[${e.self.id}] Heartbeat term=${e.self.term}')
} else {
e.run_election()
}
time.sleep(heartbeat_interval_ms * time.millisecond)
}
}
// --- MAIN ---
fn main() {
if os.args.len < 2 {
eprintln('Usage: ./prog <node_id>')
return
}
node_id := os.args[1]
// --- Generate ephemeral keys for demo ---
// In real use: load from PEM files
priv, pub := ed25519.generate_key(rand.reader) or { panic(err) }
mut pubkeys := map[string]ed25519.PublicKey{}
pubkeys[node_id] = pub
// TODO: load all pubkeys from config file so every node knows others
servers := ['127.0.0.1:6379', '127.0.0.1:6380', '127.0.0.1:6381']
mut conns := []redis.Connection{}
for s in servers {
mut c := redis.connect(redis.Options{ server: s }) or {
panic('could not connect to redis $s: $err')
}
conns << c
}
mut election := Election{
clients: conns
pubkeys: pubkeys
self: Node{
id: node_id
term: 0
leader: false
}
keys: Keys{ priv: priv, pub: pub }
}
println('[$node_id] started, connected to 3 redis servers.')
election.heartbeat_loop()
}
```
---
## How to Run
1. Start 3 redis servers (different ports):
```bash
redis-server --port 6379 --dir /tmp/redis1 --daemonize yes
redis-server --port 6380 --dir /tmp/redis2 --daemonize yes
redis-server --port 6381 --dir /tmp/redis3 --daemonize yes
```
2. Run 3 nodes, each with its own ID:
```bash
v run raft_sign.v node1
v run raft_sign.v node2
v run raft_sign.v node3
```
3. You'll see one leader elected with **2/3 majority verified votes**.

View File

@@ -0,0 +1,455 @@
# Hero Cluster Instructions v2: 4-Node Cluster with Buffer Node
This extends the **Redis + ed25519 leader election** from instruct1.md to include a **4th buffer node** mechanism for enhanced fault tolerance.
## Overview
* We have **4 Redis servers** (`:6379`, `:6380`, `:6381`, `:6382`).
* **3 active nodes** participate in normal leader election.
* **1 buffer node** remains standby and monitors the cluster health.
* If **2 of 3 active nodes** agree that a 3rd node is unavailable for **longer than 1 day**, the buffer node automatically becomes active.
---
## Extended V Implementation
```v
import db.redis
import crypto.ed25519
import crypto.rand
import encoding.hex
import os
import time
const election_timeout_ms = 3000
const heartbeat_interval_ms = 1000
const node_unavailable_threshold_ms = 24 * 60 * 60 * 1000 // 1 day in milliseconds
const health_check_interval_ms = 30000 // 30 seconds
// --- Crypto helpers ---
struct Keys {
priv ed25519.PrivateKey
pub ed25519.PublicKey
}
// sign a message
fn (k Keys) sign(msg string) string {
sig := ed25519.sign(k.priv, msg.bytes())
return hex.encode(sig)
}
// verify signature
fn verify(pub ed25519.PublicKey, msg string, sig_hex string) bool {
sig := hex.decode(sig_hex) or { return false }
return ed25519.verify(pub, msg.bytes(), sig)
}
// --- Node & Election ---
enum NodeStatus {
active
buffer
unavailable
}
struct Node {
id string
mut:
term int
leader bool
voted_for string
status NodeStatus
last_seen i64 // timestamp
}
struct HealthReport {
reporter_id string
target_id string
status string // "available" or "unavailable"
timestamp i64
signature string
}
struct Election {
mut:
clients []redis.Connection
pubkeys map[string]ed25519.PublicKey
self Node
keys Keys
all_nodes map[string]Node
buffer_nodes []string
}
// Redis keys
fn vote_key(term int, node_id string) string { return 'vote:${term}:${node_id}' }
fn health_key(reporter_id string, target_id string) string { return 'health:${reporter_id}:${target_id}' }
fn node_status_key(node_id string) string { return 'node_status:${node_id}' }
// Write vote (signed) to ALL redis servers
fn (mut e Election) vote_for(candidate string) {
msg := '${e.self.term}:${candidate}'
sig_hex := e.keys.sign(msg)
for mut c in e.clients {
k := vote_key(e.self.term, e.self.id)
c.hset(k, 'candidate', candidate) or {}
c.hset(k, 'sig', sig_hex) or {}
c.expire(k, 5) or {}
}
println('[${e.self.id}] voted for $candidate (term=${e.self.term})')
}
// Report node health status
fn (mut e Election) report_node_health(target_id string, status string) {
now := time.now().unix_time()
msg := '${target_id}:${status}:${now}'
sig_hex := e.keys.sign(msg)
report := HealthReport{
reporter_id: e.self.id
target_id: target_id
status: status
timestamp: now
signature: sig_hex
}
for mut c in e.clients {
k := health_key(e.self.id, target_id)
c.hset(k, 'status', status) or {}
c.hset(k, 'timestamp', now.str()) or {}
c.hset(k, 'signature', sig_hex) or {}
c.expire(k, 86400) or {} // expire after 24 hours
}
println('[${e.self.id}] reported $target_id as $status')
}
// Collect health reports and check for consensus on unavailable nodes
fn (mut e Election) check_node_availability() {
now := time.now().unix_time()
mut unavailable_reports := map[string]map[string]i64{} // target_id -> reporter_id -> timestamp
for mut c in e.clients {
keys := c.keys('health:*') or { continue }
for k in keys {
parts := k.split(':')
if parts.len != 3 { continue }
reporter_id := parts[1]
target_id := parts[2]
vals := c.hgetall(k) or { continue }
status := vals['status']
timestamp_str := vals['timestamp']
sig_hex := vals['signature']
if reporter_id !in e.pubkeys { continue }
timestamp := timestamp_str.i64()
msg := '${target_id}:${status}:${timestamp}'
if verify(e.pubkeys[reporter_id], msg, sig_hex) {
if status == 'unavailable' && (now - timestamp) >= (node_unavailable_threshold_ms / 1000) {
if target_id !in unavailable_reports {
unavailable_reports[target_id] = map[string]i64{}
}
unavailable_reports[target_id][reporter_id] = timestamp
}
}
}
}
// Check for consensus (2 out of 3 active nodes agree)
for target_id, reports in unavailable_reports {
if reports.len >= 2 && target_id in e.all_nodes {
if e.all_nodes[target_id].status == .active {
println('[${e.self.id}] Consensus reached: $target_id is unavailable for >1 day')
e.promote_buffer_node(target_id)
}
}
}
}
// Promote a buffer node to active status, replacing failed_node_id.
// Fixes: a single clock read so all servers record the same promotion time
// (the original called time.now() per hset, so timestamps could differ),
// and copy-modify-reassign for map entries since V does not allow assigning
// to a field of a map value in place.
fn (mut e Election) promote_buffer_node(failed_node_id string) {
	if e.buffer_nodes.len == 0 {
		println('[${e.self.id}] No buffer nodes available for promotion')
		return
	}
	// Select first available buffer node
	buffer_id := e.buffer_nodes[0]
	// One clock read for the whole announcement
	now_str := time.now().unix_time().str()
	// Update node statuses (copy, mutate, write back)
	if failed_node_id in e.all_nodes {
		mut failed := e.all_nodes[failed_node_id]
		failed.status = .unavailable
		e.all_nodes[failed_node_id] = failed
	}
	if buffer_id in e.all_nodes {
		mut promoted := e.all_nodes[buffer_id]
		promoted.status = .active
		e.all_nodes[buffer_id] = promoted
	}
	// Remove from buffer list
	e.buffer_nodes = e.buffer_nodes.filter(it != buffer_id)
	// Announce the promotion on every redis server
	for mut c in e.clients {
		k := node_status_key(buffer_id)
		c.hset(k, 'status', 'active') or {}
		c.hset(k, 'promoted_at', now_str) or {}
		c.hset(k, 'replaced_node', failed_node_id) or {}
		// Mark failed node as unavailable
		failed_k := node_status_key(failed_node_id)
		c.hset(failed_k, 'status', 'unavailable') or {}
		c.hset(failed_k, 'failed_at', now_str) or {}
	}
	println('[${e.self.id}] Promoted buffer node $buffer_id to replace failed node $failed_node_id')
}
// collect_votes gathers term votes from every redis server, deduplicates by
// vote key (the same vote is mirrored on several servers), and tallies only
// signature-verified votes cast by currently-active nodes.
fn (mut e Election) collect_votes(term int) map[string]int {
	mut tally := map[string]int{}
	mut counted := map[string]bool{}
	for mut c in e.clients {
		vote_keys := c.keys('vote:${term}:*') or { continue }
		for vote_key in vote_keys {
			if counted[vote_key] {
				continue
			}
			counted[vote_key] = true
			fields := c.hgetall(vote_key) or { continue }
			// key format: vote:<term>:<voter_id>
			voter_id := vote_key.split(':')[2]
			// Only count votes from known, active nodes
			if voter_id !in e.pubkeys || voter_id !in e.all_nodes {
				continue
			}
			if e.all_nodes[voter_id].status != .active {
				continue
			}
			candidate := fields['candidate']
			sig_hex := fields['sig']
			if verify(e.pubkeys[voter_id], '${term}:${candidate}', sig_hex) {
				tally[candidate]++
			} else {
				println('[${e.self.id}] invalid signature from $voter_id')
			}
		}
	}
	return tally
}
// run_election starts a new term, votes for self, then tallies all verified
// votes. Majority is computed against the number of currently-active nodes.
// Buffer nodes never campaign or vote.
fn (mut e Election) run_election() {
	if e.self.status != .active {
		return
	}
	e.self.term++
	e.vote_for(e.self.id)
	// Give the other nodes a moment to cast their own votes
	time.sleep(500 * time.millisecond)
	votes := e.collect_votes(e.self.term)
	active_node_count := e.all_nodes.values().filter(it.status == .active).len
	majority_threshold := (active_node_count / 2) + 1
	for cand, cnt in votes {
		if cnt < majority_threshold {
			continue
		}
		e.self.leader = cand == e.self.id
		if e.self.leader {
			println('[${e.self.id}] I AM LEADER (term=${e.self.term}, votes=$cnt, active_nodes=$active_node_count)')
		} else {
			println('[${e.self.id}] sees LEADER = $cand (term=${e.self.term}, votes=$cnt, active_nodes=$active_node_count)')
		}
	}
}
// Health monitoring loop (runs in background).
// Active nodes probe every peer's heartbeat key on all redis servers, then
// publish a signed availability report and run the consensus check.
// Buffer/unavailable nodes idle through the loop.
fn (mut e Election) health_monitor_loop() {
	for {
		if e.self.status == .active {
			// Check health of other nodes
			// NOTE(review): `node` is unused here — V normally rejects unused
			// variables; confirm this compiles or iterate over keys() instead.
			for node_id, node in e.all_nodes {
				if node_id == e.self.id { continue }
				// Simple health check: try to read a heartbeat key
				mut is_available := false
				for mut c in e.clients {
					heartbeat_key := 'heartbeat:${node_id}'
					val := c.get(heartbeat_key) or { continue }
					last_heartbeat := val.i64()
					// one fresh heartbeat on any server counts as alive
					if (time.now().unix_time() - last_heartbeat) < 60 { // 60 seconds threshold
						is_available = true
						break
					}
				}
				status := if is_available { 'available' } else { 'unavailable' }
				e.report_node_health(node_id, status)
			}
			// Check for consensus on failed nodes
			e.check_node_availability()
		}
		time.sleep(health_check_interval_ms * time.millisecond)
	}
}
// Heartbeat loop — the node's main loop.
// Publishes an expiring heartbeat key to every redis server, then either
// keeps leading, runs an election (active non-leader), or, for buffer
// nodes, merely reports that it is watching the cluster.
fn (mut e Election) heartbeat_loop() {
	for {
		// Update own heartbeat on all servers
		now := time.now().unix_time()
		for mut c in e.clients {
			heartbeat_key := 'heartbeat:${e.self.id}'
			c.set(heartbeat_key, now.str()) or {}
			c.expire(heartbeat_key, 120) or {} // expire after 2 minutes
		}
		if e.self.status == .active {
			if e.self.leader {
				println('[${e.self.id}] Heartbeat term=${e.self.term} (LEADER)')
			} else {
				// not leading: campaign every heartbeat tick
				e.run_election()
			}
		} else if e.self.status == .buffer {
			println('[${e.self.id}] Buffer node monitoring cluster')
		}
		time.sleep(heartbeat_interval_ms * time.millisecond)
	}
}
// --- MAIN ---
// Entry point: `./prog <node_id> <status>` where status is active|buffer.
// Generates a throwaway ed25519 keypair, wires up 4 redis connections and
// starts the background health monitor plus the blocking heartbeat loop.
fn main() {
	if os.args.len < 3 {
		eprintln('Usage: ./prog <node_id> <status>')
		eprintln(' status: active|buffer')
		return
	}
	node_id := os.args[1]
	status_str := os.args[2]
	status := match status_str {
		'active' { NodeStatus.active }
		'buffer' { NodeStatus.buffer }
		else {
			eprintln('Invalid status. Use: active|buffer')
			return
		}
	}
	// --- Generate ephemeral keys for demo ---
	// In real use: load from PEM files
	// NOTE(review): confirm the vlib ed25519 API — generate_key may take no
	// reader argument in current V.
	priv, pub := ed25519.generate_key(rand.reader) or { panic(err) }
	mut pubkeys := map[string]ed25519.PublicKey{}
	// only our own pubkey is known, so peers' signatures cannot verify yet
	pubkeys[node_id] = pub
	// TODO: load all pubkeys from config file so every node knows others
	// Initialize all nodes (in real scenario, load from config)
	mut all_nodes := map[string]Node{}
	all_nodes['node1'] = Node{id: 'node1', status: .active}
	all_nodes['node2'] = Node{id: 'node2', status: .active}
	all_nodes['node3'] = Node{id: 'node3', status: .active}
	all_nodes['node4'] = Node{id: 'node4', status: .buffer}
	// Set current node status
	// NOTE(review): V may reject assigning to a field of a map value in
	// place — may need copy/modify/reassign.
	all_nodes[node_id].status = status
	servers := ['127.0.0.1:6379', '127.0.0.1:6380', '127.0.0.1:6381', '127.0.0.1:6382']
	mut conns := []redis.Connection{}
	for s in servers {
		mut c := redis.connect(redis.Options{ server: s }) or {
			panic('could not connect to redis $s: $err')
		}
		conns << c
	}
	mut election := Election{
		clients: conns
		pubkeys: pubkeys
		self: Node{
			id: node_id
			term: 0
			leader: false
			status: status
		}
		keys: Keys{ priv: priv, pub: pub }
		all_nodes: all_nodes
		buffer_nodes: ['node4'] // Initially node4 is buffer
	}
	println('[$node_id] started as $status_str, connected to 4 redis servers.')
	// Start health monitoring in background
	go election.health_monitor_loop()
	// Start main heartbeat loop
	election.heartbeat_loop()
}
```
---
## Key Extensions from instruct1.md
### 1. **4th Redis Server**
- Added `:6382` as the 4th Redis server for enhanced redundancy.
### 2. **Node Status Management**
- **NodeStatus enum**: `active`, `buffer`, `unavailable`
- **Buffer nodes**: Don't participate in elections but monitor cluster health.
### 3. **Health Monitoring System**
- **Health reports**: Signed reports about node availability.
- **Consensus mechanism**: 2 out of 3 active nodes must agree a node is unavailable.
- **1-day threshold**: Node must be unavailable for >24 hours before replacement.
### 4. **Automatic Buffer Promotion**
- When consensus is reached about a failed node, buffer node automatically becomes active.
- Failed node is marked as unavailable.
- Cluster continues with 3 active nodes.
### 5. **Enhanced Election Logic**
- Only active nodes participate in voting.
- Majority threshold adapts to current number of active nodes.
- Buffer nodes monitor but don't vote.
---
## How to Run
1. **Start 4 redis servers**:
```bash
redis-server --port 6379 --dir /tmp/redis1 --daemonize yes
redis-server --port 6380 --dir /tmp/redis2 --daemonize yes
redis-server --port 6381 --dir /tmp/redis3 --daemonize yes
redis-server --port 6382 --dir /tmp/redis4 --daemonize yes
```
2. **Run 3 active nodes + 1 buffer**:
```bash
v run raft_sign_v2.v node1 active
v run raft_sign_v2.v node2 active
v run raft_sign_v2.v node3 active
v run raft_sign_v2.v node4 buffer
```
3. **Test failure scenario**:
- Stop one active node (e.g., kill node3)
- Wait >1 day (or reduce threshold for testing)
- Watch buffer node4 automatically become active
- Cluster continues with 3 active nodes
---
## Benefits
- **Enhanced fault tolerance**: Can survive 1 node failure without service interruption.
- **Automatic recovery**: No manual intervention needed for node replacement.
- **Consensus-based decisions**: Prevents false positives in failure detection.
- **Cryptographic security**: All health reports are signed and verified.
- **Scalable design**: Easy to add more buffer nodes if needed.

156
lib/hero/heromodels/base.v Normal file
View File

@@ -0,0 +1,156 @@
module heromodels
import crypto.md5
import json
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
// Base holds the fields shared by every Hero root object: identity, naming,
// timestamps, the governing security policy and attached tags/comments.
@[heap]
pub struct Base {
pub mut:
	id             u32
	name           string
	description    string
	created_at     i64 // unix timestamp (seconds)
	updated_at     i64 // unix timestamp (seconds)
	securitypolicy u32 // id of the SecurityPolicy governing this object
	tags           u32 // id of a Tags record; set/get as []string, sorted + md5-hashed, which gives the unique id of the tag set
	comments       []u32 // ids of Comment records
}
// SecurityPolicy describes who may read/write/delete an object.
// Fix: removed a stray non-ASCII character (`å`) after the opening brace
// that made the struct declaration fail to parse.
@[heap]
pub struct SecurityPolicy {
pub mut:
	id     u32
	read   []u32 // links to users & groups
	write  []u32 // links to users & groups
	delete []u32 // links to users & groups
	public bool
	md5    string // md5 over sorted read/write/delete ids + public flag; maps any permission config to a stable hash
}
// Tags is a deduplicated, content-addressed set of tag names.
@[heap]
pub struct Tags {
pub mut:
	id    u32
	names []string // unique per id
	md5   string // md5 of the sorted names (each lowercased, ascii) — makes the unique id easy to find
}
// Comment is a single (possibly threaded) remark attached to a root object.
// NOTE(review): this duplicates the Comment declared in comment.v within the
// same module — one of the two should be removed.
@[heap]
pub struct Comment {
pub mut:
	id         u32
	comment    string
	parent     u32 // id of parent comment if any, 0 means none
	updated_at i64
	author     u32 // links to user
}

// dump serializes the comment with a leading format-version byte (1).
// Field order here is the wire contract comment_load must mirror.
pub fn (self Comment) dump() ![]u8 {
	// Create a new encoder
	mut e := encoder.new()
	e.add_u8(1)
	e.add_u32(self.id)
	e.add_string(self.comment)
	e.add_u32(self.parent)
	e.add_i64(self.updated_at)
	e.add_u32(self.author)
	return e.data
}
// comment_load decodes a Comment previously serialized with Comment.dump.
// Fixes over the original: the original called an undefined `decoder.new()`,
// passed an argument to get_u8, assigned decoded fields onto the []u8
// parameter, and returned raw bytes instead of a Comment.
// NOTE(review): assumes the encoder module exposes decoder_new([]u8) with
// get_u8/get_u32/get_string/get_i64 — confirm against lib/data/encoder.
pub fn comment_load(self []u8) !Comment {
	mut d := encoder.decoder_new(self)
	version := d.get_u8()
	if version != 1 {
		// return an error instead of panicking on corrupt input
		return error('wrong version in comment load')
	}
	mut o := Comment{}
	o.id = d.get_u32()
	o.comment = d.get_string()
	o.parent = d.get_u32()
	o.updated_at = d.get_i64()
	o.author = d.get_u32()
	return o
}
/////////////////

// BaseArgs carries the caller-supplied fields shared by all root-object
// constructors; optional fields fall back to 0 when unset.
@[params]
pub struct BaseArgs {
pub mut:
	id             ?u32
	name           string
	description    string
	securitypolicy ?u32
	tags           []string // converted to a Tags id at creation time
	comments       []CommentArg // persisted to Comment records at creation time
}

// CommentArg is the creation payload for a Comment (id is assigned later).
pub struct CommentArg {
pub mut:
	comment string
	parent  u32 // id of parent comment if any, 0 means none
	author  u32 // links to user
}
// tags2id normalizes a tag list (lowercased, trimmed, sorted), md5-hashes the
// joined result and returns the stable u32 id for that tag set, allocating a
// new id in redis on first sight. Returns 0 for an empty list.
// Fixes: the original used an undefined `redis` client, illegally shadowed
// the `tags` parameter, used `.sort()` (which returns nothing) inside an
// expression chain, called `crypto.hexhash` instead of `md5.hexhash`, and
// mixed int/u32.
pub fn tags2id(tags []string) !u32 {
	mut myid := u32(0)
	if tags.len > 0 {
		mut redis := redisclient.core_get()!
		mut normalized := tags.map(it.to_lower_ascii().trim_space())
		normalized.sort()
		mytags := normalized.join(',')
		mymd5 := md5.hexhash(mytags)
		existing := redis.hget('db:tags', mymd5)!
		if existing == '' {
			// first time we see this tag set: allocate an id and index both ways
			myid = u32(redis.incr('db:tags:id')!)
			redis.hset('db:tags', mymd5, myid.str())!
			redis.hset('db:tags', myid.str(), mytags)!
		} else {
			myid = u32(existing.int())
		}
	}
	return myid
}
// comment2id persists a new Comment built from args and returns its id.
// Fixes: the original left `id:` without a value (syntax error) and used an
// undefined `redis` variable.
pub fn comment2id(args CommentArg) !u32 {
	mut redis := redisclient.core_get()!
	myid := u32(redis.incr('db:comments:id')!)
	mut o := Comment{
		id: myid
		comment: args.comment
		parent: args.parent
		updated_at: ourtime.now().unix()
		author: args.author
	}
	data := o.dump()!
	// NOTE(review): assumes hset accepts a string payload — hence bytestr()
	redis.hset('db:comments:data', myid.str(), data.bytestr())!
	return myid
}
// new builds a root object of type T (any struct embedding Base) from the
// shared BaseArgs, persisting inline comments and resolving the tag-set id.
// Fixes: the original used the obsolete `fn [T] new` generic syntax, declared
// a Base return while constructing T, called hget with a single argument, and
// assigned []string/[]CommentArg values to u32/[]u32 fields.
pub fn new[T](args BaseArgs) !T {
	mut commentids := []u32{}
	for c in args.comments {
		commentids << comment2id(c)!
	}
	return T{
		id: args.id or { 0 }
		name: args.name
		description: args.description
		created_at: ourtime.now().unix()
		updated_at: ourtime.now().unix()
		securitypolicy: args.securitypolicy or { 0 }
		tags: tags2id(args.tags)!
		comments: commentids
	}
}

View File

@@ -0,0 +1,69 @@
module heromodels
import crypto.blake3
import json
import freeflowuniverse.herolib.data.ourtime
import time
// Calendar represents a collection of events; Base supplies id, naming,
// timestamps, security policy, tags and comments.
@[heap]
pub struct Calendar {
	Base
pub mut:
	group_id u32 // Associated group for permissions
	events   []u32 // IDs of calendar events (changed to u32 to match CalendarEvent)
	color    string // Hex color code
	timezone string
	is_public bool
}
// CalendarArgs is the creation payload for calendar_new; BaseArgs supplies
// the shared identity/tag/comment fields.
@[params]
pub struct CalendarArgs {
	BaseArgs
pub mut:
	group_id u32
	events   []u32
	color    string
	timezone string
	is_public bool
}
// calendar_new builds a Calendar from args, persisting inline comments first
// so only their ids are stored on the object.
// Fixes: the original used `!` propagation without a result return type,
// appended to an immutable array, passed CommentArg where Comment was
// expected, and assigned ?u32/[]string args directly to u32 fields.
pub fn calendar_new(args CalendarArgs) !Calendar {
	mut commentids := []u32{}
	for comment in args.comments {
		commentids << comment_set(comment_new(comment)!)!
	}
	// one clock read so created_at == updated_at on a fresh object
	now := ourtime.now().unix()
	mut obj := Calendar{
		id: args.id or { 0 }
		name: args.name
		description: args.description
		created_at: now
		updated_at: now
		securitypolicy: args.securitypolicy or { 0 }
		tags: tags2id(args.tags)!
		comments: commentids
		group_id: args.group_id
		events: args.events
		color: args.color
		timezone: args.timezone
		is_public: args.is_public
	}
	return obj
}
// add_event appends an event id if it is not already present and refreshes
// the calendar's updated_at timestamp.
pub fn (mut c Calendar) add_event(event_id u32) {
	if event_id in c.events {
		return // already attached — nothing to do
	}
	c.events << event_id
	c.updated_at = ourtime.now().unix()
}
// dump serializes the Calendar to the binary encoder format (stub).
pub fn (mut c Calendar) dump() []u8 {
	//TODO: implement based on lib/data/encoder/readme.md
	return []u8{}
}

// calendar_load reconstructs a Calendar from dump() output (stub).
pub fn calendar_load(data []u8) Calendar {
	//TODO: implement based on lib/data/encoder/readme.md
	return Calendar{}
}

View File

@@ -0,0 +1,113 @@
module heromodels
import crypto.blake3
import json
import freeflowuniverse.herolib.data.ourtime
// CalendarEvent represents a single event in a calendar; Base supplies id,
// naming, timestamps, security policy, tags and comments.
@[heap]
pub struct CalendarEvent {
	Base
pub mut:
	title       string
	description string
	start_time  i64 // Unix timestamp
	end_time    i64 // Unix timestamp
	location    string
	attendees   []u32 // IDs of user groups
	fs_items    []u32 // IDs of linked files or dirs
	calendar_id u32 // Associated calendar
	status      EventStatus
	is_all_day  bool
	is_recurring bool
	recurrence  []RecurrenceRule // normally empty
	reminder_mins []int // Minutes before event for reminders
	color       string // Hex color code
	timezone    string
}
// Attendee couples a user with their RSVP status and role for one event.
pub struct Attendee {
pub mut:
	user_id u32
	status  AttendanceStatus
	role    AttendeeRole
}

// AttendanceStatus is the RSVP state of an attendee.
pub enum AttendanceStatus {
	no_response
	accepted
	declined
	tentative
}

// AttendeeRole distinguishes organizers from required/optional guests.
pub enum AttendeeRole {
	required
	optional
	organizer
}

// EventStatus is the lifecycle state of a calendar event.
pub enum EventStatus {
	draft
	published
	cancelled
	completed
}

// RecurrenceRule describes how an event repeats (RRULE-like fields).
pub struct RecurrenceRule {
pub mut:
	frequency   RecurrenceFreq
	interval    int // Every N frequencies
	until       i64 // End date (Unix timestamp)
	count       int // Number of occurrences
	by_weekday  []int // Days of week (0=Sunday)
	by_monthday []int // Days of month
}

// RecurrenceFreq is the base repetition unit.
// NOTE(review): `none` is a V keyword — confirm it is legal as an enum
// variant name here.
pub enum RecurrenceFreq {
	none
	daily
	weekly
	monthly
	yearly
}
// CalendarEventArgs is the creation payload for calendar_event_new.
// NOTE(review): this embeds Base while the sibling CalendarArgs embeds
// BaseArgs — likely should be BaseArgs for consistency; also title and
// description overlap with the embedded struct's fields. Confirm intent.
@[params]
pub struct CalendarEventArgs {
	Base
pub mut:
	title       string
	description string
	start_time  string // use ourtime module to go from string to epoch
	end_time    string // use ourtime module to go from string to epoch
	location    string
	attendees   []u32 // IDs of user groups
	fs_items    []u32 // IDs of linked files or dirs
	calendar_id u32 // Associated calendar
	status      EventStatus
	is_all_day  bool
	is_recurring bool
	recurrence  []RecurrenceRule
	reminder_mins []int // Minutes before event for reminders
	color       string // Hex color code
	timezone    string
}
// calendar_event_new builds a CalendarEvent from args, parsing the human
// readable start/end strings into unix timestamps via ourtime.
// Fixes: the original returned an undefined `event` variable, used `!`
// without declaring a result return type, and mapped only start_time.
pub fn calendar_event_new(args CalendarEventArgs) !CalendarEvent {
	mut obj := CalendarEvent{
		title: args.title
		description: args.description
		start_time: ourtime.new(args.start_time)!.unix()
		end_time: ourtime.new(args.end_time)!.unix()
		location: args.location
		attendees: args.attendees
		fs_items: args.fs_items
		calendar_id: args.calendar_id
		status: args.status
		is_all_day: args.is_all_day
		is_recurring: args.is_recurring
		recurrence: args.recurrence
		reminder_mins: args.reminder_mins
		color: args.color
		timezone: args.timezone
	}
	return obj
}
// dump serializes the CalendarEvent to the binary encoder format (stub).
// Fix: the original declared return types but returned nothing, which does
// not compile — return empty placeholders until implemented.
pub fn (mut e CalendarEvent) dump() []u8 {
	// TODO: implement based on lib/data/encoder/readme.md
	return []u8{}
}

// calendar_event_load reconstructs a CalendarEvent from dump() output (stub).
pub fn calendar_event_load(data []u8) CalendarEvent {
	// TODO: implement based on lib/data/encoder/readme.md
	return CalendarEvent{}
}

View File

@@ -0,0 +1,63 @@
module heromodels
import crypto.blake3
import json
import time
// ChatGroup represents a chat channel or conversation; its id is derived
// from content (blake3 truncated to 192 bits), not assigned.
@[heap]
pub struct ChatGroup {
pub mut:
	id           string // blake192 hash
	name         string
	description  string
	group_id     string // Associated group for permissions
	chat_type    ChatType
	messages     []string // IDs of chat messages
	created_at   i64
	updated_at   i64
	last_activity i64
	is_archived  bool
	tags         []string
}

// ChatType distinguishes channels from direct/group conversations.
pub enum ChatType {
	public_channel
	private_channel
	direct_message
	group_message
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields, truncated to 192 bits (48 hex chars).
pub fn (mut c ChatGroup) calculate_id() {
	payload := ChatGroupContent{
		name: c.name
		description: c.description
		group_id: c.group_id
		chat_type: c.chat_type
		is_archived: c.is_archived
		tags: c.tags
	}
	digest := blake3.sum256(json.encode(payload).bytes())
	c.id = digest.hex()[..48]
}
// ChatGroupContent is the hashed subset of ChatGroup fields — timestamps and
// message lists are deliberately excluded from the identity.
struct ChatGroupContent {
	name        string
	description string
	group_id    string
	chat_type   ChatType
	is_archived bool
	tags        []string
}
// new_chat_group creates a ChatGroup and derives its content-addressed id.
// Fix: a single clock read so created_at, updated_at and last_activity are
// identical on a fresh object (the original read the clock three times).
pub fn new_chat_group(name string, group_id string, chat_type ChatType) ChatGroup {
	now := time.now().unix_time()
	mut chat_group := ChatGroup{
		name: name
		group_id: group_id
		chat_type: chat_type
		created_at: now
		updated_at: now
		last_activity: now
	}
	chat_group.calculate_id()
	return chat_group
}

View File

@@ -0,0 +1,103 @@
module heromodels
import crypto.blake3
import json
import time
// ChatMessage represents a message in a chat group; its id is derived from
// content (blake3 truncated to 192 bits).
@[heap]
pub struct ChatMessage {
pub mut:
	id             string // blake192 hash
	content        string
	chat_group_id  string // Associated chat group
	sender_id      string // User ID of sender
	parent_messages []MessageLink // Referenced/replied messages
	fs_files       []string // IDs of linked files
	message_type   MessageType
	status         MessageStatus
	created_at     i64
	updated_at     i64
	edited_at      i64
	deleted_at     i64
	reactions      []MessageReaction
	mentions       []string // User IDs mentioned in message
	tags           []string
}

// MessageLink ties this message to another one with a typed relation.
pub struct MessageLink {
pub mut:
	message_id string
	link_type  MessageLinkType
}

// MessageLinkType describes how a parent message is related.
pub enum MessageLinkType {
	reply
	reference
	forward
	quote
}

// MessageType is the payload kind of a message.
pub enum MessageType {
	text
	image
	file
	voice
	video
	system
	announcement
}

// MessageStatus is the delivery lifecycle of a message.
pub enum MessageStatus {
	sent
	delivered
	read
	failed
	deleted
}

// MessageReaction records one user's emoji reaction and when it was added.
pub struct MessageReaction {
pub mut:
	user_id   string
	emoji     string
	timestamp i64
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields, truncated to 192 bits (48 hex chars).
pub fn (mut m ChatMessage) calculate_id() {
	content := json.encode(MessageContent{
		content: m.content
		chat_group_id: m.chat_group_id
		sender_id: m.sender_id
		parent_messages: m.parent_messages
		fs_files: m.fs_files
		message_type: m.message_type
		mentions: m.mentions
		tags: m.tags
	})
	hash := blake3.sum256(content.bytes())
	m.id = hash.hex()[..48]
}

// MessageContent is the hashed subset of ChatMessage fields — status,
// timestamps and reactions are deliberately excluded from the identity.
struct MessageContent {
	content        string
	chat_group_id  string
	sender_id      string
	parent_messages []MessageLink
	fs_files       []string
	message_type   MessageType
	mentions       []string
	tags           []string
}
// new_chat_message creates a text ChatMessage in `sent` state and derives its
// content-addressed id.
// Fix: single clock read so created_at == updated_at on a fresh message.
pub fn new_chat_message(content string, chat_group_id string, sender_id string) ChatMessage {
	now := time.now().unix_time()
	mut message := ChatMessage{
		content: content
		chat_group_id: chat_group_id
		sender_id: sender_id
		message_type: .text
		status: .sent
		created_at: now
		updated_at: now
	}
	message.calculate_id()
	return message
}

View File

@@ -0,0 +1,90 @@
module heromodels
import crypto.md5
import json
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
// Comment is a single (possibly threaded) remark attached to a root object.
// NOTE(review): base.v declares an identical Comment in the same module —
// one of the two declarations should be removed.
@[heap]
pub struct Comment {
pub mut:
	id         u32
	comment    string
	parent     u32 // id of parent comment if any, 0 means none
	updated_at i64
	author     u32 // links to user
}
// dump serializes the comment with a leading format-version byte (1).
// Field order here is the wire contract comment_load must mirror.
pub fn (self Comment) dump() ![]u8 {
	// Create a new encoder
	mut e := encoder.new()
	e.add_u8(1)
	e.add_u32(self.id)
	e.add_string(self.comment)
	e.add_u32(self.parent)
	e.add_i64(self.updated_at)
	e.add_u32(self.author)
	return e.data
}
// comment_load decodes a Comment previously serialized with Comment.dump.
// Fixes: the original called an undefined `decoder.new()`, passed an argument
// to get_u8, assigned decoded fields onto the []u8 parameter, and returned
// raw bytes instead of a Comment.
// NOTE(review): assumes the encoder module exposes decoder_new([]u8) with
// get_u8/get_u32/get_string/get_i64 — confirm against lib/data/encoder.
pub fn comment_load(self []u8) !Comment {
	mut d := encoder.decoder_new(self)
	version := d.get_u8()
	if version != 1 {
		// error instead of panic on corrupt input
		return error('wrong version in comment load')
	}
	mut o := Comment{}
	o.id = d.get_u32()
	o.comment = d.get_string()
	o.parent = d.get_u32()
	o.updated_at = d.get_i64()
	o.author = d.get_u32()
	return o
}
// CommentArg is the creation payload for a Comment (id is assigned on set).
pub struct CommentArg {
pub mut:
	comment string
	parent  u32 // id of parent comment if any, 0 means none
	author  u32 // links to user
}
// comment_new builds a fresh in-memory Comment (not yet stored in the db);
// the id stays 0 until comment_set persists it.
pub fn comment_new(args CommentArg) !Comment {
	mut o := Comment{
		comment: args.comment
		parent: args.parent
		updated_at: ourtime.now().unix()
		author: args.author
	}
	return o
}
// comment_set assigns a fresh id to the comment, persists it under
// db:comments:data and returns the id.
// Fixes: the original assigned to an undefined variable `i`, mutated a
// non-mut parameter, and mixed int/u32 for the id.
pub fn comment_set(o Comment) !u32 {
	mut redis := redisclient.core_get()!
	myid := u32(redis.incr('db:comments:id')!)
	// work on a local copy: the parameter is immutable
	mut obj := o
	obj.id = myid
	data := obj.dump()!
	// NOTE(review): assumes hset accepts a string payload — hence bytestr()
	redis.hset('db:comments:data', myid.str(), data.bytestr())!
	return myid
}
// comment_exist reports whether a comment with the given id is stored.
// Fix: the original checked hash `db:comments` while comment_set writes to
// `db:comments:data`, so lookups could never succeed; also pass the field
// as a string.
pub fn comment_exist(id u32) !bool {
	mut redis := redisclient.core_get()!
	return redis.hexist('db:comments:data', id.str())!
}
// comment_get loads a stored comment by id or returns an error if absent.
// Fixes: the original read hash `db:comments` while comment_set writes to
// `db:comments:data`, passed a u32 field where a string key is expected, and
// handed a string to comment_load which takes []u8.
pub fn comment_get(id u32) !Comment {
	mut redis := redisclient.core_get()!
	mut data := redis.hget('db:comments:data', id.str())!
	if data.len > 0 {
		return comment_load(data.bytes())!
	}
	return error("Can't find comment with id: ${id}")
}

View File

@@ -0,0 +1,37 @@
// Example wiring of the heromodels root objects (illustrative only).
// NOTE(review): several constructors shown here (new_calendar,
// new_calendar_event) do not match the implemented names (calendar_new,
// calendar_event_new) — confirm the intended public API before relying on
// this snippet.
// Create a user
mut user := new_user('John Doe', 'john@example.com')
// Create a group
mut group := new_group('Development Team', 'Software development group')
group.add_member(user.id, .admin)
// Create a project
mut project := new_project('Website Redesign', 'Redesign company website', group.id)
// Create an issue
mut issue := new_project_issue('Fix login bug', project.id, user.id, .bug)
// Create a calendar
mut calendar := new_calendar('Team Calendar', group.id)
// Create an event
mut event := new_calendar_event('Sprint Planning', 1672531200, 1672534800, calendar.id, user.id)
calendar.add_event(event.id)
// Create a filesystem
mut fs := new_fs('Team Files', group.id)
// Create a blob for file content
mut blob := new_fs_blob('Hello World!'.bytes())!
println('User ID: ${user.id}')
println('Group ID: ${group.id}')
println('Project ID: ${project.id}')
println('Issue ID: ${issue.id}')
println('Calendar ID: ${calendar.id}')
println('Event ID: ${event.id}')
println('Filesystem ID: ${fs.id}')
println('Blob ID: ${blob.id}')

51
lib/hero/heromodels/fs.v Normal file
View File

@@ -0,0 +1,51 @@
module heromodels
import crypto.blake3
import json
import time
// Fs represents a filesystem namespace with quota accounting; its id is
// content-derived (blake3 truncated to 192 bits).
@[heap]
pub struct Fs {
pub mut:
	id          string // blake192 hash
	name        string
	description string
	group_id    string // Associated group for permissions
	root_dir_id string // ID of root directory
	created_at  i64
	updated_at  i64
	quota_bytes i64 // Storage quota in bytes
	used_bytes  i64 // Current usage in bytes
	tags        []string
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields, truncated to 192 bits (48 hex chars).
pub fn (mut f Fs) calculate_id() {
	content := json.encode(FsContent{
		name: f.name
		description: f.description
		group_id: f.group_id
		quota_bytes: f.quota_bytes
		tags: f.tags
	})
	hash := blake3.sum256(content.bytes())
	f.id = hash.hex()[..48]
}

// FsContent is the hashed subset of Fs fields — timestamps and usage
// counters are deliberately excluded from the identity.
struct FsContent {
	name        string
	description string
	group_id    string
	quota_bytes i64
	tags        []string
}
// new_fs creates a filesystem namespace owned by group_id and derives its
// content-addressed id.
// Fix: single clock read so created_at == updated_at on a fresh object.
pub fn new_fs(name string, group_id string) Fs {
	now := time.now().unix_time()
	mut fs := Fs{
		name: name
		group_id: group_id
		created_at: now
		updated_at: now
	}
	fs.calculate_id()
	return fs
}

View File

@@ -0,0 +1,40 @@
module heromodels
import crypto.blake3
import time
// FsBlob represents binary data up to 1MB; the id is the blake3 hash of the
// data itself (truncated to 192 bits), making blobs content-addressed.
@[heap]
pub struct FsBlob {
pub mut:
	id         string // blake192 hash of content
	data       []u8 // Binary data (max 1MB)
	size_bytes int // Size in bytes
	created_at i64
	mime_type  string
	encoding   string // e.g., "gzip", "none"
}
// calculate_id hashes the raw data directly (no JSON wrapper, unlike the
// other models) and keeps the first 192 bits.
pub fn (mut b FsBlob) calculate_id() {
	hash := blake3.sum256(b.data)
	b.id = hash.hex()[..48] // blake192 = first 192 bits = 48 hex chars
}
// new_fs_blob wraps binary data in a content-addressed FsBlob.
// Returns an error when the payload exceeds the 1MB blob limit.
pub fn new_fs_blob(data []u8) !FsBlob {
	if data.len > 1024 * 1024 { // 1MB limit
		return error('Blob size exceeds 1MB limit')
	}
	mut blob := FsBlob{
		data: data
		size_bytes: data.len
		created_at: time.now().unix_time()
		encoding: 'none' // stored uncompressed by default
	}
	blob.calculate_id()
	return blob
}
// verify_integrity re-hashes the stored data and confirms it still matches
// the blob's content-addressed id.
pub fn (b FsBlob) verify_integrity() bool {
	recomputed := blake3.sum256(b.data).hex()[..48]
	return recomputed == b.id
}

View File

@@ -0,0 +1,52 @@
module heromodels
import crypto.blake3
import json
import time
// FsDir represents a directory in a filesystem; its id is content-derived
// (blake3 truncated to 192 bits).
@[heap]
pub struct FsDir {
pub mut:
	id        string // blake192 hash
	name      string
	fs_id     string // Associated filesystem
	parent_id string // Parent directory ID (empty for root)
	group_id  string // Associated group for permissions
	children  []string // Child directory and file IDs
	created_at i64
	updated_at i64
	tags      []string
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields, truncated to 192 bits (48 hex chars).
pub fn (mut d FsDir) calculate_id() {
	content := json.encode(DirContent{
		name: d.name
		fs_id: d.fs_id
		parent_id: d.parent_id
		group_id: d.group_id
		tags: d.tags
	})
	hash := blake3.sum256(content.bytes())
	d.id = hash.hex()[..48]
}

// DirContent is the hashed subset of FsDir fields — timestamps and the
// children list are deliberately excluded from the identity.
struct DirContent {
	name      string
	fs_id     string
	parent_id string
	group_id  string
	tags      []string
}
// new_fs_dir creates a directory inside fs_id under parent_id (empty for
// root) and derives its content-addressed id.
// Fix: single clock read so created_at == updated_at on a fresh object.
pub fn new_fs_dir(name string, fs_id string, parent_id string, group_id string) FsDir {
	now := time.now().unix_time()
	mut dir := FsDir{
		name: name
		fs_id: fs_id
		parent_id: parent_id
		group_id: group_id
		created_at: now
		updated_at: now
	}
	dir.calculate_id()
	return dir
}

View File

@@ -0,0 +1,64 @@
module heromodels
import crypto.blake3
import json
import time
// FsFile represents a file in a filesystem; content lives in separate
// content-addressed blobs, the file's own id is content-derived too.
@[heap]
pub struct FsFile {
pub mut:
	id          string // blake192 hash
	name        string
	fs_id       string // Associated filesystem
	directories []string // Directory IDs where this file exists
	blobs       []string // Blake192 IDs of file content blobs
	size_bytes  i64 // Total file size
	mime_type   string
	checksum    string // Overall file checksum
	created_at  i64
	updated_at  i64
	accessed_at i64
	tags        []string
	metadata    map[string]string // Custom metadata
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields, truncated to 192 bits (48 hex chars).
pub fn (mut f FsFile) calculate_id() {
	content := json.encode(FileContent{
		name: f.name
		fs_id: f.fs_id
		directories: f.directories
		blobs: f.blobs
		size_bytes: f.size_bytes
		mime_type: f.mime_type
		checksum: f.checksum
		tags: f.tags
		metadata: f.metadata
	})
	hash := blake3.sum256(content.bytes())
	f.id = hash.hex()[..48]
}

// FileContent is the hashed subset of FsFile fields — timestamps are
// deliberately excluded from the identity.
struct FileContent {
	name        string
	fs_id       string
	directories []string
	blobs       []string
	size_bytes  i64
	mime_type   string
	checksum    string
	tags        []string
	metadata    map[string]string
}
// new_fs_file creates a file entry inside fs_id, linked into the given
// directories, and derives its content-addressed id.
// Fix: single clock read so created/updated/accessed timestamps are
// identical on a fresh object (the original read the clock three times).
pub fn new_fs_file(name string, fs_id string, directories []string) FsFile {
	now := time.now().unix_time()
	mut file := FsFile{
		name: name
		fs_id: fs_id
		directories: directories
		created_at: now
		updated_at: now
		accessed_at: now
	}
	file.calculate_id()
	return file
}

View File

@@ -0,0 +1,60 @@
module heromodels
import crypto.blake3
import json
import time
// FsSymlink represents a symbolic link in a filesystem; its id is
// content-derived (blake3 truncated to 192 bits).
@[heap]
pub struct FsSymlink {
pub mut:
	id         string // blake192 hash
	name       string
	fs_id      string // Associated filesystem
	parent_id  string // Parent directory ID
	target_id  string // ID of target file or directory
	target_type SymlinkTargetType
	created_at i64
	updated_at i64
	tags       []string
}

// SymlinkTargetType says whether the link points at a file or a directory.
pub enum SymlinkTargetType {
	file
	directory
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields, truncated to 192 bits (48 hex chars).
pub fn (mut s FsSymlink) calculate_id() {
	content := json.encode(SymlinkContent{
		name: s.name
		fs_id: s.fs_id
		parent_id: s.parent_id
		target_id: s.target_id
		target_type: s.target_type
		tags: s.tags
	})
	hash := blake3.sum256(content.bytes())
	s.id = hash.hex()[..48]
}

// SymlinkContent is the hashed subset of FsSymlink fields — timestamps are
// deliberately excluded from the identity.
struct SymlinkContent {
	name       string
	fs_id      string
	parent_id  string
	target_id  string
	target_type SymlinkTargetType
	tags       []string
}
// new_fs_symlink creates a symlink under parent_id pointing at target_id and
// derives its content-addressed id.
// Fix: single clock read so created_at == updated_at on a fresh object.
pub fn new_fs_symlink(name string, fs_id string, parent_id string, target_id string, target_type SymlinkTargetType) FsSymlink {
	now := time.now().unix_time()
	mut symlink := FsSymlink{
		name: name
		fs_id: fs_id
		parent_id: parent_id
		target_id: target_id
		target_type: target_type
		created_at: now
		updated_at: now
	}
	symlink.calculate_id()
	return symlink
}

View File

@@ -0,0 +1,80 @@
module heromodels
import crypto.blake3
import json
import time
// Group represents a collection of users with roles and permissions; its id
// is content-derived (blake3 truncated to 192 bits) and changes when
// membership changes.
@[heap]
pub struct Group {
pub mut:
	id           string // blake192 hash
	name         string
	description  string
	members      []GroupMember
	subgroups    []string // IDs of child groups
	parent_group string // ID of parent group
	created_at   i64
	updated_at   i64
	is_public    bool
	tags         []string
}

// GroupMember ties a user to a group with a role and join time.
pub struct GroupMember {
pub mut:
	user_id   string
	role      GroupRole
	joined_at i64
}

// GroupRole orders permissions from least (reader) to most (owner).
pub enum GroupRole {
	reader
	writer
	admin
	owner
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields (including membership), truncated to 192 bits.
pub fn (mut g Group) calculate_id() {
	content := json.encode(GroupContent{
		name: g.name
		description: g.description
		members: g.members
		subgroups: g.subgroups
		parent_group: g.parent_group
		is_public: g.is_public
		tags: g.tags
	})
	hash := blake3.sum256(content.bytes())
	g.id = hash.hex()[..48]
}

// GroupContent is the hashed subset of Group fields — timestamps are
// deliberately excluded from the identity.
struct GroupContent {
	name         string
	description  string
	members      []GroupMember
	subgroups    []string
	parent_group string
	is_public    bool
	tags         []string
}
// new_group creates a private, empty Group and derives its content-addressed
// id.
// Fix: single clock read so created_at == updated_at on a fresh object.
pub fn new_group(name string, description string) Group {
	now := time.now().unix_time()
	mut group := Group{
		name: name
		description: description
		created_at: now
		updated_at: now
		is_public: false
	}
	group.calculate_id()
	return group
}
// add_member appends a member with the given role, refreshes updated_at and
// recomputes the group's id (membership is part of the identity hash).
// Fix: single clock read so joined_at and updated_at agree.
pub fn (mut g Group) add_member(user_id string, role GroupRole) {
	now := time.now().unix_time()
	g.members << GroupMember{
		user_id: user_id
		role: role
		joined_at: now
	}
	g.updated_at = now
	g.calculate_id()
}

View File

@@ -0,0 +1,31 @@
Distill V-language objects out of the calendar/contact/circle specifications and create the missing parts.
Organize per root object: each root object is annotated @[heap] and lives in its own file named <name>.v.
the rootobjects are
- user
- group (which users are members and in which role can be admin, writer, reader, can be linked to subgroups)
- calendar (references to event, group)
- calendar_event (everything related to an event on calendar, link to one or more fs_file)
- project (grouping per project, defines swimlanes and milestones this allows us to visualize as kanban, link to group, link to one or more fs_file )
- project_issue (and issue is specific type, e.g. task, story, bug, question,…), issue is linked to project by id, also defined priority…, on which swimlane, deadline, assignees, … ,,,, has tags, link to one or more fs_file
- chat_group (link to group, name/description/tags)
- chat_message (link to chat_group, link to parent_chat_messages and what type of link e.g. reply or reference or? , status, … link to one or more fs_file)
- fs = filesystem (link to group)
- fs_dir = directory in filesystem, link to parent, link to group
- fs_file (link to one or more fs_dir, list of references to blobs as blake192)
- fs_symlink (can be link to dir or file)
- fs_blob (the data itself, max size 1 MB, binary data, id = blake192)
the groups define how people can interact with the parts e.g. calendar linked to group, so readers of that group can read and have copy of the info linked to that group
all the objects are identified by their blake192 (based on the content)
there is a special table linking each blake192 id to its previous & next version, so we can always walk the version tree; both columns are indexed (this is independent of the type of object)

View File

@@ -0,0 +1,102 @@
module heromodels
import crypto.blake3
import json
import time
// Project represents a collection of issues organized in swimlanes (kanban
// columns) and milestones; its id is content-derived (blake3, 192 bits).
@[heap]
pub struct Project {
pub mut:
	id          string // blake192 hash
	name        string
	description string
	group_id    string // Associated group for permissions
	swimlanes   []Swimlane
	milestones  []Milestone
	issues      []string // IDs of project issues
	fs_files    []string // IDs of linked files
	status      ProjectStatus
	start_date  i64
	end_date    i64
	created_at  i64
	updated_at  i64
	tags        []string
}

// Swimlane is one kanban column; `is_done` marks the terminal column.
pub struct Swimlane {
pub mut:
	id          string
	name        string
	description string
	order       int
	color       string
	is_done     bool
}

// Milestone groups issues toward a due date.
pub struct Milestone {
pub mut:
	id          string
	name        string
	description string
	due_date    i64
	completed   bool
	issues      []string // IDs of issues in this milestone
}

// ProjectStatus is the lifecycle state of a project.
pub enum ProjectStatus {
	planning
	active
	on_hold
	completed
	cancelled
}
// calculate_id recomputes the content-addressed id: blake3 over the JSON of
// the identity-bearing fields, truncated to 192 bits (48 hex chars).
pub fn (mut p Project) calculate_id() {
	content := json.encode(ProjectContent{
		name: p.name
		description: p.description
		group_id: p.group_id
		swimlanes: p.swimlanes
		milestones: p.milestones
		issues: p.issues
		fs_files: p.fs_files
		status: p.status
		start_date: p.start_date
		end_date: p.end_date
		tags: p.tags
	})
	hash := blake3.sum256(content.bytes())
	p.id = hash.hex()[..48]
}

// ProjectContent is the hashed subset of Project fields — created/updated
// timestamps are deliberately excluded from the identity.
struct ProjectContent {
	name        string
	description string
	group_id    string
	swimlanes   []Swimlane
	milestones  []Milestone
	issues      []string
	fs_files    []string
	status      ProjectStatus
	start_date  i64
	end_date    i64
	tags        []string
}
// new_project creates a planning-stage Project with a default three-column
// kanban board and derives its content-addressed id.
// Fix: single clock read so created_at == updated_at on a fresh object.
pub fn new_project(name string, description string, group_id string) Project {
	now := time.now().unix_time()
	mut project := Project{
		name: name
		description: description
		group_id: group_id
		status: .planning
		created_at: now
		updated_at: now
		swimlanes: [
			Swimlane{id: 'todo', name: 'To Do', order: 1, color: '#f1c40f'},
			Swimlane{id: 'in_progress', name: 'In Progress', order: 2, color: '#3498db'},
			Swimlane{id: 'done', name: 'Done', order: 3, color: '#2ecc71', is_done: true}
		]
	}
	project.calculate_id()
	return project
}

View File

@@ -0,0 +1,115 @@
module heromodels
import crypto.blake3
import json
import time
// ProjectIssue represents a task, story, bug, or question in a project; its
// id is content-derived (blake3 truncated to 192 bits).
@[heap]
pub struct ProjectIssue {
pub mut:
	id           string // blake192 hash
	title        string
	description  string
	project_id   string // Associated project
	issue_type   IssueType
	priority     IssuePriority
	status       IssueStatus
	swimlane_id  string // Current swimlane
	assignees    []string // User IDs
	reporter     string // User ID who created the issue
	milestone_id string // Associated milestone
	deadline     i64 // Unix timestamp
	estimate     int // Story points or hours
	fs_files     []string // IDs of linked files
	parent_id    string // Parent issue ID (for sub-tasks)
	children     []string // Child issue IDs
	created_at   i64
	updated_at   i64
	tags         []string
}

// IssueType categorizes the kind of work item.
pub enum IssueType {
	task
	story
	bug
	question
	epic
	subtask
}

// IssuePriority orders urgency from lowest to critical.
pub enum IssuePriority {
	lowest
	low
	medium
	high
	highest
	critical
}

// IssueStatus is the workflow state of an issue.
pub enum IssueStatus {
	open
	in_progress
	blocked
	review
	testing
	done
	closed
}
pub fn (mut i ProjectIssue) calculate_id() {
content := json.encode(IssueContent{
title: i.title
description: i.description
project_id: i.project_id
issue_type: i.issue_type
priority: i.priority
status: i.status
swimlane_id: i.swimlane_id
assignees: i.assignees
reporter: i.reporter
milestone_id: i.milestone_id
deadline: i.deadline
estimate: i.estimate
fs_files: i.fs_files
parent_id: i.parent_id
children: i.children
tags: i.tags
})
hash := blake3.sum256(content.bytes())
i.id = hash.hex()[..48]
}
// IssueContent is the hashing payload for ProjectIssue.calculate_id.
// It mirrors ProjectIssue but deliberately omits id, created_at and
// updated_at so the id depends only on user-visible content.
struct IssueContent {
title string
description string
project_id string
issue_type IssueType
priority IssuePriority
status IssueStatus
swimlane_id string
assignees []string
reporter string
milestone_id string
deadline i64
estimate int
fs_files []string
parent_id string
children []string
tags []string
}
// new_project_issue creates an open, medium-priority issue in the 'todo'
// swimlane with a content-derived blake192 id.
//
// title      - short summary of the issue
// project_id - id of the project the issue belongs to
// reporter   - User ID of the person creating the issue
// issue_type - task, story, bug, question, epic or subtask
pub fn new_project_issue(title string, project_id string, reporter string, issue_type IssueType) ProjectIssue {
	// One timestamp so created_at and updated_at are identical at creation
	// (two time.now() calls could straddle a tick).
	now := time.now().unix_time()
	mut issue := ProjectIssue{
		title: title
		project_id: project_id
		reporter: reporter
		issue_type: issue_type
		priority: .medium
		status: .open
		swimlane_id: 'todo'
		created_at: now
		updated_at: now
	}
	// id is derived from content, so compute it after all fields are assigned
	issue.calculate_id()
	return issue
}

View File

@@ -0,0 +1,67 @@
module heromodels
import crypto.blake3
import json
// User represents a person in the system.
// The id is content-addressed: calculate_id() derives it from the profile
// fields serialized in UserContent.
@[heap]
pub struct User {
pub mut:
id string // blake192 hash (first 48 hex chars of blake3-256 over UserContent)
name string // display name
email string // contact email
public_key string // for encryption/signing
phone string // contact phone number
address string // postal address
avatar_url string // profile picture URL (not part of the hashed content)
bio string // short free-form biography
timezone string // user's timezone identifier
created_at i64 // Unix timestamp, set at creation
updated_at i64 // Unix timestamp of last modification
status UserStatus // account state; new users start as .active
}
// UserStatus is the lifecycle state of a user account.
pub enum UserStatus {
active
inactive
suspended
pending
}
// calculate_id recomputes the content-addressed id of the user.
// The hashing payload is UserContent (which leaves out id, the timestamps
// and avatar_url), JSON-encoded and hashed with blake3-256; the id keeps
// only the first 192 bits.
pub fn (mut u User) calculate_id() {
	payload := UserContent{
		name: u.name
		email: u.email
		public_key: u.public_key
		phone: u.phone
		address: u.address
		bio: u.bio
		timezone: u.timezone
		status: u.status
	}
	digest := blake3.sum256(json.encode(payload).bytes())
	// blake192 = first 192 bits of the 256-bit digest = 48 hex chars
	u.id = digest.hex()[..48]
}
// UserContent is the hashing payload for User.calculate_id.
// It mirrors User but deliberately omits id, created_at, updated_at and
// avatar_url so the id depends only on the stable profile content.
struct UserContent {
name string
email string
public_key string
phone string
address string
bio string
timezone string
status UserStatus
}
// new_user creates an active User with a content-derived blake192 id.
// Only name and email are required; the remaining profile fields start
// empty and can be filled in later (followed by calculate_id()).
pub fn new_user(name string, email string) User {
	// NOTE(review): time.now() needs 'import time', which is not visible in
	// this file's import block — confirm the module imports time.
	// One timestamp so created_at and updated_at match at creation.
	now := time.now().unix_time()
	mut user := User{
		name: name
		email: email
		created_at: now
		updated_at: now
		status: .active
	}
	// id depends on content, so compute it after the fields are assigned
	user.calculate_id()
	return user
}

View File

@@ -0,0 +1,40 @@
module heromodels
// VersionHistory tracks the evolution of objects by their blake192 IDs.
// Entries form a doubly-linked chain: previous_id walks backward in time,
// next_id walks forward (empty until a newer version supersedes this one).
@[heap]
pub struct VersionHistory {
pub mut:
current_id string // blake192 hash of current version
previous_id string // blake192 hash of previous version
next_id string // blake192 hash of next version (if exists)
object_type string // Type of object (User, Group, etc.)
change_type ChangeType // create, update, delete or restore
changed_by string // User ID who made the change
changed_at i64 // Unix timestamp
change_notes string // Optional description of changes
}
// ChangeType describes what kind of modification a VersionHistory entry records.
pub enum ChangeType {
create
update
delete
restore
}
// new_version_history records a change event linking current_id to its
// predecessor previous_id. next_id is left empty; it is filled in when a
// newer version supersedes this one. change_notes is likewise left for the
// caller to set.
pub fn new_version_history(current_id string, previous_id string, object_type string, change_type ChangeType, changed_by string) VersionHistory {
	// NOTE(review): time.now() requires 'import time', which is not visible
	// at the top of this file chunk — confirm the module imports time.
	entry := VersionHistory{
		current_id: current_id
		previous_id: previous_id
		object_type: object_type
		change_type: change_type
		changed_by: changed_by
		changed_at: time.now().unix_time()
	}
	return entry
}
// Database indexes needed:
// - Index on current_id for fast lookup
// - Index on previous_id for walking backward
// - Index on next_id for walking forward
// - Index on object_type for filtering by type
// - Index on changed_by for user activity tracking

View File

@@ -1 +0,0 @@
../../../../../git.threefold.info/herocode/db/specs/models