Merge branch 'development' into development_heroprompt
@@ -45,7 +45,7 @@ compile_cmd := if os.user_os() == 'macos' {
|
||||
if prod_mode {
|
||||
'v -enable-globals -g -w -n -prod hero.v'
|
||||
} else {
|
||||
'v -n -g -w -cg -gc none -cc tcc -d use_openssl -enable-globals hero.v'
|
||||
'v -n -g -w -cg -cc tcc -d use_openssl -enable-globals hero.v'
|
||||
}
|
||||
} else {
|
||||
if prod_mode {
|
||||
@@ -56,7 +56,7 @@ compile_cmd := if os.user_os() == 'macos' {
|
||||
}
|
||||
|
||||
println('Building in ${if prod_mode { 'production' } else { 'debug' }} mode...')
|
||||
// eprintln(compile_cmd)
|
||||
eprintln(compile_cmd)
|
||||
|
||||
if os.system(compile_cmd) != 0 {
|
||||
panic('Failed to compile hero.v with command: ${compile_cmd}')
|
||||
|
||||
@@ -85,7 +85,7 @@ fn do() ! {
|
||||
herocmds.cmd_git(mut cmd)
|
||||
herocmds.cmd_generator(mut cmd)
|
||||
herocmds.cmd_docusaurus(mut cmd)
|
||||
herocmds.cmd_web(mut cmd)
|
||||
// herocmds.cmd_web(mut cmd)
|
||||
|
||||
cmd.setup()
|
||||
cmd.parse(os.args)
|
||||
|
||||
@@ -1100,9 +1100,6 @@ if [ "$RESET" = true ] || ! command_exists v; then
|
||||
|
||||
sshknownkeysadd
|
||||
|
||||
# Install secp256k1
|
||||
install_secp256k1
|
||||
|
||||
# Only clone and install if directory doesn't exist
|
||||
if [ ! -d ~/code/v ]; then
|
||||
echo "Installing V..."
|
||||
|
||||
@@ -229,35 +229,35 @@ function hero_lib_get {
|
||||
fi
|
||||
}
|
||||
|
||||
function install_secp256k1 {
|
||||
echo "Installing secp256k1..."
|
||||
if [[ "${OSNAME}" == "darwin"* ]]; then
|
||||
brew install secp256k1
|
||||
elif [[ "${OSNAME}" == "ubuntu" ]]; then
|
||||
# Install build dependencies
|
||||
apt-get install -y build-essential wget autoconf libtool
|
||||
# function install_secp256k1 {
|
||||
# echo "Installing secp256k1..."
|
||||
# if [[ "${OSNAME}" == "darwin"* ]]; then
|
||||
# brew install secp256k1
|
||||
# elif [[ "${OSNAME}" == "ubuntu" ]]; then
|
||||
# # Install build dependencies
|
||||
# apt-get install -y build-essential wget autoconf libtool
|
||||
|
||||
# Download and extract secp256k1
|
||||
cd "${DIR_BUILD}"
|
||||
wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
|
||||
tar -xvf v0.3.2.tar.gz
|
||||
# # Download and extract secp256k1
|
||||
# cd "${DIR_BUILD}"
|
||||
# wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
|
||||
# tar -xvf v0.3.2.tar.gz
|
||||
|
||||
# Build and install
|
||||
cd secp256k1-0.3.2/
|
||||
./autogen.sh
|
||||
./configure
|
||||
make -j 5
|
||||
make install
|
||||
# # Build and install
|
||||
# cd secp256k1-0.3.2/
|
||||
# ./autogen.sh
|
||||
# ./configure
|
||||
# make -j 5
|
||||
# make install
|
||||
|
||||
# Cleanup
|
||||
cd ..
|
||||
rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
|
||||
else
|
||||
echo "secp256k1 installation not implemented for ${OSNAME}"
|
||||
exit 1
|
||||
fi
|
||||
echo "secp256k1 installation complete!"
|
||||
}
|
||||
# # Cleanup
|
||||
# cd ..
|
||||
# rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
|
||||
# else
|
||||
# echo "secp256k1 installation not implemented for ${OSNAME}"
|
||||
# exit 1
|
||||
# fi
|
||||
# echo "secp256k1 installation complete!"
|
||||
# }
|
||||
|
||||
|
||||
remove_all() {
|
||||
|
||||
install_v.sh (65 lines changed)
@@ -257,42 +257,42 @@ function hero_lib_get {
|
||||
fi
|
||||
}
|
||||
|
||||
function install_secp256k1 {
|
||||
# function install_secp256k1 {
|
||||
|
||||
echo "Installing secp256k1..."
|
||||
if [[ "${OSNAME}" == "darwin"* ]]; then
|
||||
# Attempt installation only if not already found
|
||||
echo "Attempting secp256k1 installation via Homebrew..."
|
||||
brew install secp256k1
|
||||
elif [[ "${OSNAME}" == "ubuntu" ]]; then
|
||||
# Install build dependencies
|
||||
package_install "build-essential wget autoconf libtool"
|
||||
# echo "Installing secp256k1..."
|
||||
# if [[ "${OSNAME}" == "darwin"* ]]; then
|
||||
# # Attempt installation only if not already found
|
||||
# echo "Attempting secp256k1 installation via Homebrew..."
|
||||
# brew install secp256k1
|
||||
# elif [[ "${OSNAME}" == "ubuntu" ]]; then
|
||||
# # Install build dependencies
|
||||
# package_install "build-essential wget autoconf libtool"
|
||||
|
||||
# Download and extract secp256k1
|
||||
cd "${DIR_BUILD}"
|
||||
wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
|
||||
tar -xvf v0.3.2.tar.gz
|
||||
# # Download and extract secp256k1
|
||||
# cd "${DIR_BUILD}"
|
||||
# wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
|
||||
# tar -xvf v0.3.2.tar.gz
|
||||
|
||||
# Build and install
|
||||
cd secp256k1-0.3.2/
|
||||
./autogen.sh
|
||||
./configure
|
||||
make -j 5
|
||||
if is_github_actions; then
|
||||
run_sudo make install
|
||||
else
|
||||
make install
|
||||
fi
|
||||
# # Build and install
|
||||
# cd secp256k1-0.3.2/
|
||||
# ./autogen.sh
|
||||
# ./configure
|
||||
# make -j 5
|
||||
# if is_github_actions; then
|
||||
# run_sudo make install
|
||||
# else
|
||||
# make install
|
||||
# fi
|
||||
|
||||
# Cleanup
|
||||
cd ..
|
||||
rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
|
||||
else
|
||||
echo "secp256k1 installation not implemented for ${OSNAME}"
|
||||
exit 1
|
||||
fi
|
||||
echo "secp256k1 installation complete!"
|
||||
}
|
||||
# # Cleanup
|
||||
# cd ..
|
||||
# rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
|
||||
# else
|
||||
# echo "secp256k1 installation not implemented for ${OSNAME}"
|
||||
# exit 1
|
||||
# fi
|
||||
# echo "secp256k1 installation complete!"
|
||||
# }
|
||||
|
||||
|
||||
remove_all() {
|
||||
@@ -564,7 +564,6 @@ if [ "$RESET" = true ] || ! command_exists v; then
|
||||
sshknownkeysadd
|
||||
|
||||
# Install secp256k1
|
||||
install_secp256k1
|
||||
|
||||
v-install
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
module herocmds
|
||||
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.web.heroprompt
|
||||
import freeflowuniverse.herolib.web.ui
|
||||
import os
|
||||
import cli { Command, Flag }
|
||||
import time
|
||||
@@ -58,9 +58,9 @@ fn cmd_web_execute(cmd Command) ! {
|
||||
|
||||
console.print_header('Starting Heroprompt...')
|
||||
|
||||
// Prepare arguments for the Heroprompt server
|
||||
mut factory_args := heroprompt.FactoryArgs{
|
||||
title: 'Heroprompt'
|
||||
// Prepare arguments for the UI factory
|
||||
mut factory_args := ui.FactoryArgs{
|
||||
title: 'Hero Admin Panel'
|
||||
host: host
|
||||
port: port
|
||||
}
|
||||
@@ -70,7 +70,7 @@ fn cmd_web_execute(cmd Command) ! {
|
||||
|
||||
// Start the server in a separate thread to allow for browser opening
|
||||
spawn fn [factory_args] () {
|
||||
heroprompt.start(factory_args) or {
|
||||
ui.start(factory_args) or {
|
||||
console.print_stderr('Failed to start Heroprompt server: ${err}')
|
||||
return
|
||||
}
|
||||
|
||||
@@ -56,7 +56,7 @@ fn test_ipv6() {
|
||||
mut addr := new('202:6a34:cd78:b0d7:5521:8de7:218e:6680') or { panic(err) }
|
||||
assert addr.cat == .ipv6
|
||||
assert addr.port == 0
|
||||
assert addr.ping(timeout: 3)! == false
|
||||
// assert addr.ping(timeout: 3)! == false
|
||||
}
|
||||
|
||||
fn test_ipv6b() {
|
||||
|
||||
@@ -37,5 +37,5 @@ pub mut:
|
||||
|
||||
pub struct GitRepoConfig {
|
||||
pub mut:
|
||||
remote_check_period int = 3600 // seconds = 1h
|
||||
remote_check_period int = 3600*24*7 // seconds = 7d
|
||||
}
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
module model
|
||||
|
||||
// an actor is a participant in the new internet, the one who can ask for work
|
||||
// user can have more than one actor operating for them, an actor always operates in a context which is hosted by the hero of the user
|
||||
// stored in the context db at actor:<id> (actor is hset)
|
||||
@[heap]
|
||||
pub struct Actor {
|
||||
pub mut:
|
||||
id u32
|
||||
pubkey string
|
||||
address []Address // address (is to reach the actor back), normally mycelium but doesn't have to be
|
||||
created_at u32 // epoch
|
||||
updated_at u32 // epoch
|
||||
}
|
||||
|
||||
pub fn (self Actor) redis_key() string {
|
||||
return 'actor:${self.id}'
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
module model
|
||||
|
||||
// each job is run in a context, this corresponds to a DB in redis and has specific rights to actors
|
||||
// context is a redis db and also a location on a filesystem which can be used for e.g. logs, temporary files, etc.
|
||||
// actors create contexts for others to work in
|
||||
// stored in the context db at context:<id> (context is hset)
|
||||
@[heap]
|
||||
pub struct Context {
|
||||
pub mut:
|
||||
id u32 // corresponds with the redis db (in our ourdb or other redis)
|
||||
admins []u32 // actors which have admin rights on this context (means can do everything)
|
||||
readers []u32 // actors which can read the context info
|
||||
executors []u32 // actors which can execute jobs in this context
|
||||
created_at u32 // epoch
|
||||
updated_at u32 // epoch
|
||||
}
|
||||
|
||||
pub fn (self Context) redis_key() string {
|
||||
return 'context:${self.id}'
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
module model
|
||||
|
||||
// what gets executed by an actor and needs to be tracked as a whole, can be represented as a DAG
|
||||
// this is the high level representation of a workflow to execute on work, its fully decentralized and distributed
|
||||
// only the actor who created the flow can modify it and holds it in DB
|
||||
// stored in the context db at flow:<id> (flow is hset)
|
||||
@[heap]
|
||||
pub struct Flow {
|
||||
pub mut:
|
||||
id u32 // this job id is given by the actor who called for it
|
||||
caller_id u32 // is the actor which called for this job
|
||||
context_id u32 // each job is executed in a context
|
||||
jobs []u32 // links to all jobs which make up this flow, this can be dynamically modified
|
||||
env_vars map[string]string // they are copied to every job done
|
||||
result map[string]string // the result of the flow
|
||||
created_at u32 // epoch
|
||||
updated_at u32 // epoch
|
||||
status FlowStatus
|
||||
}
|
||||
|
||||
pub fn (self Flow) redis_key() string {
|
||||
return 'flow:${self.id}'
|
||||
}
|
||||
|
||||
// FlowStatus represents the status of a flow
|
||||
pub enum FlowStatus {
|
||||
dispatched
|
||||
started
|
||||
error
|
||||
finished
|
||||
}
|
||||
|
||||
// str returns the string representation of FlowStatus
|
||||
pub fn (self FlowStatus) str() string {
|
||||
return match self {
|
||||
.dispatched { 'dispatched' }
|
||||
.started { 'started' }
|
||||
.error { 'error' }
|
||||
.finished { 'finished' }
|
||||
}
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
module model
|
||||
|
||||
// Messages is what goes over mycelium (which is our messaging system), they can have a job inside
|
||||
// stored in the context db at msg:<callerid>:<id> (msg is hset)
|
||||
// there are 2 queues in the context db: queue: msg_out and msg_in these are generic queues which get all messages from mycelium (in) and the ones who need to be sent (out) are in the outqueue
|
||||
@[heap]
|
||||
pub struct Message {
|
||||
pub mut:
|
||||
id u32 // is unique id for the message, has been given by the caller
|
||||
caller_id u32 // is the actor who sent this message
|
||||
context_id u32 // each message is for a specific context
|
||||
message string
|
||||
message_type ScriptType
|
||||
message_format_type MessageFormatType
|
||||
timeout u32 // in sec, to arrive destination
|
||||
timeout_ack u32 // in sec, to acknowledge receipt
|
||||
timeout_result u32 // in sec, to process result and have it back
|
||||
job []Job
|
||||
logs []Log // e.g. for streaming logs back to originator
|
||||
created_at u32 // epoch
|
||||
updated_at u32 // epoch
|
||||
status MessageStatus
|
||||
}
|
||||
|
||||
// MessageType represents the type of message
|
||||
pub enum MessageType {
|
||||
job
|
||||
chat
|
||||
mail
|
||||
}
|
||||
|
||||
// MessageFormatType represents the format of a message
|
||||
pub enum MessageFormatType {
|
||||
html
|
||||
text
|
||||
md
|
||||
}
|
||||
|
||||
pub fn (self Message) redis_key() string {
|
||||
return 'message:${self.caller_id}:${self.id}'
|
||||
}
|
||||
|
||||
// queue_suffix returns the queue suffix for the message type
|
||||
pub fn (mt MessageType) queue_suffix() string {
|
||||
return match mt {
|
||||
.job { 'job' }
|
||||
.chat { 'chat' }
|
||||
.mail { 'mail' }
|
||||
}
|
||||
}
|
||||
|
||||
// MessageStatus represents the status of a message
|
||||
pub enum MessageStatus {
|
||||
dispatched
|
||||
acknowledged
|
||||
error
|
||||
processed // e.g. can be something which comes back
|
||||
}
|
||||
|
||||
// str returns the string representation of MessageStatus
|
||||
pub fn (ms MessageStatus) str() string {
|
||||
return match ms {
|
||||
.dispatched { 'dispatched' }
|
||||
.acknowledged { 'acknowledged' }
|
||||
.error { 'error' }
|
||||
.processed { 'processed' }
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
module model
|
||||
|
||||
// a runner executes a job, this can be in a VM, in a container or just some processes running somewhere
|
||||
// the messages always come in over a topic
|
||||
// stored in the context db at runner:<id> (runner is hset)
|
||||
@[heap]
|
||||
pub struct Runner {
|
||||
pub mut:
|
||||
id u32
|
||||
pubkey string // from mycelium
|
||||
address string // mycelium address
|
||||
topic string // needs to be set by the runner but often runner<runnerid> e.g. runner20
|
||||
local bool // if local then goes on redis using the id
|
||||
created_at u32 // epoch
|
||||
updated_at u32 // epoch
|
||||
}
|
||||
|
||||
pub enum RunnerType {
|
||||
v
|
||||
python
|
||||
osis
|
||||
rust
|
||||
}
|
||||
|
||||
pub fn (self Runner) redis_key() string {
|
||||
return 'runner:${self.id}'
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
module model
|
||||
|
||||
// Job represents a job, a job is only usable in the context of a runner (which is part of a hero)
|
||||
// stored in the context db at job:<callerid>:<id> (job is hset)
|
||||
@[heap]
|
||||
pub struct RunnerJob {
|
||||
pub mut:
|
||||
id u32 // this job id is given by the actor who called for it
|
||||
caller_id u32 // is the actor which called for this job
|
||||
context_id u32 // each job is executed in a context
|
||||
script string
|
||||
script_type ScriptType
|
||||
timeout u32 // in sec
|
||||
retries u8
|
||||
env_vars map[string]string
|
||||
result map[string]string
|
||||
prerequisites []string
|
||||
dependends []u32
|
||||
created_at u32 // epoch
|
||||
updated_at u32 // epoch
|
||||
status JobStatus
|
||||
}
|
||||
|
||||
// ScriptType represents the type of script
|
||||
pub enum ScriptType {
|
||||
osis
|
||||
sal
|
||||
v
|
||||
python
|
||||
}
|
||||
|
||||
pub fn (self RunnerJob) redis_key() string {
|
||||
return 'job:${self.caller_id}:${self.id}'
|
||||
}
|
||||
|
||||
// queue_suffix returns the queue suffix for the script type
|
||||
pub fn (st ScriptType) queue_suffix() string {
|
||||
return match st {
|
||||
.osis { 'osis' }
|
||||
.sal { 'sal' }
|
||||
.v { 'v' }
|
||||
.python { 'python' }
|
||||
}
|
||||
}
|
||||
|
||||
// JobStatus represents the status of a job
|
||||
pub enum JobStatus {
|
||||
dispatched
|
||||
waiting_for_prerequisites
|
||||
started
|
||||
error
|
||||
finished
|
||||
}
|
||||
|
||||
// str returns the string representation of JobStatus
|
||||
pub fn (js JobStatus) str() string {
|
||||
return match js {
|
||||
.dispatched { 'dispatched' }
|
||||
.waiting_for_prerequisites { 'waiting_for_prerequisites' }
|
||||
.started { 'started' }
|
||||
.error { 'error' }
|
||||
.finished { 'finished' }
|
||||
}
|
||||
}
|
||||
@@ -1,314 +0,0 @@
|
||||
# Models Specification
|
||||
*Freeflow Universe – mycojobs*
|
||||
|
||||
This document gathers **all data‑models** that exist in the `lib/mycojobs/model/` package, together with a concise purpose description, field semantics, Redis storage layout and the role each model plays in the overall *decentralised workflow* architecture.
|
||||
|
||||
|
||||
## Table of Contents
|
||||
1. [Actor](#actor)
|
||||
2. [Context](#context)
|
||||
3. [Flow](#flow)
|
||||
4. [Message](#message)
|
||||
5. [Runner](#runner)
|
||||
6. [RunnerJob](#runnerjob)
|
||||
7. [Enums & Shared Types](#enums-shared-types)
|
||||
8. [Key‑generation helpers](#key-generation-helpers)
|
||||
|
||||
---
|
||||
|
||||
## <a name="actor"></a>1️⃣ `Actor` – Identity & entry‑point
|
||||
|
||||
| Field | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `id` | `u32` | Sequential identifier **unique per tenant**. Used as part of the Redis key `actor:<id>`. |
|
||||
| `pubkey` | `string` | Public key (Mycelium‑compatible) that authenticates the actor when it sends/receives messages. |
|
||||
| `address` | `[]Address` | One or more reachable addresses (normally Mycelium topics) that other participants can use to contact the actor. |
|
||||
| `created_at` | `u32` | Unix‑epoch time when the record was created. |
|
||||
| `updated_at` | `u32` | Unix‑epoch time of the last mutation. |
|
||||
|
||||
### Purpose
|
||||
* An **Actor** is the *human‑or‑service* that **requests work**, receives results and can be an administrator of a **Context**.
|
||||
* It is the *security principal* – every operation in a context is authorised against the actor’s ID and its public key signature.
|
||||
|
||||
### Redis representation
|
||||
|
||||
| Key | Example | Storage type | Fields |
|
||||
|-----|---------|--------------|--------|
|
||||
| `actor:${id}` | `actor:12` | **hash** (`HSET`) | `id`, `pubkey`, `address` (list), `created_at`, `updated_at` |
|
||||
|
||||
---
|
||||
|
||||
## <a name="context"></a>2️⃣ `Context` – Tenant & permission container
|
||||
|
||||
| Field | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `id` | `u32` | Identifier that also selects the underlying **Redis DB** for this tenant. |
|
||||
| `admins` | `[]u32` | Actor IDs that have **full control** (create/delete any object, manage permissions). |
|
||||
| `readers` | `[]u32` | Actor IDs that may **read** any object in the context but cannot modify. |
|
||||
| `executors` | `[]u32` | Actor IDs allowed to **run** `RunnerJob`s and update their status. |
|
||||
| `created_at` | `u32` | Unix‑epoch of creation. |
|
||||
| `updated_at` | `u32` | Unix‑epoch of last modification. |
|
||||
|
||||
### Purpose
|
||||
* A **Context** isolates a *tenant* – each tenant gets its own Redis database and a dedicated filesystem area (for logs, temporary files, …).
|
||||
* It stores **permission lists** that the system consults before any operation (e.g., creating a `Flow`, enqueuing a `RunnerJob`).
|
||||
|
||||
### Redis representation
|
||||
|
||||
| Key | Example | Storage type | Fields |
|
||||
|-----|---------|--------------|--------|
|
||||
| `context:${id}` | `context:7` | **hash** | `id`, `admins`, `readers`, `executors`, `created_at`, `updated_at` |
|
||||
|
||||
---
|
||||
|
||||
## <a name="flow"></a>3️⃣ `Flow` – High‑level workflow (DAG)
|
||||
|
||||
| Field | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `id` | `u32` | Flow identifier – *unique inside the creator’s actor space*. |
|
||||
| `caller_id` | `u32` | Actor that **created** the flow (owner). |
|
||||
| `context_id` | `u32` | Context in which the flow lives. |
|
||||
| `jobs` | `[]u32` | List of **RunnerJob** IDs that belong to this flow (the DAG edges are stored in each job’s `dependends`). |
|
||||
| `env_vars` | `map[string]string` | Global environment variables injected into **every** job of the flow. |
|
||||
| `result` | `map[string]string` | Aggregated output produced by the flow (filled by the orchestrator when the flow finishes). |
|
||||
| `created_at` | `u32` | Creation timestamp. |
|
||||
| `updated_at` | `u32` | Last update timestamp. |
|
||||
| `status` | `FlowStatus` | Current lifecycle stage (`dispatched`, `started`, `error`, `finished`). |
|
||||
|
||||
### Purpose
|
||||
* A **Flow** is the *public‑facing* representation of a **workflow**.
|
||||
* It groups many `RunnerJob`s, supplies common env‑vars, tracks overall status and collects the final result.
|
||||
* Only the *creator* (the `caller_id`) may mutate the flow definition.
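The field tables note that `Flow.env_vars` are injected into every job and merged with each job's own `env_vars`. A minimal V sketch of that merge (the override direction is an assumption, not stated in the spec):

```v
module main

// Sketch: combine flow-wide env_vars with job-specific ones.
// Assumption (not stated in the spec): job-level values override flow-level values.
fn merged_env(flow_env map[string]string, job_env map[string]string) map[string]string {
	mut out := map[string]string{}
	for k, v in flow_env {
		out[k] = v
	}
	for k, v in job_env {
		out[k] = v
	}
	return out
}

fn main() {
	flow_env := {
		'LOG_LEVEL': 'info'
		'REGION':    'eu'
	}
	job_env := {
		'LOG_LEVEL': 'debug'
	}
	println(merged_env(flow_env, job_env)) // {'LOG_LEVEL': 'debug', 'REGION': 'eu'}
}
```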
|
||||
|
||||
### Redis representation
|
||||
|
||||
| Key | Example | Storage type | Fields |
|
||||
|-----|---------|--------------|--------|
|
||||
| `flow:${id}` | `flow:33` | **hash** | `id`, `caller_id`, `context_id`, `jobs`, `env_vars`, `result`, `created_at`, `updated_at`, `status` |
|
||||
|
||||
### `FlowStatus` enum
|
||||
|
||||
| Value | Meaning |
|
||||
|-------|---------|
|
||||
| `dispatched` | Flow has been stored but not yet started. |
|
||||
| `started` | At least one job is running. |
|
||||
| `error` | One or more jobs failed; flow aborted. |
|
||||
| `finished` | All jobs succeeded, `result` is final. |
|
||||
|
||||
---
|
||||
|
||||
## <a name="message"></a>4️⃣ `Message` – Transport unit (Mycelium)
|
||||
|
||||
| Field | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `id` | `u32` | Unique id for the message, given by the caller. |
| `caller_id` | `u32` | Actor that sent this message. |
| `context_id` | `u32` | Context this message belongs to. |
| `message` | `string` | The message body / payload. |
| `message_type` | `ScriptType` | *Kind* of the message – currently re‑used for job payloads (`osis`, `sal`, `v`, `python`). |
|
||||
| `message_format_type` | `MessageFormatType` | Formatting of `message` (`html`, `text`, `md`). |
|
||||
| `timeout` | `u32` | Seconds before the message is considered *lost* if not delivered. |
|
||||
| `timeout_ack` | `u32` | Seconds allowed for the receiver to acknowledge. |
|
||||
| `timeout_result` | `u32` | Seconds allowed for the receiver to send back a result. |
|
||||
| `job` | `[]Job` | Embedded **RunnerJob** objects (normally a single job). |
|
||||
| `logs` | `[]Log` | Optional streaming logs attached to the message. |
|
||||
| `created_at` | `u32` | Timestamp of creation. |
|
||||
| `updated_at` | `u32` | Timestamp of latest update. |
|
||||
| `status` | `MessageStatus` | Current lifecycle (`dispatched`, `acknowledged`, `error`, `processed`). |
|
||||
|
||||
### Purpose
|
||||
* `Message` is the **payload carrier** that travels over **Mycelium** (the pub/sub system).
|
||||
* It can be a **job request**, a **chat line**, an **email**, or any generic data that needs to be routed between actors, runners, or services.
|
||||
* Every message is persisted as a Redis hash; the system also maintains two *generic* queues:
|
||||
|
||||
* `msg_out` – outbound messages waiting to be handed to Mycelium.
|
||||
* `msg_in` – inbound messages that have already arrived and are awaiting local processing.
|
||||
|
||||
### Redis representation
|
||||
|
||||
| Key | Example | Storage type | Fields |
|
||||
|-----|---------|--------------|--------|
|
||||
| `message:${caller_id}:${id}` | `message:12:101` | **hash** | All fields above (`id`, `caller_id`, `context_id`, …, `status`). |
|
||||
|
||||
### `MessageType` enum (legacy – not used in current code but documented)
|
||||
|
||||
| Value | Meaning |
|
||||
|-------|---------|
|
||||
| `job` | Payload carries a `RunnerJob`. |
|
||||
| `chat` | Human‑to‑human communication. |
|
||||
| `mail` | Email‑like message. |
|
||||
|
||||
### `MessageFormatType` enum
|
||||
|
||||
| Value | Meaning |
|
||||
|-------|---------|
|
||||
| `html` | HTML formatted body. |
|
||||
| `text` | Plain‑text. |
|
||||
| `md` | Markdown. |
|
||||
|
||||
### `MessageStatus` enum
|
||||
|
||||
| Value | Meaning |
|
||||
|-------|---------|
|
||||
| `dispatched` | Stored, not yet processed. |
|
||||
| `acknowledged` | Receiver has confirmed receipt. |
|
||||
| `error` | Delivery or processing failed. |
|
||||
| `processed` | Message handled (e.g., job result returned). |
|
||||
|
||||
---
|
||||
|
||||
## <a name="runner"></a>5️⃣ `Runner` – Worker that executes jobs
|
||||
|
||||
| Field | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `id` | `u32` | Unique runner identifier. |
|
||||
| `pubkey` | `string` | Public key of the runner (used by Mycelium for auth). |
|
||||
| `address` | `string` | Mycelium address (e.g., `mycelium://…`). |
|
||||
| `topic` | `string` | Pub/Sub topic the runner subscribes to; defaults to `runner${id}`. |
|
||||
| `local` | `bool` | If `true`, the runner also consumes jobs directly from **Redis queues** (e.g., `queue:v`). |
|
||||
| `created_at` | `u32` | Creation timestamp. |
|
||||
| `updated_at` | `u32` | Last modification timestamp. |
|
||||
|
||||
### Purpose
|
||||
* A **Runner** is the *execution engine* – it could be a VM, a container, or a process that knows how to run a specific script type (`v`, `python`, `osis`, `rust`).
|
||||
* It **subscribes** to a Mycelium topic to receive job‑related messages, and, when `local==true`, it also **polls** a Redis list named after the script‑type (`queue:<suffix>`).
|
||||
|
||||
### Redis representation
|
||||
|
||||
| Key | Example | Storage type |
|
||||
|-----|---------|--------------|
|
||||
| `runner:${id}` | `runner:20` | **hash** *(all fields above)* |
|
||||
|
||||
### `RunnerType` enum
|
||||
|
||||
| Value | Intended runtime |
|
||||
|-------|------------------|
|
||||
| `v` | V language VM |
|
||||
| `python` | CPython / PyPy |
|
||||
| `osis` | OSIS‑specific runtime |
|
||||
| `rust` | Native Rust binary |
|
||||
|
||||
---
|
||||
|
||||
## <a name="runnerjob"></a>6️⃣ `RunnerJob` – Executable unit
|
||||
|
||||
| Field | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `id` | `u32` | Job identifier **provided by the caller**. |
|
||||
| `caller_id` | `u32` | Actor that created the job. |
|
||||
| `context_id` | `u32` | Context in which the job will run. |
|
||||
| `script` | `string` | Source code / command to be executed. |
|
||||
| `script_type` | `ScriptType` | Language or runtime of the script (`osis`, `sal`, `v`, `python`). |
|
||||
| `timeout` | `u32` | Maximum execution time (seconds). |
|
||||
| `retries` | `u8` | Number of automatic retries on failure. |
|
||||
| `env_vars` | `map[string]string` | Job‑specific environment variables (merged with `Flow.env_vars`). |
|
||||
| `result` | `map[string]string` | Key‑value map that the job writes back upon completion. |
|
||||
| `prerequisites` | `[]string` | Human‑readable IDs of **external** prerequisites (e.g., files, other services). |
|
||||
| `dependends` | `[]u32` | IDs of **other RunnerJob** objects that must finish before this job can start. |
|
||||
| `created_at` | `u32` | Creation timestamp. |
|
||||
| `updated_at` | `u32` | Last update timestamp. |
|
||||
| `status` | `JobStatus` | Lifecycle status (`dispatched`, `waiting_for_prerequisites`, `started`, `error`, `finished`). |
|
||||
|
||||
### Purpose
|
||||
* A **RunnerJob** is the *atomic piece of work* that a `Runner` executes.
|
||||
* It lives inside a **Context**, is queued according to its `script_type`, and moves through a well‑defined **state machine**.
|
||||
* The `dependends` field enables the *DAG* behaviour that the `Flow` model represents at a higher level.
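To make the `dependends`-driven state machine concrete, here is a small V sketch (illustrative only, not taken from the package) that decides whether a job may leave `waiting_for_prerequisites`:

```v
module main

enum JobStatus {
	dispatched
	waiting_for_prerequisites
	started
	error
	finished
}

struct RunnerJob {
	id         u32
	dependends []u32
	status     JobStatus
}

// ready_to_start reports whether every job listed in `dependends` is finished.
// In the real system the statuses would be read from the job hashes in Redis.
fn ready_to_start(job RunnerJob, statuses map[u32]JobStatus) bool {
	for dep_id in job.dependends {
		if dep_id !in statuses {
			return false
		}
		if statuses[dep_id] != .finished {
			return false
		}
	}
	return true
}

fn main() {
	mut statuses := map[u32]JobStatus{}
	statuses[1] = .finished
	statuses[2] = .started
	job := RunnerJob{
		id:         3
		dependends: [u32(1), 2]
		status:     .waiting_for_prerequisites
	}
	println(ready_to_start(job, statuses)) // false: job 2 has not finished yet
}
```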
|
||||
|
||||
### Redis representation
|
||||
|
||||
| Key | Example | Storage type |
|
||||
|-----|---------|--------------|
|
||||
| `job:${caller_id}:${id}` | `job:12:2001` | **hash** *(all fields above)* |
|
||||
|
||||
### `ScriptType` enum
|
||||
|
||||
| Value | Runtime |
|
||||
|-------|---------|
|
||||
| `osis` | OSIS interpreter |
|
||||
| `sal` | SAL DSL (custom) |
|
||||
| `v` | V language |
|
||||
| `python`| CPython / PyPy |
|
||||
|
||||
*The enum provides a **`queue_suffix()`** helper that maps a script type to the name of the Redis list used for local job dispatch (`queue:python`, `queue:v`, …).*
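As a short V sketch of that dispatch naming (mirroring `queue_suffix()` above; the `queue:` prefix is the one used throughout this document):

```v
module main

enum ScriptType {
	osis
	sal
	v
	python
}

// queue_suffix mirrors the helper described above.
fn (st ScriptType) queue_suffix() string {
	return match st {
		.osis { 'osis' }
		.sal { 'sal' }
		.v { 'v' }
		.python { 'python' }
	}
}

// queue_name builds the Redis list a local runner polls for this script type.
fn queue_name(st ScriptType) string {
	return 'queue:${st.queue_suffix()}'
}

fn main() {
	println(queue_name(.python)) // queue:python
	println(queue_name(.v)) // queue:v
}
```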
|
||||
|
||||
### `JobStatus` enum
|
||||
|
||||
| Value | Meaning |
|
||||
|-------|---------|
|
||||
| `dispatched` | Stored, waiting to be examined for prerequisites. |
|
||||
| `waiting_for_prerequisites` | Has `dependends` that are not yet finished. |
|
||||
| `started` | Currently executing on a runner. |
|
||||
| `error` | Execution failed (or exceeded retries). |
|
||||
| `finished` | Successfully completed, `result` populated. |
|
||||
|
||||
---
|
||||
|
||||
## <a name="enums-shared-types"></a>7️⃣ Other Enums & Shared Types
|
||||
|
||||
| Enum | Location | Values | Note |
|
||||
|------|----------|--------|------|
|
||||
| `MessageType` | `message.v` | `job`, `chat`, `mail` | Determines how a `Message` is interpreted. |
|
||||
| `MessageFormatType` | `message.v` | `html`, `text`, `md` | UI‑layer rendering hint. |
|
||||
| `MessageStatus` | `message.v` | `dispatched`, `acknowledged`, `error`, `processed` | Life‑cycle of a `Message`. |
|
||||
| `FlowStatus` | `flow.v` | `dispatched`, `started`, `error`, `finished` | High‑level flow progress. |
|
||||
| `RunnerType` | `runner.v` | `v`, `python`, `osis`, `rust` | Not currently stored; used by the orchestration layer to pick a runner implementation. |
|
||||
| `ScriptType` | `runnerjob.v` | `osis`, `sal`, `v`, `python` | Determines queue suffix & runtime. |
|
||||
| `JobStatus` | `runnerjob.v` | `dispatched`, `waiting_for_prerequisites`, `started`, `error`, `finished` | Per‑job state machine. |
|
||||
|
||||
---
|
||||
|
||||
## <a name="key-generation-helpers"></a>8️⃣ Key‑generation helpers (methods)
|
||||
|
||||
| Model | Method | Returns | Example |
|
||||
|-------|--------|---------|---------|
|
||||
| `Actor` | `redis_key()` | `"actor:${self.id}"` | `actor:12` |
|
||||
| `Context` | `redis_key()` | `"context:${self.id}"` | `context:7` |
|
||||
| `Flow` | `redis_key()` | `"flow:${self.id}"` | `flow:33` |
|
||||
| `Message` | `redis_key()` | `"message:${self.caller_id}:${self.id}"` | `message:12:101` |
|
||||
| `Runner` | `redis_key()` | `"runner:${self.id}"` | `runner:20` |
|
||||
| `RunnerJob` | `redis_key()` | `"job:${self.caller_id}:${self.id}"` | `job:12:2001` |
|
||||
| `MessageType` | `queue_suffix()` | `"job"` / `"chat"` / `"mail"` | `MessageType.job.queue_suffix() → "job"` |
|
||||
| `ScriptType` | `queue_suffix()` | `"osis"` / `"sal"` / `"v"` / `"python"` | `ScriptType.python.queue_suffix() → "python"` |
|
||||
|
||||
These helpers guarantee **canonical key naming** throughout the code base and simplify Redis interactions.
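A tiny V sketch of the same conventions as free functions, handy when a client does not hold the full structs (illustrative only):

```v
module main

// Free-function variants of the redis_key() helpers listed above.
fn actor_key(id u32) string {
	return 'actor:${id}'
}

fn job_key(caller_id u32, id u32) string {
	return 'job:${caller_id}:${id}'
}

fn message_key(caller_id u32, id u32) string {
	return 'message:${caller_id}:${id}'
}

fn main() {
	assert actor_key(12) == 'actor:12'
	assert job_key(12, 2001) == 'job:12:2001'
	assert message_key(12, 101) == 'message:12:101'
	println('key formats match the examples in the table above')
}
```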
|
||||
|
||||
---
|
||||
|
||||
## 📌 Summary Diagram (quick reference)
|
||||
|
||||
```mermaid
|
||||
%%{init: {"theme":"dark"}}%%
|
||||
graph TD
|
||||
%% Actors and what they can create
|
||||
A[Actor] -->|creates| Ctx[Context]
|
||||
A -->|creates| Fl[Flow]
|
||||
A -->|creates| Msg[Message]
|
||||
A -->|creates| Rnr[Runner]
|
||||
A -->|creates| Job[RunnerJob]
|
||||
|
||||
%% All objects live inside one Redis DB that belongs to a Context
|
||||
subgraph "Redis DB (per Context)"
|
||||
Ctx
|
||||
A
|
||||
Fl
|
||||
Msg
|
||||
Rnr
|
||||
Job
|
||||
end
|
||||
|
||||
%% Messaging queues (global, outside the Context DB)
|
||||
Msg -->|pushes key onto| OutQ[msg_out]
|
||||
OutQ -->|transport via Mycelium| InQ[msg_in]
|
||||
InQ -->|pulled by| Rnr
|
||||
|
||||
%% Local runner queues (only when runner.local == true)
|
||||
Rnr -->|BRPOP from| QueueV["queue:v"]
|
||||
Rnr -->|BRPOP from| QueuePy["queue:python"]
|
||||
Rnr -->|BRPOP from| QueueOSIS["queue:osis"]
|
||||
|
||||
```
|
||||
|
||||
## Context-based execution
|
||||
|
||||
* Inside a Context, an **Actor** can create a **Flow** that references many **RunnerJob** IDs (the DAG).
|
||||
* To *initiate* execution, the Actor packages a **RunnerJob** (or a full Flow) inside a **Message**, pushes it onto `msg_out`, and the system routes it via **Mycelium** to the target Context.
|
||||
* The remote **Runner** receives the Message, materialises the **RunnerJob**, queues it on a script‑type list, executes it, writes back `result` and status, and optionally sends a *result Message* back to the originator.
|
||||
|
||||
All state is persisted as **Redis hashes**, guaranteeing durability and enabling *idempotent* retries. The uniform naming conventions (`actor:<id>`, `job:<caller_id>:<id>`, …) make it trivial to locate any object given its identifiers.
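The same convention works in reverse: a consumer that pops a `job:<caller_id>:<id>` key off a queue can recover the identifiers from the key alone. A minimal V sketch (hypothetical helper, not part of the model package):

```v
module main

// parse_job_key extracts (caller_id, id) from a canonical 'job:<caller_id>:<id>' key.
fn parse_job_key(key string) !(u32, u32) {
	parts := key.split(':')
	if parts.len != 3 || parts[0] != 'job' {
		return error('not a job key: ${key}')
	}
	return u32(parts[1].int()), u32(parts[2].int())
}

fn main() {
	caller_id, id := parse_job_key('job:12:2001') or { panic(err) }
	println('caller_id=${caller_id} id=${id}') // caller_id=12 id=2001
}
```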
|
||||
|
||||
@@ -1,263 +0,0 @@
|
||||
|
||||
## Objects Used
|
||||
|
||||
| Component | What it **stores** | Where it lives (Redis key) | Main responsibilities |
|
||||
|------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Actor** | Public key, reachable addresses, timestamps | `actor:<id>` (hash) | An identity that can request work, receive results and act as an administrator of a *Context*. |
|
||||
| **Context**| Permission lists (`admins`, `readers`, `executors`), timestamps | `context:<id>` (hash) | An isolated “tenant” – a separate Redis DB and filesystem area. All objects (flows, messages, jobs, runners) belonging to a given workflow are stored under this context. The permission lists control who may read, execute or administer the context. |
|
||||
| **Flow** | DAG of job IDs, env‑vars, result map, status, timestamps | `flow:<id>` (hash) | A high‑level workflow created by a single **Actor**. It groups many **RunnerJob** objects, records their execution order, supplies common environment variables and aggregates the final result. |
|
||||
| **Message**| Payload, type (`job\|chat\|mail`), format (`html\|text\|md`), time‑outs, embedded **Job** objects, log stream, status, timestamps | `message:<caller_id>:<id>` (hash) | The transport unit that travels over **Mycelium** (the pub/sub/message bus). A message can contain a **RunnerJob** (or a list of jobs) and is queued in two generic Redis lists: `msg_out` (to be sent) and `msg_in` (already received). |
|
||||
| **Runner** | Public key, Mycelium address, topic name, type (`v\|python\|osis\|rust`), local flag, timestamps | `runner:<id>` (hash) | The *worker* that actually executes **RunnerJob** scripts. It subscribes to a Mycelium topic (normally `runner<id>`). If `local == true` the runner also consumes jobs directly from a Redis queue that is named after the script‑type suffix (`v`, `python`, …). |
|
||||
| **RunnerJob**| Script source, type (`osis\|sal\|v\|python`), env‑vars, prerequisites, dependencies, status, timestamps, result map | `job:<caller_id>:<id>` (hash) | A single executable unit. It lives inside a **Context**, belongs to a **Runner**, and is queued according to its `script_type` (e.g. `queue:python`). Its status moves through the lifecycle `dispatched → waiting_for_prerequisites → started → finished|error`. |
|
||||
|
||||
> **Key idea:** All objects are persisted as *hashes* in a **Redis** database that is dedicated to a *Context*. The system is completely **decentralised** – each actor owns its own context and can spin up as many runners as needed. Communication between actors, runners and the rest of the system happens over **Mycelium**, a message‑bus that uses Redis lists as queues.
|
||||
|
||||
---
|
||||
|
||||
## Interaction diagram (who talks to who)
|
||||
|
||||
### 2.1 Sequence diagram – “Submit a flow and run it”
|
||||
|
||||
```mermaid
|
||||
%%{init: {"theme":"dark"}}%%
|
||||
sequenceDiagram
|
||||
participant A as Actor
|
||||
participant L as Local‑Context (Redis)
|
||||
participant M as Mycelium (msg_out / msg_in)
|
||||
participant R as Remote‑Context (Redis)
|
||||
participant W as Runner (worker)
|
||||
|
||||
%% 1. Actor creates everything locally
|
||||
A->>L: create Flow + RunnerJob (J)
|
||||
A->>L: LPUSH msg_out Message{type=job, payload=J, target=Remote}
|
||||
|
||||
%% 2. Mycelium transports the message
|
||||
M->>R: LPUSH msg_in (Message key)
|
||||
|
||||
%% 3. Remote context materialises the job
|
||||
R->>R: HSET Message hash
|
||||
R->>R: HSET RunnerJob (J') // copy of payload
|
||||
R->>R: LPUSH queue:v (job key)
|
||||
|
||||
%% 4. Runner consumes and executes
|
||||
W->>R: BRPOP queue:v (job key)
|
||||
W->>R: HSET job status = started
|
||||
W->>W: execute script
|
||||
W->>R: HSET job result + status = finished
|
||||
|
||||
%% 5. Result is sent back
|
||||
W->>M: LPUSH msg_out Message{type=result, payload=result, target=Local}
|
||||
M->>L: LPUSH msg_in (result Message key)
|
||||
|
||||
%% 6. Actor receives the result
|
||||
A->>L: RPOP msg_in → read result
|
||||
```
|
||||
|
||||
### 2.2 Component diagram – “Static view of objects & links”
|
||||
|
||||
```mermaid
|
||||
%%{init: {"theme":"dark"}}%%
|
||||
graph LR
|
||||
subgraph Redis["Redis (per Context)"]
|
||||
A[Actor] -->|stores| Ctx[Context]
|
||||
Ctx -->|stores| Fl[Flow]
|
||||
Ctx -->|stores| Msg[Message]
|
||||
Ctx -->|stores| Rnr[Runner]
|
||||
Ctx -->|stores| Job[RunnerJob]
|
||||
end
|
||||
|
||||
subgraph Mycelium["Mycelium (Pub/Sub)"]
|
||||
MsgOut["queue:msg_out"] -->|outgoing| Mcel[Mycelium Bus]
|
||||
Mcel -->|incoming| MsgIn["queue:msg_in"]
|
||||
RnrTopic["topic:runnerX"] -->|subscribed by| Rnr
|
||||
queueV["queue:v"] -->|local jobs| Rnr
|
||||
queuePython["queue:python"] -->|local jobs| Rnr
|
||||
end
|
||||
|
||||
A -->|creates / reads| Fl
|
||||
A -->|creates / reads| Msg
|
||||
A -->|creates / reads| Rnr
|
||||
A -->|creates / reads| Job
|
||||
Fl -->|references| Job
|
||||
Msg -->|may embed| Job
|
||||
Rnr -->|executes| Job
|
||||
Job -->|updates| Fl
|
||||
Msg -->|carries result back to| A
|
||||
```
|
||||
|
||||
### 2.3 Flow‑status life‑cycle (state diagram)
|
||||
|
||||
```mermaid
|
||||
%%{init: {"theme":"dark"}}%%
|
||||
stateDiagram-v2
|
||||
[*] --> dispatched
|
||||
dispatched --> waiting_for_prerequisites : has prereqs
|
||||
waiting_for_prerequisites --> started : prereqs met
|
||||
dispatched --> started : no prereqs
|
||||
started --> finished : success
|
||||
started --> error : failure
|
||||
waiting_for_prerequisites --> error : timeout / impossible
|
||||
error --> [*]
|
||||
finished --> [*]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3️⃣ Redis objects – concrete key & data layout
|
||||
|
||||
All objects are stored as **hashes** (`HSET`). Below is a concise catalog that can be copied into a design doc.
|
||||
|
||||
| Key pattern | Example | Fields (type) | Comments |
|
||||
|-------------|---------|---------------|----------|
|
||||
| `actor:${id}` | `actor:12` | `id` u32, `pubkey` str, `address` list\<Address\>, `created_at` u32, `updated_at` u32 | One hash per actor. |
|
||||
| `context:${id}` | `context:7` | `id` u32, `admins` list\<u32\>, `readers` list\<u32\>, `executors` list\<u32\>, `created_at` u32, `updated_at` u32 | Holds permission lists for a tenant. |
|
||||
| `flow:${id}` | `flow:33` | `id` u32, `caller_id` u32, `context_id` u32, `jobs` list\<u32\>, `env_vars` map\<str,str\>, `result` map\<str,str\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|started\|error\|finished`) | One hash per flow. |
|
||||
| `message:${caller_id}:${id}` | `message:12:101` | `id` u32, `caller_id` u32, `context_id` u32, `message` str, `message_type` str (`job\|chat\|mail`), `message_format_type` str (`html\|text\|md`), `timeout` u32, `timeout_ack` u32, `timeout_result` u32, `job` list\<RunnerJob\> (serialized), `logs` list\<Log\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|acknowledged\|error\|processed`) | One hash per message. |
|
||||
| `runner:${id}` | `runner:20` | `id` u32, `pubkey` str, `address` str, `topic` str, `local` bool, `created_at` u32, `updated_at` u32 | One hash per runner. |
|
||||
| `job:${caller_id}:${id}` | `job:12:2001` | `id` u32, `caller_id` u32, `context_id` u32, `script` str, `script_type` str (`osis\|sal\|v\|python`), `timeout` u32, `retries` u8, `env_vars` map\<str,str\>, `result` map\<str,str\>, `prerequisites` list\<str\>, `dependends` list\<u32\>, `created_at` u32, `updated_at` u32, `status` str (`dispatched\|waiting_for_prerequisites\|started\|error\|finished`) | One hash per job. |
|
||||
|
||||
#### Queue objects (lists)
|
||||
|
||||
| Queue name | Purpose |
|
||||
|------------|---------|
|
||||
| `msg_out` | **Outbound** generic queue – every `Message` that an actor wants to send is pushed here. |
|
||||
| `msg_in` | **Inbound** generic queue – every message received from Mycelium is placed here for the local consumer to process. |
|
||||
| `queue:${suffix}` (e.g. `queue:v`, `queue:python`) | Local job queues used by a **Runner** when `local == true`. The suffix comes from `ScriptType.queue_suffix()`. |
|
||||
|
||||
---
|
||||
|
||||
## 4️⃣ System specification (as a concise “specs” section)
|
||||
|
||||
### 4.1 Naming conventions
|
||||
* All Redis **hashes** are prefixed with the object name (`actor:`, `context:`, …).
|
||||
* All **queues** are simple Redis lists (`LPUSH` / `RPOP`).
|
||||
* **Message** keys embed both the *caller* and a locally unique *message id* – this guarantees global uniqueness across contexts.
|
||||
|
||||
### 4.2 Permissions & security
|
||||
* Only IDs present in `Context.admins` may **create** or **delete** any object inside that context.
|
||||
* `Context.readers` can **GET** any hash but not modify it.
|
||||
* `Context.executors` are allowed to **update** `RunnerJob.status`, `result` and to **pop** from local job queues.
|
||||
* Every `Actor` must present a `pubkey` that can be verified by the receiving side (Mycelium uses asymmetric crypto).
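A hedged V sketch of how these rules could be checked against the `Context` permission lists (illustrative only; the actual authorisation layer is not part of this document):

```v
module main

struct Context {
	id        u32
	admins    []u32
	readers   []u32
	executors []u32
}

enum Operation {
	read
	execute
	administer
}

// allowed applies the rules above: admins may do everything,
// readers may only read, executors may execute jobs.
fn allowed(ctx Context, actor_id u32, op Operation) bool {
	if actor_id in ctx.admins {
		return true
	}
	return match op {
		.read { actor_id in ctx.readers }
		.execute { actor_id in ctx.executors }
		.administer { false }
	}
}

fn main() {
	ctx := Context{
		id:        7
		admins:    [u32(1)]
		readers:   [u32(2)]
		executors: [u32(3)]
	}
	println(allowed(ctx, 3, .execute)) // true
	println(allowed(ctx, 2, .execute)) // false
}
```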
|
||||
|
||||
|
||||
|
||||
### 4.3 Message flow (publish / consume)
|
||||
|
||||
|
||||
|
||||
The “Message flow (publish / consume)” below reflects the real runtime components:
|
||||
|
||||
* **Supervisor daemon** – runs on the node that owns the **Flow** (the *actor’s* side).
|
||||
It is the only process that ever **RPOP**s from the global `msg_out` queue, adds the proper routing information and hands the message to **Mycelium**.
|
||||
|
||||
* **Mycelium** – the pure pub/sub/message‑bus. It never touches Redis directly; it only receives a *payload key* from the coordinator and delivers that key to the remote tenant’s `msg_in` list.
|
||||
|
||||
* **Remote‑side runner / service** – consumes from its own `msg_in`, materialises the job and executes it.
|
||||
|
||||
The table below uses the exact component names and adds a short note about the permission check that the supervisor daemon performs before it releases a message.
|
||||
|
||||
| # | Action (what the system does) | Component that performs it | Redis interaction (exact commands) |
|
||||
|---|-------------------------------|----------------------------|------------------------------------|
|
||||
| **1️⃣ Publish** | Actor creates a `Message` hash and **LPUSH**es its key onto the *outbound* queue. | **Actor** (client code) | `HSET message:12:101 …` <br/> `LPUSH msg_out message:12:101` |
|
||||
| **2️⃣ Coordinate & route** | The **Supervisor daemon** (running at source) **RPOP**s the key, checks the actor’s permissions, adds the *target‑context* and *topic* fields, then forwards the key to Mycelium. | **Supervisor daemon** (per‑actor) | `RPOP msg_out` → (in‑process) → `LPUSH msg_out_coordinator <key>` (internal buffer) |
|
||||
| **3️⃣ Transport** | Mycelium receives the key, looks at `Message.message_type` (or the explicit `topic`) and pushes the key onto the *inbound* queue of the **remote** tenant. | **Mycelium bus** (network layer) | `LPUSH msg_in:<remote‑ctx> <key>` |
|
||||
| **4️⃣ Consume** | The **Remote side** (runner or service) **RPOP**s from its `msg_in`, loads the full hash, verifies the actor’s signature and decides what to do based on `message_type`. | **Remote consumer** (runner / service) | `RPOP msg_in:<remote‑ctx>` → `HGETALL message:<key>` |
|
||||
| **5️⃣ Job materialisation** | If `message_type == "job"` the consumer creates a **RunnerJob** entry inside the **remote** context, adds the job **key** to the proper *script‑type* queue (`queue:v`, `queue:python`, …). | **Remote consumer** | `HSET job:<caller_id>:<job_id> …` <br/> `LPUSH queue:<script_type> job:<caller_id>:<job_id>` |
|
||||
| **6️⃣ Runner execution loop** | A **Runner** attached to that remote context **BRPOP**s from its script‑type queue, sets `status = started`, runs the script, writes `result` and final `status`. | **Runner** | `BRPOP queue:<script_type>` → `HSET job:<…> status started` → … → `HSET job:<…> result … status finished` |
|
||||
| **7️⃣ Result notification** | The runner builds a new `Message` (type `chat`, `result`, …) and pushes it onto **msg_out** again. The **Supervisor daemon** on the *originating* side will later pick it up and route it back to the original actor. | **Runner** → **Supervisor (remote side)** → **Mycelium** → **Supervisor (origin side)** → **Actor** | `HSET message:<res_key> …` <br/> `LPUSH msg_out message:<res_key>` (steps 2‑3 repeat in reverse direction) |
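Steps 5–6 from the runner's point of view can be sketched in V against an in-memory stand-in for the Redis list and hashes (the real runner would use BRPOP/HSET as listed above; this is illustrative only):

```v
module main

// In-memory stand-ins: jobs_queue plays the role of queue:<suffix>,
// job_status and job_result stand in for the job:<caller_id>:<id> hashes.
struct FakeStore {
mut:
	jobs_queue []string
	job_status map[string]string
	job_result map[string]map[string]string
}

fn (mut s FakeStore) pop_job() ?string {
	if s.jobs_queue.len == 0 {
		return none
	}
	key := s.jobs_queue[0]
	s.jobs_queue.delete(0)
	return key
}

// run_loop mirrors step 6: pop a key, mark it started, execute, write result + finished.
fn run_loop(mut s FakeStore) {
	for {
		job_key := s.pop_job() or { break }
		s.job_status[job_key] = 'started'
		mut res := map[string]string{}
		res['output'] = 'ok for ${job_key}' // placeholder for running the actual script
		s.job_result[job_key] = res
		s.job_status[job_key] = 'finished'
	}
}

fn main() {
	mut s := FakeStore{
		jobs_queue: ['job:12:2001']
	}
	run_loop(mut s)
	println(s.job_status) // {'job:12:2001': 'finished'}
}
```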
|
||||
|
||||
---
|
||||
|
||||
## Tiny end‑to‑end sequence (still simple enough to render)
|
||||
|
||||
```mermaid
|
||||
%%{init: {"theme":"dark"}}%%
|
||||
sequenceDiagram
|
||||
participant A as Actor
|
||||
participant L as Local‑Redis (Flow ctx)
|
||||
participant C as Supervisor daemon (local)
|
||||
participant M as Mycelium bus
|
||||
participant R as Remote‑Redis (target ctx)
|
||||
participant W as Runner (remote)
|
||||
|
||||
%% 1️⃣ publish
|
||||
A->>L: HSET message:12:101 …
|
||||
A->>L: LPUSH msg_out message:12:101
|
||||
|
||||
%% 2️⃣ coordinate
|
||||
C->>L: RPOP msg_out
|
||||
C->>C: check permissions / add routing info
|
||||
C->>M: push key to Mycelium (msg_out_coordinator)
|
||||
|
||||
%% 3️⃣ transport
|
||||
M->>R: LPUSH msg_in message:12:101
|
||||
|
||||
%% 4️⃣ consume
|
||||
R->>W: RPOP msg_in
|
||||
R->>R: HGETALL message:12:101
|
||||
R->>R: verify signature
|
||||
alt message_type == job
|
||||
R->>R: HSET job:12:2001 …
|
||||
R->>R: LPUSH queue:v job:12:2001
|
||||
end
|
||||
|
||||
%% 5️⃣ runner loop
|
||||
W->>R: BRPOP queue:v (job:12:2001)
|
||||
W->>R: HSET job:12:2001 status started
|
||||
W->>W: execute script
|
||||
W->>R: HSET job:12:2001 result … status finished
|
||||
|
||||
%% 6️⃣ result back
|
||||
W->>R: HSET message:12:900 result …
|
||||
W->>R: LPUSH msg_out message:12:900
|
||||
C->>M: (coordinator on remote side) routes back
|
||||
M->>L: LPUSH msg_in message:12:900
|
||||
A->>L: RPOP msg_in → read result
|
||||
```
|
||||
|
||||
|
||||
## 5️⃣ What the **system** is trying to achieve
|
||||
|
||||
| Goal | How it is realized |
|
||||
|------|--------------------|
|
||||
| **Decentralised execution** | Every *actor* owns a **Context**; any number of **Runners** can be attached to that context, possibly on different machines, and they all talk over the same Mycelium/Redis backend. |
|
||||
| **Fine‑grained permissions** | `Context.admins/readers/executors` enforce who can create, view or run jobs. |
|
||||
| **Loose coupling via messages** | All actions (job submission, result propagation, chat, mail …) use the generic `Message` object; the same transport pipeline handles all of them. |
|
||||
| **Workflow orchestration** | The **Flow** object models a DAG of jobs, tracks collective status and aggregates results, without needing a central scheduler. |
|
||||
| **Pluggable runtimes** | `ScriptType` and `RunnerType` let a runner choose the proper execution environment (V, Python, OSIS, Rust, …) – adding a new language only means adding a new `ScriptType` and a corresponding worker. |
|
||||
| **Observability** | `Log` arrays attached to a `Message` and the timestamps on every hash give a complete audit trail. |
|
||||
| **Resilience** | Jobs are idempotent hash entries; queues are persisted in Redis, and status changes are atomic (`HSET`). Retries and time‑outs guarantee eventual consistency. |
|
||||
|
||||
---
|
||||
|
||||
## 6️⃣ Diagram summary (quick visual cheat‑sheet)
|
||||
|
||||
```mermaid
|
||||
%%{init: {"theme":"dark"}}%%
|
||||
graph TD
|
||||
A[Actor] -->|creates| Ctx[Context]
|
||||
A -->|creates| Flow
|
||||
A -->|creates| Msg
|
||||
A -->|creates| Rnr[Runner]
|
||||
A -->|creates| Job[RunnerJob]
|
||||
|
||||
subgraph Redis["Redis (per Context)"]
|
||||
Ctx --> A
|
||||
Ctx --> Flow
|
||||
Ctx --> Msg
|
||||
Ctx --> Rnr
|
||||
Ctx --> Job
|
||||
end
|
||||
|
||||
Msg -->|push to| OutQ[msg_out]
|
||||
OutQ --> Myc[Mycelium Bus]
|
||||
Myc -->|deliver| InQ[msg_in]
|
||||
InQ --> Rnr
|
||||
Rnr -->|pop from| Qv["queue:v"]
|
||||
Rnr -->|pop from| Qpy["queue:python"]
|
||||
|
||||
Rnr -->|updates| Job
|
||||
Job -->|updates| Flow
|
||||
Flow -->|result Message| Msg
|
||||
```
|
||||
|
||||
lib/web/ui/factory.v (new file, 473 lines)
@@ -0,0 +1,473 @@
|
||||
module ui
|
||||
|
||||
import veb
|
||||
import os
|
||||
|
||||
|
||||
// Public Context type for veb
|
||||
pub struct Context {
|
||||
veb.Context
|
||||
}
|
||||
|
||||
// Simple tree menu structure
|
||||
pub struct MenuItem {
|
||||
pub:
|
||||
title string
|
||||
href string
|
||||
children []MenuItem
|
||||
}
|
||||
|
||||
// Factory args
|
||||
@[params]
|
||||
pub struct WebArgs {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
host string = 'localhost'
|
||||
port int = 8080
|
||||
title string = 'Admin'
|
||||
menu []MenuItem
|
||||
open bool
|
||||
}
|
||||
|
||||
// The App holds server state and config
|
||||
pub struct App {
|
||||
veb.StaticHandler
|
||||
pub mut:
|
||||
title string = "default"
|
||||
menu []MenuItem
|
||||
port int = 7711
|
||||
}
|
||||
|
||||
|
||||
// Start the webserver (blocking)
|
||||
pub fn start(args WebArgs) !{
|
||||
mut app := App{
|
||||
title: args.title,
|
||||
menu: args.menu,
|
||||
port: args.port
|
||||
}
|
||||
veb.run[App, Context](mut app, app.port)
|
||||
}
|
||||
|
||||
// Routes
|
||||
|
||||
// Redirect root to /admin
|
||||
@['/'; get]
|
||||
pub fn (app &App) root(mut ctx Context) veb.Result {
|
||||
return ctx.redirect('/admin')
|
||||
}
|
||||
|
||||
// Admin home page
|
||||
@['/admin'; get]
|
||||
pub fn (app &App) admin_index(mut ctx Context) veb.Result {
|
||||
return ctx.html(app.render_admin('/', 'Welcome'))
|
||||
}
|
||||
|
||||
// HeroScript editor page
|
||||
@['/admin/heroscript'; get]
|
||||
pub fn (app &App) admin_heroscript(mut ctx Context) veb.Result {
|
||||
return ctx.html(app.render_heroscript())
|
||||
}
|
||||
|
||||
// Chat page
|
||||
@['/admin/chat'; get]
|
||||
pub fn (app &App) admin_chat(mut ctx Context) veb.Result {
|
||||
return ctx.html(app.render_chat())
|
||||
}
|
||||
|
||||
// Static CSS files
|
||||
@['/static/css/colors.css'; get]
|
||||
pub fn (app &App) serve_colors_css(mut ctx Context) veb.Result {
|
||||
css_path := os.join_path(os.dir(@FILE), 'templates', 'css', 'colors.css')
|
||||
css_content := os.read_file(css_path) or { return ctx.text('/* CSS file not found */') }
|
||||
ctx.set_content_type('text/css')
|
||||
return ctx.text(css_content)
|
||||
}
|
||||
|
||||
@['/static/css/main.css'; get]
|
||||
pub fn (app &App) serve_main_css(mut ctx Context) veb.Result {
|
||||
css_path := os.join_path(os.dir(@FILE), 'templates', 'css', 'main.css')
|
||||
css_content := os.read_file(css_path) or { return ctx.text('/* CSS file not found */') }
|
||||
ctx.set_content_type('text/css')
|
||||
return ctx.text(css_content)
|
||||
}
|
||||
|
||||
// Static JS files
|
||||
@['/static/js/theme.js'; get]
|
||||
pub fn (app &App) serve_theme_js(mut ctx Context) veb.Result {
|
||||
js_path := os.join_path(os.dir(@FILE), 'templates', 'js', 'theme.js')
|
||||
js_content := os.read_file(js_path) or { return ctx.text('/* JS file not found */') }
|
||||
ctx.set_content_type('application/javascript')
|
||||
return ctx.text(js_content)
|
||||
}
|
||||
|
||||
@['/static/js/heroscript.js'; get]
|
||||
pub fn (app &App) serve_heroscript_js(mut ctx Context) veb.Result {
|
||||
js_path := os.join_path(os.dir(@FILE), 'templates', 'js', 'heroscript.js')
|
||||
js_content := os.read_file(js_path) or { return ctx.text('/* JS file not found */') }
|
||||
ctx.set_content_type('application/javascript')
|
||||
return ctx.text(js_content)
|
||||
}
|
||||
|
||||
@['/static/js/chat.js'; get]
|
||||
pub fn (app &App) serve_chat_js(mut ctx Context) veb.Result {
|
||||
js_path := os.join_path(os.dir(@FILE), 'templates', 'js', 'chat.js')
|
||||
js_content := os.read_file(js_path) or { return ctx.text('/* JS file not found */') }
|
||||
ctx.set_content_type('application/javascript')
|
||||
return ctx.text(js_content)
|
||||
}
|
||||
|
||||
@['/static/css/heroscript.css'; get]
|
||||
pub fn (app &App) serve_heroscript_css(mut ctx Context) veb.Result {
|
||||
css_path := os.join_path(os.dir(@FILE), 'templates', 'css', 'heroscript.css')
|
||||
css_content := os.read_file(css_path) or { return ctx.text('/* CSS file not found */') }
|
||||
ctx.set_content_type('text/css')
|
||||
return ctx.text(css_content)
|
||||
}
|
||||
|
||||
@['/static/css/chat.css'; get]
|
||||
pub fn (app &App) serve_chat_css(mut ctx Context) veb.Result {
|
||||
css_path := os.join_path(os.dir(@FILE), 'templates', 'css', 'chat.css')
|
||||
css_content := os.read_file(css_path) or { return ctx.text('/* CSS file not found */') }
|
||||
ctx.set_content_type('text/css')
|
||||
return ctx.text(css_content)
|
||||
}
|
||||
|
||||
// Catch-all content under /admin/*
|
||||
@['/admin/:path...'; get]
|
||||
pub fn (app &App) admin_section(mut ctx Context, path string) veb.Result {
|
||||
// Render current path in the main content
|
||||
return ctx.html(app.render_admin(path, 'Content'))
|
||||
}
|
||||
|
||||
// View rendering using external template
|
||||
|
||||
fn (app &App) render_admin(path string, heading string) string {
|
||||
// Get the template file path relative to the module
|
||||
template_path := os.join_path(os.dir(@FILE), 'templates', 'admin_layout.html')
|
||||
|
||||
// Read the template file
|
||||
template_content := os.read_file(template_path) or {
|
||||
// Fallback to inline template if file not found
|
||||
return app.render_admin_fallback(path, heading)
|
||||
}
|
||||
|
||||
// Generate menu HTML
|
||||
menu_content := menu_html(app.menu, 0, 'm')
|
||||
|
||||
// Simple template variable replacement
|
||||
mut result := template_content
|
||||
result = result.replace('{{.title}}', app.title)
|
||||
result = result.replace('{{.heading}}', heading)
|
||||
result = result.replace('{{.path}}', path)
|
||||
result = result.replace('{{.menu_html}}', menu_content)
|
||||
result = result.replace('{{.css_colors_url}}', '/static/css/colors.css')
|
||||
result = result.replace('{{.css_main_url}}', '/static/css/main.css')
|
||||
result = result.replace('{{.js_theme_url}}', '/static/js/theme.js')
|
||||
|
||||
return result
|
||||
}

// HeroScript editor rendering using external template
fn (app &App) render_heroscript() string {
    // Get the template file path relative to the module
    template_path := os.join_path(os.dir(@FILE), 'templates', 'heroscript_editor.html')

    // Read the template file
    template_content := os.read_file(template_path) or {
        // Fallback to basic template if file not found
        return app.render_heroscript_fallback()
    }

    // Generate menu HTML
    menu_content := menu_html(app.menu, 0, 'm')

    // Simple template variable replacement
    mut result := template_content
    result = result.replace('{{.title}}', app.title)
    result = result.replace('{{.menu_html}}', menu_content)
    result = result.replace('{{.css_colors_url}}', '/static/css/colors.css')
    result = result.replace('{{.css_main_url}}', '/static/css/main.css')
    result = result.replace('{{.css_heroscript_url}}', '/static/css/heroscript.css')
    result = result.replace('{{.js_theme_url}}', '/static/js/theme.js')
    result = result.replace('{{.js_heroscript_url}}', '/static/js/heroscript.js')

    return result
}

// Chat rendering using external template
fn (app &App) render_chat() string {
    // Get the template file path relative to the module
    template_path := os.join_path(os.dir(@FILE), 'templates', 'chat.html')

    // Read the template file
    template_content := os.read_file(template_path) or {
        // Fallback to basic template if file not found
        return app.render_chat_fallback()
    }

    // Generate menu HTML
    menu_content := menu_html(app.menu, 0, 'm')

    // Simple template variable replacement
    mut result := template_content
    result = result.replace('{{.title}}', app.title)
    result = result.replace('{{.menu_html}}', menu_content)
    result = result.replace('{{.css_colors_url}}', '/static/css/colors.css')
    result = result.replace('{{.css_main_url}}', '/static/css/main.css')
    result = result.replace('{{.css_chat_url}}', '/static/css/chat.css')
    result = result.replace('{{.js_theme_url}}', '/static/js/theme.js')
    result = result.replace('{{.js_chat_url}}', '/static/js/chat.js')

    return result
}

// Fallback HeroScript rendering method
fn (app &App) render_heroscript_fallback() string {
    return '
    <!doctype html>
    <html lang="en">
    <head>
        <meta charset="utf-8">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <title>${app.title} - HeroScript Editor</title>
        <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet">
    </head>
    <body>
        <div class="container mt-5">
            <h1>HeroScript Editor</h1>
            <p>HeroScript editor template not found. Please check the template files.</p>
            <a href="/admin" class="btn btn-primary">Back to Admin</a>
        </div>
    </body>
    </html>
    '
}

// Fallback Chat rendering method
fn (app &App) render_chat_fallback() string {
    return '
    <!doctype html>
    <html lang="en">
    <head>
        <meta charset="utf-8">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <title>${app.title} - Chat</title>
        <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet">
    </head>
    <body>
        <div class="container mt-5">
            <h1>Chat Assistant</h1>
            <p>Chat template not found. Please check the template files.</p>
            <a href="/admin" class="btn btn-primary">Back to Admin</a>
        </div>
    </body>
    </html>
    '
}

// Fallback rendering method (inline template)
fn (app &App) render_admin_fallback(path string, heading string) string {
    return '
    <!doctype html>
    <html lang="en">
    <head>
        <meta charset="utf-8">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <title>${app.title}</title>
        <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous">
        <style>
            body { padding-top: 44px; }
            .header {
                height: 44px;
                line-height: 44px;
                font-size: 14px;
            }
            .sidebar {
                position: fixed;
                top: 44px;
                bottom: 0;
                left: 0;
                width: 260px;
                overflow-y: auto;
                background: #f8f9fa;
                border-right: 1px solid #e0e0e0;
            }
            .main {
                margin-left: 260px;
                padding: 16px;
            }
            .list-group-item {
                border: 0;
                padding: .35rem .75rem;
                background: transparent;
            }
            .menu-leaf a {
                color: #212529;
                text-decoration: none;
            }
            .menu-toggle {
                text-decoration: none;
                color: #212529;
            }
            .menu-toggle .chev {
                font-size: 10px;
                opacity: .6;
            }
            .menu-section {
                font-weight: 600;
                color: #6c757d;
                padding: .5rem .75rem;
            }
        </style>
    </head>
    <body>
        <nav class="navbar navbar-dark bg-dark fixed-top header px-2">
            <div class="d-flex w-100 align-items-center justify-content-between">
                <div class="text-white fw-bold">${app.title}</div>
                <div class="text-white-50">Admin</div>
            </div>
        </nav>

        <aside class="sidebar">
            <div class="p-2">
                <div class="menu-section">Navigation</div>
                <div class="list-group list-group-flush">
                    ${menu_html(app.menu, 0, 'm')}
                </div>
            </div>
        </aside>

        <main class="main">
            <div class="container-fluid">
                <div class="d-flex align-items-center mb-3">
                    <h5 class="mb-0">${heading}</h5>
                    <span class="ms-2 text-muted small">/admin/${path}</span>
                </div>
                <div class="card">
                    <div class="card-body">
                        <p class="text-muted">This is a placeholder admin content area for: <code>/admin/${path}</code>.</p>
                        <p class="mb-0">Use the treeview on the left to navigate.</p>
                    </div>
                </div>
            </div>
        </main>

        <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/js/bootstrap.bundle.min.js" integrity="sha384-MrcW6ZMFYlzcLA8Nl+NtUVF0sA7MsXsP1UyJoMp4YLEuNSfAP+JcXn/tWtIaxVXM" crossorigin="anonymous"></script>
    </body>
    </html>
    '
}

// Recursive menu renderer
fn menu_html(items []MenuItem, depth int, prefix string) string {
    mut out := []string{}
    for i, it in items {
        id := '${prefix}_${depth}_${i}'
        if it.children.len > 0 {
            // expandable group
            out << '<div class="list-group-item">'
            out << '<a class="menu-toggle d-flex align-items-center justify-content-between" data-bs-toggle="collapse" href="#${id}" role="button" aria-expanded="${if depth == 0 { 'true' } else { 'false' }}" aria-controls="${id}">'
            out << '<span>${it.title}</span><span class="chev">›</span>'
            out << '</a>'
            out << '<div class="collapse ${if depth == 0 { 'show' } else { '' }}" id="${id}">'
            out << '<div class="ms-2 mt-1">'
            out << menu_html(it.children, depth + 1, id)
            out << '</div>'
            out << '</div>'
            out << '</div>'
        } else {
            // leaf
            out << '<div class="list-group-item menu-leaf"><a href="${if it.href.len > 0 { it.href } else { '/admin' }}">${it.title}</a></div>'
        }
    }
    return out.join('\n')
}
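
// Usage example (illustrative only): for a single leaf item the renderer emits one
// anchor row; items with children recurse into Bootstrap collapse groups keyed by
// the generated id '<prefix>_<depth>_<index>'.
//
//   items := [MenuItem{ title: 'Dashboard', href: '/admin' }]
//   println(menu_html(items, 0, 'm'))
//   // -> <div class="list-group-item menu-leaf"><a href="/admin">Dashboard</a></div>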

// Default sample menu
fn default_menu() []MenuItem {
    return [
        MenuItem{
            title: 'Dashboard'
            href: '/admin'
        },
        MenuItem{
            title: 'HeroScript'
            href: '/admin/heroscript'
        },
        MenuItem{
            title: 'Chat'
            href: '/admin/chat'
        },
        MenuItem{
            title: 'Users'
            children: [
                MenuItem{
                    title: 'Overview'
                    href: '/admin/users/overview'
                },
                MenuItem{
                    title: 'Create'
                    href: '/admin/users/create'
                },
                MenuItem{
                    title: 'Roles'
                    href: '/admin/users/roles'
                },
            ]
        },
        MenuItem{
            title: 'Content'
            children: [
                MenuItem{
                    title: 'Pages'
                    href: '/admin/content/pages'
                },
                MenuItem{
                    title: 'Media'
                    href: '/admin/content/media'
                },
                MenuItem{
                    title: 'Settings'
                    children: [
                        MenuItem{
                            title: 'SEO'
                            href: '/admin/content/settings/seo'
                        },
                        MenuItem{
                            title: 'Themes'
                            href: '/admin/content/settings/themes'
                        },
                    ]
                },
            ]
        },
        MenuItem{
            title: 'System'
            children: [
                MenuItem{
                    title: 'Status'
                    href: '/admin/system/status'
                },
                MenuItem{
                    title: 'Logs'
                    href: '/admin/system/logs'
                },
                MenuItem{
                    title: 'Backups'
                    href: '/admin/system/backups'
                },
            ]
        },
    ]
}
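
// For reference: default_menu (and menu_html above) assume a MenuItem struct roughly
// like the sketch below. The real definition lives earlier in this module; the field
// names are inferred from usage here, so treat this as an assumption, not the source.
//
//   pub struct MenuItem {
//   pub mut:
//       title    string
//       href     string
//       children []MenuItem
//   }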

@@ -163,7 +163,6 @@ tests := '
lib/data
lib/osal
lib/lang
lib/code
lib/clients
lib/core
lib/develop
@@ -185,6 +184,11 @@ clients/livekit
core/playcmds
doctree/
jina/
params_reflection_test.v
python/
rust_test.v
rclone/
qdrant/
'

if in_github_actions() {
@@ -209,6 +213,8 @@ timetools_test.v
code/codeparser
gittools_test.v
link_def_test.v
python_test.v
ipaddress_test.v
'

// Split tests into array and remove empty lines