initial commit

Timur Gordon
2025-07-29 01:15:23 +02:00
commit 7d7ff0f0ab
108 changed files with 24713 additions and 0 deletions

1
core/dispatcher/.gitignore vendored Normal file

@@ -0,0 +1 @@
/target

25
core/dispatcher/Cargo.toml Normal file

@@ -0,0 +1,25 @@
[package]
name = "hero_dispatcher"
version = "0.1.0"
edition = "2021"

[[bin]]
name = "dispatcher"
path = "cmd/dispatcher.rs"

[dependencies]
clap = { version = "4.4", features = ["derive"] }
env_logger = "0.10"
redis = { version = "0.25.0", features = ["tokio-comp"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
uuid = { version = "1.6", features = ["v4", "serde"] }
chrono = { version = "0.4", features = ["serde"] }
log = "0.4"
tokio = { version = "1", features = ["macros", "rt-multi-thread"] } # For async main in examples, and general async
colored = "2.0"
hero_job = { path = "../job" }

[dev-dependencies] # For examples later
env_logger = "0.10"
rhai = "1.18.0" # For examples that might need to show engine setup

128
core/dispatcher/README.md Normal file

@@ -0,0 +1,128 @@
# Hero Dispatcher
A Redis-based job dispatcher for managing Rhai/HeroScript execution across distributed workers.
## Overview
The Hero Dispatcher provides a robust job queue system where:
- **Jobs** represent script execution requests (Rhai or HeroScript)
- **Creating a job** stores job parameters in Redis as an hset entry
- **Submitting a job** pushes the job ID to a worker's queue
- **Running a job** creates, submits, and awaits results on a dedicated reply queue
## Key Features
- **Asynchronous Operations**: Built with `tokio` for non-blocking I/O
- **Request-Reply Pattern**: Submit jobs and await results without polling
- **Configurable Jobs**: Set timeouts, retries, concurrency, and logging options
- **Worker Targeting**: Direct job routing to specific worker queues
- **Job Lifecycle**: Create, submit, monitor status, and retrieve results
## Core Components
### `DispatcherBuilder`
Builder for creating `Dispatcher` instances with caller ID, context ID, per-script-type worker lists, and Redis URL.
### `Dispatcher`
Main interface for job management:
- `new_job()` - Create a new `JobBuilder`
- `create_job()` - Store job in Redis
- `run_job_and_await_result()` - Execute job and wait for completion
- `get_job_status()` - Check job execution status
- `get_job_output()` - Retrieve job results
### `JobBuilder`
Fluent builder for configuring jobs:
- `script()` - Set the script content
- `script_type()` - Select the script type (routes the job to a matching worker)
- `timeout()` - Set execution timeout
- `build()` - Create the job
- `submit()` - Fire-and-forget submission
- `await_response()` - Submit and wait for result
### `Job`
Represents a script execution request with:
- Unique ID and timestamps
- Script content and target worker
- Execution settings (timeout, retries, concurrency)
- Logging configuration
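Concretely, the shape constructed in `src/job.rs` looks like the following sketch (the struct itself lives in the `hero_job` crate, which is not shown here; field types are inferred from that construction):
```rust
use chrono::{DateTime, Utc};
use hero_job::ScriptType;
use std::collections::HashMap;
use std::time::Duration;

pub struct Job {
    pub id: String,                     // UUID v4
    pub caller_id: String,              // client that created the job
    pub context_id: String,             // execution context (circle)
    pub script: String,                 // Rhai or HeroScript source
    pub script_type: ScriptType,        // routes the job to a matching worker
    pub timeout: Duration,
    pub retries: u8,
    pub concurrent: bool,               // execute in a separate thread
    pub log_path: Option<String>,       // optional log file for job output
    pub env_vars: HashMap<String, String>,
    pub prerequisites: Vec<String>,     // job IDs that must finish first
    pub dependents: Vec<String>,        // job IDs waiting on this job
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
```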
## Redis Schema
Jobs are stored using the `hero:` namespace:
- `hero:job:{job_id}` - Job parameters as Redis hash
- `hero:work_queue:{worker_id}` - Worker-specific job queues
- `hero:reply:{job_id}` - Dedicated reply queues for results
## Prerequisites
- Redis server accessible by dispatcher and workers
## Usage Example
### Basic Job Creation and Submission
```rust
use hero_dispatcher::{DispatcherBuilder, DispatcherError};
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create dispatcher
let dispatcher = DispatcherBuilder::new()
.caller_id("my-app")
.context_id("my-context")
.heroscript_workers(vec!["worker-1".to_string()])
.redis_url("redis://127.0.0.1:6379")
.build()?;
// Create a job
let job = dispatcher
.new_job()
.script(r#"print("Hello from worker!"); "success""#)
.timeout(Duration::from_secs(30))
.build()?;
// Store job in Redis
dispatcher.create_job(&job).await?;
println!("Job {} created and stored in Redis", job.id);
// Run job and await result (requires worker)
match dispatcher.run_job_and_await_result(&job).await {
Ok(result) => println!("Job completed: {}", result),
Err(DispatcherError::Timeout(_)) => println!("Job timed out"),
Err(e) => println!("Job failed: {}", e),
}
Ok(())
}
```
### Job Status Monitoring
```rust
// Check job status
match dispatcher.get_job_status(&job.id).await {
Ok(status) => println!("Job status: {:?}", status),
Err(e) => println!("Error getting status: {}", e),
}
// Get job output
match dispatcher.get_job_output(&job.id).await {
Ok(output) => println!("Job output: {:?}", output),
Err(e) => println!("Error getting output: {}", e),
}
```
## Examples
Run the comprehensive demo to see dispatcher functionality and Redis entries:
```bash
cargo run --example dispatcher_demo
```
Other examples:
- `timeout_example.rs` - Demonstrates timeout handling
Ensure Redis is running at `redis://127.0.0.1:6379`.


@@ -0,0 +1,157 @@
# Rhai Client Binary
A command-line client for executing Rhai scripts on remote workers via Redis.
## Binary: `client`
### Installation
Build the binary:
```bash
cargo build --bin client --release
```
### Usage
```bash
# Basic usage - requires caller and circle keys
client --caller-key <CALLER_KEY> --circle-key <CIRCLE_KEY>
# Execute inline script
client -c <CALLER_KEY> -k <CIRCLE_KEY> --script "print('Hello World!')"
# Execute script from file
client -c <CALLER_KEY> -k <CIRCLE_KEY> --file script.rhai
# Use specific worker (defaults to circle key)
client -c <CALLER_KEY> -k <CIRCLE_KEY> -w <WORKER_KEY> --script "2 + 2"
# Custom Redis and timeout
client -c <CALLER_KEY> -k <CIRCLE_KEY> --redis-url redis://localhost:6379/1 --timeout 60
# Remove timestamps from logs
client -c <CALLER_KEY> -k <CIRCLE_KEY> --no-timestamp
# Increase verbosity
client -c <CALLER_KEY> -k <CIRCLE_KEY> -v --script "debug_info()"
```
### Command-Line Options
| Option | Short | Default | Description |
|--------|-------|---------|-------------|
| `--caller-key` | `-c` | **Required** | Caller public key (your identity) |
| `--circle-key` | `-k` | **Required** | Circle public key (execution context) |
| `--worker-key` | `-w` | `circle-key` | Worker public key (target worker) |
| `--redis-url` | `-r` | `redis://localhost:6379` | Redis connection URL |
| `--script` | `-s` | | Rhai script to execute |
| `--file` | `-f` | | Path to Rhai script file |
| `--timeout` | `-t` | `30` | Timeout for script execution (seconds) |
| `--no-timestamp` | | `false` | Remove timestamps from log output |
| `--verbose` | `-v` | | Increase verbosity (stackable) |
### Execution Modes
#### Inline Script Execution
```bash
# Execute a simple calculation
client -c caller_123 -k circle_456 -s "let result = 2 + 2; print(result);"
# Execute with specific worker
client -c caller_123 -k circle_456 -w worker_789 -s "get_user_data()"
```
#### Script File Execution
```bash
# Execute script from file
client -c caller_123 -k circle_456 -f examples/data_processing.rhai
# Execute with custom timeout
client -c caller_123 -k circle_456 -f long_running_script.rhai -t 120
```
#### Interactive Mode
```bash
# Enter interactive REPL mode (when no script or file provided)
client -c caller_123 -k circle_456
# Interactive mode with verbose logging
client -c caller_123 -k circle_456 -v --no-timestamp
```
### Interactive Mode
When no script (`-s`) or file (`-f`) is provided, the client enters interactive mode:
```
🔗 Starting Rhai Client
📋 Configuration:
Caller Key: caller_123
Circle Key: circle_456
Worker Key: circle_456
Redis URL: redis://localhost:6379
Timeout: 30s
✅ Connected to Redis at redis://localhost:6379
🎮 Entering interactive mode
Type Rhai scripts and press Enter to execute. Type 'exit' or 'quit' to close.
rhai> let x = 42; print(x);
Status: completed
Output: 42
rhai> exit
👋 Goodbye!
```
### Configuration Examples
#### Development Usage
```bash
# Simple development client
client -c dev_user -k dev_circle
# Development with clean logs
client -c dev_user -k dev_circle --no-timestamp -v
```
#### Production Usage
```bash
# Production client with specific worker
client \
--caller-key prod_user_123 \
--circle-key prod_circle_456 \
--worker-key prod_worker_789 \
--redis-url redis://redis-cluster:6379/0 \
--timeout 300 \
--file production_script.rhai
```
#### Batch Processing
```bash
# Process multiple scripts
for script in scripts/*.rhai; do
client -c batch_user -k batch_circle -f "$script" --no-timestamp
done
```
### Key Concepts
- **Caller Key**: Your identity - used for authentication and tracking
- **Circle Key**: Execution context - defines the environment/permissions
- **Worker Key**: Target worker - which worker should execute the script (defaults to circle key)
### Error Handling
The client provides clear error messages for:
- Missing required keys
- Redis connection failures
- Script execution timeouts
- Worker unavailability
- Script syntax errors
### Dependencies
- `rhai_dispatcher`: Core client library for Redis-based script execution
- `redis`: Redis client for task queue communication
- `clap`: Command-line argument parsing
- `env_logger`: Logging infrastructure
- `tokio`: Async runtime

271
core/dispatcher/cmd/dispatcher.rs Normal file

@@ -0,0 +1,271 @@
use clap::Parser;
use hero_dispatcher::{Dispatcher, DispatcherBuilder, ScriptType};
use log::{error, info};
use colored::Colorize;
use std::io::{self, Write};
use std::time::Duration;
#[derive(Parser, Debug)]
#[command(author, version, about = "Hero Dispatcher Client - Script execution client", long_about = None)]
struct Args {
/// Caller ID (your identity)
#[arg(short = 'c', long = "caller-id", help = "Caller ID (your identity)")]
caller_id: String,
/// Context ID (execution context)
#[arg(short = 'k', long = "context-id", help = "Context ID (execution context)")]
context_id: String,
/// Script type to execute (heroscript, rhai-sal, rhai-dsl)
#[arg(short = 'T', long = "script-type", default_value = "heroscript", help = "Script type: heroscript, rhai-sal, or rhai-dsl")]
script_type: String,
/// HeroScript workers (comma-separated)
#[arg(long = "hero-workers", default_value = "hero-worker-1", help = "HeroScript worker IDs (comma-separated)")]
hero_workers: String,
/// Rhai SAL workers (comma-separated)
#[arg(long = "rhai-sal-workers", default_value = "rhai-sal-worker-1", help = "Rhai SAL worker IDs (comma-separated)")]
rhai_sal_workers: String,
/// Rhai DSL workers (comma-separated)
#[arg(long = "rhai-dsl-workers", default_value = "rhai-dsl-worker-1", help = "Rhai DSL worker IDs (comma-separated)")]
rhai_dsl_workers: String,
/// Redis URL
#[arg(short, long, default_value = "redis://localhost:6379", help = "Redis connection URL")]
redis_url: String,
/// Rhai script to execute
#[arg(short, long, help = "Rhai script to execute")]
script: Option<String>,
/// Path to Rhai script file
#[arg(short, long, help = "Path to Rhai script file")]
file: Option<String>,
/// Timeout for script execution (in seconds)
#[arg(short, long, default_value = "30", help = "Timeout for script execution in seconds")]
timeout: u64,
/// Increase verbosity (can be used multiple times)
#[arg(short, long, action = clap::ArgAction::Count, help = "Increase verbosity (-v for debug, -vv for trace)")]
verbose: u8,
/// Disable timestamps in log output
#[arg(long, help = "Remove timestamps from log output")]
no_timestamp: bool,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
// Configure logging based on verbosity level
let log_config = match args.verbose {
0 => "warn,hero_dispatcher=warn",
1 => "info,hero_dispatcher=info",
2 => "debug,hero_dispatcher=debug",
_ => "trace,hero_dispatcher=trace",
};
std::env::set_var("RUST_LOG", log_config);
// Configure env_logger with or without timestamps
if args.no_timestamp {
env_logger::Builder::from_default_env()
.format_timestamp(None)
.init();
} else {
env_logger::init();
}
// Parse worker lists
let hero_workers: Vec<String> = args.hero_workers.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
let rhai_sal_workers: Vec<String> = args.rhai_sal_workers.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
let rhai_dsl_workers: Vec<String> = args.rhai_dsl_workers.split(',').map(|s| s.trim().to_string()).filter(|s| !s.is_empty()).collect();
// Validate that at least one worker is provided for the selected script type
match args.script_type.to_lowercase().as_str() {
"heroscript" => {
if hero_workers.is_empty() {
error!("❌ No HeroScript workers provided. Use --hero-workers to specify at least one worker.");
return Err("At least one HeroScript worker must be provided".into());
}
}
"rhai-sal" => {
if rhai_sal_workers.is_empty() {
error!("❌ No Rhai SAL workers provided. Use --rhai-sal-workers to specify at least one worker.");
return Err("At least one Rhai SAL worker must be provided".into());
}
}
"rhai-dsl" => {
if rhai_dsl_workers.is_empty() {
error!("❌ No Rhai DSL workers provided. Use --rhai-dsl-workers to specify at least one worker.");
return Err("At least one Rhai DSL worker must be provided".into());
}
}
_ => {
error!("❌ Invalid script type: {}. Valid types: heroscript, rhai-sal, rhai-dsl", args.script_type);
return Err(format!("Invalid script type: {}", args.script_type).into());
}
}
if args.verbose > 0 {
info!("🔗 Starting Hero Dispatcher");
info!("📋 Configuration:");
info!(" Caller ID: {}", args.caller_id);
info!(" Context ID: {}", args.context_id);
info!(" Script Type: {}", args.script_type);
info!(" HeroScript Workers: {:?}", hero_workers);
info!(" Rhai SAL Workers: {:?}", rhai_sal_workers);
info!(" Rhai DSL Workers: {:?}", rhai_dsl_workers);
info!(" Redis URL: {}", args.redis_url);
info!(" Timeout: {}s", args.timeout);
info!("");
}
// Create the dispatcher client
let client = DispatcherBuilder::new()
.caller_id(&args.caller_id)
.context_id(&args.context_id)
.heroscript_workers(hero_workers)
.rhai_sal_workers(rhai_sal_workers)
.rhai_dsl_workers(rhai_dsl_workers)
.redis_url(&args.redis_url)
.build()?;
if args.verbose > 0 {
info!("✅ Connected to Redis at {}", args.redis_url);
}
// Determine execution mode
if let Some(script_content) = args.script {
// Execute inline script
if args.verbose > 0 {
info!("📜 Executing inline script");
}
execute_script(&client, script_content, &args.script_type, args.timeout).await?;
} else if let Some(file_path) = args.file {
// Execute script from file
if args.verbose > 0 {
info!("📁 Loading script from file: {}", file_path);
}
let script_content = std::fs::read_to_string(&file_path)
.map_err(|e| format!("Failed to read script file '{}': {}", file_path, e))?;
execute_script(&client, script_content, &args.script_type, args.timeout).await?;
} else {
// Interactive mode
info!("🎮 Entering interactive mode");
info!("Type Rhai scripts and press Enter to execute. Type 'exit' or 'quit' to close.");
run_interactive_mode(&client, &args.script_type, args.timeout, args.verbose).await?;
}
Ok(())
}
async fn execute_script(
client: &Dispatcher,
script: String,
script_type_str: &str,
timeout_secs: u64,
) -> Result<(), Box<dyn std::error::Error>> {
info!("⚡ Executing script: {:.50}...", script);
// Parse script type
let script_type = match script_type_str.to_lowercase().as_str() {
"heroscript" => ScriptType::HeroScript,
"rhai-sal" => ScriptType::RhaiSAL,
"rhai-dsl" => ScriptType::RhaiDSL,
_ => {
error!("❌ Invalid script type: {}. Valid types: heroscript, rhai-sal, rhai-dsl", script_type_str);
return Err(format!("Invalid script type: {}", script_type_str).into());
}
};
let timeout = Duration::from_secs(timeout_secs);
match client
.new_job()
.script_type(script_type)
.script(&script)
.timeout(timeout)
.await_response()
.await
{
Ok(result) => {
info!("✅ Script execution completed");
println!("{}", "Result:".green().bold());
println!("{}", result);
}
Err(e) => {
error!("❌ Script execution failed: {}", e);
return Err(Box::new(e));
}
}
Ok(())
}
async fn run_interactive_mode(
client: &Dispatcher,
script_type_str: &str,
timeout_secs: u64,
verbose: u8,
) -> Result<(), Box<dyn std::error::Error>> {
// Parse script type
let script_type = match script_type_str.to_lowercase().as_str() {
"heroscript" => ScriptType::HeroScript,
"rhai-sal" => ScriptType::RhaiSAL,
"rhai-dsl" => ScriptType::RhaiDSL,
_ => {
error!("❌ Invalid script type: {}. Valid types: heroscript, rhai-sal, rhai-dsl", script_type_str);
return Err(format!("Invalid script type: {}", script_type_str).into());
}
};
let timeout = Duration::from_secs(timeout_secs);
loop {
print!("rhai> ");
io::stdout().flush()?;
let mut input = String::new();
io::stdin().read_line(&mut input)?;
let input = input.trim();
if input.is_empty() {
continue;
}
if input == "exit" || input == "quit" {
info!("👋 Goodbye!");
break;
}
if verbose > 0 {
info!("⚡ Executing: {}", input);
}
match client
.new_job()
.script_type(script_type.clone())
.script(input)
.timeout(timeout)
.await_response()
.await
{
Ok(result) => {
println!("{}", result.green());
}
Err(e) => {
println!("{}", format!("error: {}", e).red());
}
}
println!(); // Add blank line for readability
}
Ok(())
}


@@ -0,0 +1,190 @@
# Architecture of the `rhai_dispatcher` Crate
The `rhai_dispatcher` crate provides a Redis-based client library for submitting Rhai scripts to distributed worker services and awaiting their execution results. It implements a request-reply pattern using Redis as the message broker.
## Core Architecture
The client follows a builder pattern design with clear separation of concerns:
```mermaid
graph TD
A[RhaiDispatcherBuilder] --> B[RhaiDispatcher]
B --> C[PlayRequestBuilder]
C --> D[PlayRequest]
D --> E[Redis Task Queue]
E --> F[Worker Service]
F --> G[Redis Reply Queue]
G --> H[Client Response]
subgraph "Client Components"
A
B
C
D
end
subgraph "Redis Infrastructure"
E
G
end
subgraph "External Services"
F
end
```
## Key Components
### 1. RhaiDispatcherBuilder
A builder pattern implementation for constructing `RhaiDispatcher` instances with proper configuration validation.
**Responsibilities:**
- Configure Redis connection URL
- Set caller ID for task attribution
- Validate configuration before building client
**Key Methods:**
- `caller_id(id: &str)` - Sets the caller identifier
- `redis_url(url: &str)` - Configures Redis connection
- `build()` - Creates the final `RhaiDispatcher` instance
### 2. RhaiDispatcher
The main client interface that manages Redis connections and provides factory methods for creating play requests.
**Responsibilities:**
- Maintain Redis connection pool
- Provide factory methods for request builders
- Handle low-level Redis operations
- Manage task status queries
**Key Methods:**
- `new_play_request()` - Creates a new `PlayRequestBuilder`
- `get_task_status(task_id)` - Queries task status from Redis
- Internal methods for Redis operations
### 3. PlayRequestBuilder
A fluent builder for constructing and submitting script execution requests.
**Responsibilities:**
- Configure script execution parameters
- Handle script loading from files or strings
- Manage request timeouts
- Provide submission methods (fire-and-forget vs await-response)
**Key Methods:**
- `worker_id(id: &str)` - Target worker queue (determines which worker processes the task)
- `context_id(id: &str)` - Target context ID (determines execution context/circle)
- `script(content: &str)` - Set script content directly
- `script_path(path: &str)` - Load script from file
- `timeout(duration: Duration)` - Set execution timeout
- `submit()` - Fire-and-forget submission
- `await_response()` - Submit and wait for result
**Architecture Note:** The decoupling of `worker_id` and `context_id` allows a single worker to process tasks for multiple contexts (circles), providing greater deployment flexibility.
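A minimal sketch of this decoupling, using the builder and request methods listed above (identifiers such as `shared-worker` and the exact builder constructor are illustrative assumptions):
```rust
use rhai_dispatcher::RhaiDispatcherBuilder; // assumed import path
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = RhaiDispatcherBuilder::new()
        .caller_id("example-caller")
        .redis_url("redis://127.0.0.1/")
        .build()?;

    // One worker queue serves two different contexts (circles).
    for circle in ["circle-a", "circle-b"] {
        let result = client
            .new_play_request()
            .worker_id("shared-worker") // which queue processes the task
            .context_id(circle)         // which circle it executes in
            .script(r#"print("hello");"#)
            .timeout(Duration::from_secs(10))
            .await_response()
            .await?;
        println!("{circle}: {result}");
    }
    Ok(())
}
```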
### 4. Data Structures
#### RhaiTaskDetails
Represents the complete state of a task throughout its lifecycle.
```rust
pub struct RhaiTaskDetails {
pub task_id: String,
pub script: String,
pub status: String, // "pending", "processing", "completed", "error"
pub output: Option<String>,
pub error: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
pub caller_id: String,
}
```
#### RhaiDispatcherError
Comprehensive error handling for various failure scenarios:
- `RedisError` - Redis connection/operation failures
- `SerializationError` - JSON serialization/deserialization issues
- `Timeout` - Task execution timeouts
- `TaskNotFound` - Missing tasks after submission
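A hedged sketch of handling these variants at a call site, assuming `await_response()` returns `Result<String, RhaiDispatcherError>`:
```rust
use rhai_dispatcher::{RhaiDispatcher, RhaiDispatcherError};

async fn run_with_error_handling(client: &RhaiDispatcher) -> Result<(), RhaiDispatcherError> {
    match client
        .new_play_request()
        .worker_id("worker-1")
        .script("40 + 2")
        .await_response()
        .await
    {
        Ok(output) => println!("output: {output}"),
        // A timeout leaves the task record in Redis; it may be safe to retry.
        Err(RhaiDispatcherError::Timeout(task_id)) => eprintln!("task {task_id} timed out"),
        // Rare: the task hash disappeared after submission.
        Err(RhaiDispatcherError::TaskNotFound(task_id)) => eprintln!("task {task_id} not found"),
        // Redis and serialization errors propagate to the caller.
        Err(other) => return Err(other),
    }
    Ok(())
}
```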
## Communication Protocol
### Task Submission Flow
1. **Task Creation**: Client generates unique UUID for task identification
2. **Task Storage**: Task details stored in Redis hash: `rhailib:<task_id>`
3. **Queue Submission**: Task ID pushed to worker queue: `rhailib:<worker_id>`
4. **Reply Queue Setup**: Client listens on: `rhailib:reply:<task_id>`
### Redis Key Patterns
- **Task Storage**: `rhailib:<task_id>` (Redis Hash)
- **Worker Queues**: `rhailib:<worker_id>` (Redis List)
- **Reply Queues**: `rhailib:reply:<task_id>` (Redis List)
### Message Flow Diagram
```mermaid
sequenceDiagram
participant C as Client
participant R as Redis
participant W as Worker
C->>R: HSET rhailib:task_id (task details)
C->>R: LPUSH rhailib:worker_id task_id
C->>R: BLPOP rhailib:reply:task_id (blocking)
W->>R: BRPOP rhailib:worker_id (blocking)
W->>W: Execute Rhai Script
W->>R: LPUSH rhailib:reply:task_id (result)
R->>C: Return result from BLPOP
C->>R: DEL rhailib:reply:task_id (cleanup)
```
## Concurrency and Async Design
The client is built on `tokio` for asynchronous operations:
- **Connection Pooling**: Uses Redis multiplexed connections for efficiency
- **Non-blocking Operations**: All Redis operations are async
- **Timeout Handling**: Configurable timeouts with proper cleanup
- **Error Propagation**: Comprehensive error handling with context
## Configuration and Deployment
### Prerequisites
- Redis server accessible to both client and workers
- Proper network connectivity between components
- Sufficient Redis memory for task storage
### Configuration Options
- **Redis URL**: Connection string for Redis instance
- **Caller ID**: Unique identifier for client instance
- **Timeouts**: Per-request timeout configuration
- **Worker Targeting**: Direct worker queue addressing
## Security Considerations
- **Task Isolation**: Each task uses unique identifiers
- **Queue Separation**: Worker-specific queues prevent cross-contamination
- **Cleanup**: Automatic cleanup of reply queues after completion
- **Error Handling**: Secure error propagation without sensitive data leakage
## Performance Characteristics
- **Scalability**: Horizontal scaling through multiple worker instances
- **Throughput**: Limited by Redis performance and network latency
- **Memory Usage**: Efficient with connection pooling and cleanup
- **Latency**: Low latency for local Redis deployments
## Integration Points
The client integrates with:
- **Worker Services**: Via Redis queue protocol
- **Monitoring Systems**: Through structured logging
- **Application Code**: Via builder pattern API
- **Configuration Systems**: Through environment variables and builders


@@ -0,0 +1,272 @@
# Hero Dispatcher Protocol
This document describes the Redis-based protocol used by the Hero Dispatcher for job management and worker communication.
## Overview
The Hero Dispatcher uses Redis as a message broker and data store for managing distributed job execution. Jobs are stored as Redis hashes, and communication with workers happens through Redis lists (queues).
## Redis Namespace
All dispatcher-related keys use the `hero:` namespace prefix to avoid conflicts with other Redis usage.
## Data Structures
### Job Storage
Jobs are stored as Redis hashes with the following key pattern:
```
hero:job:{job_id}
```
**Job Hash Fields:**
- `id`: Unique job identifier (UUID v4)
- `caller_id`: Identifier of the client that created the job
- `worker_id`: Target worker identifier
- `context_id`: Execution context identifier
- `script`: Script content to execute (Rhai or HeroScript)
- `timeout`: Execution timeout in seconds
- `retries`: Number of retry attempts
- `concurrent`: Whether to execute in separate thread (true/false)
- `log_path`: Optional path to log file for job output
- `created_at`: Job creation timestamp (ISO 8601)
- `updated_at`: Job last update timestamp (ISO 8601)
- `status`: Current job status (dispatched/started/error/finished)
- `env_vars`: Environment variables as JSON object (optional)
- `prerequisites`: JSON array of job IDs that must complete before this job (optional)
- `dependents`: JSON array of job IDs that depend on this job completing (optional)
- `output`: Job execution result (set by worker)
- `error`: Error message if job failed (set by worker)
### Job Dependencies
Jobs can depend on other jobs through the `prerequisites` field, with the inverse relationship recorded in `dependents`. A job will not be dispatched until all of its prerequisites have completed successfully, as sketched below.
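A minimal sketch of wiring such a dependency with the `JobBuilder` API from `src/job.rs` (the scripts and the surrounding function are illustrative):
```rust
use hero_dispatcher::{Dispatcher, DispatcherError, ScriptType};

async fn chain_jobs(dispatcher: &Dispatcher) -> Result<(), DispatcherError> {
    // Parent job: no prerequisites, dispatchable immediately.
    let parent = dispatcher
        .new_job()
        .script_type(ScriptType::HeroScript)
        .script(r#"print("extract");"#)
        .build()?;
    dispatcher.create_job(&parent).await?;

    // Child job: held back until `parent` completes successfully.
    let child = dispatcher
        .new_job()
        .script_type(ScriptType::HeroScript)
        .script(r#"print("transform");"#)
        .prerequisite(&parent.id)
        .build()?;
    dispatcher.create_job(&child).await?;
    Ok(())
}
```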
### Work Queues
Jobs are queued for execution using Redis lists:
```
hero:work_queue:{worker_id}
```
Workers listen on their specific queue using `BLPOP` for job IDs to process.
### Stop Queues
Job stop requests are sent through dedicated stop queues:
```
hero:stop_queue:{worker_id}
```
Workers monitor these queues to receive stop requests for running jobs.
### Reply Queues
For synchronous job execution, dedicated reply queues are used:
```
hero:reply:{job_id}
```
Workers send results to these queues when jobs complete.
## Job Lifecycle
### 1. Job Creation
```
Client -> Redis: HSET hero:job:{job_id} {job_fields}
```
### 2. Job Submission
```
Client -> Redis: LPUSH hero:work_queue:{worker_id} {job_id}
```
### 3. Job Processing
```
Worker -> Redis: BLPOP hero:work_queue:{worker_id}
Worker -> Redis: HSET hero:job:{job_id} status "started"
Worker: Execute script
Worker -> Redis: HSET hero:job:{job_id} status "finished" output "{result}"
```
### 4. Job Completion (Async)
```
Worker -> Redis: LPUSH hero:reply:{job_id} {result}
```
## API Operations
### List Jobs
```rust
dispatcher.list_jobs() -> Vec<String>
```
**Redis Operations:**
- `KEYS hero:job:*` - Get all job keys
- Extract job IDs from key names
### Stop Job
```rust
dispatcher.stop_job(job_id) -> Result<(), DispatcherError>
```
**Redis Operations:**
- `LPUSH hero:stop_queue:{worker_id} {job_id}` - Send stop request
### Get Job Status
```rust
dispatcher.get_job_status(job_id) -> Result<JobStatus, DispatcherError>
```
**Redis Operations:**
- `HGETALL hero:job:{job_id}` - Get job data
- Parse `status` field
### Get Job Logs
```rust
dispatcher.get_job_logs(job_id) -> Result<Option<String>, DispatcherError>
```
**Redis Operations:**
- `HGETALL hero:job:{job_id}` - Get job data
- Read `log_path` field
- Read log file from filesystem
### Run Job and Await Result
```rust
dispatcher.run_job_and_await_result(job) -> Result<String, DispatcherError>
```
**Redis Operations:**
1. `HSET hero:job:{job_id} {job_fields}` - Store job
2. `LPUSH hero:work_queue:{worker_id} {job_id}` - Submit job
3. `BLPOP hero:reply:{job_id} {timeout}` - Wait for result
## Worker Protocol
### Job Processing Loop
```text
loop {
// 1. Wait for job
job_id = BLPOP hero:work_queue:{worker_id}
// 2. Get job details
job_data = HGETALL hero:job:{job_id}
// 3. Update status
HSET hero:job:{job_id} status "started"
// 4. Check for stop requests
if LLEN hero:stop_queue:{worker_id} > 0 {
stop_job_id = LPOP hero:stop_queue:{worker_id}
if stop_job_id == job_id {
HSET hero:job:{job_id} status "error" error "stopped"
continue
}
}
// 5. Execute script
result = execute_script(job_data.script)
// 6. Update job with result
HSET hero:job:{job_id} status "finished" output result
// 7. Send reply if needed
if reply_queue_exists(hero:reply:{job_id}) {
LPUSH hero:reply:{job_id} result
}
}
```
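The same loop as a hedged, runnable sketch against the `redis` crate's raw command API; `execute_script` is a placeholder standing in for the real script engine:
```rust
use redis::aio::MultiplexedConnection;
use std::collections::HashMap;

async fn worker_loop(
    conn: &mut MultiplexedConnection,
    worker_id: &str,
) -> Result<(), redis::RedisError> {
    let work_queue = format!("hero:work_queue:{}", worker_id);
    let stop_queue = format!("hero:stop_queue:{}", worker_id);
    loop {
        // 1. Block until a job ID arrives; BLPOP returns (queue_name, job_id).
        let (_queue, job_id): (String, String) = redis::cmd("BLPOP")
            .arg(&work_queue)
            .arg(0) // 0 = wait forever
            .query_async(conn)
            .await?;
        let job_key = format!("hero:job:{}", job_id);

        // 2. Fetch the job hash, then 3. mark it as started.
        let job: HashMap<String, String> =
            redis::cmd("HGETALL").arg(&job_key).query_async(conn).await?;
        redis::cmd("HSET")
            .arg(&job_key).arg("status").arg("started")
            .query_async::<_, ()>(conn).await?;

        // 4. Honor a pending stop request for this job, if any.
        let stops: Vec<String> = redis::cmd("LRANGE")
            .arg(&stop_queue).arg(0).arg(-1)
            .query_async(conn).await?;
        if stops.contains(&job_id) {
            redis::cmd("HSET")
                .arg(&job_key).arg("status").arg("error").arg("error").arg("stopped")
                .query_async::<_, ()>(conn).await?;
            redis::cmd("LREM")
                .arg(&stop_queue).arg(1).arg(&job_id)
                .query_async::<_, ()>(conn).await?;
            continue;
        }

        // 5. Execute the script and 6. record the result.
        let result = execute_script(job.get("script").map(String::as_str).unwrap_or(""));
        redis::cmd("HSET")
            .arg(&job_key).arg("status").arg("finished").arg("output").arg(&result)
            .query_async::<_, ()>(conn).await?;

        // 7. Wake any client blocked on the reply queue (harmless if nobody waits).
        redis::cmd("LPUSH")
            .arg(format!("hero:reply:{}", job_id)).arg(&result)
            .query_async::<_, ()>(conn).await?;
    }
}

fn execute_script(script: &str) -> String {
    // Placeholder: a real worker would invoke the Rhai/HeroScript engine here.
    format!("executed {} bytes of script", script.len())
}
```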
### Stop Request Handling
Workers should periodically check the stop queue during long-running jobs:
```text
if LLEN hero:stop_queue:{worker_id} > 0 {
stop_requests = LRANGE hero:stop_queue:{worker_id} 0 -1
if stop_requests.contains(current_job_id) {
// Stop current job execution
HSET hero:job:{current_job_id} status "error" error "stopped_by_request"
// Remove stop request
LREM hero:stop_queue:{worker_id} 1 current_job_id
return
}
}
```
## Error Handling
### Job Timeouts
- Client sets timeout when creating job
- Worker should respect timeout and stop execution
- If timeout exceeded: `HSET hero:job:{job_id} status "error" error "timeout"`
### Worker Failures
- If worker crashes, job remains in "started" status
- Monitoring systems can detect stale jobs and retry
- Jobs can be requeued: `LPUSH hero:work_queue:{worker_id} {job_id}` (see the sketch below)
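A hedged sketch of such a monitor, assuming jobs stuck in `started` beyond a threshold should be requeued (the threshold and target queue are policy choices, not part of this protocol):
```rust
use chrono::{DateTime, Duration, Utc};
use redis::AsyncCommands;
use std::collections::HashMap;

/// Requeue jobs stuck in "started" for longer than `max_age`.
async fn requeue_stale_jobs(
    conn: &mut redis::aio::MultiplexedConnection,
    worker_id: &str,
    max_age: Duration,
) -> Result<usize, redis::RedisError> {
    let mut requeued = 0;
    // KEYS is acceptable for a periodic monitor; prefer SCAN on large datasets.
    let keys: Vec<String> = conn.keys("hero:job:*").await?;
    for key in keys {
        let job: HashMap<String, String> = conn.hgetall(&key).await?;
        let stale = job.get("status").map(String::as_str) == Some("started")
            && job
                .get("updated_at")
                .and_then(|t| t.parse::<DateTime<Utc>>().ok())
                .map_or(false, |t| Utc::now() - t > max_age);
        if stale {
            // Job IDs are the key suffix after the "hero:job:" prefix.
            if let Some(job_id) = key.strip_prefix("hero:job:") {
                let _: () = conn
                    .lpush(format!("hero:work_queue:{}", worker_id), job_id)
                    .await?;
                requeued += 1;
            }
        }
    }
    Ok(requeued)
}
```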
### Redis Connection Issues
- Clients should implement retry logic with exponential backoff
- Workers should reconnect and resume processing
- Use Redis persistence to survive Redis restarts
## Monitoring and Observability
### Queue Monitoring
```bash
# Check work queue length
LLEN hero:work_queue:{worker_id}
# Check stop queue length
LLEN hero:stop_queue:{worker_id}
# List all jobs
KEYS hero:job:*
# Get job details
HGETALL hero:job:{job_id}
```
### Metrics to Track
- Jobs created per second
- Jobs completed per second
- Average job execution time
- Queue depths
- Worker availability
- Error rates by job type
## Security Considerations
### Redis Security
- Use Redis AUTH for authentication
- Enable TLS for Redis connections
- Restrict Redis network access
- Use Redis ACLs to limit worker permissions
### Job Security
- Validate script content before execution
- Sandbox script execution environment
- Limit resource usage (CPU, memory, disk)
- Log all job executions for audit
### Log File Security
- Ensure log paths are within allowed directories
- Validate log file permissions
- Rotate and archive logs regularly
- Sanitize sensitive data in logs
## Performance Considerations
### Redis Optimization
- Use Redis pipelining for batch operations
- Configure appropriate Redis memory limits
- Use Redis clustering for high availability
- Monitor Redis memory usage and eviction
### Job Optimization
- Keep job payloads small
- Use efficient serialization formats
- Batch similar jobs when possible
- Implement job prioritization if needed
### Worker Optimization
- Pool worker connections to Redis
- Use async I/O for Redis operations
- Implement graceful shutdown handling
- Monitor worker resource usage

559
core/dispatcher/examples/dispatcher_demo.rs Normal file

@@ -0,0 +1,559 @@
use hero_dispatcher::{DispatcherBuilder, ScriptType};
use redis::AsyncCommands;
use std::collections::HashMap;
use std::time::Duration;
/// Comprehensive example demonstrating the Hero Dispatcher functionality.
///
/// This example shows:
/// 1. Creating a dispatcher instance
/// 2. Creating jobs with different configurations
/// 3. Submitting jobs to the queue
/// 4. Inspecting Redis entries created by the dispatcher
/// 5. Running jobs and awaiting results
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
println!("🚀 Hero Dispatcher Demo");
println!("======================\n");
// Create dispatcher client with worker vectors per script type
let dispatcher = DispatcherBuilder::new()
.caller_id("demo-caller")
.context_id("demo-context")
.heroscript_workers(vec!["hero-worker-1".to_string(), "hero-worker-2".to_string()])
.rhai_sal_workers(vec!["rhai-sal-worker-1".to_string()])
.rhai_dsl_workers(vec!["rhai-dsl-worker-1".to_string()])
.redis_url("redis://127.0.0.1/")
.build()?;
println!("✅ Dispatcher created with:");
println!(" - Caller ID: demo-caller");
println!(" - Worker ID: demo-worker");
println!(" - Context ID: demo-context\n");
// Create Redis connection for inspection
let redis_client = redis::Client::open("redis://127.0.0.1:6379")?;
let mut redis_conn = redis_client.get_multiplexed_async_connection().await?;
// Demo 1: Create a simple job
println!("📝 Demo 1: Creating a simple job");
println!("--------------------------------");
let job1 = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"print("Hello from job 1!");"#)
.timeout(Duration::from_secs(10))
.build()?;
println!("Job 1 created with ID: {}", job1.id);
// Create the job (stores in Redis)
dispatcher.create_job(&job1).await?;
println!("✅ Job 1 stored in Redis");
// Inspect Redis entries for this job
print_job_redis_entries(&mut redis_conn, &job1.id).await?;
println!();
// Demo 2: Create a job with custom settings
println!("📝 Demo 2: Creating a job with custom settings");
println!("----------------------------------------------");
let job2 = dispatcher
.new_job()
.script_type(ScriptType::RhaiSAL)
.script(r#"
let result = 42 * 2;
print("Calculation result: " + result);
result
"#)
.timeout(Duration::from_secs(30))
.build()?;
println!("Job 2 created with ID: {}", job2.id);
// Create the job
dispatcher.create_job(&job2).await?;
println!("✅ Job 2 stored in Redis");
// Inspect Redis entries
print_job_redis_entries(&mut redis_conn, &job2.id).await?;
println!();
// Demo 3: Environment Variables
println!("📝 Demo 3: Jobs with Environment Variables");
println!("------------------------------------------");
// Create environment variables map
let mut env_vars = HashMap::new();
env_vars.insert("API_KEY".to_string(), "secret-api-key-123".to_string());
env_vars.insert("DEBUG_MODE".to_string(), "true".to_string());
env_vars.insert("MAX_RETRIES".to_string(), "5".to_string());
env_vars.insert("SERVICE_URL".to_string(), "https://api.example.com".to_string());
let job_with_env = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"
print("Environment variables available:");
print("API_KEY: " + env.API_KEY);
print("DEBUG_MODE: " + env.DEBUG_MODE);
print("MAX_RETRIES: " + env.MAX_RETRIES);
print("SERVICE_URL: " + env.SERVICE_URL);
"Environment variables processed successfully"
"#)
.env_vars(env_vars.clone())
.timeout(Duration::from_secs(15))
.build()?;
println!("Job with environment variables created: {}", job_with_env.id);
// Store job in Redis
dispatcher.create_job(&job_with_env).await?;
println!("✅ Job with env vars stored in Redis");
// Show Redis entries including environment variables
print_job_redis_entries(&mut redis_conn, &job_with_env.id).await?;
// Demonstrate individual env var setting
let job_individual_env = dispatcher
.new_job()
.script_type(ScriptType::RhaiSAL)
.script("print('Single env var: ' + env.SINGLE_VAR); 'done'")
.env_var("SINGLE_VAR", "individual-value")
.env_var("ANOTHER_VAR", "another-value")
.build()?;
println!("Job with individual env vars created: {}", job_individual_env.id);
dispatcher.create_job(&job_individual_env).await?;
println!("✅ Job with individual env vars stored in Redis");
print_job_redis_entries(&mut redis_conn, &job_individual_env.id).await?;
println!();
// Demo 4: Create multiple jobs and show queue state
println!("📝 Demo 4: Creating multiple jobs and inspecting queue");
println!("----------------------------------------------------");
let mut job_ids = Vec::new();
for i in 3..=5 {
let script_type = match i {
3 => ScriptType::HeroScript,
4 => ScriptType::RhaiSAL,
5 => ScriptType::RhaiDSL,
_ => ScriptType::HeroScript,
};
let job = dispatcher
.new_job()
.script_type(script_type)
.script(&format!(r#"print("Job {} is running");"#, i))
.timeout(Duration::from_secs(15))
.build()?;
job_ids.push(job.id.clone());
dispatcher.create_job(&job).await?;
println!("✅ Job {} created with ID: {}", i, job.id);
}
// Show all Redis keys related to our jobs
print_all_dispatcher_redis_keys(&mut redis_conn).await?;
println!();
// Demo 5: Show job status checking
println!("📝 Demo 5: Checking job statuses");
println!("--------------------------------");
for job_id in &job_ids {
match dispatcher.get_job_status(job_id).await {
Ok(status) => println!("Job {}: {:?}", job_id, status),
Err(e) => println!("Error getting status for job {}: {}", job_id, e),
}
}
println!();
// Demo 6: Simulate running a job and getting result (if worker is available)
println!("📝 Demo 6: Attempting to run job and await result");
println!("------------------------------------------------");
let simple_job = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"print("This job will complete quickly"); "success""#)
.timeout(Duration::from_secs(5))
.build()?;
println!("Created job for execution: {}", simple_job.id);
// Try to run the job (this will timeout if no worker is available)
match dispatcher.run_job_and_await_result(&simple_job).await {
Ok(result) => {
println!("✅ Job completed successfully!");
println!("Result: {}", result);
}
Err(e) => {
println!("⚠️ Job execution failed (likely no worker available): {}", e);
println!(" This is expected if no Hero worker is running");
}
}
// Demo 7: List all jobs
println!("📝 Demo 7: Listing all jobs");
println!("-------------------------");
let all_job_ids = match dispatcher.list_jobs().await {
Ok(job_ids) => {
println!("Found {} jobs:", job_ids.len());
for job_id in &job_ids {
println!(" - {}", job_id);
}
job_ids
}
Err(e) => {
println!("Error listing jobs: {}", e);
Vec::new()
}
};
println!();
// Demo 8: Create a job with log path and demonstrate logs functionality
println!("📝 Demo 8: Job with log path and logs retrieval");
println!("-----------------------------------------------");
let log_job = dispatcher
.new_job()
.script(r#"print("This job writes to logs"); "log_test""#)
.log_path("/tmp/hero_job_demo.log")
.timeout(Duration::from_secs(10))
.build()?;
println!("Created job with log path: {}", log_job.id);
dispatcher.create_job(&log_job).await?;
// Try to get logs (will be empty since job hasn't run)
match dispatcher.get_job_logs(&log_job.id).await {
Ok(Some(logs)) => println!("Job logs: {}", logs),
Ok(None) => println!("No logs available for job (expected - job hasn't run or no log file)"),
Err(e) => println!("Error getting logs: {}", e),
}
println!();
// Demo 9: Stop job functionality
println!("📝 Demo 9: Stopping a job");
println!("-------------------------");
if let Some(job_id) = all_job_ids.first() {
println!("Attempting to stop job: {}", job_id);
match dispatcher.stop_job(job_id).await {
Ok(()) => println!("✅ Stop request sent for job {}", job_id),
Err(e) => println!("Error stopping job: {}", e),
}
// Show stop queue
let stop_queue_key = "hero:stop_queue:demo-worker";
let stop_queue_length: i64 = redis_conn.llen(stop_queue_key).await?;
println!("📤 Stop queue length ({}): {}", stop_queue_key, stop_queue_length);
if stop_queue_length > 0 {
let stop_items: Vec<String> = redis_conn.lrange(stop_queue_key, 0, -1).await?;
println!("📋 Stop queue items:");
for (i, item) in stop_items.iter().enumerate() {
println!(" {}: {}", i, item);
}
}
} else {
println!("No jobs available to stop");
}
println!();
// Demo 10: Final Redis state inspection
println!("📝 Demo 10: Final Redis state");
println!("----------------------------");
print_all_dispatcher_redis_keys(&mut redis_conn).await?;
for job_id in &job_ids {
match dispatcher.get_job_status(job_id).await {
Ok(status) => println!("Job {}: {:?}", job_id, status),
Err(e) => println!("Error getting status for job {}: {}", job_id, e),
}
}
println!();
// Demo 5: Simulate running a job and getting result (if worker is available)
println!("📝 Demo 5: Attempting to run job and await result");
println!("------------------------------------------------");
let simple_job = dispatcher
.new_job()
.script_type(ScriptType::HeroScript)
.script(r#"print("This job will complete quickly"); "success""#)
.timeout(Duration::from_secs(5))
.build()?;
println!("Created job for execution: {}", simple_job.id);
// Try to run the job (this will timeout if no worker is available)
match dispatcher.run_job_and_await_result(&simple_job).await {
Ok(result) => {
println!("✅ Job completed successfully!");
println!("Result: {}", result);
}
Err(e) => {
println!("⚠️ Job execution failed (likely no worker available): {}", e);
println!(" This is expected if no Hero worker is running");
}
}
// Demo 6: List all jobs
println!("📝 Demo 6: Listing all jobs");
println!("-------------------------");
let all_job_ids = match dispatcher.list_jobs().await {
Ok(job_ids) => {
println!("Found {} jobs:", job_ids.len());
for job_id in &job_ids {
println!(" - {}", job_id);
}
job_ids
}
Err(e) => {
println!("Error listing jobs: {}", e);
Vec::new()
}
};
println!();
// Demo 7: Create a job with log path and demonstrate logs functionality
println!("📝 Demo 7: Job with log path and logs retrieval");
println!("-----------------------------------------------");
let log_job = dispatcher
.new_job()
.script(r#"print("This job writes to logs"); "log_test""#)
.log_path("/tmp/hero_job_demo.log")
.timeout(Duration::from_secs(10))
.build()?;
println!("Created job with log path: {}", log_job.id);
dispatcher.create_job(&log_job).await?;
// Try to get logs (will be empty since job hasn't run)
match dispatcher.get_job_logs(&log_job.id).await {
Ok(Some(logs)) => println!("Job logs: {}", logs),
Ok(None) => println!("No logs available for job (expected - job hasn't run or no log file)"),
Err(e) => println!("Error getting logs: {}", e),
}
println!();
// Demo 8: Stop job functionality
println!("📝 Demo 8: Stopping a job");
println!("-------------------------");
if let Some(job_id) = all_job_ids.first() {
println!("Attempting to stop job: {}", job_id);
match dispatcher.stop_job(job_id).await {
Ok(()) => println!("✅ Stop request sent for job {}", job_id),
Err(e) => println!("Error stopping job: {}", e),
}
// Show stop queue
let stop_queue_key = "hero:stop_queue:demo-worker";
let stop_queue_length: i64 = redis_conn.llen(stop_queue_key).await?;
println!("📤 Stop queue length ({}): {}", stop_queue_key, stop_queue_length);
if stop_queue_length > 0 {
let stop_items: Vec<String> = redis_conn.lrange(stop_queue_key, 0, -1).await?;
println!("📋 Stop queue items:");
for (i, item) in stop_items.iter().enumerate() {
println!(" {}: {}", i, item);
}
}
} else {
println!("No jobs available to stop");
}
println!();
// Demo 9: Final Redis state inspection
println!("📝 Demo 9: Final Redis state");
println!("----------------------------");
print_all_dispatcher_redis_keys(&mut redis_conn).await?;
println!("\n🎉 Dispatcher demo completed!");
println!("💡 New features demonstrated:");
println!(" - list_jobs(): List all job IDs");
println!(" - stop_job(): Send stop request to worker");
println!(" - get_job_logs(): Retrieve job logs from file");
println!(" - log_path(): Configure log file for jobs");
println!("💡 To see job execution in action, start a Hero worker that processes the 'demo-worker' queue");
// Demo 11: Demonstrate new job management features
println!("📝 Demo 11: Job Management - Delete and Clear Operations");
println!("--------------------------------------------------------");
// List all current jobs
match dispatcher.list_jobs().await {
Ok(jobs) => {
println!("Current jobs in system: {:?}", jobs);
if !jobs.is_empty() {
// Delete the first job as an example
let job_to_delete = &jobs[0];
println!("Deleting job: {}", job_to_delete);
match dispatcher.delete_job(job_to_delete).await {
Ok(()) => println!("✅ Job {} deleted successfully", job_to_delete),
Err(e) => println!("❌ Error deleting job {}: {}", job_to_delete, e),
}
// Show updated job list
match dispatcher.list_jobs().await {
Ok(remaining_jobs) => println!("Remaining jobs: {:?}", remaining_jobs),
Err(e) => println!("Error listing jobs: {}", e),
}
}
}
Err(e) => println!("Error listing jobs: {}", e),
}
println!();
// Demonstrate clear all jobs
println!("Clearing all remaining jobs...");
match dispatcher.clear_all_jobs().await {
Ok(count) => println!("✅ Cleared {} jobs from Redis", count),
Err(e) => println!("❌ Error clearing jobs: {}", e),
}
// Verify all jobs are cleared
match dispatcher.list_jobs().await {
Ok(jobs) => {
if jobs.is_empty() {
println!("✅ All jobs successfully cleared from Redis");
} else {
println!("⚠️ Some jobs remain: {:?}", jobs);
}
}
Err(e) => println!("Error verifying job clearance: {}", e),
}
println!();
println!("🎉 Demo completed! The dispatcher now supports:");
println!(" • Script type routing (HeroScript, RhaiSAL, RhaiDSL)");
println!(" • Multiple workers per script type for load balancing");
println!(" • Automatic worker selection based on job script type");
println!(" • Job management: list, delete, and clear operations");
println!(" • Enhanced job logging and monitoring");
Ok(())
}
/// Print Redis entries for a specific job
async fn print_job_redis_entries(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<(), redis::RedisError> {
let job_key = format!("hero:job:{}", job_id);
println!("🔍 Redis entries for job {}:", job_id);
// Check if job hash exists
let exists: bool = conn.exists(&job_key).await?;
if exists {
// Check if the key is actually a hash before trying to get all fields
let key_type: String = redis::cmd("TYPE").arg(&job_key).query_async(conn).await?;
if key_type == "hash" {
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
println!(" 📋 Job data ({}): ", job_key);
for (field, value) in job_data {
println!(" {}: {}", field, value);
}
} else {
println!(" ⚠️ Key {} exists but is not a hash (type: {})", job_key, key_type);
}
} else {
println!(" ❌ No job data found at key: {}", job_key);
}
// Check work queue
let queue_key = "hero:work_queue:demo-worker";
let queue_length: i64 = conn.llen(queue_key).await?;
println!(" 📤 Work queue length ({}): {}", queue_key, queue_length);
if queue_length > 0 {
let queue_items: Vec<String> = conn.lrange(queue_key, 0, -1).await?;
println!(" 📋 Queue items:");
for (i, item) in queue_items.iter().enumerate() {
println!(" {}: {}", i, item);
}
}
Ok(())
}
/// Print all dispatcher-related Redis keys
async fn print_all_dispatcher_redis_keys(
conn: &mut redis::aio::MultiplexedConnection,
) -> Result<(), redis::RedisError> {
println!("🔍 All Hero Dispatcher Redis keys:");
// Get all keys with hero: prefix
let keys: Vec<String> = conn.keys("hero:*").await?;
if keys.is_empty() {
println!(" ❌ No Hero keys found in Redis");
return Ok(());
}
// Group keys by type
let mut job_keys = Vec::new();
let mut queue_keys = Vec::new();
let mut other_keys = Vec::new();
for key in keys {
if key.starts_with("hero:job:") {
job_keys.push(key);
} else if key.contains("queue") {
queue_keys.push(key);
} else {
other_keys.push(key);
}
}
// Print job keys
if !job_keys.is_empty() {
println!(" 📋 Job entries:");
for key in job_keys {
// Check if the key is actually a hash before trying to get all fields
let key_type: String = redis::cmd("TYPE").arg(&key).query_async(conn).await?;
if key_type == "hash" {
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&key).await?;
println!(" {}: {} fields", key, job_data.len());
} else {
println!(" {}: {} (not a hash, skipping)", key, key_type);
}
}
}
// Print queue keys
if !queue_keys.is_empty() {
println!(" 📤 Queue entries:");
for key in queue_keys {
let length: i64 = conn.llen(&key).await?;
println!(" {}: {} items", key, length);
}
}
// Print other keys
if !other_keys.is_empty() {
println!(" 🔧 Other entries:");
for key in other_keys {
println!(" {}", key);
}
}
Ok(())
}

90
core/dispatcher/examples/timeout_example.rs Normal file

@@ -0,0 +1,90 @@
use log::info;
use hero_dispatcher::{DispatcherBuilder, DispatcherError, ScriptType};
use std::time::{Duration, Instant};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::builder()
.filter_level(log::LevelFilter::Info)
.init();
// Build the client using the builder pattern.
// The target worker queue has no worker listening, so the request can only time out.
let non_existent_recipient = "non_existent_worker_for_timeout_test";
let client = DispatcherBuilder::new()
.caller_id("timeout-example-runner")
.heroscript_workers(vec![non_existent_recipient.to_string()])
.redis_url("redis://127.0.0.1/")
.build()?;
info!("Dispatcher created.");
let script_content = r#"
// This script will never be executed by a worker because the recipient does not exist.
let x = 10;
let y = x + 32;
y
"#;
// The queue for `non_existent_recipient` has no worker attached, so nothing will ever reply.
let very_short_timeout = Duration::from_secs(2);
info!(
"Submitting script to non-existent recipient '{}' with a timeout of {:?}...",
non_existent_recipient, very_short_timeout
);
let start_time = Instant::now();
// Use the new JobBuilder
let result = client
.new_job()
.script_type(ScriptType::HeroScript)
.script(script_content)
.timeout(very_short_timeout)
.await_response()
.await;
match result {
Ok(details) => {
log::error!(
"Timeout Example FAILED: Expected a timeout, but got Ok: {:?}",
details
);
Err("Expected timeout, but task completed successfully.".into())
}
Err(e) => {
let elapsed = start_time.elapsed();
info!("Timeout Example: Received error as expected: {}", e);
info!("Elapsed time: {:?}", elapsed);
match e {
DispatcherError::Timeout(task_id) => {
info!("Timeout Example PASSED: Correctly received DispatcherError::Timeout for task_id: {}", task_id);
// Ensure the elapsed time is close to the timeout duration
// Allow for some buffer for processing
assert!(
elapsed >= very_short_timeout
&& elapsed < very_short_timeout + Duration::from_secs(1),
"Elapsed time {:?} should be close to timeout {:?}",
elapsed,
very_short_timeout
);
info!(
"Elapsed time {:?} is consistent with timeout duration {:?}.",
elapsed, very_short_timeout
);
Ok(())
}
other_error => {
log::error!(
"Timeout Example FAILED: Expected DispatcherError::Timeout, but got other error: {:?}",
other_error
);
Err(format!(
"Expected DispatcherError::Timeout, got other error: {:?}",
other_error
)
.into())
}
}
}
}
}

57
core/dispatcher/src/error.rs Normal file

@@ -0,0 +1,57 @@
/// Comprehensive error type for all possible failures in the Rhai client.
///
/// This enum covers all error scenarios that can occur during client operations,
/// from Redis connectivity issues to task execution timeouts.
#[derive(Debug)]
pub enum DispatcherError {
/// Redis connection or operation error
RedisError(redis::RedisError),
/// JSON serialization/deserialization error
SerializationError(serde_json::Error),
/// Task execution timeout - contains the task_id that timed out
Timeout(String),
/// Task not found after submission - contains the task_id (rare occurrence)
TaskNotFound(String),
/// Context ID is missing
ContextIdMissing,
/// Invalid input provided
InvalidInput(String),
}
impl From<redis::RedisError> for DispatcherError {
fn from(err: redis::RedisError) -> Self {
DispatcherError::RedisError(err)
}
}
impl From<serde_json::Error> for DispatcherError {
fn from(err: serde_json::Error) -> Self {
DispatcherError::SerializationError(err)
}
}
impl std::fmt::Display for DispatcherError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
DispatcherError::RedisError(e) => write!(f, "Redis error: {}", e),
DispatcherError::SerializationError(e) => write!(f, "Serialization error: {}", e),
DispatcherError::Timeout(task_id) => {
write!(f, "Timeout waiting for task {} to complete", task_id)
}
DispatcherError::TaskNotFound(task_id) => {
write!(f, "Task {} not found after submission", task_id)
}
DispatcherError::ContextIdMissing => {
write!(f, "Context ID is missing")
}
DispatcherError::InvalidInput(msg) => {
write!(f, "Invalid input: {}", msg)
}
}
}
}
impl std::error::Error for DispatcherError {}

261
core/dispatcher/src/job.rs Normal file

@@ -0,0 +1,261 @@
use chrono::Utc;
use std::collections::HashMap;
use std::time::Duration;
use uuid::Uuid;
use crate::{Dispatcher, DispatcherError};
use hero_job::{Job, ScriptType};
/// Builder for constructing and submitting script execution requests.
///
/// This builder provides a fluent interface for configuring script execution
/// parameters and offers two submission modes: fire-and-forget (`submit()`)
/// and request-reply (`await_response()`).
///
/// # Example
///
/// ```rust,no_run
/// use std::time::Duration;
/// use hero_dispatcher::ScriptType;
///
/// # async fn example(client: &hero_dispatcher::Dispatcher) -> Result<String, hero_dispatcher::DispatcherError> {
/// let result = client
/// .new_job()
/// .script_type(ScriptType::HeroScript)
/// .script(r#"print("Hello, World!");"#)
/// .timeout(Duration::from_secs(30))
/// .await_response()
/// .await?;
/// # Ok(result)
/// # }
/// ```
pub struct JobBuilder<'a> {
client: &'a Dispatcher,
request_id: String,
context_id: String,
caller_id: String,
script: String,
script_type: ScriptType,
timeout: Duration,
retries: u32,
concurrent: bool,
log_path: Option<String>,
env_vars: HashMap<String, String>,
prerequisites: Vec<String>,
dependents: Vec<String>
}
impl<'a> JobBuilder<'a> {
pub fn new(client: &'a Dispatcher) -> Self {
Self {
client,
request_id: "".to_string(),
context_id: client.context_id.clone(),
caller_id: client.caller_id.clone(),
script: "".to_string(),
script_type: ScriptType::HeroScript, // Default to HeroScript
timeout: Duration::from_secs(5),
retries: 0,
concurrent: false,
log_path: None,
env_vars: HashMap::new(),
prerequisites: Vec::new(),
dependents: Vec::new(),
}
}
pub fn request_id(mut self, request_id: &str) -> Self {
self.request_id = request_id.to_string();
self
}
pub fn script_type(mut self, script_type: ScriptType) -> Self {
self.script_type = script_type;
self
}
pub fn context_id(mut self, context_id: &str) -> Self {
self.context_id = context_id.to_string();
self
}
pub fn script(mut self, script: &str) -> Self {
self.script = script.to_string();
self
}
pub fn script_path(mut self, script_path: &str) -> Self {
// Panics if the file cannot be read; a fallible variant would return Result instead.
self.script = std::fs::read_to_string(script_path)
.expect("failed to read script file");
self
}
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self
}
pub fn log_path(mut self, log_path: &str) -> Self {
self.log_path = Some(log_path.to_string());
self
}
/// Set a single environment variable
pub fn env_var(mut self, key: &str, value: &str) -> Self {
self.env_vars.insert(key.to_string(), value.to_string());
self
}
/// Set multiple environment variables from a HashMap
pub fn env_vars(mut self, env_vars: HashMap<String, String>) -> Self {
self.env_vars.extend(env_vars);
self
}
/// Clear all environment variables
pub fn clear_env_vars(mut self) -> Self {
self.env_vars.clear();
self
}
/// Add a prerequisite job ID that must complete before this job can run
pub fn prerequisite(mut self, job_id: &str) -> Self {
self.prerequisites.push(job_id.to_string());
self
}
/// Set multiple prerequisite job IDs
pub fn prerequisites(mut self, job_ids: Vec<String>) -> Self {
self.prerequisites.extend(job_ids);
self
}
/// Add a dependent job ID that depends on this job completing
pub fn dependent(mut self, job_id: &str) -> Self {
self.dependents.push(job_id.to_string());
self
}
/// Set multiple dependent job IDs
pub fn dependents(mut self, job_ids: Vec<String>) -> Self {
self.dependents.extend(job_ids);
self
}
/// Clear all prerequisites
pub fn clear_prerequisites(mut self) -> Self {
self.prerequisites.clear();
self
}
/// Clear all dependents
pub fn clear_dependents(mut self) -> Self {
self.dependents.clear();
self
}
pub fn build(self) -> Result<Job, DispatcherError> {
let request_id = if self.request_id.is_empty() {
// Generate a UUID for the request_id
Uuid::new_v4().to_string()
} else {
self.request_id.clone()
};
if self.context_id.is_empty() {
return Err(DispatcherError::ContextIdMissing);
}
if self.caller_id.is_empty() {
return Err(DispatcherError::InvalidInput("caller_id is missing".to_string()));
}
let now = Utc::now();
Ok(Job {
id: request_id,
caller_id: self.caller_id,
context_id: self.context_id,
script: self.script,
script_type: self.script_type,
timeout: self.timeout,
retries: self.retries as u8,
concurrent: self.concurrent,
log_path: self.log_path.clone(),
env_vars: self.env_vars.clone(),
prerequisites: self.prerequisites.clone(),
dependents: self.dependents.clone(),
created_at: now,
updated_at: now,
})
}
pub async fn submit(self) -> Result<(), DispatcherError> {
// Reuse build() for validation and construction, then fire and forget.
let client = self.client;
let job = self.build()?;
client.create_job(&job).await?;
Ok(())
}
pub async fn await_response(self) -> Result<String, DispatcherError> {
// Reuse build() for validation and construction, then block on the reply queue.
let client = self.client;
let job = self.build()?;
let result = client.run_job_and_await_result(&job).await?;
Ok(result)
}
}

498
core/dispatcher/src/lib.rs Normal file
View File

@@ -0,0 +1,498 @@
use log::{debug, error, info, warn};
use redis::AsyncCommands;
use std::time::Duration;
use hero_job::NAMESPACE_PREFIX;
mod job;
mod error;
pub use crate::error::DispatcherError;
pub use crate::job::JobBuilder;
// Re-export types from hero_job for public API
pub use hero_job::{Job, JobStatus, ScriptType};
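/// Redis-backed job dispatcher that stores jobs and routes them to
/// script-type-specific worker queues.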
pub struct Dispatcher {
redis_client: redis::Client,
caller_id: String,
context_id: String,
heroscript_workers: Vec<String>,
rhai_sal_workers: Vec<String>,
rhai_dsl_workers: Vec<String>,
}
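/// Builder for configuring and constructing a `Dispatcher`.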
pub struct DispatcherBuilder {
redis_url: Option<String>,
caller_id: Option<String>,
context_id: Option<String>,
heroscript_workers: Vec<String>,
rhai_sal_workers: Vec<String>,
rhai_dsl_workers: Vec<String>,
}
impl DispatcherBuilder {
pub fn new() -> Self {
Self {
redis_url: None,
caller_id: Some("default_caller".to_string()),
context_id: Some("default_context".to_string()),
heroscript_workers: Vec::new(),
rhai_sal_workers: Vec::new(),
rhai_dsl_workers: Vec::new(),
}
}
pub fn caller_id(mut self, caller_id: &str) -> Self {
self.caller_id = Some(caller_id.to_string());
self
}
pub fn context_id(mut self, context_id: &str) -> Self {
self.context_id = Some(context_id.to_string());
self
}
pub fn heroscript_workers(mut self, workers: Vec<String>) -> Self {
self.heroscript_workers = workers;
self
}
pub fn rhai_sal_workers(mut self, workers: Vec<String>) -> Self {
self.rhai_sal_workers = workers;
self
}
pub fn rhai_dsl_workers(mut self, workers: Vec<String>) -> Self {
self.rhai_dsl_workers = workers;
self
}
pub fn redis_url(mut self, url: &str) -> Self {
self.redis_url = Some(url.to_string());
self
}
/// Builds the final `Dispatcher` instance.
///
/// This method validates the configuration and creates the Redis client.
/// It will return an error if the caller ID is empty or if the Redis
/// connection cannot be established.
///
/// # Returns
///
/// * `Ok(Dispatcher)` - Successfully configured client
/// * `Err(DispatcherError)` - Configuration or connection error
pub fn build(self) -> Result<Dispatcher, DispatcherError> {
let url = self
.redis_url
.unwrap_or_else(|| "redis://127.0.0.1/".to_string());
let client = redis::Client::open(url)?;
Ok(Dispatcher {
redis_client: client,
caller_id: self.caller_id.unwrap_or_else(|| "default_caller".to_string()),
context_id: self.context_id.unwrap_or_else(|| "default_context".to_string()),
heroscript_workers: self.heroscript_workers,
rhai_sal_workers: self.rhai_sal_workers,
rhai_dsl_workers: self.rhai_dsl_workers,
})
}
}
impl Dispatcher {
/// Select a worker ID based on the script type using round-robin or first available
fn select_worker_for_script_type(&self, script_type: &ScriptType) -> Result<String, DispatcherError> {
let workers = match script_type {
ScriptType::HeroScript => &self.heroscript_workers,
ScriptType::RhaiSAL => &self.rhai_sal_workers,
ScriptType::RhaiDSL => &self.rhai_dsl_workers,
};
if workers.is_empty() {
return Err(DispatcherError::InvalidInput(format!(
"No workers configured for script type: {:?}", script_type
)));
}
// For now, simply pick the first configured worker
// TODO: Implement proper load balancing (e.g., round-robin)
Ok(workers[0].clone())
}
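/// Create a new `JobBuilder` bound to this dispatcher.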
pub fn new_job(&self) -> JobBuilder {
JobBuilder::new(self)
}
// Internal helper to store job parameters in Redis
async fn create_job_using_connection(
&self,
conn: &mut redis::aio::MultiplexedConnection,
job: &Job,
) -> Result<(), DispatcherError> {
debug!(
"Storing job {} (script type: {:?}, namespace prefix: {})",
job.id, job.script_type, NAMESPACE_PREFIX
);
// Use the shared Job struct's Redis storage method
job.store_in_redis(conn).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to store job in Redis: {}", e)))?;
Ok(())
}
// Internal helper to push a job ID onto a worker's work queue
async fn start_job_using_connection(
&self,
conn: &mut redis::aio::MultiplexedConnection,
job_id: String,
worker_id: String
) -> Result<(), DispatcherError> {
let worker_queue_key = format!(
"{}{}",
NAMESPACE_PREFIX,
worker_id.replace(" ", "_").to_lowercase()
);
// LPUSH returns the new list length; propagate any Redis error instead of ignoring it
let _: i64 = conn.lpush(&worker_queue_key, job_id).await?;
Ok(())
}
// Internal helper to await response from worker
async fn await_response_from_connection(
&self,
conn: &mut redis::aio::MultiplexedConnection,
job_key: &String,
reply_queue_key: &String,
timeout: Duration,
) -> Result<String, DispatcherError> {
// BLPOP on the reply queue; its timeout argument is expressed in seconds
let blpop_timeout_secs = timeout.as_secs().max(1); // ensure at least 1 second
match conn
.blpop::<&String, Option<(String, String)>>(reply_queue_key, blpop_timeout_secs as f64)
.await
{
Ok(Some((_queue, result_message_str))) => {
Ok(result_message_str)
}
Ok(None) => {
// BLPOP timed out
warn!(
"Timeout waiting for result on reply queue {} for job {}",
reply_queue_key, job_key
);
// Optionally, delete the reply queue
let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
Err(DispatcherError::Timeout(job_key.clone()))
}
Err(e) => {
// Redis error
error!(
"Redis error on BLPOP for reply queue {}: {}",
reply_queue_key, e
);
// Optionally, delete the reply queue
let _: redis::RedisResult<i32> = conn.del(&reply_queue_key).await;
Err(DispatcherError::RedisError(e))
}
}
}
/// Store a job in Redis without dispatching it to a worker.
pub async fn create_job(
&self,
job: &Job,
) -> Result<(), DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
self.create_job_using_connection(&mut conn, job).await?;
Ok(())
}
/// Create the job, dispatch it to a worker selected by script type, and
/// await the result on a dedicated reply queue.
pub async fn run_job_and_await_result(
&self,
job: &Job
) -> Result<String, DispatcherError> {
// Select worker based on script type
let worker_id = self.select_worker_for_script_type(&job.script_type)?;
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let reply_queue_key = format!("{}reply:{}", NAMESPACE_PREFIX, job.id);
self.create_job_using_connection(&mut conn, job).await?;
self.start_job_using_connection(&mut conn, job.id.clone(), worker_id).await?;
info!(
"Task {} submitted. Waiting for result on queue {} with timeout {:?}...",
job.id, // This is the UUID
reply_queue_key,
job.timeout
);
self.await_response_from_connection(
&mut conn,
&job.id,
&reply_queue_key,
job.timeout,
)
.await
}
/// Fetch a job's status from Redis.
pub async fn get_job_status(
&self,
job_id: &str,
) -> Result<JobStatus, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
// HGETALL returns an empty map (not nil) for a missing key
let result_map: std::collections::HashMap<String, String> =
conn.hgetall(&job_key).await?;
if result_map.is_empty() {
warn!("Job {} not found in Redis", job_id);
return Ok(JobStatus::Dispatched); // default for missing jobs
}
let status_str = result_map.get("status").cloned().unwrap_or_else(|| {
warn!("Job {}: 'status' field missing from Redis hash, defaulting to empty.", job_id);
String::new()
});
let status = match status_str.as_str() {
"dispatched" => JobStatus::Dispatched,
"started" => JobStatus::Started,
"error" => JobStatus::Error,
"finished" => JobStatus::Finished,
_ => JobStatus::Dispatched, // default
};
Ok(status)
}
/// Fetch a job's output from Redis, if any.
pub async fn get_job_output(
&self,
job_id: &str,
) -> Result<Option<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
// HGETALL returns an empty map (not nil) for a missing key
let result_map: std::collections::HashMap<String, String> =
conn.hgetall(&job_key).await?;
if result_map.is_empty() {
warn!("Job {} not found in Redis", job_id);
return Ok(None);
}
Ok(result_map.get("output").cloned())
}
/// List all jobs in Redis
pub async fn list_jobs(&self) -> Result<Vec<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Use the shared Job struct's list method
Job::list_all_job_ids(&mut conn).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to list jobs: {}", e)))
}
/// Stop a job by pushing its ID to the stop queue
pub async fn stop_job(&self, job_id: &str) -> Result<(), DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Get job details to determine script type and appropriate worker
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
if job_data.is_empty() {
return Err(DispatcherError::InvalidInput(format!("Job {} not found", job_id)));
}
// Parse script type from job data
let script_type_str = job_data.get("script_type")
.ok_or_else(|| DispatcherError::InvalidInput("Job missing script_type field".to_string()))?;
let script_type: ScriptType = serde_json::from_str(&format!("\"{}\"", script_type_str))
.map_err(|e| DispatcherError::InvalidInput(format!("Invalid script type: {}", e)))?;
// Select appropriate worker for this script type
let worker_id = self.select_worker_for_script_type(&script_type)?;
let stop_queue_key = format!("{}stop_queue:{}", NAMESPACE_PREFIX, worker_id);
// Push job ID to the stop queue
conn.lpush::<_, _, ()>(&stop_queue_key, job_id).await?;
info!("Job {} added to stop queue {} for script type {:?}", job_id, stop_queue_key, script_type);
Ok(())
}
/// Get logs for a job by reading from its log file
pub async fn get_job_logs(&self, job_id: &str) -> Result<Option<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
// Get the job data to find the log path
let result_map: Option<std::collections::HashMap<String, String>> =
conn.hgetall(&job_key).await?;
match result_map {
Some(map) => {
if let Some(log_path) = map.get("log_path") {
// Try to read the log file
match std::fs::read_to_string(log_path) {
Ok(contents) => Ok(Some(contents)),
Err(e) => {
warn!("Failed to read log file {}: {}", log_path, e);
Ok(None)
}
}
} else {
// No log path configured for this job
Ok(None)
}
}
None => {
warn!("Job {} not found in Redis", job_id);
Ok(None)
}
}
}
/// Delete a specific job by ID
pub async fn delete_job(&self, job_id: &str) -> Result<(), DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Use the shared Job struct's delete method
Job::delete_from_redis(&mut conn, job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to delete job: {}", e)))?;
info!("Job {} deleted successfully", job_id);
Ok(())
}
/// Clear all jobs from Redis
pub async fn clear_all_jobs(&self) -> Result<usize, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Get all job IDs first
let job_ids = Job::list_all_job_ids(&mut conn).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to list jobs: {}", e)))?;
let count = job_ids.len();
// Delete each job using the shared method
for job_id in job_ids {
Job::delete_from_redis(&mut conn, &job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to delete job {}: {}", job_id, e)))?;
}
Ok(count)
}
/// Check if all prerequisites for a job are completed
pub async fn check_prerequisites_completed(&self, job_id: &str) -> Result<bool, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Load the job using the shared Job struct
let job = Job::load_from_redis(&mut conn, job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to load job: {}", e)))?;
// Check each prerequisite job status
for prereq_id in &job.prerequisites {
let status = Job::get_status(&mut conn, prereq_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to get prerequisite status: {}", e)))?;
if status != JobStatus::Finished {
return Ok(false); // Prerequisite not completed
}
}
Ok(true) // All prerequisites completed (or no prerequisites)
}
/// Update job status and check dependent jobs for readiness
pub async fn update_job_status_and_check_dependents(&self, job_id: &str, new_status: JobStatus) -> Result<Vec<String>, DispatcherError> {
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Update job status using shared Job method
Job::update_status(&mut conn, job_id, new_status.clone()).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to update job status: {}", e)))?;
let mut ready_jobs = Vec::new();
// If job finished, check dependent jobs
if new_status == JobStatus::Finished {
// Load the job to get its dependents
let job = Job::load_from_redis(&mut conn, job_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to load job: {}", e)))?;
// Check each dependent job
for dependent_id in &job.dependents {
let dependent_status = Job::get_status(&mut conn, dependent_id).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to get dependent status: {}", e)))?;
// Only check jobs that are waiting for prerequisites
if dependent_status == JobStatus::WaitingForPrerequisites {
// Check if all prerequisites are now completed
if self.check_prerequisites_completed(dependent_id).await? {
// Update status to dispatched and add to ready jobs
Job::update_status(&mut conn, dependent_id, JobStatus::Dispatched).await
.map_err(|e| DispatcherError::InvalidInput(format!("Failed to update dependent status: {}", e)))?;
ready_jobs.push(dependent_id.clone());
}
}
}
}
Ok(ready_jobs)
}
/// Dispatch jobs that are ready (have all prerequisites completed)
pub async fn dispatch_ready_jobs(&self, ready_job_ids: Vec<String>) -> Result<(), DispatcherError> {
// Reuse a single Redis connection for all dispatches
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
for job_id in ready_job_ids {
// Get job data to determine script type and select worker
let job_key = format!("{}job:{}", NAMESPACE_PREFIX, job_id);
let job_data: std::collections::HashMap<String, String> = conn.hgetall(&job_key).await?;
if let Some(script_type_str) = job_data.get("script_type") {
// Parse script type (stored as Debug format, e.g., "HeroScript")
let script_type = match script_type_str.as_str() {
"HeroScript" => ScriptType::HeroScript,
"RhaiSAL" => ScriptType::RhaiSAL,
"RhaiDSL" => ScriptType::RhaiDSL,
_ => return Err(DispatcherError::InvalidInput(format!("Unknown script type: {}", script_type_str))),
};
// Select worker and dispatch job
let worker_id = self.select_worker_for_script_type(&script_type)?;
self.start_job_using_connection(&mut conn, job_id, worker_id).await?;
}
}
Ok(())
}
}

794
core/engine/Cargo.lock generated Normal file
View File

@@ -0,0 +1,794 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "ahash"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
dependencies = [
"cfg-if",
"const-random",
"getrandom 0.3.3",
"once_cell",
"version_check",
"zerocopy",
]
[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
"libc",
]
[[package]]
name = "arrayvec"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bincode"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740"
dependencies = [
"bincode_derive",
"serde",
"unty",
]
[[package]]
name = "bincode_derive"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf95709a440f45e986983918d0e8a1f30a9b1df04918fc828670606804ac3c09"
dependencies = [
"virtue",
]
[[package]]
name = "bitflags"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "cc"
version = "1.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
dependencies = [
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
dependencies = [
"android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
"serde",
"wasm-bindgen",
"windows-link",
]
[[package]]
name = "const-random"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359"
dependencies = [
"const-random-macro",
]
[[package]]
name = "const-random-macro"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
dependencies = [
"getrandom 0.2.16",
"once_cell",
"tiny-keccak",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "engine"
version = "0.1.0"
dependencies = [
"chrono",
"heromodels",
"heromodels-derive",
"heromodels_core",
"rhai",
]
[[package]]
name = "getrandom"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "heromodels"
version = "0.1.0"
dependencies = [
"bincode",
"chrono",
"heromodels-derive",
"heromodels_core",
"ourdb",
"rhai",
"rhai_client_macros",
"serde",
"serde_json",
"strum",
"strum_macros",
"tst",
"uuid",
]
[[package]]
name = "heromodels-derive"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "heromodels_core"
version = "0.1.0"
dependencies = [
"chrono",
"serde",
]
[[package]]
name = "iana-time-zone"
version = "0.1.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"log",
"wasm-bindgen",
"windows-core",
]
[[package]]
name = "iana-time-zone-haiku"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
"cc",
]
[[package]]
name = "instant"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
dependencies = [
"cfg-if",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "no-std-compat"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c"
dependencies = [
"spin",
]
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
dependencies = [
"portable-atomic",
]
[[package]]
name = "ourdb"
version = "0.1.0"
dependencies = [
"crc32fast",
"log",
"rand",
"thiserror",
]
[[package]]
name = "portable-atomic"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.16",
]
[[package]]
name = "rhai"
version = "1.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2780e813b755850e50b178931aaf94ed24f6817f46aaaf5d21c13c12d939a249"
dependencies = [
"ahash",
"bitflags",
"instant",
"no-std-compat",
"num-traits",
"once_cell",
"rhai_codegen",
"rust_decimal",
"smallvec",
"smartstring",
"thin-vec",
]
[[package]]
name = "rhai_client_macros"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"rhai",
"syn",
]
[[package]]
name = "rhai_codegen"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "rust_decimal"
version = "1.37.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "faa7de2ba56ac291bd90c6b9bece784a52ae1411f9506544b3eae36dd2356d50"
dependencies = [
"arrayvec",
"num-traits",
]
[[package]]
name = "rustversion"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "smallvec"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"
[[package]]
name = "smartstring"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29"
dependencies = [
"autocfg",
"static_assertions",
"version_check",
]
[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strum"
version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
[[package]]
name = "strum_macros"
version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn",
]
[[package]]
name = "syn"
version = "2.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thin-vec"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "144f754d318415ac792f9d69fc87abbbfc043ce2ef041c60f16ad828f638717d"
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tiny-keccak"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
dependencies = [
"crunchy",
]
[[package]]
name = "tst"
version = "0.1.0"
dependencies = [
"ourdb",
"thiserror",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "unty"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae"
[[package]]
name = "uuid"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [
"getrandom 0.3.3",
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "virtue"
version = "0.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "051eb1abcf10076295e815102942cc58f9d5e3b4560e46e53c21e8ff6f3af7b1"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "windows-core"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
dependencies = [
"windows-implement",
"windows-interface",
"windows-link",
"windows-result",
"windows-strings",
]
[[package]]
name = "windows-implement"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-interface"
version = "0.59.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-link"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
[[package]]
name = "windows-result"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
dependencies = [
"windows-link",
]
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

38
core/engine/Cargo.toml Normal file
View File

@@ -0,0 +1,38 @@
[package]
name = "rhailib_engine"
version = "0.1.0"
edition = "2021"
description = "Central Rhai engine for heromodels"
[dependencies]
rhai = { version = "1.21.0", features = ["std", "sync", "decimal", "internals"] }
heromodels = { path = "../../../db/heromodels", features = ["rhai"] }
heromodels_core = { path = "../../../db/heromodels_core" }
chrono = "0.4"
heromodels-derive = { path = "../../../db/heromodels-derive" }
rhailib_dsl = { path = "../../../rhailib/src/dsl" }
[features]
default = ["calendar", "finance"]
calendar = []
finance = []
# Flow module is now updated to use our approach to Rhai engine registration
flow = []
legal = []
projects = []
biz = []
[[example]]
name = "calendar_example"
path = "examples/calendar/example.rs"
required-features = ["calendar"]
[[example]]
name = "flow_example"
path = "examples/flow/example.rs"
required-features = ["flow"]
[[example]]
name = "finance"
path = "examples/finance/example.rs"
required-features = ["finance"]

135
core/engine/README.md Normal file
View File

@@ -0,0 +1,135 @@
# HeroModels Rhai Engine (`engine`)
The `engine` crate provides a central Rhai scripting engine for the HeroModels project. It offers a unified way to interact with various HeroModels modules (like Calendar, Flow, Legal, etc.) through Rhai scripts, leveraging a shared database connection.
## Overview
This crate facilitates:
1. **Centralized Engine Creation**: A function `create_heromodels_engine` to instantiate a Rhai engine pre-configured with common settings and all enabled HeroModels modules.
2. **Modular Registration**: HeroModels modules (Calendar, Flow, etc.) can be registered with a Rhai engine based on feature flags.
3. **Script Evaluation Utilities**: Helper functions for compiling Rhai scripts into Abstract Syntax Trees (ASTs) and for evaluating scripts or ASTs.
4. **Mock Database**: Includes a `mock_db` module for testing and running examples without needing a live database.
## Core Components & Usage
### Library (`src/lib.rs`)
- **`create_heromodels_engine(db: Arc<OurDB>) -> Engine`**:
Creates and returns a new `rhai::Engine` instance. This engine is configured with default settings (e.g., max expression depths, string/array/map sizes) and then all available HeroModels modules (controlled by feature flags) are registered with it, using the provided `db` (an `Arc<OurDB>`) instance.
- **`register_all_modules(engine: &mut Engine, db: Arc<OurDB>)`**:
Registers all HeroModels modules for which features are enabled (e.g., `calendar`, `flow`, `legal`, `projects`, `biz`) with the given Rhai `engine`. Each module is passed the shared `db` instance.
- **`eval_script(engine: &Engine, script: &str) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>>`**:
A utility function to directly evaluate a Rhai script string using the provided `engine`.
- **`compile_script(engine: &Engine, script: &str) -> Result<AST, Box<rhai::EvalAltResult>>`**:
Compiles a Rhai script string into an `AST` (Abstract Syntax Tree) for potentially faster repeated execution.
- **`run_ast(engine: &Engine, ast: &AST, scope: &mut Scope) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>>`**:
Runs a pre-compiled `AST` with a given `scope` using the provided `engine`.
- **`mock_db` module**:
Provides `create_mock_db()` which returns an `Arc<OurDB>` instance suitable for testing and examples. This allows scripts that interact with database functionalities to run without external database dependencies.
### Basic Usage
```rust
use std::sync::Arc;
use engine::{create_heromodels_engine, eval_script};
use engine::mock_db::create_mock_db; // For example usage
use heromodels::db::hero::OurDB; // Actual DB type
// Create a mock database (or connect to a real one)
let db: Arc<OurDB> = create_mock_db();
// Create the Rhai engine with all enabled modules registered
let engine = create_heromodels_engine(db);
// Run a Rhai script
let script = r#"
// Example: Assuming 'calendar' feature is enabled
let cal = new_calendar("My Test Calendar");
cal.set_description("This is a test.");
print(`Created calendar: ${cal.get_name()}`);
cal.get_id() // Return the ID
"#;
match eval_script(&engine, script) {
Ok(val) => println!("Script returned: {:?}", val),
Err(err) => eprintln!("Script error: {}", err),
}
```
### Using Specific Modules Manually
If you need more fine-grained control or only want specific modules (and prefer not to rely solely on feature flags at compile time for `create_heromodels_engine`), you can initialize an engine and register modules manually:
```rust
use std::sync::Arc;
use rhai::Engine;
use engine::mock_db::create_mock_db; // For example usage
use heromodels::db::hero::OurDB;
// Import the specific module registration function
use heromodels::models::calendar::register_calendar_rhai_module;
// Create a mock database
let db: Arc<OurDB> = create_mock_db();
// Create a new Rhai engine
let mut engine = Engine::new();
// Register only the calendar module
register_calendar_rhai_module(&mut engine, db.clone());
// Now you can use calendar-related functions in your scripts
let result = engine.eval::<String>(r#" let c = new_calendar("Solo Cal"); c.get_name() "#);
match result {
Ok(name) => println!("Calendar name: {}", name),
Err(err) => eprintln!("Error: {}", err),
}
```
## Examples
This crate includes several examples demonstrating how to use different HeroModels modules with Rhai. Each example typically requires its corresponding feature to be enabled.
- `calendar_example`: Working with calendars, events, and attendees (requires `calendar` feature).
- `flow_example`: Working with flows, steps, and signature requirements (requires `flow` feature).
- `finance_example`: Working with financial models (requires `finance` feature).
- *(Additional examples for `legal`, `projects`, `biz` would follow the same pattern if present).*
To run an example (e.g., `calendar_example`):
```bash
cargo run --example calendar_example --features calendar
```
*(Note: Examples in `Cargo.toml` already specify `required-features`, so simply `cargo run --example calendar_example` might suffice if those features are part of the default set or already enabled.)*
## Features
The crate uses feature flags to control which HeroModels modules are compiled and registered:
- `calendar`: Enables the Calendar module.
- `finance`: Enables the Finance module.
- `flow`: Enables the Flow module.
- `legal`: Enables the Legal module.
- `projects`: Enables the Projects module.
- `biz`: Enables the Business module.
The `default` features are `["calendar", "finance"]`. You can enable other modules by specifying them during the build or in your project's `Cargo.toml` if this `engine` crate is a dependency.
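For example, a downstream crate could enable extra modules in its own `Cargo.toml` (a sketch; the dependency path and the use of this crate's package name from its manifest are assumptions):

```toml
[dependencies]
rhailib_engine = { path = "../engine", default-features = false, features = ["calendar", "flow"] }
```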
## Dependencies
Key dependencies include:
- `rhai`: The Rhai scripting engine.
- `heromodels`: Provides the core data models and database interaction logic, including the Rhai registration functions for each module.
- `heromodels_core`: Core utilities for HeroModels.
- `chrono`: For date/time utilities.
- `heromodels-derive`: Procedural macros used by HeroModels.
## License
This crate is part of the HeroModels project and shares its license.

16
core/engine/build.rs Normal file
View File

@@ -0,0 +1,16 @@
fn main() {
// Tell Cargo to re-run this build script if the calendar/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/calendar/rhai.rs");
// Tell Cargo to re-run this build script if the flow/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/flow/rhai.rs");
// Tell Cargo to re-run this build script if the legal/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/legal/rhai.rs");
// Tell Cargo to re-run this build script if the projects/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/projects/rhai.rs");
// Tell Cargo to re-run this build script if the biz/rhai.rs file changes
println!("cargo:rerun-if-changed=../heromodels/src/models/biz/rhai.rs");
}

View File

@@ -0,0 +1,331 @@
# Architecture of the `rhailib_engine` Crate
The `rhailib_engine` crate serves as the central Rhai scripting engine for the heromodels ecosystem. It provides a unified interface for creating, configuring, and executing Rhai scripts with access to all business domain modules through a feature-based architecture.
## Core Architecture
The engine acts as an orchestration layer that brings together the DSL modules and provides execution utilities:
```mermaid
graph TD
A[rhailib_engine] --> B[Engine Creation]
A --> C[Script Execution]
A --> D[Mock Database]
A --> E[Feature Management]
B --> B1[create_heromodels_engine]
B --> B2[Engine Configuration]
B --> B3[DSL Registration]
C --> C1[eval_script]
C --> C2[eval_file]
C --> C3[compile_script]
C --> C4[run_ast]
D --> D1[create_mock_db]
D --> D2[seed_mock_db]
D --> D3[Domain Data Seeding]
E --> E1[calendar]
E --> E2[finance]
E --> E3[flow]
E --> E4[legal]
E --> E5[projects]
E --> E6[biz]
B3 --> F[rhailib_dsl]
F --> G[All Domain Modules]
```
## Core Components
### 1. Engine Factory (`create_heromodels_engine`)
The primary entry point for creating a fully configured Rhai engine:
```rust
pub fn create_heromodels_engine() -> Engine
```
**Responsibilities:**
- Creates a new Rhai engine instance
- Configures engine limits and settings
- Registers all available DSL modules
- Returns a ready-to-use engine
**Configuration Settings** (see the sketch after this list):
- **Expression Depth**: 128 levels for both expressions and functions
- **String Size Limit**: 10 MB maximum string size
- **Array Size Limit**: 10,000 elements maximum
- **Map Size Limit**: 10,000 key-value pairs maximum
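In Rhai's API these limits correspond roughly to the following calls (a minimal sketch, not the crate's exact code):

```rust
use rhai::Engine;

let mut engine = Engine::new();
// Limit nesting depth for expressions and for function bodies
engine.set_max_expr_depths(128, 128);
// Bound memory use for strings, arrays, and maps
engine.set_max_string_size(10 * 1024 * 1024); // 10 MB
engine.set_max_array_size(10_000);
engine.set_max_map_size(10_000);
```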
### 2. Script Execution Utilities
#### Direct Script Evaluation
```rust
pub fn eval_script(engine: &Engine, script: &str) -> Result<Dynamic, Box<EvalAltResult>>
```
Executes Rhai script strings directly with immediate results.
#### File-Based Script Execution
```rust
pub fn eval_file(engine: &Engine, file_path: &Path) -> Result<Dynamic, Box<EvalAltResult>>
```
Loads and executes Rhai scripts from filesystem with proper error handling.
#### Compiled Script Execution
```rust
pub fn compile_script(engine: &Engine, script: &str) -> Result<AST, Box<EvalAltResult>>
pub fn run_ast(engine: &Engine, ast: &AST, scope: &mut Scope) -> Result<Dynamic, Box<EvalAltResult>>
```
Provides compilation and execution of scripts for performance optimization.
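Typical usage compiles once and reuses the AST across runs (a sketch assuming the factory and helpers above):

```rust
use rhai::Scope;

let engine = create_heromodels_engine();
let ast = compile_script(&engine, "40 + 2")?;

let mut scope = Scope::new();
// The compiled AST can be executed repeatedly without re-parsing
let first = run_ast(&engine, &ast, &mut scope)?;
let second = run_ast(&engine, &ast, &mut scope)?;
```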
### 3. Mock Database System
#### Database Creation
```rust
pub fn create_mock_db() -> Arc<OurDB>
```
Creates an in-memory database instance for testing and examples.
#### Data Seeding
```rust
pub fn seed_mock_db(db: Arc<OurDB>)
```
Populates the mock database with representative data across all domains.
## Feature-Based Architecture
The engine uses Cargo features to control which domain modules are included:
### Available Features
- **`calendar`** (default): Calendar and event management
- **`finance`** (default): Financial accounts, assets, and marketplace
- **`flow`**: Workflow and approval processes
- **`legal`**: Contract and legal document management
- **`projects`**: Project and task management
- **`biz`**: Business operations and entities
### Feature Integration Pattern
```rust
#[cfg(feature = "calendar")]
use heromodels::models::calendar::*;
#[cfg(feature = "finance")]
use heromodels::models::finance::*;
```
This allows for:
- **Selective Compilation**: Only include needed functionality
- **Reduced Binary Size**: Exclude unused domain modules
- **Modular Deployment**: Different configurations for different use cases
## Mock Database Architecture
### Database Structure
The mock database provides a complete testing environment:
```mermaid
graph LR
A[Mock Database] --> B[Calendar Data]
A --> C[Finance Data]
A --> D[Flow Data]
A --> E[Legal Data]
A --> F[Projects Data]
B --> B1[Calendars]
B --> B2[Events]
B --> B3[Attendees]
C --> C1[Accounts]
C --> C2[Assets - ERC20/ERC721]
C --> C3[Marketplace Listings]
D --> D1[Flows]
D --> D2[Flow Steps]
D --> D3[Signature Requirements]
E --> E1[Contracts]
E --> E2[Contract Revisions]
E --> E3[Contract Signers]
F --> F1[Projects]
F --> F2[Project Members]
F --> F3[Project Tags]
```
### Seeding Strategy
Each domain has its own seeding function that creates realistic test data:
#### Calendar Seeding
- Creates work calendars with descriptions
- Adds team meetings with attendees
- Sets up recurring events (see the sketch below)
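In code, a seeding function takes the shared DB handle and persists records through the typed collection API (a sketch modeled on the calendar example elsewhere in this repository):

```rust
use std::sync::Arc;
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::calendar::Calendar;

pub fn seed_calendar_data(db: Arc<OurDB>) {
    let calendar = Calendar::new(None, "Work Calendar".to_string())
        .description("My work schedule".to_string());
    // Persist the calendar; `set` returns the assigned ID and the stored value
    let (_calendar_id, _saved) = db
        .collection::<Calendar>()
        .expect("Failed to get Calendar collection")
        .set(&calendar)
        .expect("Failed to store calendar");
}
```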
#### Finance Seeding
- Creates demo trading accounts
- Generates ERC20 tokens and ERC721 NFTs
- Sets up marketplace listings with metadata
#### Flow Seeding (Feature-Gated)
- Creates document approval workflows
- Defines multi-step approval processes
- Sets up signature requirements
#### Legal Seeding (Feature-Gated)
- Creates service agreements
- Adds contract revisions and versions
- Defines contract signers and roles
#### Projects Seeding (Feature-Gated)
- Creates project instances with status tracking
- Assigns team members and priorities
- Adds project tags and categorization
## Error Handling Architecture
### Comprehensive Error Propagation
```rust
Result<Dynamic, Box<EvalAltResult>>
```
All functions return proper Rhai error types that include:
- **Script Compilation Errors**: Syntax and parsing issues
- **Runtime Errors**: Execution failures and exceptions
- **File System Errors**: File reading and path resolution issues
- **Database Errors**: Mock database operation failures
### Error Context Enhancement
File operations include enhanced error context:
```rust
Err(Box::new(EvalAltResult::ErrorSystem(
format!("Failed to read script file: {}", file_path.display()),
Box::new(io_err),
)))
```
## Performance Considerations
### Engine Configuration
Optimized settings for production use:
- **Memory Limits**: Prevent runaway script execution
- **Depth Limits**: Avoid stack overflow from deep recursion
- **Size Limits**: Control memory usage for large data structures
### Compilation Strategy
- **AST Caching**: Compile once, execute multiple times
- **Scope Management**: Efficient variable scope handling
- **Module Registration**: One-time registration at engine creation
### Mock Database Performance
- **In-Memory Storage**: Fast access for testing scenarios
- **Temporary Directories**: Automatic cleanup after use
- **Lazy Loading**: Data seeded only when needed
## Integration Patterns
### Script Development Workflow
```rust
// 1. Create engine with all modules
let engine = create_heromodels_engine();
// 2. Execute business logic scripts
let result = eval_script(&engine, r#"
    let company = new_company()
        .name("Tech Startup")
        .business_type("startup");
    save_company(company)
"#);
// 3. Handle results and errors (note: no `?` here, since we match on the Result)
match result {
    Ok(value) => println!("Success: {:?}", value),
    Err(error) => eprintln!("Error: {}", error),
}
```
### Testing Integration
```rust
// 1. Create mock database
let db = create_mock_db();
seed_mock_db(db.clone());
// 2. Create engine
let engine = create_heromodels_engine();
// 3. Test scripts against seeded data
let script = r#"
let calendars = list_calendars();
calendars.len()
"#;
let count = eval_script(&engine, script)?;
```
### File-Based Script Execution
```rust
// Execute scripts from files
let result = eval_file(&engine, Path::new("scripts/business_logic.rhai"))?;
```
## Deployment Configurations
### Minimal Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", default-features = false, features = ["calendar"] }
```
### Full Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", features = ["calendar", "finance", "flow", "legal", "projects", "biz"] }
```
### Custom Configuration
```toml
[dependencies]
rhailib_engine = { version = "0.1.0", default-features = false, features = ["finance", "biz"] }
```
## Security Considerations
### Script Execution Limits
- **Resource Limits**: Prevent resource exhaustion attacks
- **Execution Time**: Configurable timeouts for long-running scripts
- **Memory Bounds**: Controlled memory allocation
### Database Access
- **Mock Environment**: Safe testing without production data exposure
- **Temporary Storage**: Automatic cleanup prevents data persistence
- **Isolated Execution**: Each test run gets fresh database state
## Extensibility
### Adding New Domains
1. Create new feature flag in `Cargo.toml`
2. Add conditional imports for the new models (sketched below)
3. Implement seeding function for test data
4. Register with DSL module system
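A minimal sketch of steps 2 and 3 for a hypothetical `inventory` domain (the feature name, module path, and function body are assumptions):

```rust
// Step 2: conditional imports for the new models
#[cfg(feature = "inventory")]
use heromodels::models::inventory::*;

// Step 3: test-data seeding, compiled only when the feature is enabled
#[cfg(feature = "inventory")]
fn seed_inventory_data(db: std::sync::Arc<heromodels::db::hero::OurDB>) {
    // Create representative inventory records here
}
```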
### Custom Engine Configuration
```rust
let mut engine = Engine::new();
// Custom configuration
engine.set_max_expr_depths(256, 256);
// Register specific modules
rhailib_dsl::register_dsl_modules(&mut engine);
```
This architecture provides a robust, feature-rich foundation for Rhai script execution while maintaining flexibility, performance, and security.

View File

@@ -0,0 +1,101 @@
// calendar_script.rhai
// Example Rhai script for working with Calendar models
// Constants for AttendanceStatus
const NO_RESPONSE = "NoResponse";
const ACCEPTED = "Accepted";
const DECLINED = "Declined";
const TENTATIVE = "Tentative";
// Create a new calendar using builder pattern
let my_calendar = new_calendar()
.name("Team Calendar")
.description("Calendar for team events and meetings");
print(`Created calendar: ${my_calendar.name} (${my_calendar.id})`);
// Create attendees to attach to the event
let alice = new_attendee()
.with_contact_id(1)
.with_status(NO_RESPONSE);
let bob = new_attendee()
.with_contact_id(2)
.with_status(ACCEPTED);
let charlie = new_attendee()
.with_contact_id(3)
.with_status(TENTATIVE);
// Create a new event using builder pattern
// Note: Timestamps are in seconds since epoch
let now = timestamp_now();
let one_hour = 60 * 60;
let meeting = new_event()
.title("Weekly Sync")
.reschedule(now, now + one_hour)
.location("Conference Room A")
.description("Regular team sync meeting")
.add_attendee(alice)
.add_attendee(bob)
.add_attendee(charlie)
.save_event();
print(`Created event: ${meeting.title}`);
// Demonstrate deletion: removes the stored event, while the in-memory
// `meeting` value remains usable for the rest of this script
meeting.delete_event();
print(`Deleted event: ${meeting.title}`);
// Inspect the attendees attached to the event
let attendees = meeting.attendees;
print(`Event has ${attendees.len()} attendees`);
// Update Charlie's attendee status directly
meeting.update_attendee_status(3, ACCEPTED);
print(`Updated Charlie's status to: ${ACCEPTED}`);
// Add the event to the calendar
my_calendar.add_event_to_calendar(meeting);
// Print events info
print(`Added event to calendar`);
// Save the calendar to the database
let saved_calendar = my_calendar.save_calendar();
print(`Calendar saved to database with ID: ${saved_calendar.id}`);
// Retrieve the calendar from the database using the ID from the saved calendar
let retrieved_calendar = get_calendar_by_id(saved_calendar.id);
if retrieved_calendar != () {
print(`Retrieved calendar: ${retrieved_calendar.name}`);
print(`Retrieved calendar successfully`);
} else {
print("Failed to retrieve calendar from database");
}
// List all calendars in the database
let all_calendars = list_calendars();
print("\nListing all calendars in database:");
let calendar_count = 0;
for calendar in all_calendars {
print(` - Calendar: ${calendar.name} (ID: ${calendar.id})`);
calendar_count += 1;
}
print(`Total calendars: ${calendar_count}`);
// List all events in the database
let all_events = list_events();
print("\nListing all events in database:");
let event_count = 0;
for event in all_events {
print(` - Event: ${event.title} (ID: ${event.id})`);
event_count += 1;
}
print(`Total events: ${event_count}`);
// Helper function to get current timestamp
fn timestamp_now() {
// This would typically be provided by the host application
// For this example, we'll use a fixed timestamp
1685620800 // June 1, 2023, 12:00 PM
}

View File

@@ -0,0 +1,70 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use rhai::Engine;
mod mock;
use mock::seed_calendar_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Calendar Rhai Example");
println!("=====================");
// Create a mock database
let db = create_mock_db();
// Seed the database with some initial data
seed_calendar_data(db.clone());
// Create the Rhai engine using our central engine creator
let mut engine = create_heromodels_engine(db.clone());
// Register timestamp helper functions
register_timestamp_helpers(&mut engine);
// Get the path to the script
let manifest_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let script_path = manifest_dir
.join("examples")
.join("calendar")
.join("calendar_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
Ok(())
}
Err(err) => {
eprintln!("\nError running script: {}", err);
Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)))
}
}
}
// Register timestamp helper functions with the engine
fn register_timestamp_helpers(engine: &mut Engine) {
use chrono::{TimeZone, Utc};
// Function to get current timestamp
engine.register_fn("timestamp_now", || Utc::now().timestamp() as i64);
// Function to format a timestamp
engine.register_fn("format_timestamp", |ts: i64| {
let dt = Utc
.timestamp_opt(ts, 0)
.single()
.expect("Invalid timestamp");
dt.format("%Y-%m-%d %H:%M:%S UTC").to_string()
});
println!("Timestamp helper functions registered successfully.");
}

View File

@@ -0,0 +1,60 @@
use chrono::Utc;
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::calendar::{Calendar, Event};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with calendar data
pub fn seed_calendar_data(db: Arc<OurDB>) {
// Create a calendar
let calendar = Calendar::new(None, "Work Calendar".to_string())
.description("My work schedule".to_string());
// Store the calendar in the database
let (calendar_id, mut saved_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
// Create an event
let now = Utc::now().timestamp();
let end_time = now + 3600; // Add 1 hour in seconds
let event = Event::new()
.title("Team Meeting".to_string())
.reschedule(now, end_time)
.location("Conference Room A".to_string())
.description("Weekly sync".to_string())
.build();
// Store the event in the database first to get its ID
let (event_id, saved_event) = db
.collection()
.expect("Failed to get Event collection")
.set(&event)
.expect("Failed to store event");
// Add the event ID to the calendar
saved_calendar = saved_calendar.add_event(event_id as i64);
// Store the updated calendar in the database
let (_calendar_id, final_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&saved_calendar)
.expect("Failed to store calendar");
println!("Mock database seeded with calendar data:");
println!(
" - Added calendar: {} (ID: {})",
final_calendar.name,
final_calendar.get_id()
);
println!(
" - Added event: {} (ID: {})",
saved_event.title,
saved_event.get_id()
);
}

View File

@@ -0,0 +1,70 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use rhai::Engine;
use std::path::Path;
mod mock;
use mock::seed_finance_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Finance Rhai Example");
println!("===================");
// Create a mock database
let db = create_mock_db();
// Seed the database with some initial data
seed_finance_data(db.clone());
// Create the Rhai engine using our central engine creator
let mut engine = create_heromodels_engine(db.clone());
// Register timestamp helper functions
register_timestamp_helpers(&mut engine);
// Get the path to the script
let script_path = Path::new(file!())
.parent()
.unwrap()
.join("finance_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
Ok(())
}
Err(err) => {
eprintln!("\nError running script: {}", err);
Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)))
}
}
}
// Register timestamp helper functions with the engine
fn register_timestamp_helpers(engine: &mut Engine) {
use chrono::{TimeZone, Utc};
// Function to get current timestamp
engine.register_fn("timestamp_now", || Utc::now().timestamp() as i64);
// Function to format a timestamp
engine.register_fn("format_timestamp", |ts: i64| {
let dt = Utc
.timestamp_opt(ts, 0)
.single()
.expect("Invalid timestamp");
dt.format("%Y-%m-%d %H:%M:%S UTC").to_string()
});
println!("Timestamp helper functions registered successfully.");
}

View File

@@ -0,0 +1,202 @@
// finance_script.rhai
// Example Rhai script for working with Finance models
// Constants for AssetType
const NATIVE = "Native";
const ERC20 = "Erc20";
const ERC721 = "Erc721";
const ERC1155 = "Erc1155";
// Constants for ListingStatus
const ACTIVE = "Active";
const SOLD = "Sold";
const CANCELLED = "Cancelled";
const EXPIRED = "Expired";
// Constants for ListingType
const FIXED_PRICE = "FixedPrice";
const AUCTION = "Auction";
const EXCHANGE = "Exchange";
// Constants for BidStatus
const BID_ACTIVE = "Active";
const BID_ACCEPTED = "Accepted";
const BID_REJECTED = "Rejected";
const BID_CANCELLED = "Cancelled";
// Create a new account using builder pattern
let alice_account = new_account()
.name("Alice's Account")
.user_id(101)
.description("Alice's primary trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
print(`Created account: ${alice_account.get_name()} (User ID: ${alice_account.get_user_id()})`);
// Save the account to the database
let saved_alice = set_account(alice_account);
print(`Account saved to database with ID: ${saved_alice.get_id()}`);
// Create a new asset using builder pattern
let token_asset = new_asset()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(ERC20)
.decimals(18);
print(`Created asset: ${token_asset.get_name()} (${token_asset.get_amount()} ${token_asset.get_asset_type()})`);
// Save the asset to the database
let saved_token = set_asset(token_asset);
print(`Asset saved to database with ID: ${saved_token.get_id()}`);
// Add the asset to Alice's account
saved_alice = saved_alice.add_asset(saved_token.get_id());
saved_alice = set_account(saved_alice);
print(`Added asset ${saved_token.get_name()} to ${saved_alice.get_name()}`);
// Create a new NFT asset
let nft_asset = new_asset()
.name("Herocode #42")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(ERC721)
.decimals(0);
// Save the NFT to the database
let saved_nft = set_asset(nft_asset);
print(`NFT saved to database with ID: ${saved_nft.get_id()}`);
// Create Bob's account
let bob_account = new_account()
.name("Bob's Account")
.user_id(102)
.description("Bob's trading account")
.ledger("ethereum")
.address("0xfedcba0987654321fedcba0987654321fedcba09")
.pubkey("0x654321fedcba0987654321fedcba0987654321fe");
// Save Bob's account
let saved_bob = set_account(bob_account);
print(`Created and saved Bob's account with ID: ${saved_bob.get_id()}`);
// Create a listing for the NFT
let nft_listing = new_listing()
.seller_id(saved_alice.get_id())
.asset_id(saved_nft.get_id())
.price(0.5)
.currency("ETH")
.listing_type(AUCTION)
.title("Rare Herocode NFT")
.description("One of a kind digital collectible")
.image_url("https://example.com/nft/42.png")
.expires_at(timestamp_now() + 86400) // 24 hours from now
.add_tag("rare")
.add_tag("collectible")
.add_tag("digital art")
.set_listing();
// The listing was persisted by set_listing() above
print(`Created listing: ${nft_listing.get_title()} (ID: ${nft_listing.get_id()})`);
print(`Listing status: ${nft_listing.get_status()}, Type: ${nft_listing.get_listing_type()}`);
print(`Listing price: ${nft_listing.get_price()} ${nft_listing.get_currency()}`);
// Create a bid from Bob
let bob_bid = new_bid()
.listing_id(nft_listing.get_id().to_string())
.bidder_id(saved_bob.get_id())
.amount(1.5)
.currency("ETH")
.set_bid();
// The bid was persisted by set_bid() above
print(`Created bid from ${saved_bob.get_name()} for ${bob_bid.get_amount()} ${bob_bid.get_currency()}`);
// Add the bid to the listing
nft_listing.add_bid(bob_bid);
nft_listing.set_listing();
print(`Added bid to listing ${nft_listing.get_title()}`);
// Create another bid with higher amount
let charlie_account = new_account()
.name("Charlie's Account")
.user_id(103)
.description("Charlie's trading account")
.ledger("ethereum")
.address("0x1122334455667788991122334455667788990011")
.pubkey("0x8877665544332211887766554433221188776655");
let saved_charlie = set_account(charlie_account);
print(`Created and saved Charlie's account with ID: ${saved_charlie.get_id()}`);
let charlie_bid = new_bid()
.listing_id(nft_listing.get_id().to_string())
.bidder_id(saved_charlie.get_id())
.amount(2.5)
.currency("ETH")
.set_bid();
print(`Created higher bid from ${saved_charlie.get_name()} for ${charlie_bid.get_amount()} ${charlie_bid.get_currency()}`);
// Add the higher bid to the listing
nft_listing.add_bid(charlie_bid)
.set_listing();
print(`Added higher bid to listing ${nft_listing.get_title()}`);
nft_listing.sale_price(2.5)
.set_listing();
// Complete the sale to the highest bidder (Charlie)
nft_listing.complete_sale(saved_charlie.get_id())
.set_listing();
print(`Completed sale of ${nft_listing.get_title()} to ${saved_charlie.get_name()}`);
print(`New listing status: ${nft_listing.get_status()}`);
// Retrieve the listing from the database
let retrieved_listing = get_listing_by_id(nft_listing.get_id());
print(`Retrieved listing: ${retrieved_listing.get_title()} (Status: ${retrieved_listing.get_status()})`);
// Create a fixed price listing
let token_listing = new_listing()
.seller_id(saved_alice.get_id())
.asset_id(saved_token.get_id())
.price(100.0)
.currency("USDC")
.listing_type(FIXED_PRICE)
.title("HERO Tokens for Sale")
.description("100 HERO tokens at fixed price")
.set_listing();
// The fixed-price listing was persisted by set_listing() above
print(`Created fixed price listing: ${token_listing.get_title()} (ID: ${token_listing.get_id()})`);
// Cancel the listing
token_listing.cancel();
token_listing.set_listing();
print(`Cancelled listing: ${token_listing.get_title()}`);
print(`Listing status: ${token_listing.get_status()}`);
// Print summary of all accounts
print("\nAccount Summary:");
print(`Alice (ID: ${saved_alice.get_id()}): ${saved_alice.get_assets().len()} assets`);
print(`Bob (ID: ${saved_bob.get_id()}): ${saved_bob.get_assets().len()} assets`);
print(`Charlie (ID: ${saved_charlie.get_id()}): ${saved_charlie.get_assets().len()} assets`);
// Print summary of all listings
print("\nListing Summary:");
print(`NFT Auction (ID: ${nft_listing.get_id()}): ${nft_listing.get_status()}`);
print(`Token Sale (ID: ${token_listing.get_id()}): ${token_listing.get_status()}`);
// Print summary of all bids
print("\nBid Summary:");
print(`Bob's bid: ${bob_bid.get_amount()} ${bob_bid.get_currency()} (Status: ${bob_bid.get_status()})`);
print(`Charlie's bid: ${charlie_bid.get_amount()} ${charlie_bid.get_currency()} (Status: ${charlie_bid.get_status()})`);

View File

@@ -0,0 +1,111 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::finance::account::Account;
use heromodels::models::finance::asset::{Asset, AssetType};
use heromodels::models::finance::marketplace::{Listing, ListingType};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with finance data
pub fn seed_finance_data(db: Arc<OurDB>) {
// Create a user account
let account = Account::new()
.name("Demo Account")
.user_id(1)
.description("Demo trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
// Store the account in the database
let (account_id, mut updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store account");
// Create an ERC20 token asset
let token_asset = Asset::new()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(AssetType::Erc20)
.decimals(18);
// Store the token asset in the database
let (token_id, updated_token) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&token_asset)
.expect("Failed to store token asset");
// Create an NFT asset
let nft_asset = Asset::new()
.name("Herocode #1")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(AssetType::Erc721)
.decimals(0);
// Store the NFT asset in the database
let (nft_id, updated_nft) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&nft_asset)
.expect("Failed to store NFT asset");
// Add assets to the account
updated_account = updated_account.add_asset(token_id);
updated_account = updated_account.add_asset(nft_id);
// Update the account in the database
let (_, final_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&updated_account)
.expect("Failed to store updated account");
// Create a listing for the NFT
let listing = Listing::new()
.seller_id(account_id)
.asset_id(nft_id)
.price(0.5)
.currency("ETH")
.listing_type(ListingType::Auction)
.title("Rare Herocode NFT".to_string())
.description("One of a kind digital collectible".to_string())
.image_url(Some("https://example.com/nft/1.png".to_string()))
.add_tag("rare".to_string())
.add_tag("collectible".to_string());
// Store the listing in the database
let (_listing_id, updated_listing) = db
.collection::<Listing>()
.expect("Failed to get Listing collection")
.set(&listing)
.expect("Failed to store listing");
println!("Mock database seeded with finance data:");
println!(
" - Added account: {} (ID: {})",
final_account.name,
final_account.get_id()
);
println!(
" - Added token asset: {} (ID: {})",
updated_token.name,
updated_token.get_id()
);
println!(
" - Added NFT asset: {} (ID: {})",
updated_nft.name,
updated_nft.get_id()
);
println!(
" - Added listing: {} (ID: {})",
updated_listing.title,
updated_listing.get_id()
);
}

View File

@@ -0,0 +1,162 @@
use engine::mock_db::create_mock_db;
use engine::{create_heromodels_engine, eval_file};
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
use heromodels_core::Model;
use rhai::Scope;
use std::path::Path;
mod mock;
use mock::seed_flow_data;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Flow Rhai Example");
println!("=================");
// Create a mock database
let db = create_mock_db();
// Seed the database with initial data
seed_flow_data(db.clone());
// Create the Rhai engine with all modules registered
let engine = create_heromodels_engine(db.clone());
// Get the path to the script
let script_path = Path::new(file!())
.parent()
.unwrap()
.join("flow_script.rhai");
println!("\nRunning script: {}", script_path.display());
println!("---------------------");
// Run the script
match eval_file(&engine, &script_path) {
Ok(result) => {
if !result.is_unit() {
println!("\nScript returned: {:?}", result);
}
println!("\nScript executed successfully!");
}
Err(err) => {
eprintln!("\nError running script: {}", err);
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)));
}
}
// Demonstrate direct Rust interaction with the Rhai-exposed flow functionality
println!("\nDirect Rust interaction with Rhai-exposed flow functionality");
println!("----------------------------------------------------------");
// Create a new scope
let mut scope = Scope::new();
// Create a new flow using the Rhai function
let result = engine.eval::<Flow>("new_flow(0, \"Direct Rust Flow\")");
match result {
Ok(mut flow) => {
println!(
"Created flow from Rust: {} (ID: {})",
flow.name,
flow.get_id()
);
// Set flow status using the builder pattern
flow = flow.status("active".to_string());
println!("Set flow status to: {}", flow.status);
// Create a new flow step using the Rhai function
let result = engine.eval::<FlowStep>("new_flow_step(0, 1)");
match result {
Ok(mut step) => {
println!(
"Created flow step from Rust: Step Order {} (ID: {})",
step.step_order,
step.get_id()
);
// Set step description
step = step.description("Direct Rust Step".to_string());
println!(
"Set step description to: {}",
step.description
.clone()
.unwrap_or_else(|| "None".to_string())
);
// Create a signature requirement using the Rhai function
let result = engine.eval::<SignatureRequirement>(
"new_signature_requirement(0, 1, \"Direct Rust Signer\", \"Please sign this document\")"
);
match result {
Ok(req) => {
println!(
"Created signature requirement from Rust: Public Key {} (ID: {})",
req.public_key,
req.get_id()
);
// Add the step to the flow using the builder pattern
flow = flow.add_step(step);
println!(
"Added step to flow. Flow now has {} steps",
flow.steps.len()
);
// Save the flow to the database using the Rhai function
let save_flow_script = "fn save_it(f) { return db::save_flow(f); }";
let save_flow_ast = engine.compile(save_flow_script).unwrap();
let result = engine.call_fn::<Flow>(
&mut scope,
&save_flow_ast,
"save_it",
(flow,),
);
match result {
Ok(saved_flow) => {
println!(
"Saved flow to database with ID: {}",
saved_flow.get_id()
);
}
Err(err) => eprintln!("Error saving flow: {}", err),
}
// Save the signature requirement to the database using the Rhai function
let save_req_script =
"fn save_it(r) { return db::save_signature_requirement(r); }";
let save_req_ast = engine.compile(save_req_script).unwrap();
let result = engine.call_fn::<SignatureRequirement>(
&mut scope,
&save_req_ast,
"save_it",
(req,),
);
match result {
Ok(saved_req) => {
println!(
"Saved signature requirement to database with ID: {}",
saved_req.get_id()
);
}
Err(err) => {
eprintln!("Error saving signature requirement: {}", err)
}
}
}
Err(err) => eprintln!("Error creating signature requirement: {}", err),
}
}
Err(err) => eprintln!("Error creating flow step: {}", err),
}
}
Err(err) => eprintln!("Error creating flow: {}", err),
}
Ok(())
}

View File

@@ -0,0 +1,111 @@
// flow_script.rhai
// Example Rhai script for working with Flow models
// Constants for Flow status
const STATUS_DRAFT = "draft";
const STATUS_ACTIVE = "active";
const STATUS_COMPLETED = "completed";
const STATUS_CANCELLED = "cancelled";
// Create a new flow and configure it via the registered setter functions
let my_flow = new_flow(0, "flow-123");
name(my_flow, "Document Approval Flow");
status(my_flow, STATUS_DRAFT);
print(`Created flow: ${get_flow_name(my_flow)} (ID: ${get_flow_id(my_flow)})`);
print(`Status: ${get_flow_status(my_flow)}`);
// Create flow steps and set their properties
let step1 = new_flow_step(0, 1);
description(step1, "Initial review by legal team");
status(step1, STATUS_DRAFT);
let step2 = new_flow_step(0, 2);
description(step2, "Approval by department head");
status(step2, STATUS_DRAFT);
let step3 = new_flow_step(0, 3);
description(step3, "Final signature by CEO");
status(step3, STATUS_DRAFT);
// Create signature requirements and set their properties
let req1 = new_signature_requirement(0, get_flow_step_id(step1), "legal@example.com", "Please review this document");
signed_by(req1, "Legal Team");
status(req1, STATUS_DRAFT);
let req2 = new_signature_requirement(0, get_flow_step_id(step2), "dept@example.com", "Department approval needed");
signed_by(req2, "Department Head");
status(req2, STATUS_DRAFT);
let req3 = new_signature_requirement(0, get_flow_step_id(step3), "ceo@example.com", "Final approval required");
signed_by(req3, "CEO");
status(req3, STATUS_DRAFT);
print(`Created flow steps with signature requirements`);
// Add steps to the flow
let flow_with_steps = my_flow;
add_step(flow_with_steps, step1);
add_step(flow_with_steps, step2);
add_step(flow_with_steps, step3);
print(`Added steps to flow. Flow now has ${get_flow_steps(flow_with_steps).len()} steps`);
// Activate the flow
let active_flow = flow_with_steps;
status(active_flow, STATUS_ACTIVE);
print(`Updated flow status to: ${get_flow_status(active_flow)}`);
// Save the flow to the database
let saved_flow = db::save_flow(active_flow);
print(`Flow saved to database with ID: ${get_flow_id(saved_flow)}`);
// Save signature requirements to the database
let saved_req1 = db::save_signature_requirement(req1);
let saved_req2 = db::save_signature_requirement(req2);
let saved_req3 = db::save_signature_requirement(req3);
print(`Signature requirements saved to database with IDs: ${get_signature_requirement_id(saved_req1)}, ${get_signature_requirement_id(saved_req2)}, ${get_signature_requirement_id(saved_req3)}`);
// Retrieve the flow from the database
let retrieved_flow = db::get_flow_by_id(get_flow_id(saved_flow));
print(`Retrieved flow: ${get_flow_name(retrieved_flow)}`);
print(`It has ${get_flow_steps(retrieved_flow).len()} steps`);
// Complete the flow
let completed_flow = retrieved_flow;
status(completed_flow, STATUS_COMPLETED);
print(`Updated retrieved flow status to: ${get_flow_status(completed_flow)}`);
// Save the updated flow
db::save_flow(completed_flow);
print("Updated flow saved to database");
// List all flows in the database
let all_flows = db::list_flows();
print("\nListing all flows in database:");
let flow_count = 0;
for flow in all_flows {
print(` - Flow: ${get_flow_name(flow)} (ID: ${get_flow_id(flow)})`);
flow_count += 1;
}
print(`Total flows: ${flow_count}`);
// List all signature requirements
let all_reqs = db::list_signature_requirements();
print("\nListing all signature requirements in database:");
let req_count = 0;
for req in all_reqs {
print(` - Requirement for step ${get_signature_requirement_flow_step_id(req)} (ID: ${get_signature_requirement_id(req)})`);
req_count += 1;
}
print(`Total signature requirements: ${req_count}`);
// Clean up - delete the flow
db::delete_flow(get_flow_id(completed_flow));
print(`Deleted flow with ID: ${get_flow_id(completed_flow)}`);
// Clean up - delete signature requirements
db::delete_signature_requirement(get_signature_requirement_id(saved_req1));
db::delete_signature_requirement(get_signature_requirement_id(saved_req2));
db::delete_signature_requirement(get_signature_requirement_id(saved_req3));
print("Deleted all signature requirements");

View File

@@ -0,0 +1,65 @@
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db};
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
use heromodels_core::Model;
use std::sync::Arc;
/// Seed the mock database with flow data
#[cfg(feature = "flow")]
pub fn seed_flow_data(db: Arc<OurDB>) {
// Create a flow
let flow = Flow::new(None, "Onboarding Flow".to_string())
.description("New employee onboarding process".to_string())
.status("active".to_string());
// Create a signature requirement first
let sig_req = SignatureRequirement::new(
None,
1,
"hr_manager_pubkey".to_string(),
"Please sign the employment contract".to_string(),
);
let (sig_req_id, saved_sig_req) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&sig_req)
.expect("Failed to store signature requirement");
// Create a flow step and add the signature requirement
let step = FlowStep::new(None, 1)
.description("Complete HR paperwork".to_string())
.add_signature_requirement(sig_req_id);
let (step_id, saved_step) = db
.collection::<FlowStep>()
.expect("Failed to get FlowStep collection")
.set(&step)
.expect("Failed to store flow step");
// Add the step to the flow
let flow_with_step = flow.add_step(step_id);
// Store the flow
let (_flow_id, saved_flow) = db
.collection::<Flow>()
.expect("Failed to get Flow collection")
.set(&flow_with_step)
.expect("Failed to store flow");
println!("Mock database seeded with flow data:");
println!(
" - Added flow: {} (ID: {})",
saved_flow.name,
saved_flow.get_id()
);
println!(
" - Added step with order: {} (ID: {})",
saved_step.step_order,
saved_step.get_id()
);
println!(
" - Added signature requirement for: {} (ID: {})",
saved_sig_req.public_key,
saved_sig_req.get_id()
);
}

305
core/engine/src/lib.rs Normal file
View File

@@ -0,0 +1,305 @@
//! # Rhailib Engine
//!
//! The central Rhai scripting engine for the heromodels ecosystem. This crate provides
//! a unified interface for creating, configuring, and executing Rhai scripts with access
//! to all business domain modules.
//!
//! ## Features
//!
//! - **Unified Engine Creation**: Pre-configured Rhai engine with all DSL modules
//! - **Script Execution Utilities**: Direct evaluation, file-based execution, and AST compilation
//! - **Mock Database System**: Complete testing environment with seeded data
//! - **Feature-Based Architecture**: Modular compilation based on required domains
//!
//! ## Quick Start
//!
//! ```rust
//! use rhailib_engine::{create_heromodels_engine, eval_script};
//!
//! // Create a fully configured engine
//! let engine = create_heromodels_engine();
//!
//! // Execute a business logic script
//! let result = eval_script(&engine, r#"
//! let company = new_company()
//! .name("Acme Corp")
//! .business_type("global");
//! company.name
//! "#)?;
//!
//! println!("Company name: {}", result.as_string().unwrap());
//! ```
//!
//! ## Available Features
//!
//! - `calendar` (default): Calendar and event management
//! - `finance` (default): Financial accounts, assets, and marketplace
//! - `flow`: Workflow and approval processes
//! - `legal`: Contract and legal document management
//! - `projects`: Project and task management
//! - `biz`: Business operations and entities
use rhai::{Engine, EvalAltResult, Scope, AST};
use rhailib_dsl;
use std::fs;
use std::path::Path;
/// Mock database module for testing and examples
pub mod mock_db;
/// Creates a fully configured Rhai engine with all available DSL modules.
///
/// This function creates a new Rhai engine instance, configures it with appropriate
/// limits and settings, and registers all available business domain modules based
/// on enabled features.
///
/// # Engine Configuration
///
/// The engine is configured with the following limits:
/// - **Expression Depth**: 128 levels for both expressions and functions
/// - **String Size**: 10 MB maximum
/// - **Array Size**: 10,000 elements maximum
/// - **Map Size**: 10,000 key-value pairs maximum
///
/// # Registered Modules
///
/// All enabled DSL modules are automatically registered, including:
/// - Business operations (companies, products, sales, shareholders)
/// - Financial models (accounts, assets, marketplace)
/// - Content management (collections, images, PDFs, books)
/// - Workflow management (flows, steps, signatures)
/// - And more based on enabled features
///
/// # Returns
///
/// A fully configured `Engine` instance ready for script execution.
///
/// # Example
///
/// ```rust
/// use rhailib_engine::create_heromodels_engine;
///
/// let engine = create_heromodels_engine();
///
/// // Engine is now ready to execute scripts with access to all DSL functions
/// let result = engine.eval::<String>(r#"
/// let company = new_company().name("Test Corp");
/// company.name
/// "#).unwrap();
/// assert_eq!(result, "Test Corp");
/// ```
pub fn create_heromodels_engine() -> Engine {
let mut engine = Engine::new();
// Configure engine settings
engine.set_max_expr_depths(128, 128);
engine.set_max_string_size(10 * 1024 * 1024); // 10 MB
engine.set_max_array_size(10 * 1024); // 10K elements
engine.set_max_map_size(10 * 1024); // 10K elements
// Register all heromodels Rhai modules
rhailib_dsl::register_dsl_modules(&mut engine);
engine
}
// /// Register all heromodels Rhai modules with the engine
// pub fn register_all_modules(engine: &mut Engine, db: Arc<OurDB>) {
// // Register the calendar module if the feature is enabled
// heromodels::models::access::register_access_rhai_module(engine, db.clone());
// #[cfg(feature = "calendar")]
// heromodels::models::calendar::register_calendar_rhai_module(engine, db.clone());
// heromodels::models::contact::register_contact_rhai_module(engine, db.clone());
// heromodels::models::library::register_library_rhai_module(engine, db.clone());
// heromodels::models::circle::register_circle_rhai_module(engine, db.clone());
// // Register the flow module if the feature is enabled
// #[cfg(feature = "flow")]
// heromodels::models::flow::register_flow_rhai_module(engine, db.clone());
// // // Register the finance module if the feature is enabled
// // #[cfg(feature = "finance")]
// // heromodels::models::finance::register_finance_rhai_module(engine, db.clone());
// // Register the legal module if the feature is enabled
// #[cfg(feature = "legal")]
// heromodels::models::legal::register_legal_rhai_module(engine, db.clone());
// // Register the projects module if the feature is enabled
// #[cfg(feature = "projects")]
// heromodels::models::projects::register_projects_rhai_module(engine, db.clone());
// // Register the biz module if the feature is enabled
// #[cfg(feature = "biz")]
// heromodels::models::biz::register_biz_rhai_module(engine, db.clone());
// println!("Heromodels Rhai modules registered successfully.");
// }
/// Evaluates a Rhai script string and returns the result.
///
/// This function provides a convenient way to execute Rhai script strings directly
/// using the provided engine. It's suitable for one-off script execution or when
/// the script content is dynamically generated.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for script execution
/// * `script` - The Rhai script content as a string
///
/// # Returns
///
/// * `Ok(Dynamic)` - The result of script execution
/// * `Err(Box<EvalAltResult>)` - Script compilation or execution error
///
/// # Example
///
/// ```rust
/// use rhailib_engine::{create_heromodels_engine, eval_script};
///
/// let engine = create_heromodels_engine();
/// let result = eval_script(&engine, r#"
/// let x = 42;
/// let y = 8;
/// x + y
/// "#)?;
/// assert_eq!(result.as_int().unwrap(), 50);
/// ```
pub fn eval_script(
engine: &Engine,
script: &str,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
engine.eval::<rhai::Dynamic>(script)
}
/// Evaluates a Rhai script from a file and returns the result.
///
/// This function reads a Rhai script from the filesystem and executes it using
/// the provided engine. It handles file reading errors gracefully and provides
/// meaningful error messages.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for script execution
/// * `file_path` - Path to the Rhai script file
///
/// # Returns
///
/// * `Ok(Dynamic)` - The result of script execution
/// * `Err(Box<EvalAltResult>)` - File reading, compilation, or execution error
///
/// # Example
///
/// ```rust
/// use rhailib_engine::{create_heromodels_engine, eval_file};
/// use std::path::Path;
///
/// let engine = create_heromodels_engine();
/// let result = eval_file(&engine, Path::new("scripts/business_logic.rhai"))?;
/// println!("Script result: {:?}", result);
/// ```
///
/// # Error Handling
///
/// File reading errors are converted to Rhai `ErrorSystem` variants with
/// descriptive messages including the file path that failed to load.
pub fn eval_file(
engine: &Engine,
file_path: &Path,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
match fs::read_to_string(file_path) {
Ok(script_content) => engine.eval::<rhai::Dynamic>(&script_content),
Err(io_err) => Err(Box::new(EvalAltResult::ErrorSystem(
format!("Failed to read script file: {}", file_path.display()),
Box::new(io_err),
))),
}
}
/// Compiles a Rhai script string into an Abstract Syntax Tree (AST).
///
/// This function compiles a Rhai script into an AST that can be executed multiple
/// times with different scopes. This is more efficient than re-parsing the script
/// for each execution when the same script needs to be run repeatedly.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for compilation
/// * `script` - The Rhai script content as a string
///
/// # Returns
///
/// * `Ok(AST)` - The compiled Abstract Syntax Tree
/// * `Err(Box<EvalAltResult>)` - Script compilation error
///
/// # Example
///
/// ```rust
/// use rhailib_engine::{create_heromodels_engine, compile_script, run_ast};
/// use rhai::Scope;
///
/// let engine = create_heromodels_engine();
/// let ast = compile_script(&engine, r#"
/// let company = new_company().name(company_name);
/// save_company(company)
/// "#)?;
///
/// // Execute the compiled script multiple times with different variables
/// let mut scope1 = Scope::new();
/// scope1.push("company_name", "Acme Corp");
/// let result1 = run_ast(&engine, &ast, &mut scope1)?;
///
/// let mut scope2 = Scope::new();
/// scope2.push("company_name", "Tech Startup");
/// let result2 = run_ast(&engine, &ast, &mut scope2)?;
/// ```
pub fn compile_script(engine: &Engine, script: &str) -> Result<AST, Box<rhai::EvalAltResult>> {
Ok(engine.compile(script)?)
}
/// Executes a compiled Rhai script AST with the provided scope.
///
/// This function runs a pre-compiled AST using the provided engine and scope.
/// The scope can contain variables and functions that will be available to
/// the script during execution.
///
/// # Arguments
///
/// * `engine` - The Rhai engine to use for execution
/// * `ast` - The compiled Abstract Syntax Tree to execute
/// * `scope` - Mutable scope containing variables and functions for the script
///
/// # Returns
///
/// * `Ok(Dynamic)` - The result of script execution
/// * `Err(Box<EvalAltResult>)` - Script execution error
///
/// # Example
///
/// ```rust
/// use rhailib_engine::{create_heromodels_engine, compile_script, run_ast};
/// use rhai::Scope;
///
/// let engine = create_heromodels_engine();
/// let ast = compile_script(&engine, "x + y")?;
///
/// let mut scope = Scope::new();
/// scope.push("x", 10_i64);
/// scope.push("y", 32_i64);
///
/// let result = run_ast(&engine, &ast, &mut scope)?;
/// assert_eq!(result.as_int().unwrap(), 42);
/// ```
///
/// # Performance Notes
///
/// Using compiled ASTs is significantly more efficient than re-parsing scripts
/// for repeated execution, especially for complex scripts or when executing
/// the same logic with different input parameters.
pub fn run_ast(
engine: &Engine,
ast: &AST,
scope: &mut Scope,
) -> Result<rhai::Dynamic, Box<rhai::EvalAltResult>> {
engine.eval_ast_with_scope(scope, ast)
}

374
core/engine/src/mock_db.rs Normal file
View File

@@ -0,0 +1,374 @@
use chrono::Utc;
use heromodels::db::hero::OurDB;
use heromodels::db::{Collection, Db}; // Import both Db and Collection traits
use heromodels::models::calendar::{Calendar, Event};
use heromodels_core::Model; // Import Model trait to use build method
use std::env;
use std::sync::Arc;
// Import finance models
use heromodels::models::finance::account::Account;
use heromodels::models::finance::asset::{Asset, AssetType};
use heromodels::models::finance::marketplace::{Listing, ListingType};
// Conditionally import other modules based on features
#[cfg(feature = "flow")]
use heromodels::models::flow::{Flow, FlowStep, SignatureRequirement};
#[cfg(feature = "legal")]
use heromodels::models::legal::{
Contract, ContractRevision, ContractSigner, ContractStatus, SignerStatus,
};
#[cfg(feature = "projects")]
use heromodels::models::projects::{ItemType, Priority, Project, Status as ProjectStatus};
/// Create a mock in-memory database for examples
pub fn create_mock_db() -> Arc<OurDB> {
// Create a temporary directory for the database files
let temp_dir = env::temp_dir().join("engine_examples");
std::fs::create_dir_all(&temp_dir).expect("Failed to create temp directory");
// Create a new OurDB instance with reset=true to ensure it's clean
let db = OurDB::new(temp_dir, true).expect("Failed to create OurDB instance");
Arc::new(db)
}
/// Seed the mock database with some initial data for all modules
pub fn seed_mock_db(db: Arc<OurDB>) {
// Seed calendar data
seed_calendar_data(db.clone());
// Seed finance data
seed_finance_data(db.clone());
// Seed flow data if the feature is enabled
#[cfg(feature = "flow")]
seed_flow_data(db.clone());
// Seed legal data if the feature is enabled
#[cfg(feature = "legal")]
seed_legal_data(db.clone());
// Seed projects data if the feature is enabled
#[cfg(feature = "projects")]
seed_projects_data(db.clone());
println!("Mock database seeded with initial data for all enabled modules.");
}
/// Seed the mock database with calendar data
fn seed_calendar_data(db: Arc<OurDB>) {
// Create a calendar
let mut calendar = Calendar::new(None, "Work Calendar".to_string());
calendar.description = Some("My work schedule".to_string());
// Store the calendar in the database
let (_calendar_id, _updated_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
// Create an event
let now = Utc::now().timestamp();
let end_time = now + 3600; // Add 1 hour in seconds
// Use the builder pattern for Event
let event = Event::new()
.title("Team Meeting".to_string())
.reschedule(now, end_time)
.location("Conference Room A".to_string())
.description("Weekly sync".to_string())
// .add_attendee(Attendee::new(1))
// .add_attendee(Attendee::new(2))
.build();
// // Add attendees to the event using the builder pattern
// let attendee1 = Attendee::new(1);
// let attendee2 = Attendee::new(2);
// // Add attendees using the builder pattern
// event = event.add_attendee(attendee1);
// event = event.add_attendee(attendee2);
// Call build and capture the returned value
// let event = event.build();
// Store the event in the database first to get its ID
let (event_id, updated_event) = db
.collection()
.expect("Failed to get Event collection")
.set(&event)
.expect("Failed to store event");
// Add the event ID to the calendar
calendar = calendar.add_event(event_id as i64);
// Store the updated calendar in the database
let (_calendar_id, updated_calendar) = db
.collection::<Calendar>()
.expect("Failed to get Calendar collection")
.set(&calendar)
.expect("Failed to store calendar");
println!("Mock database seeded with calendar data:");
println!(
" - Added calendar: {} (ID: {})",
updated_calendar.name, updated_calendar.base_data.id
);
println!(
" - Added event: {} (ID: {})",
updated_event.title, updated_event.base_data.id
);
}
/// Seed the mock database with flow data
#[cfg(feature = "flow")]
fn seed_flow_data(db: Arc<OurDB>) {
// Create a flow
let mut flow = Flow::new(0, "Document Approval".to_string());
// Set flow properties using the builder pattern
flow = flow.status("draft".to_string());
flow = flow.name("Document Approval Flow".to_string());
// Create flow steps
let mut step1 = FlowStep::new(0, 1);
step1 = step1.description("Initial review by legal team".to_string());
step1 = step1.status("pending".to_string());
let mut step2 = FlowStep::new(0, 2);
step2 = step2.description("Approval by department head".to_string());
step2 = step2.status("pending".to_string());
// Create signature requirements (stored separately below)
let req1 = SignatureRequirement::new(
0,
1,
"Legal Team".to_string(),
"Please review this document".to_string(),
);
let req2 = SignatureRequirement::new(
0,
2,
"Department Head".to_string(),
"Please approve this document".to_string(),
);
// Add steps to flow
flow = flow.add_step(step1);
flow = flow.add_step(step2);
// Store in the database
let (_, updated_flow) = db
.collection::<Flow>()
.expect("Failed to get Flow collection")
.set(&flow)
.expect("Failed to store flow");
// Store signature requirements in the database
let (_, updated_req1) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&req1)
.expect("Failed to store signature requirement");
let (_, updated_req2) = db
.collection::<SignatureRequirement>()
.expect("Failed to get SignatureRequirement collection")
.set(&req2)
.expect("Failed to store signature requirement");
println!("Mock database seeded with flow data:");
println!(
" - Added flow: {} (ID: {})",
updated_flow.name, updated_flow.base_data.id
);
println!(" - Added {} steps", updated_flow.steps.len());
println!(
" - Added signature requirements with IDs: {} and {}",
updated_req1.base_data.id, updated_req2.base_data.id
);
}
/// Seed the mock database with legal data
#[cfg(feature = "legal")]
fn seed_legal_data(db: Arc<OurDB>) {
// Create a contract
let mut contract = Contract::new(None, "Service Agreement".to_string());
contract.description = Some("Agreement for software development services".to_string());
contract.status = ContractStatus::Draft;
// Create a revision
let revision = ContractRevision::new(
None,
"Initial draft".to_string(),
"https://example.com/contract/v1".to_string(),
);
// Create signers
let signer1 = ContractSigner::new(None, 1, "Client".to_string());
let signer2 = ContractSigner::new(None, 2, "Provider".to_string());
// Add revision and signers to contract
contract.add_revision(revision);
contract.add_signer(signer1);
contract.add_signer(signer2);
// Store in the database
let (_, updated_contract) = db
.collection::<Contract>()
.expect("Failed to get Contract collection")
.set(&contract)
.expect("Failed to store contract");
println!("Mock database seeded with legal data:");
println!(
" - Added contract: {} (ID: {})",
updated_contract.name, updated_contract.base_data.id
);
println!(
" - Added {} revisions and {} signers",
updated_contract.revisions.len(),
updated_contract.signers.len()
);
}
/// Seed the mock database with projects data
#[cfg(feature = "projects")]
fn seed_projects_data(db: Arc<OurDB>) {
// Create a project
let mut project = Project::new(None, "Website Redesign".to_string());
project.description = Some("Redesign the company website".to_string());
project.status = ProjectStatus::InProgress;
project.priority = Priority::High;
// Add members and tags
project.add_member_id(1);
project.add_member_id(2);
project.add_tag("design".to_string());
project.add_tag("web".to_string());
// Store in the database
let (_, updated_project) = db
.collection::<Project>()
.expect("Failed to get Project collection")
.set(&project)
.expect("Failed to store project");
println!("Mock database seeded with projects data:");
println!(
" - Added project: {} (ID: {})",
updated_project.name, updated_project.base_data.id
);
println!(
" - Status: {}, Priority: {}",
updated_project.status, updated_project.priority
);
println!(
" - Added {} members and {} tags",
updated_project.member_ids.len(),
updated_project.tags.len()
);
}
/// Seed the mock database with finance data
fn seed_finance_data(db: Arc<OurDB>) {
// Create a user account
let mut account = Account::new()
.name("Demo Account")
.user_id(1)
.description("Demo trading account")
.ledger("ethereum")
.address("0x1234567890abcdef1234567890abcdef12345678")
.pubkey("0xabcdef1234567890abcdef1234567890abcdef12");
// Store the account in the database
let (account_id, updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store account");
// Create an ERC20 token asset
let token_asset = Asset::new()
.name("HERO Token")
.description("Herocode governance token")
.amount(1000.0)
.address("0x9876543210abcdef9876543210abcdef98765432")
.asset_type(AssetType::Erc20)
.decimals(18);
// Store the token asset in the database
let (token_id, updated_token) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&token_asset)
.expect("Failed to store token asset");
// Create an NFT asset
let nft_asset = Asset::new()
.name("Herocode #1")
.description("Unique digital collectible")
.amount(1.0)
.address("0xabcdef1234567890abcdef1234567890abcdef12")
.asset_type(AssetType::Erc721)
.decimals(0);
// Store the NFT asset in the database
let (nft_id, updated_nft) = db
.collection::<Asset>()
.expect("Failed to get Asset collection")
.set(&nft_asset)
.expect("Failed to store NFT asset");
// Add assets to the account
account = updated_account.add_asset(token_id);
account = account.add_asset(nft_id);
// Update the account in the database
let (_, updated_account) = db
.collection::<Account>()
.expect("Failed to get Account collection")
.set(&account)
.expect("Failed to store updated account");
// Create a listing for the NFT
let listing = Listing::new()
.seller_id(account_id)
.asset_id(nft_id)
.price(0.5)
.currency("ETH")
.listing_type(ListingType::Auction)
.title("Rare Herocode NFT".to_string())
.description("One of a kind digital collectible".to_string())
.image_url(Some("hcttps://example.com/nft/1.png".to_string()))
.add_tag("rare".to_string())
.add_tag("collectible".to_string());
// Store the listing in the database
let (_listing_id, updated_listing) = db
.collection::<Listing>()
.expect("Failed to get Listing collection")
.set(&listing)
.expect("Failed to store listing");
println!("Mock database seeded with finance data:");
println!(
" - Added account: {} (ID: {})",
updated_account.name, updated_account.base_data.id
);
println!(
" - Added token asset: {} (ID: {})",
updated_token.name, updated_token.base_data.id
);
println!(
" - Added NFT asset: {} (ID: {})",
updated_nft.name, updated_nft.base_data.id
);
println!(
" - Added listing: {} (ID: {})",
updated_listing.title, updated_listing.base_data.id
);
}

20
core/examples/Cargo.toml Normal file
View File

@@ -0,0 +1,20 @@
[package]
name = "hero_examples"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "supervisor_worker_demo"
path = "supervisor_worker_demo.rs"
[dependencies]
hero_dispatcher = { path = "../dispatcher" }
hero_job = { path = "../job" }
tokio = { version = "1.0", features = ["full"] }
redis = { version = "0.25", features = ["tokio-comp"] }
serde_json = "1.0"
log = "0.4"
env_logger = "0.10"
colored = "2.0"
uuid = { version = "1.0", features = ["v4"] }
chrono = { version = "0.4", features = ["serde"] }

View File

@@ -0,0 +1,365 @@
use colored::*;
use hero_dispatcher::{DispatcherBuilder, ScriptType, JobStatus};
use log::warn;
use std::process::Stdio;
use std::time::Duration;
use tokio::process::{Child, Command as TokioCommand};
use tokio::time::sleep;
/// Supervisor manages worker lifecycle and job execution
pub struct Supervisor {
dispatcher: hero_dispatcher::Dispatcher,
worker_processes: Vec<WorkerProcess>,
redis_url: String,
}
/// Represents a managed worker process
pub struct WorkerProcess {
id: String,
script_type: ScriptType,
process: Option<Child>,
binary_path: String,
}
impl Supervisor {
/// Create a new supervisor with dispatcher configuration
pub async fn new(redis_url: String) -> Result<Self, Box<dyn std::error::Error>> {
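// Note: the worker IDs configured on the dispatcher below must match the
// IDs that start_worker() assigns when it spawns worker processes.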
let dispatcher = DispatcherBuilder::new()
.caller_id("supervisor")
.context_id("demo-context")
.redis_url(&redis_url)
.heroscript_workers(vec!["hero-worker-1".to_string()])
.rhai_sal_workers(vec!["rhai-sal-worker-1".to_string()])
.rhai_dsl_workers(vec!["rhai-dsl-worker-1".to_string()])
.build()?;
Ok(Self {
dispatcher,
worker_processes: Vec::new(),
redis_url,
})
}
/// Start a worker for a specific script type
pub async fn start_worker(&mut self, script_type: ScriptType, worker_binary_path: &str) -> Result<(), Box<dyn std::error::Error>> {
let worker_id = match script_type {
ScriptType::HeroScript => "hero-worker-1",
ScriptType::RhaiSAL => "rhai-sal-worker-1",
ScriptType::RhaiDSL => "rhai-dsl-worker-1",
};
println!("{}", format!("🚀 Starting {} worker: {}", script_type.as_str(), worker_id).green().bold());
// Check if worker binary exists
if !std::path::Path::new(worker_binary_path).exists() {
return Err(format!("Worker binary not found at: {}", worker_binary_path).into());
}
// Start the worker process
let mut cmd = TokioCommand::new(worker_binary_path);
cmd.arg("--worker-id").arg(worker_id)
.arg("--redis-url").arg(&self.redis_url)
.arg("--no-timestamp")
.stdout(Stdio::piped())
.stderr(Stdio::piped());
let process = cmd.spawn()?;
let worker_process = WorkerProcess {
id: worker_id.to_string(),
script_type,
process: Some(process),
binary_path: worker_binary_path.to_string(),
};
self.worker_processes.push(worker_process);
// Give worker time to start up
sleep(Duration::from_millis(500)).await;
println!("{}", format!("✅ Worker {} started successfully", worker_id).green());
Ok(())
}
/// Stop all workers
pub async fn stop_all_workers(&mut self) {
println!("{}", "🛑 Stopping all workers...".yellow().bold());
for worker in &mut self.worker_processes {
if let Some(mut process) = worker.process.take() {
println!("Stopping worker: {}", worker.id);
// Try graceful shutdown first
if let Err(e) = process.kill().await {
warn!("Failed to kill worker {}: {}", worker.id, e);
}
// Wait for process to exit
if let Ok(status) = process.wait().await {
println!("Worker {} exited with status: {:?}", worker.id, status);
} else {
warn!("Failed to wait for worker {} to exit", worker.id);
}
}
}
self.worker_processes.clear();
println!("{}", "✅ All workers stopped".green());
}
/// Submit a job and return the job ID
pub async fn submit_job(&self, script_type: ScriptType, script: &str) -> Result<String, Box<dyn std::error::Error>> {
let job = self.dispatcher
.new_job()
.script_type(script_type.clone())
.script(script)
.timeout(Duration::from_secs(30))
.build()?;
let job_id = job.id.clone();
self.dispatcher.create_job(&job).await?;
println!("{}", format!("📝 Job {} submitted for {}", job_id, script_type.as_str()).cyan());
Ok(job_id)
}
/// Wait for job completion and return result
pub async fn wait_for_job_completion(&self, job_id: &str, timeout_duration: Duration) -> Result<String, Box<dyn std::error::Error>> {
let start_time = std::time::Instant::now();
println!("{}", format!("⏳ Waiting for job {} to complete...", job_id).yellow());
loop {
if start_time.elapsed() > timeout_duration {
return Err("Job execution timeout".into());
}
// Check job status using dispatcher methods
match self.dispatcher.get_job_status(job_id).await {
Ok(status) => {
match status {
JobStatus::Finished => {
if let Ok(Some(result)) = self.dispatcher.get_job_output(job_id).await {
println!("{}", format!("✅ Job {} completed successfully", job_id).green());
return Ok(result);
}
}
JobStatus::Error => {
return Err("Job failed".into());
}
_ => {
// Job still running or waiting
}
}
}
Err(_) => {
// Job not found or error checking status
}
}
sleep(Duration::from_millis(100)).await;
}
}
/// List all jobs
pub async fn list_jobs(&self) -> Result<Vec<String>, Box<dyn std::error::Error>> {
self.dispatcher.list_jobs().await.map_err(|e| e.into())
}
/// Clear all jobs
pub async fn clear_all_jobs(&self) -> Result<usize, Box<dyn std::error::Error>> {
self.dispatcher.clear_all_jobs().await.map_err(|e| e.into())
}
/// Get worker status
pub fn get_worker_status(&self) -> Vec<(String, ScriptType, bool)> {
self.worker_processes.iter().map(|w| {
(w.id.clone(), w.script_type.clone(), w.process.is_some())
}).collect()
}
}
impl Drop for Supervisor {
fn drop(&mut self) {
// Best-effort warning only: async cleanup cannot run in Drop, so callers
// should invoke stop_all_workers() explicitly before dropping the supervisor.
if !self.worker_processes.is_empty() {
println!("{}", "⚠️ Supervisor dropping - stopping remaining workers".yellow());
}
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize logging
env_logger::Builder::from_default_env()
.filter_level(log::LevelFilter::Info)
.format_timestamp(None)
.init();
println!("{}", "🎯 Hero Supervisor-Worker End-to-End Demo".blue().bold());
println!("{}", "==========================================".blue());
println!();
// Configuration
let redis_url = "redis://localhost:6379".to_string();
let worker_binary_path = "../../target/debug/worker";
// Check if worker binary exists
if !std::path::Path::new(worker_binary_path).exists() {
println!("{}", "❌ Worker binary not found!".red().bold());
println!("Please build the worker first:");
println!(" cd ../worker && cargo build");
return Err("Worker binary not found".into());
}
// Create supervisor
println!("{}", "🏗️ Creating supervisor...".cyan());
let mut supervisor = Supervisor::new(redis_url).await?;
println!("{}", "✅ Supervisor created successfully".green());
println!();
// Clear any existing jobs
let cleared_count = supervisor.clear_all_jobs().await?;
if cleared_count > 0 {
println!("{}", format!("🧹 Cleared {} existing jobs", cleared_count).yellow());
}
// Demo 1: Start a HeroScript worker
println!("{}", "📋 Demo 1: Starting HeroScript Worker".blue().bold());
println!("{}", "------------------------------------".blue());
supervisor.start_worker(ScriptType::HeroScript, worker_binary_path).await?;
// Show worker status
let worker_status = supervisor.get_worker_status();
println!("Active workers:");
for (id, script_type, active) in worker_status {
let status = if active { "🟢 Running" } else { "🔴 Stopped" };
println!(" {} - {} ({})", id, script_type.as_str(), status);
}
println!();
// Demo 2: Submit and execute a simple job
println!("{}", "📋 Demo 2: Submit and Execute Job".blue().bold());
println!("{}", "---------------------------------".blue());
let script = r#"
print("Hello from HeroScript worker!");
let result = 42 + 8;
print("Calculation: 42 + 8 = " + result);
result
"#;
let job_id = supervisor.submit_job(ScriptType::HeroScript, script).await?;
// Wait for job completion
match supervisor.wait_for_job_completion(&job_id, Duration::from_secs(10)).await {
Ok(result) => {
println!("{}", format!("🎉 Job result: {}", result).green().bold());
}
Err(e) => {
println!("{}", format!("❌ Job failed: {}", e).red());
}
}
println!();
// Demo 3: Submit multiple jobs
println!("{}", "📋 Demo 3: Multiple Jobs".blue().bold());
println!("{}", "------------------------".blue());
let jobs = vec![
("Job 1", r#"print("Job 1 executing"); "job1_result""#),
("Job 2", r#"print("Job 2 executing"); 100 + 200"#),
("Job 3", r#"print("Job 3 executing"); "hello_world""#),
];
let mut job_ids = Vec::new();
for (name, script) in jobs {
let job_id = supervisor.submit_job(ScriptType::HeroScript, script).await?;
job_ids.push((name, job_id));
println!("{} submitted: {}", name, job_ids.last().unwrap().1);
}
// Wait for all jobs to complete
for (name, job_id) in job_ids {
match supervisor.wait_for_job_completion(&job_id, Duration::from_secs(5)).await {
Ok(result) => {
println!("{} completed: {}", name, result);
}
Err(e) => {
println!("{} failed: {}", name, e);
}
}
}
println!();
// Demo 4: Job management
println!("{}", "📋 Demo 4: Job Management".blue().bold());
println!("{}", "-------------------------".blue());
let all_jobs = supervisor.list_jobs().await?;
println!("Total jobs in system: {}", all_jobs.len());
if !all_jobs.is_empty() {
println!("Job IDs:");
for (i, job_id) in all_jobs.iter().enumerate() {
println!(" {}. {}", i + 1, job_id);
}
}
println!();
// Demo 5: Error handling
println!("{}", "📋 Demo 5: Error Handling".blue().bold());
println!("{}", "-------------------------".blue());
let error_script = r#"
print("This job will cause an error");
let x = undefined_variable; // This will cause an error
x
"#;
let error_job_id = supervisor.submit_job(ScriptType::HeroScript, error_script).await?;
match supervisor.wait_for_job_completion(&error_job_id, Duration::from_secs(5)).await {
Ok(result) => {
println!("Unexpected success: {}", result);
}
Err(e) => {
println!("{}", format!("Expected error handled: {}", e).yellow());
}
}
println!();
// Demo 6: Cleanup
println!("{}", "📋 Demo 6: Cleanup".blue().bold());
println!("{}", "-------------------".blue());
let final_job_count = supervisor.list_jobs().await?.len();
println!("Jobs before cleanup: {}", final_job_count);
let cleared = supervisor.clear_all_jobs().await?;
println!("Jobs cleared: {}", cleared);
let remaining_jobs = supervisor.list_jobs().await?.len();
println!("Jobs after cleanup: {}", remaining_jobs);
println!();
// Stop all workers
supervisor.stop_all_workers().await;
println!("{}", "🎉 Demo completed successfully!".green().bold());
println!();
println!("{}", "Key Features Demonstrated:".blue().bold());
println!(" ✅ Supervisor lifecycle management");
println!(" ✅ Worker process spawning and management");
println!(" ✅ Job submission and execution");
println!(" ✅ Real-time job monitoring");
println!(" ✅ Multiple job handling");
println!(" ✅ Error handling and recovery");
println!(" ✅ Resource cleanup");
println!();
println!("{}", "The supervisor successfully managed the complete worker lifecycle!".green());
Ok(())
}

1
core/job/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/target

14
core/job/Cargo.toml Normal file
View File

@@ -0,0 +1,14 @@
[package]
name = "hero_job"
version = "0.1.0"
edition = "2021"
[dependencies]
chrono = { version = "0.4", features = ["serde"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
redis = { version = "0.25", features = ["tokio-comp"] }
tokio = { version = "1.0", features = ["full"] }
uuid = { version = "1.0", features = ["v4", "serde"] }
log = "0.4"
thiserror = "1.0"

381
core/job/src/lib.rs Normal file
View File

@@ -0,0 +1,381 @@
use chrono::Utc;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
use uuid::Uuid;
use redis::AsyncCommands;
use thiserror::Error;
/// Redis namespace prefix for all Hero job-related keys
pub const NAMESPACE_PREFIX: &str = "hero:job:";
/// Script type enumeration for different script engines
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ScriptType {
/// HeroScript - Hero's native scripting language
HeroScript,
/// Rhai SAL - Rhai Script Abstraction Layer
RhaiSAL,
/// Rhai DSL - Rhai Domain Specific Language
RhaiDSL,
}
impl ScriptType {
/// Get the worker queue suffix for this script type
pub fn worker_queue_suffix(&self) -> &'static str {
match self {
ScriptType::HeroScript => "heroscript",
ScriptType::RhaiSAL => "rhai_sal",
ScriptType::RhaiDSL => "rhai_dsl",
}
}
pub fn as_str(&self) -> &'static str {
match self {
ScriptType::HeroScript => "heroscript",
ScriptType::RhaiSAL => "rhai_sal",
ScriptType::RhaiDSL => "rhai_dsl",
}
}
}
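// Illustrative mapping: ScriptType::HeroScript.worker_queue_suffix() == "heroscript",
// ScriptType::RhaiDSL.as_str() == "rhai_dsl".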
/// Job status enumeration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum JobStatus {
Dispatched,
WaitingForPrerequisites,
Started,
Error,
Finished,
}
impl JobStatus {
pub fn as_str(&self) -> &'static str {
match self {
JobStatus::Dispatched => "dispatched",
JobStatus::WaitingForPrerequisites => "waiting_for_prerequisites",
JobStatus::Started => "started",
JobStatus::Error => "error",
JobStatus::Finished => "finished",
}
}
pub fn from_str(s: &str) -> Option<Self> {
match s {
"dispatched" => Some(JobStatus::Dispatched),
"waiting_for_prerequisites" => Some(JobStatus::WaitingForPrerequisites),
"started" => Some(JobStatus::Started),
"error" => Some(JobStatus::Error),
"finished" => Some(JobStatus::Finished),
_ => None,
}
}
}
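// `as_str` and `from_str` round-trip, e.g.
// JobStatus::from_str(JobStatus::Started.as_str()) == Some(JobStatus::Started).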
/// Representation of a script execution request.
///
/// This structure contains all the information needed to execute a script
/// on a worker service, including the script content, dependencies, and metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Job {
pub id: String,
pub caller_id: String,
pub context_id: String,
pub script: String,
pub script_type: ScriptType,
pub timeout: Duration,
pub retries: u8, // retries on script execution
pub concurrent: bool, // whether to execute script in separate thread
pub log_path: Option<String>, // path to write logs of script execution to
pub env_vars: HashMap<String, String>, // environment variables for script execution
pub prerequisites: Vec<String>, // job IDs that must complete before this job can run
pub dependents: Vec<String>, // job IDs that depend on this job completing
pub created_at: chrono::DateTime<chrono::Utc>,
pub updated_at: chrono::DateTime<chrono::Utc>,
}
/// Error types for job operations
#[derive(Error, Debug)]
pub enum JobError {
#[error("Redis error: {0}")]
RedisError(#[from] redis::RedisError),
#[error("Serialization error: {0}")]
SerializationError(#[from] serde_json::Error),
#[error("Job not found: {0}")]
JobNotFound(String),
#[error("Invalid job data: {0}")]
InvalidJobData(String),
#[error("Missing required field: {0}")]
MissingField(String),
}
impl Job {
/// Create a new job with the given parameters
pub fn new(
caller_id: String,
context_id: String,
script: String,
script_type: ScriptType,
) -> Self {
let now = Utc::now();
Self {
id: Uuid::new_v4().to_string(),
caller_id,
context_id,
script,
script_type,
timeout: Duration::from_secs(30),
retries: 0,
concurrent: false,
log_path: None,
env_vars: HashMap::new(),
prerequisites: Vec::new(),
dependents: Vec::new(),
created_at: now,
updated_at: now,
}
}
/// Store this job in Redis
pub async fn store_in_redis(&self, conn: &mut redis::aio::MultiplexedConnection) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, self.id);
let mut hset_args: Vec<(String, String)> = vec![
("jobId".to_string(), self.id.clone()),
("script".to_string(), self.script.clone()),
("script_type".to_string(), format!("{:?}", self.script_type)),
("callerId".to_string(), self.caller_id.clone()),
("contextId".to_string(), self.context_id.clone()),
("status".to_string(), "pending".to_string()),
("timeout".to_string(), self.timeout.as_secs().to_string()),
("retries".to_string(), self.retries.to_string()),
("concurrent".to_string(), self.concurrent.to_string()),
("createdAt".to_string(), self.created_at.to_rfc3339()),
("updatedAt".to_string(), self.updated_at.to_rfc3339()),
];
// Add optional log path
if let Some(log_path) = &self.log_path {
hset_args.push(("log_path".to_string(), log_path.clone()));
}
// Add environment variables as JSON string if any are provided
if !self.env_vars.is_empty() {
let env_vars_json = serde_json::to_string(&self.env_vars)?;
hset_args.push(("env_vars".to_string(), env_vars_json));
}
// Add prerequisites as JSON string if any are provided
if !self.prerequisites.is_empty() {
let prerequisites_json = serde_json::to_string(&self.prerequisites)?;
hset_args.push(("prerequisites".to_string(), prerequisites_json));
}
// Add dependents as JSON string if any are provided
if !self.dependents.is_empty() {
let dependents_json = serde_json::to_string(&self.dependents)?;
hset_args.push(("dependents".to_string(), dependents_json));
}
conn.hset_multiple::<_, _, _, ()>(&job_key, &hset_args).await?;
Ok(())
}
/// Load a job from Redis by ID
pub async fn load_from_redis(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<Self, JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let job_data: HashMap<String, String> = conn.hgetall(&job_key).await?;
if job_data.is_empty() {
return Err(JobError::JobNotFound(job_id.to_string()));
}
// Parse required fields
let id = job_data.get("jobId")
.ok_or_else(|| JobError::MissingField("jobId".to_string()))?
.clone();
let script = job_data.get("script")
.ok_or_else(|| JobError::MissingField("script".to_string()))?
.clone();
let script_type_str = job_data.get("script_type")
.ok_or_else(|| JobError::MissingField("script_type".to_string()))?;
let script_type = match script_type_str.as_str() {
"HeroScript" => ScriptType::HeroScript,
"RhaiSAL" => ScriptType::RhaiSAL,
"RhaiDSL" => ScriptType::RhaiDSL,
_ => return Err(JobError::InvalidJobData(format!("Unknown script type: {}", script_type_str))),
};
let caller_id = job_data.get("callerId")
.ok_or_else(|| JobError::MissingField("callerId".to_string()))?
.clone();
let context_id = job_data.get("contextId")
.ok_or_else(|| JobError::MissingField("contextId".to_string()))?
.clone();
let timeout_secs: u64 = job_data.get("timeout")
.ok_or_else(|| JobError::MissingField("timeout".to_string()))?
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid timeout value".to_string()))?;
let retries: u8 = job_data.get("retries")
.unwrap_or(&"0".to_string())
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid retries value".to_string()))?;
let concurrent: bool = job_data.get("concurrent")
.unwrap_or(&"false".to_string())
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid concurrent value".to_string()))?;
let created_at = job_data.get("createdAt")
.ok_or_else(|| JobError::MissingField("createdAt".to_string()))?
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid createdAt timestamp".to_string()))?;
let updated_at = job_data.get("updatedAt")
.ok_or_else(|| JobError::MissingField("updatedAt".to_string()))?
.parse()
.map_err(|_| JobError::InvalidJobData("Invalid updatedAt timestamp".to_string()))?;
// Parse optional fields
let log_path = job_data.get("log_path").cloned();
let env_vars = if let Some(env_vars_json) = job_data.get("env_vars") {
serde_json::from_str(env_vars_json)?
} else {
HashMap::new()
};
let prerequisites = if let Some(prerequisites_json) = job_data.get("prerequisites") {
serde_json::from_str(prerequisites_json)?
} else {
Vec::new()
};
let dependents = if let Some(dependents_json) = job_data.get("dependents") {
serde_json::from_str(dependents_json)?
} else {
Vec::new()
};
Ok(Self {
id,
caller_id,
context_id,
script,
script_type,
timeout: Duration::from_secs(timeout_secs),
retries,
concurrent,
log_path,
env_vars,
prerequisites,
dependents,
created_at,
updated_at,
})
}
/// Update job status in Redis
pub async fn update_status(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
status: JobStatus,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let now = Utc::now();
conn.hset::<_, _, _, ()>(&job_key, "status", status.as_str()).await?;
conn.hset::<_, _, _, ()>(&job_key, "updatedAt", now.to_rfc3339()).await?;
Ok(())
}
/// Get job status from Redis
pub async fn get_status(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<JobStatus, JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let status_str: String = conn.hget(&job_key, "status").await?;
JobStatus::from_str(&status_str)
.ok_or_else(|| JobError::InvalidJobData(format!("Unknown status: {}", status_str)))
}
/// Set job result in Redis
pub async fn set_result(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
result: &str,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let now = Utc::now();
conn.hset::<_, _, _, ()>(&job_key, "output", result).await?;
conn.hset::<_, _, _, ()>(&job_key, "status", JobStatus::Finished.as_str()).await?;
conn.hset::<_, _, _, ()>(&job_key, "updatedAt", now.to_rfc3339()).await?;
Ok(())
}
/// Set job error in Redis
pub async fn set_error(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
error: &str,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
let now = Utc::now();
conn.hset::<_, _, _, ()>(&job_key, "error", error).await?;
conn.hset::<_, _, _, ()>(&job_key, "status", JobStatus::Error.as_str()).await?;
conn.hset::<_, _, _, ()>(&job_key, "updatedAt", now.to_rfc3339()).await?;
Ok(())
}
/// Delete job from Redis
pub async fn delete_from_redis(
conn: &mut redis::aio::MultiplexedConnection,
job_id: &str,
) -> Result<(), JobError> {
let job_key = format!("{}{}", NAMESPACE_PREFIX, job_id);
conn.del::<_, ()>(&job_key).await?;
Ok(())
}
/// List all job IDs from Redis
pub async fn list_all_job_ids(
conn: &mut redis::aio::MultiplexedConnection,
) -> Result<Vec<String>, JobError> {
// Search specifically for job keys with the exact job pattern
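// NOTE: KEYS performs a full keyspace scan (O(N)); acceptable for admin and debug use.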
let job_keys: Vec<String> = conn.keys(format!("{}*", NAMESPACE_PREFIX)).await?;
let job_ids: Vec<String> = job_keys
.iter()
.filter_map(|key| {
// Only include keys that exactly match the job key pattern hero:job:*
if key.starts_with(NAMESPACE_PREFIX) {
let potential_id = key.strip_prefix(NAMESPACE_PREFIX)?;
// Validate that this looks like a UUID (job IDs are UUIDs)
if potential_id.len() == 36 && potential_id.chars().filter(|&c| c == '-').count() == 4 {
Some(potential_id.to_string())
} else {
None
}
} else {
None
}
})
.collect();
Ok(job_ids)
}
}
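
Taken together, the API above yields a simple create → store → inspect flow. A minimal usage sketch (not part of the crate), assuming a local Redis at `127.0.0.1:6379` and the `redis` and `tokio` crates available alongside `hero_job`:

```rust
use hero_job::{Job, JobStatus, ScriptType};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = redis::Client::open("redis://127.0.0.1:6379")?;
    let mut conn = client.get_multiplexed_async_connection().await?;

    // Create a job with the defaults (30 s timeout, no retries) and persist it.
    let job = Job::new(
        "caller-1".to_string(),
        "context-1".to_string(),
        "40 + 2".to_string(),
        ScriptType::HeroScript,
    );
    job.store_in_redis(&mut conn).await?;

    // Drive the lifecycle and read it back.
    Job::update_status(&mut conn, &job.id, JobStatus::Started).await?;
    assert_eq!(Job::get_status(&mut conn, &job.id).await?, JobStatus::Started);

    // Round-trip the stored hash back into a Job.
    let loaded = Job::load_from_redis(&mut conn, &job.id).await?;
    assert_eq!(loaded.script, job.script);
    Ok(())
}
```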

BIN
core/worker/.DS_Store vendored Normal file

Binary file not shown.

2
core/worker/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
/target
worker_rhai_temp_db

1423
core/worker/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

29
core/worker/Cargo.toml Normal file
View File

@@ -0,0 +1,29 @@
[package]
name = "rhailib_worker"
version = "0.1.0"
edition = "2021"
[lib]
name = "rhailib_worker" # Can be different from package name, or same
path = "src/lib.rs"
[[bin]]
name = "worker"
path = "cmd/worker.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
redis = { version = "0.25.0", features = ["tokio-comp"] }
rhai = { version = "1.18.0", default-features = false, features = ["sync", "decimal", "std"] } # Added "decimal" for broader script support
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
log = "0.4"
env_logger = "0.10"
clap = { version = "4.4", features = ["derive"] }
uuid = { version = "1.6", features = ["v4", "serde"] } # Though task_id is string, uuid might be useful
chrono = { version = "0.4", features = ["serde"] }
rhai_dispatcher = { path = "../../../rhailib/src/dispatcher" }
rhailib_engine = { path = "../engine" }
heromodels = { path = "../../../db/heromodels", features = ["rhai"] }

75
core/worker/README.md Normal file
View File

@@ -0,0 +1,75 @@
# Rhai Worker
The `rhailib_worker` crate implements a standalone worker service that listens for Rhai script execution tasks from a Redis queue, executes them, and posts results back to Redis. It is designed to be spawned as a separate OS process by an orchestrator like the `launcher` crate.
## Features
- **Redis Queue Consumption**: Listens to a specific Redis list (acting as a task queue) for incoming task IDs. The queue is determined by the `--circle-public-key` argument.
- **Rhai Script Execution**: Executes Rhai scripts retrieved from Redis based on task IDs.
- **Task State Management**: Updates task status (`processing`, `completed`, `error`) and stores results in Redis hashes.
- **Script Scope Injection**: Automatically injects two important constants into the Rhai script's scope:
- `CONTEXT_ID`: The public key of the worker's own circle.
- `CALLER_ID`: The public key of the entity that requested the script execution.
- **Asynchronous Operations**: Built with `tokio` for non-blocking Redis communication.
- **Graceful Error Handling**: Captures errors during script execution and stores them for the client.
## Core Components
- **`worker_lib` (Library Crate)**:
- **`Args`**: A struct (using `clap`) for parsing command-line arguments: `--redis-url` and `--circle-public-key`.
- **`run_worker_loop(engine: Engine, args: Args)`**: The main asynchronous function that:
- Connects to Redis.
- Continuously polls the designated Redis queue (`rhai_tasks:<circle_public_key>`) using `BLPOP`.
- Upon receiving a `task_id`, it fetches the task details from a Redis hash.
- It injects `CALLER_ID` and `CONTEXT_ID` into the script's scope.
- It executes the script and updates the task status in Redis with the output or error.
- **`worker` (Binary Crate - `cmd/worker.rs`)**:
- The main executable entry point. It parses command-line arguments, initializes a Rhai engine, and invokes `run_worker_loop`.
## How It Works
1. The worker executable is launched by an external process (e.g., `launcher`), which passes the required command-line arguments.
```bash
# This is typically done programmatically by a parent process.
/path/to/worker --redis-url redis://127.0.0.1/ --circle-public-key 02...abc
```
2. The `run_worker_loop` connects to Redis and starts listening to its designated task queue (e.g., `rhai_tasks:02...abc`).
3. A `rhai_dispatcher` submits a task by pushing a `task_id` to this queue and storing the script and other details in a Redis hash.
4. The worker's `BLPOP` command picks up the `task_id`.
5. The worker retrieves the script from the corresponding `rhai_task_details:<task_id>` hash.
6. It updates the task's status to "processing".
7. The Rhai script is executed within a scope that contains both `CONTEXT_ID` and `CALLER_ID`.
8. After execution, the status is updated to "completed" (with output) or "error" (with an error message).
9. The worker then goes back to listening for the next task.
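For testing, the dispatcher side of this handshake can be reproduced directly with the `redis` crate. A sketch, following the key layout described above (the public key, task fields, and `chrono` dependency are illustrative assumptions):
```rust
use redis::AsyncCommands;

#[tokio::main]
async fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1/")?;
    let mut conn = client.get_multiplexed_async_connection().await?;

    // 1. Store the task details in the hash the worker will HGETALL.
    let task_id = "test-task-1";
    let created_at = chrono::Utc::now().to_rfc3339();
    conn.hset_multiple::<_, _, _, ()>(
        format!("rhai_task_details:{}", task_id),
        &[
            ("script", r#""hello from the dispatcher""#),
            ("callerId", "02...def"),
            ("contextId", "02...abc"),
            ("createdAt", created_at.as_str()),
        ],
    )
    .await?;

    // 2. Enqueue the task ID on the worker's queue.
    conn.lpush::<_, _, ()>("rhai_tasks:02...abc", task_id).await?;
    Ok(())
}
```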
## Prerequisites
- A running Redis instance accessible by the worker.
- An orchestrator process (like `launcher`) to spawn the worker.
- A `rhai_dispatcher` (or another system) to populate the Redis queues.
## Building and Running
The worker is intended to be built as a dependency and run by another program.
1. **Build the worker:**
```bash
# From the root of the rhailib project
cargo build --package rhailib_worker
```
The binary will be located at `target/debug/worker`.
2. **Running the worker:**
The worker is not typically run manually. The `launcher` crate is responsible for spawning it with the correct arguments. If you need to run it manually for testing, you must provide the required arguments:
```bash
./target/debug/worker --redis-url redis://127.0.0.1/ --circle-public-key <a_valid_hex_public_key>
```
## Dependencies
Key dependencies include:
- `redis`: For asynchronous Redis communication.
- `rhai`: The Rhai script engine.
- `clap`: For command-line argument parsing.
- `tokio`: For the asynchronous runtime.
- `log`, `env_logger`: For logging.

113
core/worker/cmd/README.md Normal file
View File

@@ -0,0 +1,113 @@
# Rhai Worker Binary
A command-line worker for executing Rhai scripts from Redis task queues.
## Binary: `worker`
### Installation
Build the binary:
```bash
cargo build --bin worker --release
```
### Usage
```bash
# Basic usage - requires circle public key
worker --circle-public-key <CIRCLE_PUBLIC_KEY>
# Custom Redis URL
worker -c <CIRCLE_PUBLIC_KEY> --redis-url redis://localhost:6379/1
# Custom worker ID and database path
worker -c <CIRCLE_PUBLIC_KEY> --worker-id my_worker --db-path /tmp/worker_db
# Preserve tasks for debugging/benchmarking
worker -c <CIRCLE_PUBLIC_KEY> --preserve-tasks
# Remove timestamps from logs
worker -c <CIRCLE_PUBLIC_KEY> --no-timestamp
# Increase verbosity
worker -c <CIRCLE_PUBLIC_KEY> -v # Debug logging
worker -c <CIRCLE_PUBLIC_KEY> -vv # Full debug
worker -c <CIRCLE_PUBLIC_KEY> -vvv # Trace logging
```
### Command-Line Options
| Option | Short | Default | Description |
|--------|-------|---------|-------------|
| `--circle-public-key` | `-c` | **Required** | Circle public key to listen for tasks |
| `--redis-url` | `-r` | `redis://localhost:6379` | Redis connection URL |
| `--worker-id` | `-w` | `worker_1` | Unique worker identifier |
| `--preserve-tasks` | | `false` | Preserve task details after completion |
| `--db-path` | | `worker_rhai_temp_db` | Database path for Rhai engine |
| `--no-timestamp` | | `false` | Remove timestamps from log output |
| `--verbose` | `-v` | | Increase verbosity (stackable) |
### Features
- **Task Queue Processing**: Listens to Redis queues for Rhai script execution tasks
- **Performance Optimized**: Configured for maximum Rhai engine performance
- **Graceful Shutdown**: Supports shutdown signals for clean termination
- **Flexible Logging**: Configurable verbosity and timestamp control
- **Database Integration**: Uses heromodels for data persistence
- **Task Cleanup**: Optional task preservation for debugging/benchmarking
### How It Works
1. **Queue Listening**: Worker listens on Redis queue `rhailib:{circle_public_key}`
2. **Task Processing**: Receives task IDs, fetches task details from Redis
3. **Script Execution**: Executes Rhai scripts with configured engine
4. **Result Handling**: Updates task status and sends results to reply queues
5. **Cleanup**: Optionally cleans up task details after completion
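A client can consume the result from step 4 by blocking on the per-task reply queue. A sketch, assuming the worker's `rhailib:` namespace and an arbitrary 30 s timeout:
```rust
use redis::AsyncCommands;

/// Block until the worker pushes the serialized task result, or time out.
async fn await_reply(
    conn: &mut redis::aio::MultiplexedConnection,
    task_id: &str,
) -> redis::RedisResult<Option<String>> {
    let reply_key = format!("rhailib:reply:{}", task_id);
    // BLPOP returns (key, payload), or None once the 30 s timeout elapses.
    let reply: Option<(String, String)> = conn.blpop(&reply_key, 30.0).await?;
    Ok(reply.map(|(_, json)| json))
}
```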
### Configuration Examples
#### Development Worker
```bash
# Simple development worker
worker -c dev_circle_123
# Development with verbose logging (no timestamps)
worker -c dev_circle_123 -v --no-timestamp
```
#### Production Worker
```bash
# Production worker with custom configuration
worker \
--circle-public-key prod_circle_456 \
--redis-url redis://redis-server:6379/0 \
--worker-id prod_worker_1 \
--db-path /var/lib/worker/db \
--preserve-tasks
```
#### Benchmarking Worker
```bash
# Worker optimized for benchmarking
worker \
--circle-public-key bench_circle_789 \
--preserve-tasks \
--no-timestamp \
-vv
```
### Error Handling
The worker provides clear error messages for:
- Missing or invalid circle public key
- Redis connection failures
- Script execution errors
- Database access issues
### Dependencies
- `rhailib_engine`: Rhai engine with heromodels integration
- `redis`: Redis client for task queue management
- `rhai`: Script execution engine
- `clap`: Command-line argument parsing
- `env_logger`: Logging infrastructure

95
core/worker/cmd/worker.rs Normal file
View File

@@ -0,0 +1,95 @@
use clap::Parser;
use rhailib_engine::create_heromodels_engine;
use rhailib_worker::spawn_rhai_worker;
use tokio::sync::mpsc;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Worker ID for identification
#[arg(short, long)]
worker_id: String,
/// Redis URL
#[arg(short, long, default_value = "redis://localhost:6379")]
redis_url: String,
/// Preserve task details after completion (for benchmarking)
#[arg(long, default_value = "false")]
preserve_tasks: bool,
/// Root directory for engine database
#[arg(long, default_value = "worker_rhai_temp_db")]
db_path: String,
/// Disable timestamps in log output
#[arg(long, help = "Remove timestamps from log output")]
no_timestamp: bool,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let args = Args::parse();
// Configure env_logger with or without timestamps
if args.no_timestamp {
env_logger::Builder::from_default_env()
.format_timestamp(None)
.init();
} else {
env_logger::init();
}
log::info!("Rhai Worker (binary) starting with performance-optimized engine.");
log::info!(
"Worker ID: {}, Redis: {}",
args.worker_id,
args.redis_url
);
let mut engine = create_heromodels_engine();
// Performance optimizations for benchmarking
engine.set_max_operations(0); // Unlimited operations for performance testing
engine.set_max_expr_depths(0, 0); // Unlimited expression depth
engine.set_max_string_size(0); // Unlimited string size
engine.set_max_array_size(0); // Unlimited array size
engine.set_max_map_size(0); // Unlimited map size
// Enable full optimization for maximum performance
engine.set_optimization_level(rhai::OptimizationLevel::Full);
log::info!("Engine configured for maximum performance");
// Create shutdown channel (for graceful shutdown, though not used in benchmarks)
let (_shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1);
// Spawn the worker
let worker_handle = spawn_rhai_worker(
args.worker_id,
args.db_path,
engine,
args.redis_url,
shutdown_rx,
args.preserve_tasks,
);
// Wait for the worker to complete
match worker_handle.await {
Ok(result) => match result {
Ok(_) => {
log::info!("Worker completed successfully");
Ok(())
}
Err(e) => {
log::error!("Worker failed: {}", e);
Err(e)
}
},
Err(e) => {
log::error!("Worker task panicked: {}", e);
Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>)
}
}
}

View File

@@ -0,0 +1,53 @@
# Architecture of the `rhailib_worker` Crate
The `rhailib_worker` crate implements a distributed task execution system for Rhai scripts, providing scalable, reliable script processing through Redis-based task queues. Workers are decoupled from contexts, allowing a single worker to process tasks for multiple contexts (circles).
## Core Architecture
```mermaid
graph TD
A[Worker Process] --> B[Task Queue Processing]
A --> C[Script Execution Engine]
A --> D[Result Management]
B --> B1[Redis Queue Monitoring]
B --> B2[Task Deserialization]
B --> B3[Priority Handling]
C --> C1[Rhai Engine Integration]
C --> C2[Context Management]
C --> C3[Error Handling]
D --> D1[Result Serialization]
D --> D2[Reply Queue Management]
D --> D3[Status Updates]
```
## Key Components
### Task Processing Pipeline
- **Queue Monitoring**: Continuous Redis queue polling for new tasks
- **Task Execution**: Secure Rhai script execution with proper context
- **Result Handling**: Comprehensive result and error management
### Engine Integration
- **Rhailib Engine**: Full integration with rhailib_engine for DSL access
- **Context Injection**: Proper authentication and database context setup
- **Security**: Isolated execution environment with access controls
### Scalability Features
- **Horizontal Scaling**: Multiple worker instances for load distribution
- **Queue-based Architecture**: Reliable task distribution via Redis
- **Fault Tolerance**: Robust error handling and recovery mechanisms
## Dependencies
- **Redis Integration**: Task queue management and communication
- **Rhai Engine**: Script execution with full DSL capabilities
- **Client Integration**: Shared data structures with rhai_dispatcher
- **Heromodels**: Database and business logic integration
- **Async Runtime**: Tokio for high-performance concurrent processing
## Deployment Patterns
Workers can be deployed as standalone processes, containerized services, or embedded components, providing flexibility for various deployment scenarios from development to production.

259
core/worker/src/lib.rs Normal file
View File

@@ -0,0 +1,259 @@
use chrono::Utc;
use log::{debug, error, info};
use redis::AsyncCommands;
use rhai::{Dynamic, Engine};
use rhai_dispatcher::RhaiTaskDetails; // Import for constructing the reply message
use serde_json; // For serializing the reply message
use std::collections::HashMap;
use tokio::sync::mpsc; // For shutdown signal
use tokio::task::JoinHandle; // Handle type returned for the spawned worker task
const NAMESPACE_PREFIX: &str = "rhailib:";
const BLPOP_TIMEOUT_SECONDS: usize = 5;
// This function updates specific fields in the Redis hash.
// It doesn't need to know the full RhaiTaskDetails struct, only the field names.
async fn update_task_status_in_redis(
conn: &mut redis::aio::MultiplexedConnection,
task_id: &str,
status: &str,
output: Option<String>,
error_msg: Option<String>,
) -> redis::RedisResult<()> {
let task_key = format!("{}{}", NAMESPACE_PREFIX, task_id);
let mut updates: Vec<(&str, String)> = vec![
("status", status.to_string()),
("updatedAt", Utc::now().timestamp().to_string()),
];
if let Some(out) = output {
updates.push(("output", out));
}
if let Some(err) = error_msg {
updates.push(("error", err));
}
debug!(
"Updating task {} in Redis with status: {}, updates: {:?}",
task_id, status, updates
);
conn.hset_multiple::<_, _, _, ()>(&task_key, &updates)
.await?;
Ok(())
}
pub fn spawn_rhai_worker(
worker_id: String,
db_path: String,
mut engine: Engine,
redis_url: String,
mut shutdown_rx: mpsc::Receiver<()>, // Add shutdown receiver
preserve_tasks: bool, // Flag to control task cleanup
) -> JoinHandle<Result<(), Box<dyn std::error::Error + Send + Sync>>> {
tokio::spawn(async move {
let queue_key = format!("{}{}", NAMESPACE_PREFIX, worker_id);
info!(
"Rhai Worker for Worker ID '{}' starting. Connecting to Redis at {}. Listening on queue: {}. Waiting for tasks or shutdown signal.",
worker_id, redis_url, queue_key
);
let redis_client = match redis::Client::open(redis_url.as_str()) {
Ok(client) => client,
Err(e) => {
error!(
"Worker for Worker ID '{}': Failed to open Redis client: {}",
worker_id, e
);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
let mut redis_conn = match redis_client.get_multiplexed_async_connection().await {
Ok(conn) => conn,
Err(e) => {
error!(
"Worker for Worker ID '{}': Failed to get Redis connection: {}",
worker_id, e
);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
info!(
"Worker for Worker ID '{}' successfully connected to Redis.",
worker_id
);
loop {
let blpop_keys = vec![queue_key.clone()];
tokio::select! {
// Listen for shutdown signal
_ = shutdown_rx.recv() => {
info!("Worker for Worker ID '{}': Shutdown signal received. Terminating loop.", worker_id.clone());
break;
}
// Listen for tasks from Redis
blpop_result = redis_conn.blpop(&blpop_keys, BLPOP_TIMEOUT_SECONDS as f64) => {
debug!("Worker for Worker ID '{}': Attempting BLPOP on queue: {}", worker_id.clone(), queue_key);
let response: Option<(String, String)> = match blpop_result {
Ok(resp) => resp,
Err(e) => {
error!("Worker '{}': Redis BLPOP error on queue {}: {}. Worker for this circle might stop.", worker_id, queue_key, e);
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
};
if let Some((_queue_name_recv, task_id)) = response {
info!("Worker '{}' received task_id: {} from queue: {}", worker_id, task_id, _queue_name_recv);
debug!("Worker '{}', Task {}: Processing started.", worker_id, task_id);
let task_details_key = format!("{}{}", NAMESPACE_PREFIX, task_id);
debug!("Worker '{}', Task {}: Attempting HGETALL from key: {}", worker_id, task_id, task_details_key);
let task_details_map_result: Result<HashMap<String, String>, _> =
redis_conn.hgetall(&task_details_key).await;
match task_details_map_result {
Ok(details_map) => {
debug!("Worker '{}', Task {}: HGETALL successful. Details: {:?}", worker_id, task_id, details_map);
let script_content_opt = details_map.get("script").cloned();
let created_at_str_opt = details_map.get("createdAt").cloned();
let caller_id = details_map.get("callerId").cloned().expect("callerId field missing from Redis hash");
let context_id = details_map.get("contextId").cloned().expect("contextId field missing from Redis hash");
if context_id.is_empty() {
error!("Worker '{}', Task {}: contextId field missing from Redis hash", worker_id, task_id);
return Err("contextId field missing from Redis hash".into());
}
if caller_id.is_empty() {
error!("Worker '{}', Task {}: callerId field missing from Redis hash", worker_id, task_id);
return Err("callerId field missing from Redis hash".into());
}
if let Some(script_content) = script_content_opt {
info!("Worker '{}' processing task_id: {}. Script: {:.50}...", context_id, task_id, script_content);
debug!("Worker for Context ID '{}', Task {}: Attempting to update status to 'processing'.", context_id, task_id);
if let Err(e) = update_task_status_in_redis(&mut redis_conn, &task_id, "processing", None, None).await {
error!("Worker for Context ID '{}', Task {}: Failed to update status to 'processing': {}", context_id, task_id, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Status updated to 'processing'.", context_id, task_id);
}
let mut db_config = rhai::Map::new();
db_config.insert("DB_PATH".into(), db_path.clone().into());
db_config.insert("CALLER_ID".into(), caller_id.clone().into());
db_config.insert("CONTEXT_ID".into(), context_id.clone().into());
engine.set_default_tag(Dynamic::from(db_config)); // Or pass via CallFnOptions
debug!("Worker for Context ID '{}', Task {}: Evaluating script with Rhai engine.", context_id, task_id);
let mut final_status = "error".to_string(); // Default to error
let mut final_output: Option<String> = None;
let mut final_error_msg: Option<String> = None;
match engine.eval::<rhai::Dynamic>(&script_content) {
Ok(result) => {
let output_str = if result.is::<String>() {
// If the result is a string, we can unwrap it directly.
// This moves `result`, which is fine because it's the last time we use it in this branch.
result.into_string().unwrap()
} else {
result.to_string()
};
info!("Worker for Context ID '{}' task {} completed. Output: {}", context_id, task_id, output_str);
final_status = "completed".to_string();
final_output = Some(output_str);
}
Err(e) => {
let error_str = format!("{:?}", *e);
error!("Worker for Context ID '{}' task {} script evaluation failed. Error: {}", context_id, task_id, error_str);
final_error_msg = Some(error_str);
// final_status remains "error"
}
}
debug!("Worker for Context ID '{}', Task {}: Attempting to update status to '{}'.", context_id, task_id, final_status);
if let Err(e) = update_task_status_in_redis(
&mut redis_conn,
&task_id,
&final_status,
final_output.clone(), // Clone for task hash update
final_error_msg.clone(), // Clone for task hash update
).await {
error!("Worker for Context ID '{}', Task {}: Failed to update final status to '{}': {}", context_id, task_id, final_status, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Final status updated to '{}'.", context_id, task_id, final_status);
}
// Send to reply queue if specified
let created_at = created_at_str_opt
.and_then(|s| chrono::DateTime::parse_from_rfc3339(&s).ok())
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(Utc::now); // Fallback, though createdAt should exist
let reply_details = RhaiTaskDetails {
task_id: task_id.to_string(), // Add the task_id
script: script_content.clone(), // Include script for context in reply
status: final_status, // The final status
output: final_output, // The final output
error: final_error_msg, // The final error
created_at, // Original creation time
updated_at: Utc::now(), // Time of this final update/reply
caller_id: caller_id.clone(),
context_id: context_id.clone(),
worker_id: worker_id.clone(),
};
let reply_queue_key = format!("{}reply:{}", NAMESPACE_PREFIX, task_id); // NAMESPACE_PREFIX already ends with ':'
match serde_json::to_string(&reply_details) {
Ok(reply_json) => {
let lpush_result: redis::RedisResult<i64> = redis_conn.lpush(&reply_queue_key, &reply_json).await;
match lpush_result {
Ok(_) => debug!("Worker for Context ID '{}', Task {}: Successfully sent result to reply queue {}", context_id, task_id, reply_queue_key),
Err(e_lpush) => error!("Worker for Context ID '{}', Task {}: Failed to LPUSH result to reply queue {}: {}", context_id, task_id, reply_queue_key, e_lpush),
}
}
Err(e_json) => {
error!("Worker for Context ID '{}', Task {}: Failed to serialize reply details for queue {}: {}", context_id, task_id, reply_queue_key, e_json);
}
}
// Clean up task details based on preserve_tasks flag
if !preserve_tasks {
// The worker is responsible for cleaning up the task details hash.
if let Err(e) = redis_conn.del::<_, ()>(&task_details_key).await {
error!("Worker for Context ID '{}', Task {}: Failed to delete task details key '{}': {}", context_id, task_id, task_details_key, e);
} else {
debug!("Worker for Context ID '{}', Task {}: Cleaned up task details key '{}'.", context_id, task_id, task_details_key);
}
} else {
debug!("Worker for Context ID '{}', Task {}: Preserving task details (preserve_tasks=true)", context_id, task_id);
}
} else { // Script content not found in hash
error!(
"Worker for Context ID '{}', Task {}: Script content not found in Redis hash. Details map: {:?}",
context_id, task_id, details_map
);
// Clean up invalid task details based on preserve_tasks flag
if !preserve_tasks {
// Even if the script is not found, the worker should clean up the invalid task hash.
if let Err(e) = redis_conn.del::<_, ()>(&task_details_key).await {
error!("Worker for Context ID '{}', Task {}: Failed to delete invalid task details key '{}': {}", context_id, task_id, task_details_key, e);
}
} else {
debug!("Worker for Context ID '{}', Task {}: Preserving invalid task details (preserve_tasks=true)", context_id, task_id);
}
}
}
Err(e) => {
error!(
"Worker '{}', Task {}: Failed to fetch details (HGETALL) from Redis for key {}. Error: {:?}",
worker_id, task_id, task_details_key, e
);
}
}
} else {
debug!("Worker '{}': BLPOP timed out on queue {}. No new tasks. Checking for shutdown signal again.", &worker_id, &queue_key);
}
} // End of blpop_result match
} // End of tokio::select!
} // End of loop
info!("Worker '{}' has shut down.", worker_id);
Ok(())
})
}
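
For embedding the worker outside the CLI binary, the shutdown channel provides a clean stop path. A sketch, using a plain `rhai::Engine` for brevity where the binary uses `create_heromodels_engine` (the 60 s run window is a placeholder for whatever condition triggers shutdown):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1);

    // Spawn the worker loop on queue `rhailib:worker_1`.
    let handle = rhailib_worker::spawn_rhai_worker(
        "worker_1".to_string(),
        "worker_rhai_temp_db".to_string(),
        rhai::Engine::new(),
        "redis://127.0.0.1/".to_string(),
        shutdown_rx,
        false, // do not preserve task details
    );

    // Run until some external condition, then request a graceful stop.
    tokio::time::sleep(std::time::Duration::from_secs(60)).await;
    let _ = shutdown_tx.send(()).await;

    // First `?` propagates a task panic, the second a worker error.
    handle.await??;
    Ok(())
}
```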