initial commit

Timur Gordon
2025-08-26 14:49:21 +02:00
commit 767c66fb6a
66 changed files with 22035 additions and 0 deletions

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
target

2687
Cargo.lock generated Normal file

File diff suppressed because it is too large.

73
Cargo.toml Normal file

@@ -0,0 +1,73 @@
[package]
name = "hero-supervisor"
version = "0.1.0"
edition = "2021"

[dependencies]
# Async runtime
tokio = { version = "1.0", features = ["full"] }
# Async trait support
async-trait = "0.1"
# Redis client
redis = { version = "0.25", features = ["aio", "tokio-comp"] }
# Job module dependencies (now integrated)
uuid = { version = "1.0", features = ["v4"] }
# Logging
log = "0.4"
thiserror = "1.0"
chrono = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
env_logger = "0.10"
sal-service-manager = { path = "../sal/service_manager" }
# CLI argument parsing
clap = { version = "4.0", features = ["derive"] }
toml = "0.8"
# OpenRPC dependencies (now always included)
jsonrpsee = { version = "0.24", features = ["server", "macros"] }
anyhow = "1.0"
# CORS support for OpenRPC server
tower-http = { version = "0.5", features = ["cors"] }
tower = "0.4"

[dev-dependencies]
tokio-test = "0.4"
hero-supervisor-openrpc-client = { path = "clients/openrpc" }
escargot = "0.5"

[features]
default = ["cli"]
cli = []

[[bin]]
name = "supervisor"
path = "cmd/supervisor.rs"

# Examples
[[example]]
name = "openrpc_comprehensive"
path = "examples/basic_openrpc_client.rs"

[[example]]
name = "test_queue_and_wait"
path = "examples/test_queue_and_wait.rs"

[[example]]
name = "test_openrpc_methods"
path = "examples/test_openrpc_methods.rs"

[[example]]
name = "mock_runner"
path = "examples/mock_runner.rs"

[[example]]
name = "supervisor"
path = "examples/supervisor/run_supervisor.rs"

195
README.md Normal file

@@ -0,0 +1,195 @@
# Hero Supervisor
A Rust-based actor management system for the Hero ecosystem that provides unified process management, job queuing, and optional OpenRPC server integration.
## Architecture Overview
The Hero Supervisor uses a clean, feature-gated architecture that separates library functionality from CLI/server features to avoid dependency cycles and maintain modularity.
```
hero-supervisor/
├── src/              # Core library (no CLI dependencies)
│   ├── lib.rs        # Main library exports and documentation
│   ├── supervisor.rs # Core supervisor logic and actor management
│   ├── runner.rs     # Runner implementation for actor process management
│   ├── job.rs        # Job data structures, builder pattern, and Redis key management
│   └── openrpc.rs    # OpenRPC server (feature-gated)
├── cmd/              # CLI binaries
```
## Features
The crate uses Rust's feature system to provide conditional compilation:
- **`default`**: Includes all functionality - supervisor, OpenRPC server, and CLI binary
- **`cli`**: Enables the supervisor binary (included in default)
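
In practice, this gating is done with `cfg` attributes in the library source. A minimal sketch, assuming a hypothetical module and argument set (not the crate's actual code):

```rust
// Sketch only: CLI-only items compiled behind the `cli` feature.
#[cfg(feature = "cli")]
pub mod cli_args {
    use clap::Parser;

    /// Command-line arguments for the supervisor binary (illustrative).
    #[derive(Parser, Debug)]
    pub struct Args {
        /// Redis connection URL.
        #[arg(long, default_value = "redis://localhost:6379")]
        pub redis_url: String,
    }
}
```

Building with `--no-default-features` compiles the library without such CLI-only items.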
## Architecture
The Hero Supervisor uses a clean, simplified architecture with centralized resource management:
### Core Components
#### `SupervisorBuilder` → `Supervisor` → `SupervisorApp`
- **`SupervisorBuilder`**: Configures Redis URL, namespace, secrets, runners, and process manager
- **`Supervisor`**: Core engine that owns Redis client and process manager, manages runners centrally
- **`SupervisorApp`**: Main application that wraps supervisor and provides `start()` method for complete lifecycle management
### Key Design Decisions
- **Centralized Resources**: Supervisor exclusively owns Redis client and process manager (no per-runner instances)
- **Builder Pattern**: Flexible configuration through `SupervisorBuilder` with method chaining
- **Direct OpenRPC Integration**: RPC trait implemented directly on `Arc<Mutex<Supervisor>>` (no wrapper layers)
- **Simplified App**: `SupervisorApp::start()` handles everything - runners, OpenRPC server, graceful shutdown
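
A minimal sketch of the third point, assuming hypothetical trait, field, and method names (the real trait lives in `src/openrpc.rs`):

```rust
use std::sync::Arc;
use async_trait::async_trait;
use tokio::sync::Mutex;

// Stand-in for the real supervisor; the field is illustrative.
pub struct Supervisor {
    runners: Vec<String>,
}

// Hypothetical RPC surface.
#[async_trait]
pub trait SupervisorRpc {
    async fn list_runners(&self) -> Vec<String>;
}

// Implemented directly on the shared handle, so the OpenRPC server
// needs no wrapper type around the supervisor.
#[async_trait]
impl SupervisorRpc for Arc<Mutex<Supervisor>> {
    async fn list_runners(&self) -> Vec<String> {
        self.lock().await.runners.clone()
    }
}
```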
## File Documentation
### Core Library Files
#### `src/lib.rs`
Main library entry point that exports `Supervisor`, `SupervisorBuilder`, `SupervisorApp`, and related types.
#### `src/supervisor.rs`
Core supervisor implementation with builder pattern. Manages runners, owns shared Redis client and process manager. Provides job queuing, runner lifecycle management, and status monitoring.
#### `src/app.rs`
Main application wrapper that provides `start()` method for complete lifecycle management. Handles OpenRPC server startup, graceful shutdown, and keeps the application running.
#### `src/runner.rs`
Simplified runner configuration and management. Contains `Runner` struct with configuration data only - no resource ownership. Integrates with supervisor's shared resources.
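
A hedged sketch of what "configuration data only" means here (field names are assumptions, not the actual definition):

```rust
use std::path::PathBuf;

/// Illustrative only: a runner described purely by its configuration,
/// with no Redis client or process-manager handle of its own.
#[derive(Debug, Clone)]
pub struct Runner {
    pub id: String,
    pub binary_path: PathBuf,
    pub script_type: String,
}
```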
#### `src/job.rs`
Job data structures, builder pattern, and Redis key management. Defines `Job` struct with metadata, script content, and status tracking.
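
For orientation, a sketch of the kind of data and key scheme this implies; the field names, status variants, and key format below are assumptions, so see `src/job.rs` for the real definitions:

```rust
/// Illustrative job shape: metadata, script payload, and a status field.
#[derive(Debug, Clone)]
pub struct Job {
    pub id: String,        // UUID v4, generated with the `uuid` crate
    pub caller_id: String,
    pub context_id: String,
    pub payload: String,   // script content to execute
    pub status: JobStatus,
}

#[derive(Debug, Clone, PartialEq)]
pub enum JobStatus {
    Queued,
    Running,
    Finished,
    Error,
}

/// Hypothetical Redis key builder of the general form used for job storage.
fn job_key(namespace: &str, job_id: &str) -> String {
    format!("{namespace}:job:{job_id}")
}
```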
#### `src/openrpc.rs`
OpenRPC server implementation that exposes all supervisor functionality over JSON-RPC. Implements RPC trait directly on the supervisor for clean integration.
### Binary Files
#### `cmd/supervisor.rs`
Main supervisor binary that creates a supervisor using the builder pattern and starts the complete application with `app.start()`. The OpenRPC server is always enabled and starts automatically.
## Usage
### Running the Supervisor Binary
```bash
# Run with default (error) logging
cargo run --bin supervisor
# Run with info logging
RUST_LOG=info cargo run --bin supervisor
# Run with debug logging
RUST_LOG=debug cargo run --bin supervisor
# Run with trace logging (very verbose)
RUST_LOG=trace cargo run --bin supervisor
# Run with specific module logging
RUST_LOG=hero_supervisor=debug cargo run --bin supervisor
```
### Library Usage
```rust
use hero_supervisor::{SupervisorBuilder, SupervisorApp};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build supervisor with configuration
    let supervisor = SupervisorBuilder::new()
        .redis_url("redis://localhost:6379")
        .namespace("hero")
        .build()
        .await?;

    // Create and start the complete application
    let mut app = SupervisorApp::new(supervisor);
    app.start().await?;

    Ok(())
}
```
### As a Dependency
```toml
[dependencies]
hero-supervisor = "0.1.0"
```
## OpenRPC Server
The supervisor automatically starts an OpenRPC server on `127.0.0.1:3030` that exposes all supervisor functionality via JSON-RPC.
### Available Methods
- `add_runner` - Add a new actor/runner
- `remove_runner` - Remove an actor/runner
- `list_runners` - List all runner IDs
- `start_runner` - Start a specific runner
- `stop_runner` - Stop a specific runner
- `get_runner_status` - Get status of a specific runner
- `get_runner_logs` - Get logs for a specific runner
- `queue_job_to_runner` - Queue a job to a specific runner
- `get_all_runner_status` - Get status of all runners
- `start_all` - Start all runners
- `stop_all` - Stop all runners
- `get_all_status` - Get status summary for all runners
### Example JSON-RPC Call
```bash
curl -X POST -H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"list_runners","id":1}' \
http://127.0.0.1:3030
```
## Development
### Building
```bash
# Library only (no CLI binary)
cargo build --no-default-features

# With CLI (default)
cargo build --features cli

# The OpenRPC server is always compiled in; no separate feature flag is needed
```
### Testing
```bash
cargo test --all-features
```
### Running
```bash
# Start the supervisor (the OpenRPC server is always enabled)
RUST_LOG=info cargo run --bin supervisor
```
## Dependencies
### Core Dependencies
- `tokio` - Async runtime
- `redis` - Redis client for job queuing
- `serde` - Serialization
- `log` - Logging
- `sal-service-manager` - Process management
### OpenRPC Dependencies
- `jsonrpsee` - JSON-RPC server (always included)
- `anyhow` - Error handling
- `tower-http` - CORS support for the OpenRPC server
## Architecture Benefits
1. **No Cyclic Dependencies**: Library and OpenRPC server are in the same crate, eliminating dependency cycles
2. **Feature-Gated**: CLI and server functionality only compiled when needed
3. **Clean Separation**: Library can be used independently without CLI dependencies
4. **Conditional Compilation**: Rust's feature system ensures minimal dependencies for library users
5. **Single Binary**: One supervisor binary with optional OpenRPC server integration
## License
[Add your license information here]

0
admin-ui/Cargo.toml Normal file

0
admin-ui/src/app.rs Normal file

0
admin-ui/src/jobs.rs Normal file

0
admin-ui/src/runners.rs Normal file

0
admin-ui/styles.css Normal file

2
clients/admin-ui/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
dist
target

3347
clients/admin-ui/Cargo.lock generated Normal file

File diff suppressed because it is too large.

clients/admin-ui/Cargo.toml Normal file

@@ -0,0 +1,29 @@
[package]
name = "supervisor-admin-ui"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
yew = { version = "0.21", features = ["csr"] }
wasm-bindgen = "0.2"
wasm-bindgen-futures = "0.4"
web-sys = { version = "0.3", features = [
"console",
"Document",
"Element",
"HtmlElement",
"Window",
] }
js-sys = "0.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
gloo = { version = "0.11", features = ["console", "timers", "futures"] }
log = "0.4"
wasm-logger = "0.2"
uuid = { version = "1.0", features = ["v4", "js"] }
# Use our new WASM OpenRPC client
hero-supervisor-openrpc-client = { path = "../clients/openrpc" }

clients/admin-ui/Trunk.toml Normal file

@@ -0,0 +1,16 @@
[build]
target = "index.html"
dist = "dist"
[watch]
watch = ["src", "index.html", "styles.css"]
[serve]
address = "127.0.0.1"
port = 8080
open = false
[[hooks]]
stage = "pre_build"
command = "echo"
command_arguments = ["Building Supervisor Admin UI..."]

clients/admin-ui/index.html Normal file

@@ -0,0 +1,13 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <title>Hero Supervisor</title>
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css">
    <link data-trunk rel="css" href="styles.css">
  </head>
  <body>
    <div id="app"></div>
  </body>
</html>

630
clients/admin-ui/src/app.rs Normal file

@@ -0,0 +1,630 @@
use yew::prelude::*;
use wasm_bindgen_futures::spawn_local;
use gloo::console;
use hero_supervisor_openrpc_client::wasm::{WasmSupervisorClient, WasmJob};
use gloo::timers::callback::Interval;
use crate::sidebar::{Sidebar, SupervisorInfo};
use crate::runners::{Runners, RegisterForm};
use crate::jobs::Jobs;
/// Generate a unique job ID client-side using UUID v4
fn generate_job_id() -> String {
uuid::Uuid::new_v4().to_string()
}
#[derive(Clone, Default)]
pub struct JobForm {
pub payload: String,
pub runner_name: String,
pub executor: String,
pub secret: String,
}
#[derive(Clone, Debug, PartialEq)]
pub enum PingState {
Idle,
Waiting,
Success(String), // Result message
Error(String), // Error message
}
impl Default for PingState {
fn default() -> Self {
PingState::Idle
}
}
#[derive(Clone)]
pub struct AppState {
pub server_url: String,
pub runners: Vec<(String, String)>, // (name, status)
pub jobs: Vec<WasmJob>,
pub ongoing_jobs: Vec<String>, // Job IDs being polled
pub loading: bool,
pub register_form: RegisterForm,
pub job_form: JobForm,
pub supervisor_info: Option<SupervisorInfo>,
pub admin_secret: String,
pub ping_states: std::collections::HashMap<String, PingState>, // runner_name -> ping_state
}
#[function_component(App)]
pub fn app() -> Html {
let state = use_state(|| AppState {
server_url: "http://localhost:3030".to_string(),
runners: vec![],
jobs: vec![],
ongoing_jobs: vec![],
loading: false,
register_form: RegisterForm {
name: String::new(),
secret: String::new(),
},
job_form: JobForm {
payload: String::new(),
runner_name: String::new(),
executor: String::new(),
secret: String::new(),
},
supervisor_info: None,
admin_secret: String::new(),
ping_states: std::collections::HashMap::new(),
});
// Set up polling for ongoing jobs every 2 seconds
{
let state = state.clone();
use_effect_with((), move |_| {
let state = state.clone();
let poll_jobs = {
let state = state.clone();
Callback::from(move |_| {
let current_state = (*state).clone();
if !current_state.ongoing_jobs.is_empty() {
let client = WasmSupervisorClient::new(current_state.server_url.clone());
let state_clone = state.clone();
spawn_local(async move {
console::log!("Polling ongoing jobs:", format!("{:?}", current_state.ongoing_jobs));
let mut updated_state = (*state_clone).clone();
let mut jobs_to_remove = Vec::new();
// Poll each ongoing job
for job_id in &current_state.ongoing_jobs {
match client.get_job(job_id).await {
Ok(updated_job) => {
// Find and update the job in the jobs list
if let Some(job_index) = updated_state.jobs.iter().position(|j| j.id() == *job_id) {
updated_state.jobs[job_index] = updated_job.clone();
console::log!("Updated job status for:", job_id);
}
}
Err(e) => {
console::error!("Failed to poll job:", job_id, format!("{:?}", e));
// Remove failed jobs from ongoing list
jobs_to_remove.push(job_id.clone());
}
}
}
// Remove completed/failed jobs from ongoing list
for job_id in jobs_to_remove {
updated_state.ongoing_jobs.retain(|id| id != &job_id);
}
state_clone.set(updated_state);
});
}
})
};
let interval = Interval::new(2000, move || {
poll_jobs.emit(());
});
move || drop(interval)
});
}
// Load initial data when component mounts
let load_initial_data = {
let state = state.clone();
let client_url = state.server_url.clone();
Callback::from(move |_: ()| {
let state = state.clone();
let client = WasmSupervisorClient::new(client_url.clone());
spawn_local(async move {
console::log!("Loading initial data...");
let mut current_state = (*state).clone();
current_state.loading = true;
state.set(current_state.clone());
// Load runners and jobs in parallel
let runners_result = client.list_runners().await;
let jobs_result = client.list_jobs().await;
match (runners_result, jobs_result) {
(Ok(runner_names), Ok(job_ids)) => {
console::log!("Successfully loaded runners:", format!("{:?}", runner_names));
console::log!("Successfully loaded jobs:", format!("{:?}", job_ids));
let runners_with_status: Vec<(String, String)> = runner_names
.into_iter()
.map(|name| (name, "Running".to_string()))
.collect();
// Fetch full job details for each job ID and identify unfinished jobs
let mut jobs = Vec::new();
let mut ongoing_jobs = Vec::new();
for job_id in job_ids {
match client.get_job(&job_id).await {
Ok(job) => {
// Check if job is unfinished (you may need to adjust this logic based on your job status field)
// For now, we'll assume all jobs are ongoing until we have proper status checking
ongoing_jobs.push(job_id.clone());
jobs.push(job);
}
Err(e) => {
console::error!("Failed to fetch job details for:", &job_id, format!("{:?}", e));
// Create placeholder job if fetch fails
jobs.push(WasmJob::new(job_id.clone(), "Loading...".to_string(), "Unknown".to_string(), "Unknown".to_string()));
}
}
}
let mut updated_state = (*state).clone();
updated_state.runners = runners_with_status;
updated_state.jobs = jobs;
updated_state.ongoing_jobs = ongoing_jobs.clone();
updated_state.loading = false;
console::log!("Added ongoing jobs to polling:", format!("{:?}", ongoing_jobs));
state.set(updated_state);
}
(Ok(runner_names), Err(jobs_err)) => {
console::log!("Successfully loaded runners:", format!("{:?}", runner_names));
console::error!("Failed to load jobs:", format!("{:?}", jobs_err));
let runners_with_status: Vec<(String, String)> = runner_names
.into_iter()
.map(|name| (name, "Running".to_string()))
.collect();
let mut updated_state = (*state).clone();
updated_state.runners = runners_with_status;
updated_state.loading = false;
state.set(updated_state);
}
(Err(runners_err), Ok(job_ids)) => {
console::error!("Failed to load runners:", format!("{:?}", runners_err));
console::log!("Successfully loaded jobs:", format!("{:?}", job_ids));
// Convert job IDs to WasmJob objects
let jobs: Vec<WasmJob> = job_ids
.into_iter()
.map(|id| {
WasmJob::new(id.clone(), "Loading...".to_string(), "Unknown".to_string(), "Unknown".to_string())
})
.collect();
let mut updated_state = (*state).clone();
updated_state.jobs = jobs;
updated_state.loading = false;
state.set(updated_state);
}
(Err(runners_err), Err(jobs_err)) => {
console::error!("Failed to load runners:", format!("{:?}", runners_err));
console::error!("Failed to load jobs:", format!("{:?}", jobs_err));
let mut updated_state = (*state).clone();
updated_state.loading = false;
state.set(updated_state);
}
}
});
})
};
use_effect_with((), move |_| {
load_initial_data.emit(());
|| ()
});
let on_load_runners = {
let state = state.clone();
let client_url = state.server_url.clone();
Callback::from(move |_: ()| {
let state = state.clone();
let client = WasmSupervisorClient::new(client_url.clone());
spawn_local(async move {
console::log!("Loading runners...");
let mut current_state = (*state).clone();
current_state.loading = true;
state.set(current_state.clone());
match client.list_runners().await {
Ok(runner_names) => {
console::log!("Successfully loaded runners:", format!("{:?}", runner_names));
// For now, assume all runners are "Running" - we'd need a separate status call
let runners_with_status: Vec<(String, String)> = runner_names
.into_iter()
.map(|name| (name, "Running".to_string()))
.collect();
let mut updated_state = (*state).clone();
updated_state.runners = runners_with_status;
updated_state.loading = false;
state.set(updated_state);
}
Err(e) => {
console::error!("Failed to load runners:", format!("{:?}", e));
let mut updated_state = (*state).clone();
updated_state.loading = false;
state.set(updated_state);
}
}
});
})
};
let on_register_form_change = {
let state = state.clone();
Callback::from(move |(field, value): (String, String)| {
let mut new_form = state.register_form.clone();
match field.as_str() {
"name" => new_form.name = value,
"secret" => new_form.secret = value,
_ => {}
}
let new_state = AppState {
server_url: state.server_url.clone(),
runners: state.runners.clone(),
jobs: state.jobs.clone(),
ongoing_jobs: state.ongoing_jobs.clone(),
loading: state.loading,
register_form: new_form,
job_form: state.job_form.clone(),
supervisor_info: state.supervisor_info.clone(),
admin_secret: state.admin_secret.clone(),
ping_states: state.ping_states.clone(),
};
state.set(new_state);
})
};
let on_register_runner = {
let state = state.clone();
Callback::from(move |_: ()| {
let current_state = (*state).clone();
// Add runner to UI immediately with "Registering" status
let new_runner = (
current_state.register_form.name.clone(),
"Registering".to_string(),
);
let mut updated_runners = current_state.runners.clone();
updated_runners.push(new_runner);
let mut temp_state = current_state.clone();
temp_state.runners = updated_runners;
// Clear form and update status to "Running"
temp_state.register_form = RegisterForm {
name: String::new(),
secret: String::new(),
};
// Update the newly added runner status to "Running"
if let Some(runner) = temp_state.runners.iter_mut()
.find(|(name, _)| name == &current_state.register_form.name) {
runner.1 = "Running".to_string();
}
state.set(temp_state);
})
};
// Admin secret change callback
let on_admin_secret_change = {
let state = state.clone();
Callback::from(move |admin_secret: String| {
let mut new_state = (*state).clone();
new_state.admin_secret = admin_secret;
state.set(new_state);
})
};
// Job form change callback
let on_job_form_change = {
let state = state.clone();
Callback::from(move |(field, value): (String, String)| {
let mut new_form = state.job_form.clone();
match field.as_str() {
"payload" => new_form.payload = value,
"runner_name" => new_form.runner_name = value,
"executor" => new_form.executor = value,
"secret" => new_form.secret = value,
_ => {}
}
let mut new_state = (*state).clone();
new_state.job_form = new_form;
state.set(new_state);
})
};
// Run job callback - now uses create_job for immediate display and polling
let on_run_job = {
let state = state.clone();
Callback::from(move |_| {
let current_state = (*state).clone();
let client = WasmSupervisorClient::new(current_state.server_url.clone());
let job_form = current_state.job_form.clone();
let state_clone = state.clone();
spawn_local(async move {
console::log!("Creating job...");
// Generate unique job ID client-side
let job_id = generate_job_id();
// Create WasmJob from form data with client-generated ID
let job = WasmJob::new(
job_id.clone(),
job_form.payload.clone(),
job_form.executor.clone(),
job_form.runner_name.clone(),
);
// Immediately add job to the list with "pending" status
let mut updated_state = (*state_clone).clone();
updated_state.jobs.push(job.clone());
updated_state.ongoing_jobs.push(job_id.clone());
// Clear the job form
updated_state.job_form = JobForm::default();
state_clone.set(updated_state);
console::log!("Job added to list immediately with ID:", &job_id);
// Create the job using fire-and-forget create_job method
match client.create_job(job_form.secret.clone(), job).await {
Ok(returned_job_id) => {
console::log!("Job created successfully with ID:", &returned_job_id);
}
Err(e) => {
console::error!("Failed to create job:", format!("{:?}", e));
// Remove job from ongoing jobs if creation failed
let mut error_state = (*state_clone).clone();
error_state.ongoing_jobs.retain(|id| id != &job_id);
state_clone.set(error_state);
}
}
});
})
};
// Supervisor info loaded callback
let on_supervisor_info_loaded = {
let state = state.clone();
Callback::from(move |supervisor_info: SupervisorInfo| {
let mut new_state = (*state).clone();
new_state.supervisor_info = Some(supervisor_info);
state.set(new_state);
})
};
// Remove runner callback
let on_remove_runner = {
let state = state.clone();
Callback::from(move |runner_id: String| {
let current_state = (*state).clone();
let client = WasmSupervisorClient::new(current_state.server_url.clone());
let state_clone = state.clone();
spawn_local(async move {
console::log!("Removing runner:", &runner_id);
match client.remove_runner(&runner_id).await {
Ok(_) => {
console::log!("Runner removed successfully");
// Remove runner from the list
let mut updated_state = (*state_clone).clone();
updated_state.runners.retain(|(name, _)| name != &runner_id);
state_clone.set(updated_state);
}
Err(e) => {
console::error!("Failed to remove runner:", format!("{:?}", e));
}
}
});
})
};
// Stop job callback
let on_stop_job = {
let state = state.clone();
Callback::from(move |job_id: String| {
let current_state = (*state).clone();
let client = WasmSupervisorClient::new(current_state.server_url.clone());
let state_clone = state.clone();
spawn_local(async move {
console::log!("Stopping job:", &job_id);
match client.stop_job(&job_id).await {
Ok(_) => {
console::log!("Job stopped successfully");
// Remove job from ongoing jobs list
let mut updated_state = (*state_clone).clone();
updated_state.ongoing_jobs.retain(|id| id != &job_id);
state_clone.set(updated_state);
}
Err(e) => {
console::error!("Failed to stop job:", format!("{:?}", e));
}
}
});
})
};
// Delete job callback
let on_delete_job = {
let state = state.clone();
Callback::from(move |job_id: String| {
let current_state = (*state).clone();
let client = WasmSupervisorClient::new(current_state.server_url.clone());
let state_clone = state.clone();
spawn_local(async move {
console::log!("Deleting job:", &job_id);
match client.delete_job(&job_id).await {
Ok(_) => {
console::log!("Job deleted successfully");
// Remove job from both jobs list and ongoing jobs list
let mut updated_state = (*state_clone).clone();
updated_state.jobs.retain(|job| job.id() != job_id);
updated_state.ongoing_jobs.retain(|id| id != &job_id);
state_clone.set(updated_state);
}
Err(e) => {
console::error!("Failed to delete job:", format!("{:?}", e));
}
}
});
})
};
// Ping runner callback - uses run_job for immediate result with proper state management
let on_ping_runner = {
let state = state.clone();
Callback::from(move |(runner_id, secret): (String, String)| {
let current_state = (*state).clone();
let client = WasmSupervisorClient::new(current_state.server_url.clone());
let state_clone = state.clone();
// Set ping state to waiting
{
let mut updated_state = (*state_clone).clone();
updated_state.ping_states.insert(runner_id.clone(), PingState::Waiting);
state_clone.set(updated_state);
}
spawn_local(async move {
console::log!("Pinging runner:", &runner_id);
// Generate unique job ID client-side
let job_id = generate_job_id();
// Create ping job with client-generated ID
let ping_job = WasmJob::new(
job_id.clone(),
"ping".to_string(),
"ping".to_string(),
runner_id.clone(),
);
// Use run_job for immediate result instead of create_job
match client.run_job(secret, ping_job).await {
Ok(result) => {
console::log!("Ping successful, result:", &result);
// Set ping state to success with result
let mut success_state = (*state_clone).clone();
success_state.ping_states.insert(runner_id.clone(), PingState::Success(result));
state_clone.set(success_state);
// Reset to idle after 3 seconds
let state_reset = state_clone.clone();
let runner_id_reset = runner_id.clone();
spawn_local(async move {
gloo::timers::future::TimeoutFuture::new(3000).await;
let mut reset_state = (*state_reset).clone();
reset_state.ping_states.insert(runner_id_reset, PingState::Idle);
state_reset.set(reset_state);
});
}
Err(e) => {
console::error!("Failed to ping runner:", format!("{:?}", e));
// Set ping state to error
let mut error_state = (*state_clone).clone();
let error_msg = format!("Error: {:?}", e);
error_state.ping_states.insert(runner_id.clone(), PingState::Error(error_msg));
state_clone.set(error_state);
// Reset to idle after 3 seconds
let state_reset = state_clone.clone();
let runner_id_reset = runner_id.clone();
spawn_local(async move {
gloo::timers::future::TimeoutFuture::new(3000).await;
let mut reset_state = (*state_reset).clone();
reset_state.ping_states.insert(runner_id_reset, PingState::Idle);
state_reset.set(reset_state);
});
}
}
});
})
};
// Load initial data
use_effect_with((), {
let on_load_runners = on_load_runners.clone();
move |_| {
on_load_runners.emit(());
|| ()
}
});
html! {
<div class="app-container">
<Sidebar
server_url={state.server_url.clone()}
supervisor_info={state.supervisor_info.clone()}
admin_secret={state.admin_secret.clone()}
on_admin_secret_change={on_admin_secret_change}
on_supervisor_info_loaded={on_supervisor_info_loaded}
/>
<div class="main-content">
<Runners
server_url={state.server_url.clone()}
runners={state.runners.clone()}
register_form={state.register_form.clone()}
ping_states={state.ping_states.clone()}
on_register_form_change={on_register_form_change}
on_register_runner={on_register_runner}
on_load_runners={on_load_runners.clone()}
on_remove_runner={on_remove_runner}
on_ping_runner={on_ping_runner}
/>
<Jobs
jobs={state.jobs.clone()}
server_url={state.server_url.clone()}
job_form={state.job_form.clone()}
runners={state.runners.clone()}
on_job_form_change={on_job_form_change}
on_run_job={on_run_job}
on_stop_job={on_stop_job}
on_delete_job={on_delete_job}
/>
// Floating refresh button
<button class="refresh-btn" onclick={on_load_runners.reform(|_| ())}>
{""}
</button>
</div>
</div>
}
}

File diff suppressed because it is too large.


@@ -0,0 +1,296 @@
use yew::prelude::*;
use yew_router::prelude::*;
use gloo::console;
use wasm_bindgen_futures::spawn_local;
use web_sys::HtmlInputElement;
use std::collections::HashMap;
use crate::app::Route;
use crate::types::{AddRunnerForm, RunnerType, ProcessManagerType};
use crate::services::{SupervisorService, use_supervisor_service};
#[function_component(AddRunner)]
pub fn add_runner() -> Html {
let navigator = use_navigator().unwrap();
let server_url = "http://localhost:8081";
let (service, service_error) = use_supervisor_service(server_url);
let form = use_state(|| AddRunnerForm::default());
let loading = use_state(|| false);
let error = use_state(|| None::<String>);
let success = use_state(|| false);
let on_actor_id_change = {
let form = form.clone();
Callback::from(move |e: Event| {
let input: HtmlInputElement = e.target_unchecked_into();
let mut new_form = (*form).clone();
new_form.actor_id = input.value();
form.set(new_form);
})
};
let on_runner_type_change = {
let form = form.clone();
Callback::from(move |e: Event| {
let select: web_sys::HtmlSelectElement = e.target_unchecked_into();
let mut new_form = (*form).clone();
new_form.runner_type = match select.value().as_str() {
"SALRunner" => RunnerType::SALRunner,
"OSISRunner" => RunnerType::OSISRunner,
"VRunner" => RunnerType::VRunner,
_ => RunnerType::SALRunner,
};
form.set(new_form);
})
};
let on_binary_path_change = {
let form = form.clone();
Callback::from(move |e: Event| {
let input: HtmlInputElement = e.target_unchecked_into();
let mut new_form = (*form).clone();
new_form.binary_path = input.value();
form.set(new_form);
})
};
let on_script_type_change = {
let form = form.clone();
Callback::from(move |e: Event| {
let input: HtmlInputElement = e.target_unchecked_into();
let mut new_form = (*form).clone();
new_form.script_type = input.value();
form.set(new_form);
})
};
let on_process_manager_change = {
let form = form.clone();
Callback::from(move |e: Event| {
let select: web_sys::HtmlSelectElement = e.target_unchecked_into();
let mut new_form = (*form).clone();
new_form.process_manager_type = match select.value().as_str() {
"Tmux" => ProcessManagerType::Tmux,
"Simple" => ProcessManagerType::Simple,
_ => ProcessManagerType::Simple,
};
form.set(new_form);
})
};
let on_submit = {
let form = form.clone();
let service = service.clone();
let loading = loading.clone();
let error = error.clone();
let success = success.clone();
let navigator = navigator.clone();
Callback::from(move |e: SubmitEvent| {
e.prevent_default();
if let Some(service) = &service {
let form = form.clone();
let service = service.clone();
let loading = loading.clone();
let error = error.clone();
let success = success.clone();
let navigator = navigator.clone();
loading.set(true);
error.set(None);
success.set(false);
spawn_local(async move {
let config = form.to_runner_config();
match service.add_runner(config, form.process_manager_type.clone()).await {
Ok(_) => {
console::log!("Runner added successfully");
success.set(true);
// Navigate back to runners list after a short delay
gloo::timers::callback::Timeout::new(1500, move || {
navigator.push(&Route::Runners);
}).forget();
}
Err(e) => {
console::error!("Failed to add runner:", e.to_string());
error.set(Some(e.to_string()));
}
}
loading.set(false);
});
}
})
};
let on_cancel = {
let navigator = navigator.clone();
Callback::from(move |_| navigator.push(&Route::Runners))
};
html! {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h1 class="h3 mb-0">
<i class="bi bi-plus-circle me-2"></i>
{"Add New Runner"}
</h1>
<button class="btn btn-outline-secondary" onclick={on_cancel.clone()}>
<i class="bi bi-arrow-left me-1"></i>
{"Back to Runners"}
</button>
</div>
</div>
</div>
// Error display
if let Some(err) = service_error {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-danger">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Service Error: "}{err}
</div>
</div>
</div>
}
if let Some(err) = error.as_ref() {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-danger">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Error: "}{err}
</div>
</div>
</div>
}
if *success {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-success">
<i class="bi bi-check-circle-fill me-2"></i>
{"Runner added successfully! Redirecting..."}
</div>
</div>
</div>
}
<div class="row">
<div class="col-lg-8 mx-auto">
<div class="card">
<div class="card-header">
<h5 class="mb-0">{"Runner Configuration"}</h5>
</div>
<div class="card-body">
<form onsubmit={on_submit}>
<div class="row mb-3">
<div class="col-md-6">
<label for="actor_id" class="form-label">{"Actor ID"}</label>
<input
type="text"
class="form-control"
id="actor_id"
value={form.actor_id.clone()}
onchange={on_actor_id_change}
required=true
placeholder="e.g., sal_runner_1"
/>
<div class="form-text">{"Unique identifier for this runner"}</div>
</div>
<div class="col-md-6">
<label for="runner_type" class="form-label">{"Runner Type"}</label>
<select
class="form-select"
id="runner_type"
onchange={on_runner_type_change}
>
<option value="SALRunner" selected={matches!(form.runner_type, RunnerType::SALRunner)}>
{"SAL Runner"}
</option>
<option value="OSISRunner" selected={matches!(form.runner_type, RunnerType::OSISRunner)}>
{"OSIS Runner"}
</option>
<option value="VRunner" selected={matches!(form.runner_type, RunnerType::VRunner)}>
{"V Runner"}
</option>
</select>
</div>
</div>
<div class="row mb-3">
<div class="col-md-6">
<label for="binary_path" class="form-label">{"Binary Path"}</label>
<input
type="text"
class="form-control"
id="binary_path"
value={form.binary_path.clone()}
onchange={on_binary_path_change}
required=true
placeholder="/path/to/runner/binary"
/>
<div class="form-text">{"Full path to the runner executable"}</div>
</div>
<div class="col-md-6">
<label for="script_type" class="form-label">{"Script Type"}</label>
<input
type="text"
class="form-control"
id="script_type"
value={form.script_type.clone()}
onchange={on_script_type_change}
required=true
placeholder="e.g., rhai, bash, python"
/>
<div class="form-text">{"Type of scripts this runner will execute"}</div>
</div>
</div>
<div class="row mb-3">
<div class="col-md-6">
<label for="process_manager" class="form-label">{"Process Manager"}</label>
<select
class="form-select"
id="process_manager"
onchange={on_process_manager_change}
>
<option value="Simple" selected={matches!(form.process_manager_type, ProcessManagerType::Simple)}>
{"Simple"}
</option>
<option value="Tmux" selected={matches!(form.process_manager_type, ProcessManagerType::Tmux)}>
{"Tmux"}
</option>
</select>
<div class="form-text">{"Process management system to use"}</div>
</div>
</div>
<div class="d-flex justify-content-end gap-2">
<button type="button" class="btn btn-outline-secondary" onclick={on_cancel.clone()}>
{"Cancel"}
</button>
<button type="submit" class="btn btn-primary" disabled={*loading}>
if *loading {
<div class="spinner-border spinner-border-sm me-2" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
{"Adding Runner..."}
} else {
<i class="bi bi-plus-circle me-1"></i>
{"Add Runner"}
}
</button>
</div>
</form>
</div>
</div>
</div>
</div>
</div>
}
}


@@ -0,0 +1,294 @@
use yew::prelude::*;
use gloo::console;
use wasm_bindgen_futures::spawn_local;
use crate::types::{RunnerInfo, ProcessStatus};
use crate::components::{status_badge::StatusBadge, runner_card::RunnerCard};
use crate::services::{SupervisorService, use_supervisor_service};
#[function_component(Dashboard)]
pub fn dashboard() -> Html {
let server_url = "http://localhost:8081"; // Default supervisor server URL
let (service, service_error) = use_supervisor_service(server_url);
let runners = use_state(|| Vec::<RunnerInfo>::new());
let loading = use_state(|| false);
let error = use_state(|| None::<String>);
// Load runners on component mount and when service is available
{
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
use_effect_with(service.clone(), move |service| {
if let Some(service) = service {
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
loading.set(true);
spawn_local(async move {
match service.get_all_runners().await {
Ok(runner_list) => {
runners.set(runner_list);
error.set(None);
}
Err(e) => {
console::error!("Failed to load runners:", e.to_string());
error.set(Some(e.to_string()));
}
}
loading.set(false);
});
}
});
}
let on_refresh = {
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
Callback::from(move |_: MouseEvent| {
if let Some(service) = &service {
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
loading.set(true);
spawn_local(async move {
match service.get_all_runners().await {
Ok(runner_list) => {
runners.set(runner_list);
error.set(None);
}
Err(e) => {
console::error!("Failed to refresh runners:", e.to_string());
error.set(Some(e.to_string()));
}
}
loading.set(false);
});
}
})
};
let on_start_all = {
let service = service.clone();
let on_refresh = on_refresh.clone();
let loading = loading.clone();
Callback::from(move |_: MouseEvent| {
if let Some(service) = &service {
let service = service.clone();
let on_refresh = on_refresh.clone();
let loading = loading.clone();
loading.set(true);
spawn_local(async move {
match service.start_all().await {
Ok(results) => {
console::log!("Start all results:", format!("{:?}", results));
on_refresh.emit(web_sys::MouseEvent::new("click").unwrap());
}
Err(e) => {
console::error!("Failed to start all runners:", e.to_string());
}
}
loading.set(false);
});
}
})
};
let on_stop_all = {
let service = service.clone();
let on_refresh = on_refresh.clone();
let loading = loading.clone();
Callback::from(move |_: MouseEvent| {
if let Some(service) = &service {
if gloo::dialogs::confirm("Are you sure you want to stop all runners?") {
let service = service.clone();
let on_refresh = on_refresh.clone();
let loading = loading.clone();
loading.set(true);
spawn_local(async move {
match service.stop_all(false).await {
Ok(results) => {
console::log!("Stop all results:", format!("{:?}", results));
on_refresh.emit(web_sys::MouseEvent::new("click").unwrap());
}
Err(e) => {
console::error!("Failed to stop all runners:", e.to_string());
}
}
loading.set(false);
});
}
}
})
};
// Create a proper on_update callback for RunnerCard
let on_runner_update = {
let on_refresh = on_refresh.clone();
Callback::from(move |_: ()| {
on_refresh.emit(web_sys::MouseEvent::new("click").unwrap());
})
};
// Calculate statistics
let total_runners = runners.len();
let running_count = runners.iter().filter(|r| r.status == ProcessStatus::Running).count();
let stopped_count = runners.iter().filter(|r| r.status == ProcessStatus::Stopped).count();
let failed_count = runners.iter().filter(|r| r.status == ProcessStatus::Failed).count();
html! {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h1 class="h3 mb-0">
<i class="bi bi-speedometer2 me-2"></i>
{"Dashboard"}
</h1>
<div class="btn-group">
<button class="btn btn-outline-primary" onclick={on_refresh} disabled={*loading}>
<i class="bi bi-arrow-clockwise me-1"></i>
{"Refresh"}
</button>
<button class="btn btn-outline-success" onclick={on_start_all} disabled={*loading || total_runners == 0}>
<i class="bi bi-play-fill me-1"></i>
{"Start All"}
</button>
<button class="btn btn-outline-warning" onclick={on_stop_all} disabled={*loading || total_runners == 0}>
<i class="bi bi-stop-fill me-1"></i>
{"Stop All"}
</button>
</div>
</div>
</div>
</div>
// Error display
if let Some(err) = service_error {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-danger">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Service Error: "}{err}
</div>
</div>
</div>
}
if let Some(err) = error.as_ref() {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-warning">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Error: "}{err}
</div>
</div>
</div>
}
// Statistics cards
<div class="row mb-4">
<div class="col-md-3 mb-3">
<div class="card text-center">
<div class="card-body">
<h2 class="card-title text-primary">{total_runners}</h2>
<p class="card-text">{"Total Runners"}</p>
</div>
</div>
</div>
<div class="col-md-3 mb-3">
<div class="card text-center">
<div class="card-body">
<h2 class="card-title text-success">{running_count}</h2>
<p class="card-text">{"Running"}</p>
</div>
</div>
</div>
<div class="col-md-3 mb-3">
<div class="card text-center">
<div class="card-body">
<h2 class="card-title text-warning">{stopped_count}</h2>
<p class="card-text">{"Stopped"}</p>
</div>
</div>
</div>
<div class="col-md-3 mb-3">
<div class="card text-center">
<div class="card-body">
<h2 class="card-title text-danger">{failed_count}</h2>
<p class="card-text">{"Failed"}</p>
</div>
</div>
</div>
</div>
// Loading state
if *loading {
<div class="row">
<div class="col-12 text-center">
<div class="spinner-border text-primary" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
<p class="mt-2">{"Loading runners..."}</p>
</div>
</div>
}
// Runners grid
if !*loading && total_runners > 0 {
<div class="row mb-4">
<div class="col-12">
<h4>{"Active Runners"}</h4>
</div>
</div>
<div class="row">
{for runners.iter().map(|runner| {
if let Some(service) = &service {
html! {
<RunnerCard
runner={runner.clone()}
service={service.clone()}
on_update={on_runner_update.clone()}
/>
}
} else {
html! {}
}
})}
</div>
}
// Empty state
if !*loading && total_runners == 0 && service.is_some() {
<div class="row">
<div class="col-12 text-center">
<div class="card">
<div class="card-body py-5">
<i class="bi bi-cpu display-1 text-muted mb-3"></i>
<h4 class="text-muted">{"No Runners Found"}</h4>
<p class="text-muted">{"Get started by adding your first runner."}</p>
<a href="/runners/add" class="btn btn-primary">
<i class="bi bi-plus-circle me-1"></i>
{"Add Runner"}
</a>
</div>
</div>
</div>
</div>
}
</div>
}
}


@@ -0,0 +1,7 @@
pub mod navbar;
pub mod dashboard;
pub mod runners;
pub mod runner_detail;
pub mod add_runner;
pub mod runner_card;
pub mod status_badge;


@@ -0,0 +1,67 @@
use yew::prelude::*;
use yew_router::prelude::*;
use crate::app::Route;
#[function_component(Navbar)]
pub fn navbar() -> Html {
let navigator = use_navigator().unwrap();
let on_dashboard_click = {
let navigator = navigator.clone();
Callback::from(move |_| navigator.push(&Route::Dashboard))
};
let on_runners_click = {
let navigator = navigator.clone();
Callback::from(move |_| navigator.push(&Route::Runners))
};
let on_add_runner_click = {
let navigator = navigator.clone();
Callback::from(move |_| navigator.push(&Route::AddRunner))
};
html! {
<nav class="navbar navbar-expand-lg navbar-dark bg-dark">
<div class="container-fluid">
<a class="navbar-brand" href="#">
<i class="bi bi-gear-fill me-2"></i>
{"Hero Supervisor Admin"}
</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse"
data-bs-target="#navbarNav" aria-controls="navbarNav"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav me-auto">
<li class="nav-item">
<button class="nav-link btn btn-link" onclick={on_dashboard_click}>
<i class="bi bi-speedometer2 me-1"></i>
{"Dashboard"}
</button>
</li>
<li class="nav-item">
<button class="nav-link btn btn-link" onclick={on_runners_click}>
<i class="bi bi-cpu me-1"></i>
{"Runners"}
</button>
</li>
<li class="nav-item">
<button class="nav-link btn btn-link" onclick={on_add_runner_click}>
<i class="bi bi-plus-circle me-1"></i>
{"Add Runner"}
</button>
</li>
</ul>
<div class="navbar-text">
<small class="text-muted">{"Connected to Supervisor"}</small>
</div>
</div>
</div>
</nav>
}
}


@@ -0,0 +1,191 @@
use yew::prelude::*;
use yew_router::prelude::*;
use gloo::console;
use wasm_bindgen_futures::spawn_local;
use crate::app::Route;
use crate::types::{RunnerInfo, ProcessStatus};
use crate::components::status_badge::StatusBadge;
use crate::services::SupervisorService;
#[derive(Properties, PartialEq)]
pub struct RunnerCardProps {
pub runner: RunnerInfo,
pub service: SupervisorService,
pub on_update: Callback<()>,
}
#[function_component(RunnerCard)]
pub fn runner_card(props: &RunnerCardProps) -> Html {
let navigator = use_navigator().unwrap();
let loading = use_state(|| false);
let runner_id = props.runner.id.clone();
let on_view_details = {
let navigator = navigator.clone();
let runner_id = runner_id.clone();
Callback::from(move |_| {
navigator.push(&Route::RunnerDetail { id: runner_id.clone() });
})
};
let on_start = {
let service = props.service.clone();
let runner_id = runner_id.clone();
let loading = loading.clone();
let on_update = props.on_update.clone();
Callback::from(move |_| {
let service = service.clone();
let runner_id = runner_id.clone();
let loading = loading.clone();
let on_update = on_update.clone();
loading.set(true);
spawn_local(async move {
match service.start_runner(&runner_id).await {
Ok(_) => {
console::log!("Runner started successfully");
on_update.emit(());
}
Err(e) => {
console::error!("Failed to start runner:", e.to_string());
}
}
loading.set(false);
});
})
};
let on_stop = {
let service = props.service.clone();
let runner_id = runner_id.clone();
let loading = loading.clone();
let on_update = props.on_update.clone();
Callback::from(move |_| {
let service = service.clone();
let runner_id = runner_id.clone();
let loading = loading.clone();
let on_update = on_update.clone();
loading.set(true);
spawn_local(async move {
match service.stop_runner(&runner_id, false).await {
Ok(_) => {
console::log!("Runner stopped successfully");
on_update.emit(());
}
Err(e) => {
console::error!("Failed to stop runner:", e.to_string());
}
}
loading.set(false);
});
})
};
let on_remove = {
let service = props.service.clone();
let runner_id = runner_id.clone();
let loading = loading.clone();
let on_update = props.on_update.clone();
Callback::from(move |_| {
if gloo::dialogs::confirm("Are you sure you want to remove this runner?") {
let service = service.clone();
let runner_id = runner_id.clone();
let loading = loading.clone();
let on_update = on_update.clone();
loading.set(true);
spawn_local(async move {
match service.remove_runner(&runner_id).await {
Ok(_) => {
console::log!("Runner removed successfully");
on_update.emit(());
}
Err(e) => {
console::error!("Failed to remove runner:", e.to_string());
}
}
loading.set(false);
});
}
})
};
let is_loading = *loading;
let can_start = matches!(props.runner.status, ProcessStatus::Stopped | ProcessStatus::Failed);
let can_stop = matches!(props.runner.status, ProcessStatus::Running);
html! {
<div class="col-md-6 col-lg-4 mb-4">
<div class="card h-100">
<div class="card-header d-flex justify-content-between align-items-center">
<h6 class="mb-0">
<i class="bi bi-cpu me-2"></i>
{&props.runner.id}
</h6>
<StatusBadge status={props.runner.status.clone()} />
</div>
<div class="card-body">
<div class="mb-2">
<small class="text-muted">{"Type: "}</small>
<span class="badge bg-info">
{format!("{:?}", props.runner.config.runner_type)}
</span>
</div>
<div class="mb-2">
<small class="text-muted">{"Script: "}</small>
<code class="small">{&props.runner.config.script_type}</code>
</div>
<div class="mb-3">
<small class="text-muted">{"Binary: "}</small>
<code class="small">{props.runner.config.binary_path.to_string_lossy()}</code>
</div>
if !props.runner.logs.is_empty() {
<div class="mb-3">
<small class="text-muted">{"Recent logs: "}</small>
<div class="log-container p-2 rounded small">
{for props.runner.logs.iter().take(3).map(|log| html! {
<div>{&log.message}</div>
})}
</div>
</div>
}
</div>
<div class="card-footer">
<div class="btn-group w-100" role="group">
if can_start && !is_loading {
<button class="btn btn-outline-success btn-sm" onclick={on_start}>
<i class="bi bi-play-fill me-1"></i>
{"Start"}
</button>
}
if can_stop && !is_loading {
<button class="btn btn-outline-warning btn-sm" onclick={on_stop}>
<i class="bi bi-stop-fill me-1"></i>
{"Stop"}
</button>
}
if is_loading {
<button class="btn btn-outline-secondary btn-sm" disabled=true>
<div class="spinner-border spinner-border-sm me-1" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
{"Working..."}
</button>
}
<button class="btn btn-outline-primary btn-sm" onclick={on_view_details}>
<i class="bi bi-eye me-1"></i>
{"Details"}
</button>
<button class="btn btn-outline-danger btn-sm" onclick={on_remove} disabled={is_loading}>
<i class="bi bi-trash me-1"></i>
{"Remove"}
</button>
</div>
</div>
</div>
</div>
}
}


@@ -0,0 +1,437 @@
use yew::prelude::*;
use yew_router::prelude::*;
use gloo::console;
use wasm_bindgen_futures::spawn_local;
use web_sys::HtmlTextAreaElement;
use crate::app::Route;
use crate::types::{RunnerInfo, ProcessStatus, JobBuilder, JobType};
use crate::components::status_badge::StatusBadge;
use crate::services::{SupervisorService, use_supervisor_service};
#[derive(Properties, PartialEq)]
pub struct RunnerDetailProps {
pub runner_id: String,
}
#[function_component(RunnerDetail)]
pub fn runner_detail(props: &RunnerDetailProps) -> Html {
let navigator = use_navigator().unwrap();
let server_url = "http://localhost:8081";
let (service, service_error) = use_supervisor_service(server_url);
let runner = use_state(|| None::<RunnerInfo>);
let loading = use_state(|| false);
let error = use_state(|| None::<String>);
let logs_loading = use_state(|| false);
let job_script = use_state(|| String::new());
let job_loading = use_state(|| false);
let job_result = use_state(|| None::<String>);
// Load runner details
{
let runner = runner.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
let runner_id = props.runner_id.clone();
use_effect_with((service.clone(), runner_id.clone()), move |(service, runner_id)| {
if let Some(service) = service {
let runner = runner.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
let runner_id = runner_id.clone();
loading.set(true);
spawn_local(async move {
match service.get_all_runners().await {
Ok(runners) => {
if let Some(found_runner) = runners.into_iter().find(|r| r.id == runner_id) {
runner.set(Some(found_runner));
error.set(None);
} else {
error.set(Some("Runner not found".to_string()));
}
}
Err(e) => {
console::error!("Failed to load runner:", e.to_string());
error.set(Some(e.to_string()));
}
}
loading.set(false);
});
}
});
}
let on_back = {
let navigator = navigator.clone();
Callback::from(move |_| navigator.push(&Route::Runners))
};
let on_start = {
let service = service.clone();
let runner_id = props.runner_id.clone();
let runner = runner.clone();
let loading = loading.clone();
Callback::from(move |_| {
if let Some(service) = &service {
let service = service.clone();
let runner_id = runner_id.clone();
let runner = runner.clone();
let loading = loading.clone();
loading.set(true);
spawn_local(async move {
match service.start_runner(&runner_id).await {
Ok(_) => {
console::log!("Runner started successfully");
// Refresh runner status
if let Ok(status) = service.get_runner_status(&runner_id).await {
if let Some(mut current_runner) = (*runner).clone() {
current_runner.status = status;
runner.set(Some(current_runner));
}
}
}
Err(e) => {
console::error!("Failed to start runner:", e.to_string());
}
}
loading.set(false);
});
}
})
};
let on_stop = {
let service = service.clone();
let runner_id = props.runner_id.clone();
let runner = runner.clone();
let loading = loading.clone();
Callback::from(move |_| {
if let Some(service) = &service {
let service = service.clone();
let runner_id = runner_id.clone();
let runner = runner.clone();
let loading = loading.clone();
loading.set(true);
spawn_local(async move {
match service.stop_runner(&runner_id, false).await {
Ok(_) => {
console::log!("Runner stopped successfully");
// Refresh runner status
if let Ok(status) = service.get_runner_status(&runner_id).await {
if let Some(mut current_runner) = (*runner).clone() {
current_runner.status = status;
runner.set(Some(current_runner));
}
}
}
Err(e) => {
console::error!("Failed to stop runner:", e.to_string());
}
}
loading.set(false);
});
}
})
};
let on_refresh_logs = {
let service = service.clone();
let runner_id = props.runner_id.clone();
let runner = runner.clone();
let logs_loading = logs_loading.clone();
Callback::from(move |_| {
if let Some(service) = &service {
let service = service.clone();
let runner_id = runner_id.clone();
let runner = runner.clone();
let logs_loading = logs_loading.clone();
logs_loading.set(true);
spawn_local(async move {
match service.get_runner_logs(&runner_id, Some(100), false).await {
Ok(logs) => {
if let Some(mut current_runner) = (*runner).clone() {
current_runner.logs = logs;
runner.set(Some(current_runner));
}
}
Err(e) => {
console::error!("Failed to refresh logs:", e.to_string());
}
}
logs_loading.set(false);
});
}
})
};
let on_script_change = {
let job_script = job_script.clone();
Callback::from(move |e: Event| {
let textarea: HtmlTextAreaElement = e.target_unchecked_into();
job_script.set(textarea.value());
})
};
let on_run_job = {
let service = service.clone();
let runner_id = props.runner_id.clone();
let job_script = job_script.clone();
let job_loading = job_loading.clone();
let job_result = job_result.clone();
Callback::from(move |_| {
if let Some(service) = &service {
let script = (*job_script).clone();
if !script.trim().is_empty() {
let service = service.clone();
let runner_id = runner_id.clone();
let job_loading = job_loading.clone();
let job_result = job_result.clone();
job_loading.set(true);
job_result.set(None);
spawn_local(async move {
let job = JobBuilder::new()
.caller_id("admin-ui")
.context_id("test-job")
.payload(script)
.job_type(JobType::SAL)
.runner_name(&runner_id)
.build();
match job {
Ok(job) => {
match service.queue_and_wait(&runner_id, job, 30).await {
Ok(result) => {
job_result.set(result);
}
Err(e) => {
job_result.set(Some(format!("Error: {}", e)));
}
}
}
Err(e) => {
job_result.set(Some(format!("Job creation error: {}", e)));
}
}
job_loading.set(false);
});
}
}
})
};
html! {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h1 class="h3 mb-0">
<i class="bi bi-cpu me-2"></i>
{"Runner Details: "}{&props.runner_id}
</h1>
<button class="btn btn-outline-secondary" onclick={on_back}>
<i class="bi bi-arrow-left me-1"></i>
{"Back to Runners"}
</button>
</div>
</div>
</div>
// Error display
if let Some(err) = service_error {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-danger">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Service Error: "}{err}
</div>
</div>
</div>
}
if let Some(err) = error.as_ref() {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-warning">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Error: "}{err}
</div>
</div>
</div>
}
// Loading state
if *loading {
<div class="row">
<div class="col-12 text-center">
<div class="spinner-border text-primary" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
<p class="mt-2">{"Loading runner details..."}</p>
</div>
</div>
}
// Runner details
if let Some(runner_info) = runner.as_ref() {
<div class="row">
// Left column - Runner info and controls
<div class="col-lg-6 mb-4">
<div class="card h-100">
<div class="card-header d-flex justify-content-between align-items-center">
<h5 class="mb-0">{"Runner Information"}</h5>
<StatusBadge status={runner_info.status.clone()} />
</div>
<div class="card-body">
<div class="row mb-3">
<div class="col-sm-4"><strong>{"ID:"}</strong></div>
<div class="col-sm-8"><code>{&runner_info.id}</code></div>
</div>
<div class="row mb-3">
<div class="col-sm-4"><strong>{"Type:"}</strong></div>
<div class="col-sm-8">
<span class="badge bg-info">
{format!("{:?}", runner_info.config.runner_type)}
</span>
</div>
</div>
<div class="row mb-3">
<div class="col-sm-4"><strong>{"Script Type:"}</strong></div>
<div class="col-sm-8"><code>{&runner_info.config.script_type}</code></div>
</div>
<div class="row mb-3">
<div class="col-sm-4"><strong>{"Binary Path:"}</strong></div>
<div class="col-sm-8"><code class="small">{runner_info.config.binary_path.to_string_lossy()}</code></div>
</div>
<div class="row mb-3">
<div class="col-sm-4"><strong>{"Restart Policy:"}</strong></div>
<div class="col-sm-8">{&runner_info.config.restart_policy}</div>
</div>
</div>
<div class="card-footer">
<div class="btn-group w-100">
if matches!(runner_info.status, ProcessStatus::Stopped | ProcessStatus::Failed) && !*loading {
<button class="btn btn-outline-success" onclick={on_start}>
<i class="bi bi-play-fill me-1"></i>
{"Start"}
</button>
}
if matches!(runner_info.status, ProcessStatus::Running) && !*loading {
<button class="btn btn-outline-warning" onclick={on_stop}>
<i class="bi bi-stop-fill me-1"></i>
{"Stop"}
</button>
}
if *loading {
<button class="btn btn-outline-secondary" disabled=true>
<div class="spinner-border spinner-border-sm me-1" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
{"Working..."}
</button>
}
</div>
</div>
</div>
</div>
// Right column - Job execution
<div class="col-lg-6 mb-4">
<div class="card h-100">
<div class="card-header">
<h5 class="mb-0">{"Test Job Execution"}</h5>
</div>
<div class="card-body">
<div class="mb-3">
<label for="job_script" class="form-label">{"Script Content"}</label>
<textarea
class="form-control"
id="job_script"
rows="6"
value={(*job_script).clone()}
onchange={on_script_change}
placeholder="Enter script content to execute..."
></textarea>
</div>
<button
class="btn btn-primary w-100 mb-3"
onclick={on_run_job}
disabled={*job_loading || job_script.trim().is_empty()}
>
if *job_loading {
<div class="spinner-border spinner-border-sm me-2" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
{"Running Job..."}
} else {
<i class="bi bi-play-circle me-1"></i>
{"Run Job"}
}
</button>
if let Some(result) = job_result.as_ref() {
<div class="mb-3">
<label class="form-label">{"Job Result"}</label>
<div class="log-container p-3 rounded">
<pre class="mb-0">{result}</pre>
</div>
</div>
}
</div>
</div>
</div>
</div>
// Logs section
<div class="row">
<div class="col-12">
<div class="card">
<div class="card-header d-flex justify-content-between align-items-center">
<h5 class="mb-0">{"Logs"}</h5>
<button class="btn btn-outline-primary btn-sm" onclick={on_refresh_logs} disabled={*logs_loading}>
if *logs_loading {
<div class="spinner-border spinner-border-sm me-1" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
{"Refreshing..."}
} else {
<i class="bi bi-arrow-clockwise me-1"></i>
{"Refresh Logs"}
}
</button>
</div>
<div class="card-body p-0">
if runner_info.logs.is_empty() {
<div class="p-4 text-center text-muted">
{"No logs available"}
</div>
} else {
<div class="log-container p-3" style="max-height: 400px; overflow-y: auto;">
{for runner_info.logs.iter().map(|log| html! {
<div class="mb-1">
<small class="text-muted me-2">{&log.timestamp}</small>
{&log.message}
</div>
})}
</div>
}
</div>
</div>
</div>
</div>
}
</div>
}
}

View File

@@ -0,0 +1,278 @@
use yew::prelude::*;
use yew_router::prelude::*;
use gloo::console;
use wasm_bindgen_futures::spawn_local;
use crate::app::Route;
use crate::types::{RunnerInfo, ProcessStatus};
use crate::components::{status_badge::StatusBadge, runner_card::RunnerCard};
use crate::services::{SupervisorService, use_supervisor_service};
#[function_component(RunnersList)]
pub fn runners_list() -> Html {
let navigator = use_navigator().unwrap();
let server_url = "http://localhost:8081"; // Default supervisor server URL
let (service, service_error) = use_supervisor_service(server_url);
let runners = use_state(|| Vec::<RunnerInfo>::new());
let loading = use_state(|| false);
let error = use_state(|| None::<String>);
let view_mode = use_state(|| "grid"); // "grid" or "table"
// Load runners on component mount and when service is available
{
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
use_effect_with(service.clone(), move |service| {
if let Some(service) = service {
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
loading.set(true);
spawn_local(async move {
match service.get_all_runners().await {
Ok(runner_list) => {
runners.set(runner_list);
error.set(None);
}
Err(e) => {
console::error!("Failed to load runners:", e.to_string());
error.set(Some(e.to_string()));
}
}
loading.set(false);
});
}
});
}
let on_refresh = {
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
Callback::from(move |_: MouseEvent| {
if let Some(service) = &service {
let runners = runners.clone();
let loading = loading.clone();
let error = error.clone();
let service = service.clone();
loading.set(true);
spawn_local(async move {
match service.get_all_runners().await {
Ok(runner_list) => {
runners.set(runner_list);
error.set(None);
}
Err(e) => {
console::error!("Failed to refresh runners:", e.to_string());
error.set(Some(e.to_string()));
}
}
loading.set(false);
});
}
})
};
let on_add_runner = {
let navigator = navigator.clone();
Callback::from(move |_: MouseEvent| navigator.push(&Route::AddRunner))
};
let on_toggle_view = {
let view_mode = view_mode.clone();
Callback::from(move |_: MouseEvent| {
let current: &str = view_mode.as_ref();
view_mode.set(if current == "grid" { "table" } else { "grid" });
})
};
// Create a separate callback for runner updates that matches the expected signature
let on_runner_update = {
let on_refresh = on_refresh.clone();
Callback::from(move |_: ()| {
on_refresh.emit(web_sys::MouseEvent::new("click").unwrap());
})
};
html! {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h1 class="h3 mb-0">
<i class="bi bi-cpu me-2"></i>
{"Runners"}
</h1>
<div class="btn-group">
<button class="btn btn-outline-secondary" onclick={on_toggle_view}>
if *view_mode == "grid" {
<i class="bi bi-table me-1"></i>
{"Table View"}
} else {
<i class="bi bi-grid-3x3-gap me-1"></i>
{"Grid View"}
}
</button>
<button class="btn btn-outline-primary" onclick={on_refresh} disabled={*loading}>
<i class="bi bi-arrow-clockwise me-1"></i>
{"Refresh"}
</button>
<button class="btn btn-primary" onclick={on_add_runner.clone()}>
<i class="bi bi-plus-circle me-1"></i>
{"Add Runner"}
</button>
</div>
</div>
</div>
</div>
// Error display
if let Some(err) = service_error {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-danger">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Service Error: "}{err}
</div>
</div>
</div>
}
if let Some(err) = error.as_ref() {
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-warning">
<i class="bi bi-exclamation-triangle-fill me-2"></i>
{"Error: "}{err}
</div>
</div>
</div>
}
// Loading state
if *loading {
<div class="row">
<div class="col-12 text-center">
<div class="spinner-border text-primary" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
<p class="mt-2">{"Loading runners..."}</p>
</div>
</div>
}
// Content based on view mode
if !*loading && !runners.is_empty() {
if *view_mode == "grid" {
// Grid view
<div class="row">
{for runners.iter().map(|runner| {
if let Some(service) = &service {
html! {
<RunnerCard
runner={runner.clone()}
service={service.clone()}
on_update={on_runner_update.clone()}
/>
}
} else {
html! {}
}
})}
</div>
} else {
// Table view
<div class="row">
<div class="col-12">
<div class="card">
<div class="card-body">
<div class="table-responsive">
<table class="table table-dark table-hover">
<thead>
<tr>
<th>{"ID"}</th>
<th>{"Type"}</th>
<th>{"Status"}</th>
<th>{"Script Type"}</th>
<th>{"Binary Path"}</th>
<th>{"Actions"}</th>
</tr>
</thead>
<tbody>
{for runners.iter().map(|runner| {
let runner_id = runner.id.clone();
let on_view_details = {
let navigator = navigator.clone();
let runner_id = runner_id.clone();
Callback::from(move |_| {
navigator.push(&Route::RunnerDetail { id: runner_id.clone() });
})
};
html! {
<tr>
<td>
<code>{&runner.id}</code>
</td>
<td>
<span class="badge bg-info">
{format!("{:?}", runner.config.runner_type)}
</span>
</td>
<td>
<StatusBadge status={runner.status.clone()} />
</td>
<td>
<code class="small">{&runner.config.script_type}</code>
</td>
<td>
<code class="small">{runner.config.binary_path.to_string_lossy()}</code>
</td>
<td>
<button class="btn btn-outline-primary btn-sm" onclick={on_view_details}>
<i class="bi bi-eye me-1"></i>
{"Details"}
</button>
</td>
</tr>
}
})}
</tbody>
</table>
</div>
</div>
</div>
</div>
</div>
}
}
// Empty state
if !*loading && runners.is_empty() && service.is_some() {
<div class="row">
<div class="col-12 text-center">
<div class="card">
<div class="card-body py-5">
<i class="bi bi-cpu display-1 text-muted mb-3"></i>
<h4 class="text-muted">{"No Runners Found"}</h4>
<p class="text-muted">{"Get started by adding your first runner."}</p>
<button class="btn btn-primary" onclick={on_add_runner.clone()}>
<i class="bi bi-plus-circle me-1"></i>
{"Add Runner"}
</button>
</div>
</div>
</div>
</div>
}
</div>
}
}

View File

@@ -0,0 +1,30 @@
use yew::prelude::*;
use crate::types::ProcessStatus;
#[derive(Properties, PartialEq)]
pub struct StatusBadgeProps {
pub status: ProcessStatus,
#[prop_or_default]
pub size: Option<String>,
}
#[function_component(StatusBadge)]
pub fn status_badge(props: &StatusBadgeProps) -> Html {
let (badge_class, icon, text) = match props.status {
ProcessStatus::Running => ("badge bg-success", "bi-play-circle-fill", "Running"),
ProcessStatus::Stopped => ("badge bg-danger", "bi-stop-circle-fill", "Stopped"),
ProcessStatus::Starting => ("badge bg-warning", "bi-hourglass-split", "Starting"),
ProcessStatus::Stopping => ("badge bg-warning", "bi-hourglass-split", "Stopping"),
ProcessStatus::Failed => ("badge bg-danger", "bi-exclamation-triangle-fill", "Failed"),
ProcessStatus::Unknown => ("badge bg-secondary", "bi-question-circle-fill", "Unknown"),
};
let size_class = props.size.as_deref().unwrap_or("");
html! {
<span class={format!("{} {}", badge_class, size_class)}>
<i class={format!("{} me-1", icon)}></i>
{text}
</span>
}
}

View File

@@ -0,0 +1,185 @@
use yew::prelude::*;
use hero_supervisor_openrpc_client::wasm::WasmJob;
use crate::app::JobForm;
use web_sys::{Event, HtmlInputElement, MouseEvent};
#[derive(Properties)]
pub struct JobsProps {
pub jobs: Vec<WasmJob>,
pub server_url: String,
pub job_form: JobForm,
pub runners: Vec<(String, String)>, // (name, status) - list of registered runners
pub on_job_form_change: Callback<(String, String)>,
pub on_run_job: Callback<()>,
pub on_stop_job: Callback<String>,
pub on_delete_job: Callback<String>,
}
impl PartialEq for JobsProps {
fn eq(&self, other: &Self) -> bool {
// WasmJob doesn't implement PartialEq, so we fall back to comparing job counts.
// This heuristic re-renders when the number of jobs changes, but it will miss
// updates where the list is replaced with the same number of jobs.
self.jobs.len() == other.jobs.len() &&
self.server_url == other.server_url &&
self.job_form.payload == other.job_form.payload &&
self.job_form.runner_name == other.job_form.runner_name &&
self.job_form.executor == other.job_form.executor &&
self.job_form.secret == other.job_form.secret &&
self.runners.len() == other.runners.len()
// Note: Callbacks don't implement PartialEq, so we skip them
}
}
#[function_component(Jobs)]
pub fn jobs(props: &JobsProps) -> Html {
let on_payload_change = {
let on_change = props.on_job_form_change.clone();
Callback::from(move |e: Event| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
on_change.emit(("payload".to_string(), input.value()));
})
};
let on_runner_name_change = {
let on_change = props.on_job_form_change.clone();
Callback::from(move |e: Event| {
let input: HtmlInputElement = e.target_unchecked_into();
on_change.emit(("runner_name".to_string(), input.value()));
})
};
let on_executor_change = {
let on_change = props.on_job_form_change.clone();
Callback::from(move |e: Event| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
on_change.emit(("executor".to_string(), input.value()));
})
};
let on_secret_change = {
let on_change = props.on_job_form_change.clone();
Callback::from(move |e: Event| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
on_change.emit(("secret".to_string(), input.value()));
})
};
let on_run_click = {
let on_run = props.on_run_job.clone();
Callback::from(move |_: MouseEvent| {
on_run.emit(());
})
};
html! {
<div class="jobs-section">
<h2>{"Jobs"}</h2>
<div class="table-container">
<table class="table">
<thead>
<tr>
<th>{"Job ID"}</th>
<th>{"Payload"}</th>
<th>{"Runner"}</th>
<th>{"Executor"}</th>
<th>{"Status"}</th>
</tr>
</thead>
<tbody>
// Job creation form as first row
<tr class="job-form-row">
<td>
<span class="text-muted">{"New Job"}</span>
</td>
<td>
<input
type="text"
class="form-control table-input"
placeholder="Script content"
value={props.job_form.payload.clone()}
onchange={on_payload_change}
/>
</td>
<td>
<select
class="form-control table-input"
value={props.job_form.runner_name.clone()}
onchange={on_runner_name_change}
>
<option value="" disabled=true>{"-Select Runner-"}</option>
{ for props.runners.iter().map(|(name, _status)| {
html! {
<option value={name.clone()} selected={name == &props.job_form.runner_name}>
{name}
</option>
}
})}
</select>
</td>
<td>
<input
type="text"
class="form-control table-input"
placeholder="Executor"
value={props.job_form.executor.clone()}
onchange={on_executor_change}
/>
</td>
<td class="action-cell">
<input
type="password"
class="form-control table-input secret-input"
placeholder="Secret"
value={props.job_form.secret.clone()}
onchange={on_secret_change}
/>
<button
class="btn btn-primary btn-sm"
onclick={on_run_click}
>
{"Run"}
</button>
</td>
</tr>
// Existing jobs
{for props.jobs.iter().map(|job| {
let job_id = job.id();
let on_stop = props.on_stop_job.clone();
let on_delete = props.on_delete_job.clone();
let job_id_stop = job_id.clone();
let job_id_delete = job_id.clone();
html! {
<tr>
<td><small class="text-muted">{job_id}</small></td>
<td><code class="code">{job.payload()}</code></td>
<td>{job.runner_name()}</td>
<td>{job.executor()}</td>
<td class="action-cell">
<span class="status-badge">{"Queued"}</span>
<button
class="btn-icon btn-stop"
title="Stop Job"
onclick={Callback::from(move |_| on_stop.emit(job_id_stop.clone()))}
>
{""}
</button>
<button
class="btn-icon btn-delete"
title="Delete Job"
onclick={Callback::from(move |_| on_delete.emit(job_id_delete.clone()))}
>
{"🗑"}
</button>
</td>
</tr>
}
})}
</tbody>
</table>
</div>
</div>
}
}

View File

@@ -0,0 +1,12 @@
use wasm_bindgen::prelude::*;
mod app;
mod sidebar;
mod runners;
mod jobs;
#[wasm_bindgen(start)]
pub fn main() {
wasm_logger::init(wasm_logger::Config::default());
yew::Renderer::<app::App>::new().render();
}

View File

@@ -0,0 +1,219 @@
use yew::prelude::*;
use wasm_bindgen_futures::spawn_local;
use gloo::console;
use hero_supervisor_openrpc_client::wasm::WasmSupervisorClient;
use wasm_bindgen::JsCast;
use crate::app::PingState;
use std::collections::HashMap;
#[derive(Clone, PartialEq)]
pub struct RegisterForm {
pub name: String,
pub secret: String,
}
#[derive(Properties, PartialEq)]
pub struct RunnersProps {
pub server_url: String,
pub runners: Vec<(String, String)>, // (name, status)
pub register_form: RegisterForm,
pub ping_states: HashMap<String, PingState>, // runner_name -> ping_state
pub on_register_form_change: Callback<(String, String)>,
pub on_register_runner: Callback<()>,
pub on_load_runners: Callback<()>,
pub on_remove_runner: Callback<String>,
pub on_ping_runner: Callback<(String, String)>, // (runner_name, secret)
}
#[function_component(Runners)]
pub fn runners(props: &RunnersProps) -> Html {
let on_register_runner = {
let server_url = props.server_url.clone();
let register_form = props.register_form.clone();
let on_register_runner = props.on_register_runner.clone();
Callback::from(move |_: ()| {
let server_url = server_url.clone();
let register_form = register_form.clone();
let on_register_runner = on_register_runner.clone();
let client = WasmSupervisorClient::new(server_url);
spawn_local(async move {
console::log!("Registering runner...");
// Validate form data
if register_form.name.is_empty() {
console::error!("Runner name is required");
return;
}
if register_form.secret.is_empty() {
console::error!("Secret is required");
return;
}
// Make actual registration call (use name as queue)
match client.register_runner(
&register_form.secret,
&register_form.name,
&register_form.name, // queue = name
).await {
Ok(runner_name) => {
console::log!("Runner registered successfully:", runner_name);
on_register_runner.emit(());
}
Err(e) => {
console::error!("Failed to register runner:", format!("{:?}", e));
}
}
});
})
};
html! {
<div class="runners-grid">
// Registration card (first card)
<div class="card register-card">
<div class="card-title">{"+ Register Runner"}</div>
<form onsubmit={on_register_runner.reform(|e: web_sys::SubmitEvent| {
e.prevent_default();
()
})}>
<div class="form-group">
<input
type="text"
class="form-control"
placeholder="Runner name"
value={props.register_form.name.clone()}
onchange={props.on_register_form_change.reform(|e: web_sys::Event| {
let input: web_sys::HtmlInputElement = e.target().unwrap().dyn_into().unwrap();
("name".to_string(), input.value())
})}
/>
</div>
<div class="form-group form-row">
<input
type="password"
class="form-control form-control-inline"
placeholder="Secret"
value={props.register_form.secret.clone()}
onchange={props.on_register_form_change.reform(|e: web_sys::Event| {
let input: web_sys::HtmlInputElement = e.target().unwrap().dyn_into().unwrap();
("secret".to_string(), input.value())
})}
/>
<button type="submit" class="btn btn-primary">
{"Register"}
</button>
</div>
</form>
</div>
// Existing runner cards
{for props.runners.iter().map(|(name, status)| {
let status_class = match status.as_str() {
"Running" => "status-running",
"Stopped" => "status-stopped",
"Starting" => "status-starting",
"Stopping" => "status-starting",
"Registering" => "status-registering",
_ => "status-stopped",
};
let name_clone = name.clone();
let name_clone2 = name.clone();
let on_remove = props.on_remove_runner.clone();
let on_ping = props.on_ping_runner.clone();
html! {
<div class="card runner-card">
<div class="card-header">
<div class="runner-title-section">
<div class="runner-title-with-dot">
<span class={format!("connection-dot {}", status_class)} title={status.clone()}>
{""}
</span>
<div class="card-title">{name}</div>
</div>
<small class="queue-info">
{"redis://localhost:6379/runner:"}{name}
</small>
</div>
<div class="runner-actions-top">
<button
class="btn btn-sm btn-outline-secondary btn-remove"
title="Remove Runner"
onclick={Callback::from(move |_| on_remove.emit(name_clone2.clone()))}
>
<svg class="trash-icon" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<polyline points="3,6 5,6 21,6"></polyline>
<path d="m5,6 1,14 c0,1.1 0.9,2 2,2 h8 c1.1,0 2,-0.9 2,-2 l1,-14"></path>
<path d="m10,11 v6"></path>
<path d="m14,11 v6"></path>
<path d="M7,6V4c0-1.1,0.9-2,2-2h6c1.1,0,2,0.9,2,2v2"></path>
</svg>
</button>
</div>
</div>
<div class="runner-chart">
<div class="chart-placeholder">
{"📊 Live job count chart (5s updates)"}
</div>
</div>
<div class="ping-section">
{
match props.ping_states.get(name).cloned().unwrap_or(PingState::Idle) {
PingState::Idle => html! {
<div class="input-group input-group-sm">
<input
type="password"
class="form-control"
placeholder="Secret"
id={format!("ping-secret-{}", name)}
/>
<button
class="btn btn-outline-primary"
title="Ping Runner"
onclick={Callback::from(move |_| {
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let input_id = format!("ping-secret-{}", name_clone.clone());
if let Some(input) = document.get_element_by_id(&input_id) {
let input: web_sys::HtmlInputElement = input.dyn_into().unwrap();
let secret = input.value();
if !secret.is_empty() {
on_ping.emit((name_clone.clone(), secret));
input.set_value("");
}
}
})}
>
{"Ping"}
</button>
</div>
},
PingState::Waiting => html! {
<div class="ping-status ping-waiting">
<span class="ping-spinner">{""}</span>
<span>{"Waiting for response..."}</span>
</div>
},
PingState::Success(result) => html! {
<div class="ping-status ping-success">
<span class="ping-icon">{""}</span>
<span>{format!("Success: {}", result)}</span>
</div>
},
PingState::Error(error) => html! {
<div class="ping-status ping-error">
<span class="ping-icon">{""}</span>
<span>{error}</span>
</div>
},
}
}
</div>
</div>
}
})}
</div>
}
}

View File

@@ -0,0 +1,145 @@
use gloo::console;
use std::rc::Rc;
use std::cell::RefCell;
use crate::wasm_client::{WasmSupervisorClient, WasmClientResult as ClientResult, RunnerConfig, ProcessManagerType, ProcessStatus, LogInfo, Job, RunnerType};
use wasm_bindgen_futures::spawn_local;
use yew::prelude::*;
use crate::types::{RunnerInfo, AppState};
/// Service for managing supervisor client operations
#[derive(Clone)]
pub struct SupervisorService {
client: Rc<RefCell<WasmSupervisorClient>>,
}
impl PartialEq for SupervisorService {
fn eq(&self, other: &Self) -> bool {
// Compare by server URL since that's the main identifier
self.client.borrow().server_url() == other.client.borrow().server_url()
}
}
impl SupervisorService {
pub fn new(server_url: &str) -> ClientResult<Self> {
let client = WasmSupervisorClient::new(server_url);
Ok(Self {
client: Rc::new(RefCell::new(client)),
})
}
/// Get all runners with their status and basic info
pub async fn get_all_runners(&self) -> ClientResult<Vec<RunnerInfo>> {
let runner_ids = self.client.borrow_mut().list_runners().await?;
let mut runners = Vec::new();
for id in runner_ids {
let status = self.client.borrow_mut().get_runner_status(&id).await.unwrap_or(ProcessStatus::Unknown);
let logs = self.client.borrow_mut().get_runner_logs(&id, Some(50), false).await.unwrap_or_default();
// Create a basic runner config since we don't have a get_runner_config method
let config = RunnerConfig {
actor_id: id.clone(),
runner_type: RunnerType::SALRunner, // Default
binary_path: std::path::PathBuf::from("unknown"),
script_type: "unknown".to_string(),
args: vec![],
env_vars: std::collections::HashMap::new(),
working_dir: None,
restart_policy: "always".to_string(),
health_check_command: None,
dependencies: vec![],
};
runners.push(RunnerInfo {
id,
config,
status,
logs,
});
}
Ok(runners)
}
/// Add a new runner
pub async fn add_runner(&self, config: RunnerConfig, process_manager_type: ProcessManagerType) -> ClientResult<()> {
self.client.borrow_mut().add_runner(config, process_manager_type).await
}
/// Remove a runner
pub async fn remove_runner(&self, actor_id: &str) -> ClientResult<()> {
self.client.borrow_mut().remove_runner(actor_id).await
}
/// Start a runner
pub async fn start_runner(&self, actor_id: &str) -> ClientResult<()> {
self.client.borrow_mut().start_runner(actor_id).await
}
/// Stop a runner
pub async fn stop_runner(&self, actor_id: &str, force: bool) -> ClientResult<()> {
self.client.borrow_mut().stop_runner(actor_id, force).await
}
/// Get runner status
pub async fn get_runner_status(&self, actor_id: &str) -> ClientResult<ProcessStatus> {
self.client.borrow_mut().get_runner_status(actor_id).await
}
/// Get runner logs
pub async fn get_runner_logs(&self, actor_id: &str, lines: Option<usize>, follow: bool) -> ClientResult<Vec<LogInfo>> {
self.client.borrow_mut().get_runner_logs(actor_id, lines, follow).await
}
/// Start all runners
pub async fn start_all(&self) -> ClientResult<Vec<(String, bool)>> {
self.client.borrow_mut().start_all().await
}
/// Stop all runners
pub async fn stop_all(&self, force: bool) -> ClientResult<Vec<(String, bool)>> {
self.client.borrow_mut().stop_all(force).await
}
/// Queue a job to a runner
pub async fn queue_job(&self, runner_name: &str, job: Job) -> ClientResult<()> {
self.client.borrow_mut().queue_job_to_runner(runner_name, job).await
}
/// Queue a job and wait for result
pub async fn queue_and_wait(&self, runner_name: &str, job: Job, timeout_secs: u64) -> ClientResult<Option<String>> {
self.client.borrow_mut().queue_and_wait(runner_name, job, timeout_secs).await
}
}
/// Hook for managing supervisor service state
#[hook]
pub fn use_supervisor_service(server_url: &str) -> (Option<SupervisorService>, Option<String>) {
let server_url = server_url.to_string();
let service_state = use_state(|| None);
let error_state = use_state(|| None);
{
let service_state = service_state.clone();
let error_state = error_state.clone();
let server_url = server_url.clone();
use_effect_with(server_url.clone(), move |_| {
spawn_local(async move {
match SupervisorService::new(&server_url) {
Ok(service) => {
service_state.set(Some(service));
error_state.set(None);
}
Err(e) => {
console::error!("Failed to create supervisor service:", e.to_string());
error_state.set(Some(e.to_string()));
}
}
});
});
}
((*service_state).clone(), (*error_state).clone())
}
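// Illustrative sketch (kept as a comment) of how a component consumes this hook;
// it mirrors the usage in the runners list view. The server URL is a placeholder
// assumption and not part of this module.
//
// #[function_component(SupervisorStatus)]
// fn supervisor_status() -> Html {
//     let (service, service_error) = use_supervisor_service("http://localhost:8081");
//     if let Some(err) = service_error {
//         return html! { <div class="alert alert-danger">{err}</div> };
//     }
//     match service {
//         Some(_service) => html! { <div>{"Supervisor service ready"}</div> },
//         None => html! { <div>{"Connecting..."}</div> },
//     }
// }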

View File

@@ -0,0 +1,292 @@
use yew::prelude::*;
use wasm_bindgen::JsCast;
use wasm_bindgen_futures::spawn_local;
use gloo::console;
use hero_supervisor_openrpc_client::wasm::WasmSupervisorClient;
#[derive(Clone, PartialEq)]
pub struct SupervisorInfo {
pub server_url: String,
pub admin_secrets_count: usize,
pub user_secrets_count: usize,
pub register_secrets_count: usize,
pub runners_count: usize,
}
#[derive(Properties, PartialEq)]
pub struct SidebarProps {
pub server_url: String,
pub supervisor_info: Option<SupervisorInfo>,
pub admin_secret: String,
pub on_admin_secret_change: Callback<String>,
pub on_supervisor_info_loaded: Callback<SupervisorInfo>,
}
#[function_component(Sidebar)]
pub fn sidebar(props: &SidebarProps) -> Html {
let is_unlocked = use_state(|| false);
let unlock_secret = use_state(|| String::new());
let admin_secrets = use_state(|| Vec::<String>::new());
let user_secrets = use_state(|| Vec::<String>::new());
let register_secrets = use_state(|| Vec::<String>::new());
let is_loading = use_state(|| false);
let on_unlock_secret_change = {
let unlock_secret = unlock_secret.clone();
Callback::from(move |e: web_sys::Event| {
let input: web_sys::HtmlInputElement = e.target().unwrap().dyn_into().unwrap();
unlock_secret.set(input.value());
})
};
let on_unlock_submit = {
let unlock_secret = unlock_secret.clone();
let is_unlocked = is_unlocked.clone();
let is_loading = is_loading.clone();
let admin_secrets = admin_secrets.clone();
let user_secrets = user_secrets.clone();
let register_secrets = register_secrets.clone();
let server_url = props.server_url.clone();
Callback::from(move |_: web_sys::MouseEvent| {
let unlock_secret = unlock_secret.clone();
let is_unlocked = is_unlocked.clone();
let is_loading = is_loading.clone();
let admin_secrets = admin_secrets.clone();
let user_secrets = user_secrets.clone();
let register_secrets = register_secrets.clone();
let server_url = server_url.clone();
let secret_value = (*unlock_secret).clone();
if secret_value.is_empty() {
return;
}
is_loading.set(true);
spawn_local(async move {
let client = WasmSupervisorClient::new(server_url);
// Try to load all secrets
match client.list_admin_secrets(&secret_value).await {
Ok(secrets) => {
admin_secrets.set(secrets);
// Load user secrets
if let Ok(user_secs) = client.list_user_secrets(&secret_value).await {
user_secrets.set(user_secs);
}
// Load register secrets
if let Ok(reg_secs) = client.list_register_secrets(&secret_value).await {
register_secrets.set(reg_secs);
}
is_unlocked.set(true);
unlock_secret.set(String::new());
console::log!("Secrets unlocked successfully");
}
Err(e) => {
console::error!("Failed to unlock secrets:", format!("{:?}", e));
}
}
is_loading.set(false);
});
})
};
let on_lock_click = {
let is_unlocked = is_unlocked.clone();
let admin_secrets = admin_secrets.clone();
let user_secrets = user_secrets.clone();
let register_secrets = register_secrets.clone();
Callback::from(move |_: web_sys::MouseEvent| {
is_unlocked.set(false);
admin_secrets.set(Vec::new());
user_secrets.set(Vec::new());
register_secrets.set(Vec::new());
console::log!("Secrets locked");
})
};
html! {
<div class="sidebar">
<div class="sidebar-header">
<h2>{"Supervisor"}</h2>
</div>
<div class="sidebar-content">
<div class="sidebar-sections">
// Server Info Section
<div class="server-info">
<div class="server-header">
<h3 class="supervisor-title">{"Hero Supervisor"}</h3>
</div>
<div class="server-url">
<span class="connection-indicator connected"></span>
<span class="url-text">{props.server_url.clone()}</span>
</div>
</div>
// Secrets Management Section
<div class="secrets-section">
<div class="secrets-header">
<span class="secrets-title">{"Secrets"}</span>
if !*is_unlocked {
<button
class="unlock-btn"
onclick={on_unlock_submit}
disabled={*is_loading || unlock_secret.is_empty()}
>
<i class={if *is_loading { "fas fa-spinner fa-spin" } else { "fas fa-unlock" }}></i>
</button>
} else {
<button
class="lock-btn"
onclick={on_lock_click}
>
<i class="fas fa-lock"></i>
</button>
}
</div>
if !*is_unlocked {
<div class="unlock-input-row">
<input
type="password"
class="unlock-input"
placeholder="Enter admin secret to unlock"
value={(*unlock_secret).clone()}
onchange={on_unlock_secret_change}
disabled={*is_loading}
/>
</div>
}
if *is_unlocked {
<div class="secrets-content">
<div class="secret-group">
<div class="secret-header">
<span class="secret-title">{"Admin secrets"}</span>
</div>
<div class="secret-list">
{ for admin_secrets.iter().enumerate().map(|(i, secret)| {
html! {
<div class="secret-item" key={i}>
<div class="secret-value">{secret.clone()}</div>
<button class="btn-icon btn-remove">
<i class="fas fa-minus"></i>
</button>
</div>
}
})}
<div class="secret-add-row">
<input
type="text"
class="secret-add-input"
placeholder="New admin secret"
/>
<button class="btn-icon btn-add">
<i class="fas fa-plus"></i>
</button>
</div>
</div>
</div>
<div class="secret-group">
<div class="secret-header">
<span class="secret-title">{"User secrets"}</span>
</div>
<div class="secret-list">
{ for user_secrets.iter().enumerate().map(|(i, secret)| {
html! {
<div class="secret-item" key={i}>
<div class="secret-value">{secret.clone()}</div>
<button class="btn-icon btn-remove">
<i class="fas fa-minus"></i>
</button>
</div>
}
})}
<div class="secret-add-row">
<input
type="text"
class="secret-add-input"
placeholder="New user secret"
/>
<button class="btn-icon btn-add">
<i class="fas fa-plus"></i>
</button>
</div>
</div>
</div>
<div class="secret-group">
<div class="secret-header">
<span class="secret-title">{"Register secrets"}</span>
</div>
<div class="secret-list">
{ for register_secrets.iter().enumerate().map(|(i, secret)| {
html! {
<div class="secret-item" key={i}>
<div class="secret-value">{secret.clone()}</div>
<button class="btn-icon btn-remove">
<i class="fas fa-minus"></i>
</button>
</div>
}
})}
<div class="secret-add-row">
<input
type="text"
class="secret-add-input"
placeholder="New register secret"
/>
<button class="btn-icon btn-add">
<i class="fas fa-plus"></i>
</button>
</div>
</div>
</div>
</div>
}
</div>
if *is_unlocked {
<div class="save-section">
<button class="save-changes-btn">
{"Save Changes"}
</button>
</div>
}
</div>
</div>
// Documentation Links at Bottom
<div class="sidebar-footer">
<div class="docs-section">
<h5>{"Documentation"}</h5>
<div class="docs-links">
<a href="https://github.com/herocode/supervisor" target="_blank" class="doc-link">
{"📖 User Guide"}
</a>
<a href="https://github.com/herocode/supervisor/blob/main/README.md" target="_blank" class="doc-link">
{"🚀 Getting Started"}
</a>
<a href="https://github.com/herocode/supervisor/issues" target="_blank" class="doc-link">
{"🐛 Report Issues"}
</a>
<a href="https://github.com/herocode/supervisor/wiki" target="_blank" class="doc-link">
{"📚 API Reference"}
</a>
</div>
</div>
</div>
</div>
}
}

View File

@@ -0,0 +1,61 @@
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::Duration;
// Re-export types from the WASM client
pub use crate::wasm_client::{
WasmClientError as ClientError, WasmClientResult as ClientResult, JobType, ProcessStatus,
RunnerType, RunnerConfig, ProcessManagerType, LogInfo, Job, JobBuilder
};
/// UI-specific runner information combining config and status
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RunnerInfo {
pub id: String,
pub config: RunnerConfig,
pub status: ProcessStatus,
pub logs: Vec<LogInfo>,
}
/// Form data for adding a new runner
#[derive(Debug, Clone, Default)]
pub struct AddRunnerForm {
pub actor_id: String,
pub runner_type: RunnerType,
pub binary_path: String,
pub script_type: String,
pub args: Vec<String>,
pub env_vars: HashMap<String, String>,
pub working_dir: Option<PathBuf>,
pub restart_policy: String,
pub health_check_command: Option<String>,
pub dependencies: Vec<String>,
pub process_manager_type: ProcessManagerType,
}
impl AddRunnerForm {
pub fn to_runner_config(&self) -> RunnerConfig {
RunnerConfig {
actor_id: self.actor_id.clone(),
runner_type: self.runner_type.clone(),
binary_path: PathBuf::from(&self.binary_path),
script_type: self.script_type.clone(),
args: self.args.clone(),
env_vars: self.env_vars.clone(),
working_dir: self.working_dir.clone(),
restart_policy: self.restart_policy.clone(),
health_check_command: self.health_check_command.clone(),
dependencies: self.dependencies.clone(),
}
}
}
/// Application state for managing runners
#[derive(Debug, Clone, Default)]
pub struct AppState {
pub runners: Vec<RunnerInfo>,
pub loading: bool,
pub error: Option<String>,
pub server_url: String,
}

View File

@@ -0,0 +1,378 @@
use gloo::net::http::Request;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::path::PathBuf;
use thiserror::Error;
use uuid::Uuid;
/// WASM-compatible client for Hero Supervisor OpenRPC server
#[derive(Clone)]
pub struct WasmSupervisorClient {
server_url: String,
request_id: u64,
}
/// Error types for client operations
#[derive(Error, Debug)]
pub enum WasmClientError {
#[error("HTTP request error: {0}")]
Http(String),
#[error("JSON serialization error: {0}")]
Serialization(#[from] serde_json::Error),
#[error("Server error: {message}")]
Server { message: String },
}
/// Result type for client operations
pub type WasmClientResult<T> = Result<T, WasmClientError>;
/// Types of runners supported by the supervisor
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum RunnerType {
SALRunner,
OSISRunner,
VRunner,
}
impl Default for RunnerType {
fn default() -> Self {
RunnerType::SALRunner
}
}
/// Process manager types
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ProcessManagerType {
Simple,
Tmux,
}
impl Default for ProcessManagerType {
fn default() -> Self {
ProcessManagerType::Simple
}
}
/// Configuration for an actor runner
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RunnerConfig {
pub actor_id: String,
pub runner_type: RunnerType,
pub binary_path: PathBuf,
pub script_type: String,
pub args: Vec<String>,
pub env_vars: HashMap<String, String>,
pub working_dir: Option<PathBuf>,
pub restart_policy: String,
pub health_check_command: Option<String>,
pub dependencies: Vec<String>,
}
/// Job type enumeration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum JobType {
SAL,
OSIS,
V,
}
/// Job structure for creating and managing jobs
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Job {
pub id: String,
pub caller_id: String,
pub context_id: String,
pub payload: String,
pub job_type: JobType,
pub runner_name: String,
pub timeout: Option<u64>,
pub env_vars: HashMap<String, String>,
}
/// Process status information
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ProcessStatus {
Running,
Stopped,
Starting,
Stopping,
Failed,
Unknown,
}
/// Log information structure
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct LogInfo {
pub timestamp: String,
pub level: String,
pub message: String,
}
impl WasmSupervisorClient {
/// Create a new supervisor client
pub fn new(server_url: impl Into<String>) -> Self {
Self {
server_url: server_url.into(),
request_id: 0,
}
}
/// Get the server URL
pub fn server_url(&self) -> &str {
&self.server_url
}
/// Make a JSON-RPC request
async fn make_request<T>(&mut self, method: &str, params: Value) -> WasmClientResult<T>
where
T: for<'de> Deserialize<'de>,
{
self.request_id += 1;
let request_body = json!({
"jsonrpc": "2.0",
"method": method,
"params": params,
"id": self.request_id
});
let response = Request::post(&self.server_url)
.header("Content-Type", "application/json")
.json(&request_body)
.map_err(|e| WasmClientError::Http(e.to_string()))?
.send()
.await
.map_err(|e| WasmClientError::Http(e.to_string()))?;
if !response.ok() {
return Err(WasmClientError::Http(format!(
"HTTP error: {} {}",
response.status(),
response.status_text()
)));
}
let response_text = response
.text()
.await
.map_err(|e| WasmClientError::Http(e.to_string()))?;
let response_json: Value = serde_json::from_str(&response_text)?;
if let Some(error) = response_json.get("error") {
return Err(WasmClientError::Server {
message: error.get("message")
.and_then(|m| m.as_str())
.unwrap_or("Unknown server error")
.to_string(),
});
}
let result = response_json
.get("result")
.ok_or_else(|| WasmClientError::Server {
message: "No result in response".to_string(),
})?;
serde_json::from_value(result.clone()).map_err(Into::into)
}
/// Add a new runner to the supervisor
pub async fn add_runner(
&mut self,
config: RunnerConfig,
process_manager_type: ProcessManagerType,
) -> WasmClientResult<()> {
let params = json!({
"config": config,
"process_manager_type": process_manager_type
});
self.make_request("add_runner", params).await
}
/// Remove a runner from the supervisor
pub async fn remove_runner(&mut self, actor_id: &str) -> WasmClientResult<()> {
let params = json!({ "actor_id": actor_id });
self.make_request("remove_runner", params).await
}
/// List all runner IDs
pub async fn list_runners(&mut self) -> WasmClientResult<Vec<String>> {
self.make_request("list_runners", json!({})).await
}
/// Start a specific runner
pub async fn start_runner(&mut self, actor_id: &str) -> WasmClientResult<()> {
let params = json!({ "actor_id": actor_id });
self.make_request("start_runner", params).await
}
/// Stop a specific runner
pub async fn stop_runner(&mut self, actor_id: &str, force: bool) -> WasmClientResult<()> {
let params = json!({ "actor_id": actor_id, "force": force });
self.make_request("stop_runner", params).await
}
/// Get status of a specific runner
pub async fn get_runner_status(&mut self, actor_id: &str) -> WasmClientResult<ProcessStatus> {
let params = json!({ "actor_id": actor_id });
self.make_request("get_runner_status", params).await
}
/// Get logs for a specific runner
pub async fn get_runner_logs(
&mut self,
actor_id: &str,
lines: Option<usize>,
follow: bool,
) -> WasmClientResult<Vec<LogInfo>> {
let params = json!({
"actor_id": actor_id,
"lines": lines,
"follow": follow
});
self.make_request("get_runner_logs", params).await
}
/// Queue a job to a specific runner
pub async fn queue_job_to_runner(&mut self, runner_name: &str, job: Job) -> WasmClientResult<()> {
let params = json!({
"runner_name": runner_name,
"job": job
});
self.make_request("queue_job_to_runner", params).await
}
/// Queue a job to a specific runner and wait for the result
pub async fn queue_and_wait(
&mut self,
runner_name: &str,
job: Job,
timeout_secs: u64,
) -> WasmClientResult<Option<String>> {
let params = json!({
"runner_name": runner_name,
"job": job,
"timeout_secs": timeout_secs
});
self.make_request("queue_and_wait", params).await
}
/// Get job result by job ID
pub async fn get_job_result(&mut self, job_id: &str) -> WasmClientResult<Option<String>> {
let params = json!({ "job_id": job_id });
self.make_request("get_job_result", params).await
}
/// Get status of all runners
pub async fn get_all_runner_status(&mut self) -> WasmClientResult<Vec<(String, ProcessStatus)>> {
self.make_request("get_all_runner_status", json!({})).await
}
/// Start all runners
pub async fn start_all(&mut self) -> WasmClientResult<Vec<(String, bool)>> {
self.make_request("start_all", json!({})).await
}
/// Stop all runners
pub async fn stop_all(&mut self, force: bool) -> WasmClientResult<Vec<(String, bool)>> {
let params = json!({ "force": force });
self.make_request("stop_all", params).await
}
}
/// Builder for creating jobs with a fluent API
#[derive(Debug, Clone, Default)]
pub struct JobBuilder {
id: Option<String>,
caller_id: Option<String>,
context_id: Option<String>,
payload: Option<String>,
job_type: Option<JobType>,
runner_name: Option<String>,
timeout: Option<u64>,
env_vars: HashMap<String, String>,
}
impl JobBuilder {
/// Create a new job builder
pub fn new() -> Self {
Self::default()
}
/// Set the caller ID for this job
pub fn caller_id(mut self, caller_id: impl Into<String>) -> Self {
self.caller_id = Some(caller_id.into());
self
}
/// Set the context ID for this job
pub fn context_id(mut self, context_id: impl Into<String>) -> Self {
self.context_id = Some(context_id.into());
self
}
/// Set the payload (script content) for this job
pub fn payload(mut self, payload: impl Into<String>) -> Self {
self.payload = Some(payload.into());
self
}
/// Set the job type
pub fn job_type(mut self, job_type: JobType) -> Self {
self.job_type = Some(job_type);
self
}
/// Set the runner name for this job
pub fn runner_name(mut self, runner_name: impl Into<String>) -> Self {
self.runner_name = Some(runner_name.into());
self
}
/// Set the timeout for job execution
pub fn timeout(mut self, timeout_secs: u64) -> Self {
self.timeout = Some(timeout_secs);
self
}
/// Set a single environment variable
pub fn env_var(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
self.env_vars.insert(key.into(), value.into());
self
}
/// Set multiple environment variables from a HashMap
pub fn env_vars(mut self, env_vars: HashMap<String, String>) -> Self {
self.env_vars = env_vars;
self
}
/// Build the job
pub fn build(self) -> WasmClientResult<Job> {
Ok(Job {
id: self.id.unwrap_or_else(|| Uuid::new_v4().to_string()),
caller_id: self.caller_id.ok_or_else(|| WasmClientError::Server {
message: "caller_id is required".to_string(),
})?,
context_id: self.context_id.ok_or_else(|| WasmClientError::Server {
message: "context_id is required".to_string(),
})?,
payload: self.payload.ok_or_else(|| WasmClientError::Server {
message: "payload is required".to_string(),
})?,
job_type: self.job_type.ok_or_else(|| WasmClientError::Server {
message: "job_type is required".to_string(),
})?,
runner_name: self.runner_name.ok_or_else(|| WasmClientError::Server {
message: "runner_name is required".to_string(),
})?,
timeout: self.timeout,
env_vars: self.env_vars,
})
}
}
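// Illustrative usage sketch (kept as a comment): build a job with the fluent
// builder above and queue it through the WASM client. The server URL and the
// runner name "example_runner" are placeholder assumptions.
//
// async fn queue_example_job() -> WasmClientResult<()> {
//     let mut client = WasmSupervisorClient::new("http://127.0.0.1:3030");
//     let job = JobBuilder::new()
//         .caller_id("admin-ui")
//         .context_id("demo-context")
//         .payload("print(\"hello from the admin ui\");")
//         .job_type(JobType::OSIS)
//         .runner_name("example_runner")
//         .timeout(60)
//         .build()?;
//     client.queue_job_to_runner("example_runner", job).await
// }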

1129
clients/admin-ui/styles.css Normal file

File diff suppressed because it is too large Load Diff

2
clients/openrpc/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
pkg
target

View File

@@ -0,0 +1,59 @@
[package]
name = "hero-supervisor-openrpc-client-wasm"
version = "0.1.0"
edition = "2021"
description = "WASM-compatible OpenRPC client for Hero Supervisor"
license = "MIT OR Apache-2.0"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
# WASM bindings
wasm-bindgen = "0.2"
wasm-bindgen-futures = "0.4"
js-sys = "0.3"
# Web APIs
web-sys = { version = "0.3", features = [
"console",
"Request",
"RequestInit",
"RequestMode",
"Response",
"Window",
"Headers",
"AbortController",
"AbortSignal",
] }
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde-wasm-bindgen = "0.6"
# Error handling
thiserror = "1.0"
# UUID for job IDs
uuid = { version = "1.0", features = ["v4", "serde", "js"] }
# Time handling
chrono = { version = "0.4", features = ["serde", "wasmbind"] }
# Collections
indexmap = "2.0"
# Logging for WASM
log = "0.4"
console_log = "1.0"
# Async utilities
futures = "0.3"
[dependencies.getrandom]
version = "0.2"
features = ["js"]
[dev-dependencies]
wasm-bindgen-test = "0.3"

2710
clients/openrpc/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,82 @@
[package]
name = "hero-supervisor-openrpc-client"
version = "0.1.0"
edition = "2021"
description = "OpenRPC client for Hero Supervisor"
license = "MIT OR Apache-2.0"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
# Common dependencies for both native and WASM
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "1.0"
log = "0.4"
uuid = { version = "1.0", features = ["v4", "serde"] }
# Native JSON-RPC client (not WASM compatible)
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
jsonrpsee = { version = "0.24", features = ["http-client", "macros"] }
tokio = { version = "1.0", features = ["full"] }
hero-supervisor = { path = "../.." }
env_logger = "0.11"
# WASM-specific dependencies
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen = "0.2"
wasm-bindgen-futures = "0.4"
js-sys = "0.3"
web-sys = { version = "0.3", features = [
"console",
"Request",
"RequestInit",
"RequestMode",
"Response",
"Headers",
"Window",
] }
console_log = "1.0"
getrandom = { version = "0.2", features = ["js"] }
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
wasm-bindgen-test = "0.3"
# UUID for job IDs (native)
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.uuid]
version = "1.0"
features = ["v4", "serde"]
# Time handling (native)
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.chrono]
version = "0.4"
features = ["serde"]
# WASM-compatible dependencies (already defined above)
[target.'cfg(target_arch = "wasm32")'.dependencies.chrono]
version = "0.4"
features = ["serde", "wasmbind"]
[target.'cfg(target_arch = "wasm32")'.dependencies.uuid]
version = "1.0"
features = ["v4", "serde", "js"]
# Collections
indexmap = "2.0"
# Interactive CLI
crossterm = "0.27"
ratatui = "0.28"
# Command line parsing
clap = { version = "4.0", features = ["derive"] }
[[bin]]
name = "openrpc-cli"
path = "cmd/main.rs"
[dev-dependencies]
# Testing utilities
tokio-test = "0.4"

196
clients/openrpc/README.md Normal file
View File

@@ -0,0 +1,196 @@
# Hero Supervisor OpenRPC Client
A Rust client library for interacting with the Hero Supervisor OpenRPC server. This crate provides a simple, async interface for managing actors and jobs remotely.
## Features
- **Async API**: Built on `tokio` and `jsonrpsee` for high-performance async operations
- **Type Safety**: Full Rust type safety with serde serialization/deserialization
- **Job Builder**: Fluent API for creating jobs with validation
- **Comprehensive Coverage**: All supervisor operations available via client
- **Error Handling**: Detailed error types with proper error propagation
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
hero-supervisor-openrpc-client = "0.1.0"
tokio = { version = "1.0", features = ["full"] }
```
## Quick Start
```rust
use hero_supervisor_openrpc_client::{
SupervisorClient, RunnerConfig, RunnerType, ProcessManagerType, JobBuilder, JobType
};
use std::path::PathBuf;
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client
let client = SupervisorClient::new("http://127.0.0.1:3030")?;
// Add a runner
let config = RunnerConfig {
actor_id: "my_actor".to_string(),
runner_type: RunnerType::OSISRunner,
binary_path: PathBuf::from("/path/to/actor/binary"),
db_path: "/path/to/db".to_string(),
redis_url: "redis://localhost:6379".to_string(),
};
client.add_runner(config, ProcessManagerType::Simple).await?;
// Start the runner
client.start_runner("my_actor").await?;
// Create and queue a job
let job = JobBuilder::new()
.caller_id("my_client")
.context_id("example_context")
.payload("print('Hello from Hero Supervisor!');")
.job_type(JobType::OSIS)
.runner_name("my_actor")
.timeout(Duration::from_secs(60))
.build()?;
client.queue_job_to_runner("my_actor", job).await?;
// Check runner status
let status = client.get_runner_status("my_actor").await?;
println!("Runner status: {:?}", status);
// List all runners
let runners = client.list_runners().await?;
println!("Active runners: {:?}", runners);
Ok(())
}
```
## API Reference
### Client Creation
```rust
let client = SupervisorClient::new("http://127.0.0.1:3030")?;
```
### Runner Management
```rust
// Add a runner
client.add_runner(config, ProcessManagerType::Simple).await?;
// Remove a runner
client.remove_runner("actor_id").await?;
// List all runners
let runners = client.list_runners().await?;
// Start/stop runners
client.start_runner("actor_id").await?;
client.stop_runner("actor_id", false).await?; // force = false
// Get runner status
let status = client.get_runner_status("actor_id").await?;
// Get runner logs
let logs = client.get_runner_logs("actor_id", Some(100), false).await?;
```
### Job Management
```rust
// Create a job using the builder
let job = JobBuilder::new()
.caller_id("client_id")
.context_id("context_id")
.payload("script_content")
.job_type(JobType::OSIS)
.runner_name("target_actor")
.timeout(Duration::from_secs(300))
.env_var("KEY", "value")
.build()?;
// Queue the job
client.queue_job_to_runner("actor_id", job).await?;
```
### Bulk Operations
```rust
// Start all runners
let results = client.start_all().await?;
// Stop all runners
let results = client.stop_all(false).await?; // force = false
// Get status of all runners
let statuses = client.get_all_runner_status().await?;
```
## Types
### RunnerType
- `SALRunner` - System abstraction layer operations
- `OSISRunner` - Operating system interface operations
- `VRunner` - Virtualization operations
- `PyRunner` - Python-based actors
### JobType
- `SAL` - SAL job type
- `OSIS` - OSIS job type
- `V` - V job type
- `Python` - Python job type
### ProcessManagerType
- `Simple` - Direct process spawning
- `Tmux(String)` - Tmux session-based management
### ProcessStatus
- `Running` - Process is active
- `Stopped` - Process is stopped
- `Failed` - Process failed
- `Unknown` - Status unknown
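For example, a caller can branch on the `ProcessStatus` returned by `get_runner_status` to decide whether a runner needs to be (re)started. This is an illustrative sketch, assuming `ProcessStatus` and `ClientError` are exported at the crate root like the other types above; `"my_actor"` is a placeholder runner ID:
```rust
use hero_supervisor_openrpc_client::{SupervisorClient, ProcessStatus, ClientError};

async fn ensure_running(client: &SupervisorClient) -> Result<(), ClientError> {
    // "my_actor" is a placeholder runner ID.
    match client.get_runner_status("my_actor").await? {
        ProcessStatus::Running => println!("runner is already up"),
        ProcessStatus::Stopped => client.start_runner("my_actor").await?,
        ProcessStatus::Failed => eprintln!("runner failed; inspect logs via get_runner_logs"),
        _ => println!("runner is in an unknown state"),
    }
    Ok(())
}
```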
## Error Handling
The client uses the `ClientError` enum for error handling:
```rust
use hero_supervisor_openrpc_client::ClientError;
match client.start_runner("actor_id").await {
Ok(()) => println!("Runner started successfully"),
Err(ClientError::JsonRpc(e)) => println!("JSON-RPC error: {}", e),
Err(ClientError::Server { message }) => println!("Server error: {}", message),
Err(e) => println!("Other error: {}", e),
}
```
## Examples
See the `examples/` directory for complete usage examples:
- `basic_client.rs` - Basic client usage
- `job_management.rs` - Job creation and management
- `runner_lifecycle.rs` - Complete runner lifecycle management
## Requirements
- Rust 1.70+
- A Hero Supervisor server running with the OpenRPC feature enabled
- Network access to the supervisor server
## License
Licensed under either of Apache License, Version 2.0 or MIT license at your option.

29
clients/openrpc/build-wasm.sh Executable file
View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Build script for WASM-compatible OpenRPC client
set -e
echo "Building WASM OpenRPC client..."
# Check if wasm-pack is installed
if ! command -v wasm-pack &> /dev/null; then
echo "wasm-pack is not installed. Installing..."
curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
fi
# Build the WASM package
echo "Building WASM package..."
wasm-pack build --target web --out-dir pkg-wasm
echo "WASM build complete! Package available in pkg-wasm/"
echo ""
echo "To use in a web project:"
echo "1. Copy the pkg-wasm directory to your web project"
echo "2. Import the module in your JavaScript:"
echo " import init, { WasmSupervisorClient, create_client, create_job } from './pkg-wasm/hero_supervisor_openrpc_client_wasm.js';"
echo "3. Initialize the WASM module:"
echo " await init();"
echo "4. Create and use the client:"
echo " const client = create_client('http://localhost:3030');"
echo " const runners = await client.list_runners();"

872
clients/openrpc/cmd/main.rs Normal file
View File

@@ -0,0 +1,872 @@
//! Interactive CLI for Hero Supervisor OpenRPC Client
//!
//! This CLI provides an interactive interface to explore and test OpenRPC methods
//! with arrow key navigation, parameter input, and response display.
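//!
//! Example invocation (a sketch; assumes the supervisor's OpenRPC server is reachable
//! at the default URL, which can be overridden with `--url`):
//!
//! ```text
//! cargo run --bin openrpc-cli -- --url http://127.0.0.1:3030
//! ```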
use clap::Parser;
use crossterm::{
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyEventKind},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{
backend::CrosstermBackend,
layout::{Alignment, Constraint, Direction, Layout, Margin, Rect},
style::{Color, Modifier, Style},
text::{Line, Span, Text},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
Frame, Terminal,
};
use serde_json::json;
use std::io;
use chrono;
use hero_supervisor_openrpc_client::{SupervisorClient, RunnerConfig, RunnerType, ProcessManagerType};
use std::path::PathBuf;
#[derive(Parser)]
#[command(name = "openrpc-cli")]
#[command(about = "Interactive CLI for Hero Supervisor OpenRPC")]
struct Cli {
/// OpenRPC server URL
#[arg(short, long, default_value = "http://127.0.0.1:3030")]
url: String,
}
#[derive(Debug, Clone)]
struct RpcMethod {
name: String,
description: String,
params: Vec<RpcParam>,
}
#[derive(Debug, Clone)]
struct RpcParam {
name: String,
param_type: String,
required: bool,
description: String,
}
struct App {
client: SupervisorClient,
methods: Vec<RpcMethod>,
list_state: ListState,
current_screen: Screen,
selected_method: Option<RpcMethod>,
param_inputs: Vec<String>,
current_param_index: usize,
response: Option<String>,
error_message: Option<String>,
}
#[derive(Debug, PartialEq)]
enum Screen {
MethodList,
ParamInput,
Response,
}
impl App {
async fn new(url: String) -> Result<Self, Box<dyn std::error::Error>> {
let client = SupervisorClient::new(&url)?;
// Test connection to OpenRPC server using the standard rpc.discover method
// This is the proper OpenRPC way to test server connectivity and discover available methods
let discovery_result = client.discover().await;
match discovery_result {
Ok(discovery_info) => {
println!("✓ Connected to OpenRPC server at {}", url);
if let Some(info) = discovery_info.get("info") {
if let Some(title) = info.get("title").and_then(|t| t.as_str()) {
println!(" Server: {}", title);
}
if let Some(version) = info.get("version").and_then(|v| v.as_str()) {
println!(" Version: {}", version);
}
}
}
Err(e) => {
return Err(format!("Failed to connect to OpenRPC server at {}: {}\nMake sure the supervisor is running with OpenRPC enabled.", url, e).into());
}
}
let methods = vec![
RpcMethod {
name: "list_runners".to_string(),
description: "List all registered runners".to_string(),
params: vec![],
},
RpcMethod {
name: "register_runner".to_string(),
description: "Register a new runner with the supervisor using secret authentication".to_string(),
params: vec![
RpcParam {
name: "secret".to_string(),
param_type: "String".to_string(),
required: true,
description: "Secret required for runner registration".to_string(),
},
RpcParam {
name: "name".to_string(),
param_type: "String".to_string(),
required: true,
description: "Name of the runner".to_string(),
},
RpcParam {
name: "queue".to_string(),
param_type: "String".to_string(),
required: true,
description: "Queue name for the runner to listen to".to_string(),
},
],
},
RpcMethod {
name: "run_job".to_string(),
description: "Run a job on the appropriate runner".to_string(),
params: vec![
RpcParam {
name: "secret".to_string(),
param_type: "String".to_string(),
required: true,
description: "Secret required for job execution".to_string(),
},
RpcParam {
name: "job_id".to_string(),
param_type: "String".to_string(),
required: true,
description: "Job ID".to_string(),
},
RpcParam {
name: "runner_name".to_string(),
param_type: "String".to_string(),
required: true,
description: "Name of the runner to execute the job".to_string(),
},
RpcParam {
name: "payload".to_string(),
param_type: "String".to_string(),
required: true,
description: "Job payload/script content".to_string(),
},
],
},
RpcMethod {
name: "remove_runner".to_string(),
description: "Remove a runner from the supervisor".to_string(),
params: vec![
RpcParam {
name: "actor_id".to_string(),
param_type: "String".to_string(),
required: true,
description: "ID of the runner to remove".to_string(),
},
],
},
RpcMethod {
name: "start_runner".to_string(),
description: "Start a specific runner".to_string(),
params: vec![
RpcParam {
name: "actor_id".to_string(),
param_type: "String".to_string(),
required: true,
description: "ID of the runner to start".to_string(),
},
],
},
RpcMethod {
name: "stop_runner".to_string(),
description: "Stop a specific runner".to_string(),
params: vec![
RpcParam {
name: "actor_id".to_string(),
param_type: "String".to_string(),
required: true,
description: "ID of the runner to stop".to_string(),
},
RpcParam {
name: "force".to_string(),
param_type: "bool".to_string(),
required: true,
description: "Whether to force stop the runner".to_string(),
},
],
},
RpcMethod {
name: "get_runner_status".to_string(),
description: "Get the status of a specific runner".to_string(),
params: vec![
RpcParam {
name: "actor_id".to_string(),
param_type: "String".to_string(),
required: true,
description: "ID of the runner".to_string(),
},
],
},
RpcMethod {
name: "get_all_runner_status".to_string(),
description: "Get status of all runners".to_string(),
params: vec![],
},
RpcMethod {
name: "start_all".to_string(),
description: "Start all runners".to_string(),
params: vec![],
},
RpcMethod {
name: "stop_all".to_string(),
description: "Stop all runners".to_string(),
params: vec![
RpcParam {
name: "force".to_string(),
param_type: "bool".to_string(),
required: true,
description: "Whether to force stop all runners".to_string(),
},
],
},
RpcMethod {
name: "get_all_status".to_string(),
description: "Get status of all components".to_string(),
params: vec![],
},
];
let mut list_state = ListState::default();
list_state.select(Some(0));
Ok(App {
client,
methods,
list_state,
current_screen: Screen::MethodList,
selected_method: None,
param_inputs: vec![],
current_param_index: 0,
response: None,
error_message: None,
})
}
fn next_method(&mut self) {
let i = match self.list_state.selected() {
Some(i) => {
if i >= self.methods.len() - 1 {
0
} else {
i + 1
}
}
None => 0,
};
self.list_state.select(Some(i));
}
fn previous_method(&mut self) {
let i = match self.list_state.selected() {
Some(i) => {
if i == 0 {
self.methods.len() - 1
} else {
i - 1
}
}
None => 0,
};
self.list_state.select(Some(i));
}
fn select_method(&mut self) {
if let Some(i) = self.list_state.selected() {
let method = self.methods[i].clone();
if method.params.is_empty() {
// No parameters needed, call directly
self.selected_method = Some(method);
self.current_screen = Screen::Response;
} else {
// Parameters needed, go to input screen
self.selected_method = Some(method.clone());
self.param_inputs = vec!["".to_string(); method.params.len()];
self.current_param_index = 0;
self.current_screen = Screen::ParamInput;
}
}
}
fn next_param(&mut self) {
if let Some(method) = &self.selected_method {
if self.current_param_index < method.params.len() - 1 {
self.current_param_index += 1;
}
}
}
fn previous_param(&mut self) {
if self.current_param_index > 0 {
self.current_param_index -= 1;
}
}
fn add_char_to_current_param(&mut self, c: char) {
if self.current_param_index < self.param_inputs.len() {
self.param_inputs[self.current_param_index].push(c);
}
}
fn remove_char_from_current_param(&mut self) {
if self.current_param_index < self.param_inputs.len() {
self.param_inputs[self.current_param_index].pop();
}
}
async fn execute_method(&mut self) {
if let Some(method) = &self.selected_method {
self.error_message = None;
self.response = None;
// Build parameters
let mut params = json!({});
if !method.params.is_empty() {
for (i, param) in method.params.iter().enumerate() {
let input = &self.param_inputs[i];
if input.is_empty() && param.required {
self.error_message = Some(format!("Required parameter '{}' is empty", param.name));
return;
}
if !input.is_empty() {
let value = match param.param_type.as_str() {
"bool" => {
match input.to_lowercase().as_str() {
"true" | "1" | "yes" => json!(true),
"false" | "0" | "no" => json!(false),
_ => {
self.error_message = Some(format!("Invalid boolean value for '{}': {}", param.name, input));
return;
}
}
}
"i32" | "i64" | "u32" | "u64" => {
match input.parse::<i64>() {
Ok(n) => json!(n),
Err(_) => {
self.error_message = Some(format!("Invalid number for '{}': {}", param.name, input));
return;
}
}
}
_ => json!(input),
};
if method.name == "register_runner" {
// Special handling for register_runner method
match param.name.as_str() {
"secret" => params["secret"] = value,
"name" => params["name"] = value,
"queue" => params["queue"] = value,
_ => {}
}
} else if method.name == "run_job" {
// Special handling for run_job method
match param.name.as_str() {
"secret" => params["secret"] = value,
"job_id" => params["job_id"] = value,
"runner_name" => params["runner_name"] = value,
"payload" => params["payload"] = value,
_ => {}
}
} else {
params[&param.name] = value;
}
}
}
}
// Execute the method
let result: Result<serde_json::Value, hero_supervisor_openrpc_client::ClientError> = match method.name.as_str() {
"list_runners" => {
match self.client.list_runners().await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
}
"get_all_runner_status" => {
match self.client.get_all_runner_status().await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
}
"start_all" => {
match self.client.start_all().await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
}
"get_all_status" => {
match self.client.get_all_status().await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
}
"stop_all" => {
let force = params.get("force").and_then(|v| v.as_bool()).unwrap_or(false);
match self.client.stop_all(force).await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
}
"start_runner" => {
if let Some(actor_id) = params.get("actor_id").and_then(|v| v.as_str()) {
match self.client.start_runner(actor_id).await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
} else {
Err(hero_supervisor_openrpc_client::ClientError::from(
serde_json::Error::io(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Missing actor_id parameter"))
))
}
}
"stop_runner" => {
if let (Some(actor_id), Some(force)) = (
params.get("actor_id").and_then(|v| v.as_str()),
params.get("force").and_then(|v| v.as_bool())
) {
match self.client.stop_runner(actor_id, force).await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
} else {
Err(hero_supervisor_openrpc_client::ClientError::from(
serde_json::Error::io(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Missing parameters"))
))
}
}
"remove_runner" => {
if let Some(actor_id) = params.get("actor_id").and_then(|v| v.as_str()) {
match self.client.remove_runner(actor_id).await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
} else {
Err(hero_supervisor_openrpc_client::ClientError::from(
serde_json::Error::io(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Missing actor_id parameter"))
))
}
}
"get_runner_status" => {
if let Some(actor_id) = params.get("actor_id").and_then(|v| v.as_str()) {
match self.client.get_runner_status(actor_id).await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
} else {
Err(hero_supervisor_openrpc_client::ClientError::from(
serde_json::Error::io(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Missing actor_id parameter"))
))
}
}
"register_runner" => {
if let (Some(secret), Some(name), Some(queue)) = (
params.get("secret").and_then(|v| v.as_str()),
params.get("name").and_then(|v| v.as_str()),
params.get("queue").and_then(|v| v.as_str())
) {
match self.client.register_runner(secret, name, queue).await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
} else {
Err(hero_supervisor_openrpc_client::ClientError::from(
serde_json::Error::io(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Missing required parameters: secret, name, queue"))
))
}
}
"run_job" => {
if let (Some(secret), Some(job_id), Some(runner_name), Some(payload)) = (
params.get("secret").and_then(|v| v.as_str()),
params.get("job_id").and_then(|v| v.as_str()),
params.get("runner_name").and_then(|v| v.as_str()),
params.get("payload").and_then(|v| v.as_str())
) {
// Create a job object
let job = serde_json::json!({
"id": job_id,
"caller_id": "cli_user",
"context_id": "cli_context",
"payload": payload,
"job_type": "SAL",
"runner_name": runner_name,
"timeout": 30000000000u64, // 30 seconds in nanoseconds
"env_vars": {},
"created_at": chrono::Utc::now().to_rfc3339(),
"updated_at": chrono::Utc::now().to_rfc3339()
});
match self.client.run_job(secret, job).await {
Ok(response) => {
match serde_json::to_value(response) {
Ok(value) => Ok(value),
Err(e) => Err(hero_supervisor_openrpc_client::ClientError::from(e)),
}
},
Err(e) => Err(e),
}
} else {
Err(hero_supervisor_openrpc_client::ClientError::from(
serde_json::Error::io(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Missing required parameters: secret, job_id, runner_name, payload"))
))
}
}
_ => Err(hero_supervisor_openrpc_client::ClientError::from(
serde_json::Error::io(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Method not implemented in CLI"))
)),
};
match result {
Ok(response) => {
self.response = Some(format!("{:#}", response));
}
Err(e) => {
self.error_message = Some(format!("Error: {}", e));
}
}
self.current_screen = Screen::Response;
}
}
fn back_to_methods(&mut self) {
self.current_screen = Screen::MethodList;
self.selected_method = None;
self.param_inputs.clear();
self.current_param_index = 0;
self.response = None;
self.error_message = None;
}
}
fn ui(f: &mut Frame, app: &mut App) {
match app.current_screen {
Screen::MethodList => draw_method_list(f, app),
Screen::ParamInput => draw_param_input(f, app),
Screen::Response => draw_response(f, app),
}
}
fn draw_method_list(f: &mut Frame, app: &mut App) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.margin(1)
.constraints([Constraint::Min(0)].as_ref())
.split(f.area());
let items: Vec<ListItem> = app
.methods
.iter()
.map(|method| {
let content = vec![Line::from(vec![
Span::styled(&method.name, Style::default().fg(Color::Yellow)),
Span::raw(" - "),
Span::raw(&method.description),
])];
ListItem::new(content)
})
.collect();
let items = List::new(items)
.block(
Block::default()
.borders(Borders::ALL)
.title("OpenRPC Methods (↑↓ to navigate, Enter to select, q to quit)"),
)
.highlight_style(
Style::default()
.bg(Color::LightGreen)
.fg(Color::Black)
.add_modifier(Modifier::BOLD),
)
.highlight_symbol(">> ");
f.render_stateful_widget(items, chunks[0], &mut app.list_state);
}
fn draw_param_input(f: &mut Frame, app: &mut App) {
if let Some(method) = &app.selected_method {
let chunks = Layout::default()
.direction(Direction::Vertical)
.margin(1)
.constraints([
Constraint::Length(3),
Constraint::Min(0),
Constraint::Length(3),
])
.split(f.area());
// Title
let title = Paragraph::new(format!("Parameters for: {}", method.name))
.block(Block::default().borders(Borders::ALL).title("Method"));
f.render_widget(title, chunks[0]);
// Parameters - create proper form layout with separate label and input areas
let param_chunks = Layout::default()
.direction(Direction::Vertical)
.constraints(vec![Constraint::Length(5); method.params.len()])
.split(chunks[1]);
for (i, param) in method.params.iter().enumerate() {
let is_current = i == app.current_param_index;
// Split each parameter into label and input areas
let param_layout = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(2), Constraint::Length(3)])
.split(param_chunks[i]);
// Parameter label and description
let label_style = if is_current {
Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Color::White)
};
let label_text = vec![
Line::from(vec![
Span::styled(&param.name, label_style),
Span::raw(if param.required { " (required)" } else { " (optional)" }),
Span::raw(format!(" [{}]", param.param_type)),
]),
Line::from(Span::styled(&param.description, Style::default().fg(Color::Gray))),
];
let label_widget = Paragraph::new(label_text)
.block(Block::default().borders(Borders::NONE));
f.render_widget(label_widget, param_layout[0]);
// Input field
let empty_string = String::new();
let input_value = app.param_inputs.get(i).unwrap_or(&empty_string);
let input_display = if is_current {
// Active field: render the typed value as-is (empty until something is typed)
input_value.clone()
} else if input_value.is_empty() {
" ".to_string() // Placeholder space so inactive empty fields keep their height
} else {
input_value.clone()
};
let input_style = if is_current {
Style::default().fg(Color::Black).bg(Color::Cyan)
} else {
Style::default().fg(Color::White).bg(Color::DarkGray)
};
let border_style = if is_current {
Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Color::Gray)
};
let input_widget = Paragraph::new(Line::from(Span::styled(input_display, input_style)))
.block(
Block::default()
.borders(Borders::ALL)
.border_style(border_style)
.title(if is_current { " INPUT " } else { "" }),
);
f.render_widget(input_widget, param_layout[1]);
}
// Instructions
let instructions = Paragraph::new("↑↓ to navigate params, type to edit, Enter to execute, Esc to go back")
.block(Block::default().borders(Borders::ALL).title("Instructions"));
f.render_widget(instructions, chunks[2]);
}
}
fn draw_response(f: &mut Frame, app: &mut App) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.margin(1)
.constraints([
Constraint::Length(3),
Constraint::Min(0),
Constraint::Length(3),
])
.split(f.area());
// Title
let method_name = app.selected_method.as_ref().map(|m| m.name.as_str()).unwrap_or("Unknown");
let title = Paragraph::new(format!("Response for: {}", method_name))
.block(Block::default().borders(Borders::ALL).title("Response"));
f.render_widget(title, chunks[0]);
// Response content
let content = if let Some(error) = &app.error_message {
Text::from(error.clone()).style(Style::default().fg(Color::Red))
} else if let Some(response) = &app.response {
Text::from(response.clone()).style(Style::default().fg(Color::Green))
} else {
Text::from("Executing...").style(Style::default().fg(Color::Yellow))
};
let response_widget = Paragraph::new(content)
.block(Block::default().borders(Borders::ALL))
.wrap(Wrap { trim: true });
f.render_widget(response_widget, chunks[1]);
// Instructions
let instructions = Paragraph::new("Esc to go back to methods")
.block(Block::default().borders(Borders::ALL).title("Instructions"));
f.render_widget(instructions, chunks[2]);
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let cli = Cli::parse();
// Setup terminal
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
// Create app
let mut app = match App::new(cli.url).await {
Ok(app) => app,
Err(e) => {
// Cleanup terminal before showing error
disable_raw_mode()?;
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture
)?;
terminal.show_cursor()?;
eprintln!("Failed to connect to OpenRPC server: {}", e);
eprintln!("Make sure the supervisor is running with OpenRPC enabled.");
std::process::exit(1);
}
};
// Main loop
loop {
terminal.draw(|f| ui(f, &mut app))?;
if let Event::Key(key) = event::read()? {
if key.kind == KeyEventKind::Press {
match app.current_screen {
Screen::MethodList => {
match key.code {
KeyCode::Char('q') => break,
KeyCode::Down => app.next_method(),
KeyCode::Up => app.previous_method(),
KeyCode::Enter => {
app.select_method();
// If the selected method has no parameters, execute it immediately
if let Some(method) = &app.selected_method {
if method.params.is_empty() {
app.execute_method().await;
}
}
},
_ => {}
}
}
Screen::ParamInput => {
match key.code {
KeyCode::Esc => app.back_to_methods(),
KeyCode::Up => app.previous_param(),
KeyCode::Down => app.next_param(),
KeyCode::Enter => {
app.execute_method().await;
}
KeyCode::Backspace => app.remove_char_from_current_param(),
KeyCode::Char(c) => app.add_char_to_current_param(c),
_ => {}
}
}
Screen::Response => {
match key.code {
KeyCode::Esc => app.back_to_methods(),
_ => {}
}
}
}
}
}
}
// Restore terminal
disable_raw_mode()?;
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture
)?;
terminal.show_cursor()?;
Ok(())
}

View File

@@ -0,0 +1,202 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hero Supervisor WASM OpenRPC Client Example</title>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
.container {
background: #f5f5f5;
padding: 20px;
border-radius: 8px;
margin: 10px 0;
}
button {
background: #007cba;
color: white;
border: none;
padding: 10px 20px;
border-radius: 4px;
cursor: pointer;
margin: 5px;
}
button:hover {
background: #005a87;
}
button:disabled {
background: #ccc;
cursor: not-allowed;
}
input, textarea {
width: 100%;
padding: 8px;
margin: 5px 0;
border: 1px solid #ddd;
border-radius: 4px;
}
.output {
background: #fff;
border: 1px solid #ddd;
padding: 10px;
margin: 10px 0;
border-radius: 4px;
white-space: pre-wrap;
font-family: monospace;
max-height: 200px;
overflow-y: auto;
}
.error {
color: #d32f2f;
}
.success {
color: #2e7d32;
}
</style>
</head>
<body>
<h1>Hero Supervisor WASM OpenRPC Client</h1>
<div class="container">
<h2>Connection</h2>
<input type="text" id="serverUrl" placeholder="Server URL" value="http://localhost:3030">
<button onclick="testConnection()">Test Connection</button>
<div id="connectionOutput" class="output"></div>
</div>
<div class="container">
<h2>Runner Management</h2>
<button onclick="listRunners()">List Runners</button>
<div id="runnersOutput" class="output"></div>
<h3>Register Runner</h3>
<input type="text" id="registerSecret" placeholder="Secret" value="admin123">
<input type="text" id="runnerName" placeholder="Runner Name" value="wasm_runner">
<input type="text" id="runnerQueue" placeholder="Queue Name" value="wasm_queue">
<button onclick="registerRunner()">Register Runner</button>
<div id="registerOutput" class="output"></div>
</div>
<div class="container">
<h2>Job Execution</h2>
<input type="text" id="jobSecret" placeholder="Secret" value="admin123">
<input type="text" id="jobId" placeholder="Job ID" value="">
<input type="text" id="jobRunnerName" placeholder="Runner Name" value="wasm_runner">
<textarea id="jobPayload" placeholder="Job Payload" rows="3">echo "Hello from WASM client!"</textarea>
<button onclick="runJob()">Run Job</button>
<div id="jobOutput" class="output"></div>
</div>
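<!--
Build note (a sketch; adjust the crate path to your checkout): the module import below
expects a wasm-pack "web" build whose output directory is ./pkg-wasm, e.g.

    wasm-pack build --target web --out-dir pkg-wasm

Serve this page over HTTP (for example `python3 -m http.server`) rather than opening it
from the filesystem, so the ES module import and fetch calls are allowed.
-->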
<script type="module">
import init, {
WasmSupervisorClient,
WasmJob,
create_client,
create_job
} from './pkg-wasm/hero_supervisor_openrpc_client_wasm.js';
let client = null;
// Initialize WASM module
async function initWasm() {
try {
await init();
console.log('WASM module initialized');
document.getElementById('connectionOutput').textContent = 'WASM module loaded successfully';
document.getElementById('connectionOutput').className = 'output success';
} catch (error) {
console.error('Failed to initialize WASM:', error);
document.getElementById('connectionOutput').textContent = `Failed to initialize WASM: ${error}`;
document.getElementById('connectionOutput').className = 'output error';
}
}
// Test connection to supervisor
window.testConnection = async function() {
try {
const serverUrl = document.getElementById('serverUrl').value;
client = create_client(serverUrl);
const result = await client.discover();
document.getElementById('connectionOutput').textContent = `Connection successful!\n${JSON.stringify(result, null, 2)}`;
document.getElementById('connectionOutput').className = 'output success';
} catch (error) {
document.getElementById('connectionOutput').textContent = `Connection failed: ${error}`;
document.getElementById('connectionOutput').className = 'output error';
}
};
// List all runners
window.listRunners = async function() {
try {
if (!client) {
throw new Error('Client not initialized. Test connection first.');
}
const runners = await client.list_runners();
document.getElementById('runnersOutput').textContent = `Runners:\n${JSON.stringify(runners, null, 2)}`;
document.getElementById('runnersOutput').className = 'output success';
} catch (error) {
document.getElementById('runnersOutput').textContent = `Failed to list runners: ${error}`;
document.getElementById('runnersOutput').className = 'output error';
}
};
// Register a new runner
window.registerRunner = async function() {
try {
if (!client) {
throw new Error('Client not initialized. Test connection first.');
}
const secret = document.getElementById('registerSecret').value;
const name = document.getElementById('runnerName').value;
const queue = document.getElementById('runnerQueue').value;
await client.register_runner(secret, name, queue);
document.getElementById('registerOutput').textContent = `Runner '${name}' registered successfully!`;
document.getElementById('registerOutput').className = 'output success';
} catch (error) {
document.getElementById('registerOutput').textContent = `Failed to register runner: ${error}`;
document.getElementById('registerOutput').className = 'output error';
}
};
// Run a job
window.runJob = async function() {
try {
if (!client) {
throw new Error('Client not initialized. Test connection first.');
}
const secret = document.getElementById('jobSecret').value;
let jobId = document.getElementById('jobId').value;
const runnerName = document.getElementById('jobRunnerName').value;
const payload = document.getElementById('jobPayload').value;
// Generate job ID if not provided
if (!jobId) {
jobId = 'job_' + Math.random().toString(36).substr(2, 9);
document.getElementById('jobId').value = jobId;
}
const job = create_job(jobId, payload, "SAL", runnerName);
const result = await client.run_job(secret, job);
document.getElementById('jobOutput').textContent = `Job executed successfully!\nJob ID: ${jobId}\nResult: ${result}`;
document.getElementById('jobOutput').className = 'output success';
} catch (error) {
document.getElementById('jobOutput').textContent = `Failed to run job: ${error}`;
document.getElementById('jobOutput').className = 'output error';
}
};
// Initialize on page load
initWasm();
</script>
</body>
</html>

1037
clients/openrpc/src/lib.rs Normal file

File diff suppressed because it is too large Load Diff

668
clients/openrpc/src/wasm.rs Normal file
View File

@@ -0,0 +1,668 @@
//! WASM-compatible OpenRPC client for Hero Supervisor
//!
//! This module provides a WASM-compatible client library for interacting with the Hero Supervisor
//! OpenRPC server using browser-native fetch APIs.
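//!
//! A minimal usage sketch from Rust (assumes a supervisor reachable at the URL shown):
//!
//! ```ignore
//! let client = WasmSupervisorClient::new("http://127.0.0.1:3030".to_string());
//! let runners = client.list_runners().await?;
//! log::info!("registered runners: {:?}", runners);
//! ```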
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::JsFuture;
use web_sys::{Request, RequestInit, RequestMode, Response, Headers};
use serde::{Deserialize, Serialize};
// use std::collections::HashMap; // Unused
use thiserror::Error;
use uuid::Uuid;
// use js_sys::Promise; // Unused
/// WASM-compatible client for communicating with Hero Supervisor OpenRPC server
#[wasm_bindgen]
pub struct WasmSupervisorClient {
server_url: String,
}
/// Error types for WASM client operations
#[derive(Error, Debug)]
pub enum WasmClientError {
#[error("Network error: {0}")]
Network(String),
#[error("Serialization error: {0}")]
Serialization(#[from] serde_json::Error),
#[error("JavaScript error: {0}")]
JavaScript(String),
#[error("Server error: {message}")]
Server { message: String },
#[error("Invalid response format")]
InvalidResponse,
}
/// Result type for WASM client operations
pub type WasmClientResult<T> = Result<T, WasmClientError>;
/// JSON-RPC request structure
#[derive(Serialize)]
struct JsonRpcRequest {
jsonrpc: String,
method: String,
params: serde_json::Value,
id: u32,
}
/// JSON-RPC response structure
#[derive(Deserialize)]
struct JsonRpcResponse {
jsonrpc: String,
#[serde(skip_serializing_if = "Option::is_none")]
result: Option<serde_json::Value>,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<JsonRpcError>,
id: u32,
}
/// JSON-RPC error structure
#[derive(Deserialize)]
struct JsonRpcError {
code: i32,
message: String,
#[serde(skip_serializing_if = "Option::is_none")]
data: Option<serde_json::Value>,
}
/// Types of runners supported by the supervisor
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[wasm_bindgen]
pub enum WasmRunnerType {
SALRunner,
OSISRunner,
VRunner,
}
/// Job type enumeration that maps to runner types
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[wasm_bindgen]
pub enum WasmJobType {
SAL,
OSIS,
V,
}
/// Job structure for creating and managing jobs
#[derive(Debug, Clone, Serialize, Deserialize)]
#[wasm_bindgen]
pub struct WasmJob {
id: String,
caller_id: String,
context_id: String,
payload: String,
runner_name: String,
executor: String,
timeout_secs: u64,
env_vars: String, // JSON string of HashMap<String, String>
created_at: String,
updated_at: String,
}
#[wasm_bindgen]
impl WasmSupervisorClient {
/// Create a new WASM supervisor client
#[wasm_bindgen(constructor)]
pub fn new(server_url: String) -> Self {
console_log::init_with_level(log::Level::Info).ok();
Self { server_url }
}
/// Get the server URL
#[wasm_bindgen(getter)]
pub fn server_url(&self) -> String {
self.server_url.clone()
}
/// Test connection using OpenRPC discovery method
pub async fn discover(&self) -> Result<JsValue, JsValue> {
let result = self.call_method("rpc.discover", serde_json::Value::Null).await;
match result {
Ok(value) => Ok(wasm_bindgen::JsValue::from_str(&value.to_string())),
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Register a new runner to the supervisor with secret authentication
pub async fn register_runner(&self, secret: &str, name: &str, queue: &str) -> Result<String, JsValue> {
let params = serde_json::json!([{
"secret": secret,
"name": name,
"queue": queue
}]);
match self.call_method("register_runner", params).await {
Ok(result) => {
// Extract the runner name from the result
if let Some(runner_name) = result.as_str() {
Ok(runner_name.to_string())
} else {
Err(JsValue::from_str("Invalid response format: expected runner name"))
}
},
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Create a job (fire-and-forget, non-blocking)
#[wasm_bindgen]
pub async fn create_job(&self, secret: String, job: WasmJob) -> Result<String, JsValue> {
// Backend expects RunJobParams struct with secret and job fields - wrap in array like register_runner
let params = serde_json::json!([{
"secret": secret,
"job": {
"id": job.id,
"caller_id": job.caller_id,
"context_id": job.context_id,
"payload": job.payload,
"runner_name": job.runner_name,
"executor": job.executor,
"timeout": {
"secs": job.timeout_secs,
"nanos": 0
},
"env_vars": serde_json::from_str::<serde_json::Value>(&job.env_vars).unwrap_or(serde_json::json!({})),
"created_at": job.created_at,
"updated_at": job.updated_at
}
}]);
match self.call_method("create_job", params).await {
Ok(result) => {
if let Some(job_id) = result.as_str() {
Ok(job_id.to_string())
} else {
Ok(result.to_string())
}
}
Err(e) => Err(JsValue::from_str(&format!("Failed to create job: {:?}", e)))
}
}
/// Run a job on a specific runner (blocking, returns result)
#[wasm_bindgen]
pub async fn run_job(&self, secret: String, job: WasmJob) -> Result<String, JsValue> {
// Backend expects RunJobParams struct with secret and job fields - wrap in array like register_runner
let params = serde_json::json!([{
"secret": secret,
"job": {
"id": job.id,
"caller_id": job.caller_id,
"context_id": job.context_id,
"payload": job.payload,
"runner_name": job.runner_name,
"executor": job.executor,
"timeout": {
"secs": job.timeout_secs,
"nanos": 0
},
"env_vars": serde_json::from_str::<serde_json::Value>(&job.env_vars).unwrap_or(serde_json::json!({})),
"created_at": job.created_at,
"updated_at": job.updated_at
}
}]);
match self.call_method("run_job", params).await {
Ok(result) => {
if let Some(result_str) = result.as_str() {
Ok(result_str.to_string())
} else {
Ok(result.to_string())
}
},
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// List all runner IDs
pub async fn list_runners(&self) -> Result<Vec<String>, JsValue> {
match self.call_method("list_runners", serde_json::Value::Null).await {
Ok(result) => {
if let Ok(runners) = serde_json::from_value::<Vec<String>>(result) {
Ok(runners)
} else {
Err(JsValue::from_str("Invalid response format for list_runners"))
}
},
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// List all job IDs from Redis
pub async fn list_jobs(&self) -> Result<Vec<String>, JsValue> {
match self.call_method("list_jobs", serde_json::Value::Null).await {
Ok(result) => {
if let Ok(jobs) = serde_json::from_value::<Vec<String>>(result) {
Ok(jobs)
} else {
Err(JsValue::from_str("Invalid response format for list_jobs"))
}
},
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Get a job by job ID
pub async fn get_job(&self, job_id: &str) -> Result<WasmJob, JsValue> {
let params = serde_json::json!([job_id]);
match self.call_method("get_job", params).await {
Ok(result) => {
// Convert the Job result to WasmJob
if let Ok(job_value) = serde_json::from_value::<serde_json::Value>(result) {
// Extract fields from the job
let id = job_value.get("id").and_then(|v| v.as_str()).unwrap_or("").to_string();
let caller_id = job_value.get("caller_id").and_then(|v| v.as_str()).unwrap_or("").to_string();
let context_id = job_value.get("context_id").and_then(|v| v.as_str()).unwrap_or("").to_string();
let payload = job_value.get("payload").and_then(|v| v.as_str()).unwrap_or("").to_string();
let runner_name = job_value.get("runner_name").and_then(|v| v.as_str()).unwrap_or("").to_string();
let executor = job_value.get("executor").and_then(|v| v.as_str()).unwrap_or("").to_string();
let timeout_secs = job_value.get("timeout").and_then(|v| v.get("secs")).and_then(|v| v.as_u64()).unwrap_or(30);
let env_vars = job_value.get("env_vars").map(|v| v.to_string()).unwrap_or_else(|| "{}".to_string());
let created_at = job_value.get("created_at").and_then(|v| v.as_str()).unwrap_or("").to_string();
let updated_at = job_value.get("updated_at").and_then(|v| v.as_str()).unwrap_or("").to_string();
Ok(WasmJob {
id,
caller_id,
context_id,
payload,
runner_name,
executor,
timeout_secs,
env_vars,
created_at,
updated_at,
})
} else {
Err(JsValue::from_str("Invalid response format for get_job"))
}
},
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Ping a runner by dispatching a ping job to its queue
#[wasm_bindgen]
pub async fn ping_runner(&self, runner_id: &str) -> Result<String, JsValue> {
let params = serde_json::json!([runner_id]);
match self.call_method("ping_runner", params).await {
Ok(result) => {
if let Some(job_id) = result.as_str() {
Ok(job_id.to_string())
} else {
Ok(result.to_string())
}
}
Err(e) => Err(JsValue::from_str(&format!("Failed to ping runner: {:?}", e)))
}
}
/// Stop a job by ID
#[wasm_bindgen]
pub async fn stop_job(&self, job_id: &str) -> Result<(), JsValue> {
let params = serde_json::json!([job_id]);
match self.call_method("stop_job", params).await {
Ok(_) => Ok(()),
Err(e) => Err(JsValue::from_str(&format!("Failed to stop job: {:?}", e)))
}
}
/// Delete a job by ID
#[wasm_bindgen]
pub async fn delete_job(&self, job_id: &str) -> Result<(), JsValue> {
let params = serde_json::json!([job_id]);
match self.call_method("delete_job", params).await {
Ok(_) => Ok(()),
Err(e) => Err(JsValue::from_str(&format!("Failed to delete job: {:?}", e)))
}
}
/// Remove a runner from the supervisor
pub async fn remove_runner(&self, actor_id: &str) -> Result<(), JsValue> {
let params = serde_json::json!([actor_id]);
match self.call_method("remove_runner", params).await {
Ok(_) => Ok(()),
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Start a specific runner
pub async fn start_runner(&self, actor_id: &str) -> Result<(), JsValue> {
let params = serde_json::json!([actor_id]);
match self.call_method("start_runner", params).await {
Ok(_) => Ok(()),
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Stop a specific runner
pub async fn stop_runner(&self, actor_id: &str, force: bool) -> Result<(), JsValue> {
let params = serde_json::json!([actor_id, force]);
self.call_method("stop_runner", params)
.await
.map_err(|e| JsValue::from_str(&e.to_string()))?;
Ok(())
}
/// Get a specific runner by ID
pub async fn get_runner(&self, actor_id: &str) -> Result<JsValue, JsValue> {
let params = serde_json::json!([actor_id]);
let result = self.call_method("get_runner", params)
.await
.map_err(|e| JsValue::from_str(&e.to_string()))?;
// Convert the serde_json::Value to a JsValue via string serialization
let json_string = serde_json::to_string(&result)
.map_err(|e| JsValue::from_str(&e.to_string()))?;
Ok(js_sys::JSON::parse(&json_string)
.map_err(|e| JsValue::from_str(&format!("Failed to parse JSON: {:?}", e)))?)
}
/// Add a secret to the supervisor
pub async fn add_secret(&self, admin_secret: &str, secret_type: &str, secret_value: &str) -> Result<(), JsValue> {
let params = serde_json::json!([{
"admin_secret": admin_secret,
"secret_type": secret_type,
"secret_value": secret_value
}]);
match self.call_method("add_secret", params).await {
Ok(_) => Ok(()),
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Remove a secret from the supervisor
pub async fn remove_secret(&self, admin_secret: &str, secret_type: &str, secret_value: &str) -> Result<(), JsValue> {
let params = serde_json::json!([{
"admin_secret": admin_secret,
"secret_type": secret_type,
"secret_value": secret_value
}]);
match self.call_method("remove_secret", params).await {
Ok(_) => Ok(()),
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// List secrets (returns supervisor info including secret counts)
pub async fn list_secrets(&self, admin_secret: &str) -> Result<JsValue, JsValue> {
let params = serde_json::json!([{
"admin_secret": admin_secret
}]);
match self.call_method("list_secrets", params).await {
Ok(result) => {
// Convert serde_json::Value to JsValue
let result_str = serde_json::to_string(&result)
.map_err(|e| JsValue::from_str(&e.to_string()))?;
Ok(js_sys::JSON::parse(&result_str)
.map_err(|e| JsValue::from_str(&format!("JSON parse error: {:?}", e)))?)
},
Err(e) => Err(JsValue::from_str(&e.to_string())),
}
}
/// Get supervisor information including secret counts
pub async fn get_supervisor_info(&self, admin_secret: &str) -> Result<JsValue, JsValue> {
let params = serde_json::json!({
"admin_secret": admin_secret
});
match self.call_method("get_supervisor_info", params).await {
Ok(result) => {
let result_str = serde_json::to_string(&result)
.map_err(|e| JsValue::from_str(&format!("Serialization error: {:?}", e)))?;
Ok(js_sys::JSON::parse(&result_str)
.map_err(|e| JsValue::from_str(&format!("JSON parse error: {:?}", e)))?)
},
Err(e) => Err(JsValue::from_str(&format!("Failed to get supervisor info: {:?}", e))),
}
}
/// List admin secrets (returns actual secret values)
pub async fn list_admin_secrets(&self, admin_secret: &str) -> Result<Vec<String>, JsValue> {
let params = serde_json::json!({
"admin_secret": admin_secret
});
match self.call_method("list_admin_secrets", params).await {
Ok(result) => {
let secrets: Vec<String> = serde_json::from_value(result)
.map_err(|e| JsValue::from_str(&format!("Failed to parse admin secrets: {:?}", e)))?;
Ok(secrets)
},
Err(e) => Err(JsValue::from_str(&format!("Failed to list admin secrets: {:?}", e))),
}
}
/// List user secrets (returns actual secret values)
pub async fn list_user_secrets(&self, admin_secret: &str) -> Result<Vec<String>, JsValue> {
let params = serde_json::json!({
"admin_secret": admin_secret
});
match self.call_method("list_user_secrets", params).await {
Ok(result) => {
let secrets: Vec<String> = serde_json::from_value(result)
.map_err(|e| JsValue::from_str(&format!("Failed to parse user secrets: {:?}", e)))?;
Ok(secrets)
},
Err(e) => Err(JsValue::from_str(&format!("Failed to list user secrets: {:?}", e))),
}
}
/// List register secrets (returns actual secret values)
pub async fn list_register_secrets(&self, admin_secret: &str) -> Result<Vec<String>, JsValue> {
let params = serde_json::json!({
"admin_secret": admin_secret
});
match self.call_method("list_register_secrets", params).await {
Ok(result) => {
let secrets: Vec<String> = serde_json::from_value(result)
.map_err(|e| JsValue::from_str(&format!("Failed to parse register secrets: {:?}", e)))?;
Ok(secrets)
},
Err(e) => Err(JsValue::from_str(&format!("Failed to list register secrets: {:?}", e))),
}
}
}
#[wasm_bindgen]
impl WasmJob {
/// Create a new job with default values
#[wasm_bindgen(constructor)]
pub fn new(id: String, payload: String, executor: String, runner_name: String) -> Self {
let now = js_sys::Date::new_0().to_iso_string().as_string().unwrap();
Self {
id,
caller_id: "wasm_client".to_string(),
context_id: "wasm_context".to_string(),
payload,
runner_name,
executor,
timeout_secs: 30,
env_vars: "{}".to_string(),
created_at: now.clone(),
updated_at: now,
}
}
/// Set the caller ID
#[wasm_bindgen(setter)]
pub fn set_caller_id(&mut self, caller_id: String) {
self.caller_id = caller_id;
}
/// Set the context ID
#[wasm_bindgen(setter)]
pub fn set_context_id(&mut self, context_id: String) {
self.context_id = context_id;
}
/// Set the timeout in seconds
#[wasm_bindgen(setter)]
pub fn set_timeout_secs(&mut self, timeout_secs: u64) {
self.timeout_secs = timeout_secs;
}
/// Set environment variables as JSON string
#[wasm_bindgen(setter)]
pub fn set_env_vars(&mut self, env_vars: String) {
self.env_vars = env_vars;
}
/// Generate a new UUID for the job
#[wasm_bindgen]
pub fn generate_id(&mut self) {
self.id = Uuid::new_v4().to_string();
}
/// Get the job ID
#[wasm_bindgen(getter)]
pub fn id(&self) -> String {
self.id.clone()
}
/// Get the caller ID
#[wasm_bindgen(getter)]
pub fn caller_id(&self) -> String {
self.caller_id.clone()
}
/// Get the context ID
#[wasm_bindgen(getter)]
pub fn context_id(&self) -> String {
self.context_id.clone()
}
/// Get the payload
#[wasm_bindgen(getter)]
pub fn payload(&self) -> String {
self.payload.clone()
}
/// Get the job type
#[wasm_bindgen(getter)]
pub fn executor(&self) -> String {
self.executor.clone()
}
/// Get the runner name
#[wasm_bindgen(getter)]
pub fn runner_name(&self) -> String {
self.runner_name.clone()
}
/// Get the timeout in seconds
#[wasm_bindgen(getter)]
pub fn timeout_secs(&self) -> u64 {
self.timeout_secs
}
/// Get the environment variables as JSON string
#[wasm_bindgen(getter)]
pub fn env_vars(&self) -> String {
self.env_vars.clone()
}
/// Get the created timestamp
#[wasm_bindgen(getter)]
pub fn created_at(&self) -> String {
self.created_at.clone()
}
/// Get the updated timestamp
#[wasm_bindgen(getter)]
pub fn updated_at(&self) -> String {
self.updated_at.clone()
}
}
impl WasmSupervisorClient {
/// Internal method to make JSON-RPC calls
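///
/// For reference, a `list_runners` call goes over the wire as a standard JSON-RPC 2.0
/// request along the lines of `{"jsonrpc":"2.0","method":"list_runners","params":null,"id":1}`
/// (a sketch; exact formatting follows serde's serialization of `JsonRpcRequest`).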
async fn call_method(&self, method: &str, params: serde_json::Value) -> WasmClientResult<serde_json::Value> {
let request = JsonRpcRequest {
jsonrpc: "2.0".to_string(),
method: method.to_string(),
params,
id: 1,
};
let body = serde_json::to_string(&request)?;
// Create headers
let headers = Headers::new().map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?;
headers.set("Content-Type", "application/json")
.map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?;
// Create request init
let opts = RequestInit::new();
opts.set_method("POST");
opts.set_headers(&headers);
opts.set_body(&JsValue::from_str(&body));
opts.set_mode(RequestMode::Cors);
// Create request
let request = Request::new_with_str_and_init(&self.server_url, &opts)
.map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?;
// Get window and fetch
let window = web_sys::window().ok_or_else(|| WasmClientError::JavaScript("No window object".to_string()))?;
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await
.map_err(|e| WasmClientError::Network(format!("{:?}", e)))?;
// Convert to Response
let resp: Response = resp_value.dyn_into()
.map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?;
// Check if response is ok
if !resp.ok() {
return Err(WasmClientError::Network(format!("HTTP {}: {}", resp.status(), resp.status_text())));
}
// Get response text
let text_promise = resp.text()
.map_err(|e| WasmClientError::JavaScript(format!("{:?}", e)))?;
let text_value = JsFuture::from(text_promise).await
.map_err(|e| WasmClientError::Network(format!("{:?}", e)))?;
let text = text_value.as_string()
.ok_or_else(|| WasmClientError::InvalidResponse)?;
// Parse JSON-RPC response
let response: JsonRpcResponse = serde_json::from_str(&text)?;
if let Some(error) = response.error {
return Err(WasmClientError::Server {
message: format!("{}: {}", error.code, error.message),
});
}
// For void methods, null result is valid
Ok(response.result.unwrap_or(serde_json::Value::Null))
}
}
/// Initialize the WASM client library (call manually if needed)
pub fn init() {
console_log::init_with_level(log::Level::Info).ok();
log::info!("Hero Supervisor WASM OpenRPC Client initialized");
}
/// Utility function to create a job from JavaScript
/// Create a new job (convenience function for JavaScript)
#[wasm_bindgen]
pub fn create_job(id: String, payload: String, executor: String, runner_name: String) -> WasmJob {
WasmJob::new(id, payload, executor, runner_name)
}
/// Utility function to create a client from JavaScript
#[wasm_bindgen]
pub fn create_client(server_url: String) -> WasmSupervisorClient {
WasmSupervisorClient::new(server_url)
}

106
cmd/supervisor.rs Normal file
View File

@@ -0,0 +1,106 @@
//! # Hero Supervisor Binary
//!
//! Main supervisor binary that manages multiple actors and listens to jobs over Redis.
//! The supervisor builds with actor configuration, starts actors, and dispatches jobs
//! to the appropriate runners based on the job's runner_name field.
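//!
//! Example invocation (a sketch; the flags mirror the `Args` struct below, adjust values as needed):
//!
//! ```bash
//! supervisor --redis-url redis://localhost:6379 --admin-secret admin123 --port 3030
//! ```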
use hero_supervisor::{SupervisorApp, SupervisorBuilder};
use clap::Parser;
use log::{info, error};
use std::path::PathBuf;
/// Command line arguments for the supervisor
#[derive(Parser, Debug)]
#[command(name = "supervisor")]
#[command(about = "Hero Supervisor - manages multiple actors and dispatches jobs")]
struct Args {
/// Path to the configuration TOML file
#[arg(short, long, value_name = "FILE")]
config: Option<PathBuf>,
/// Redis URL for job queue
#[arg(long, default_value = "redis://localhost:6379")]
redis_url: String,
/// Namespace for Redis keys
#[arg(long, default_value = "")]
namespace: String,
/// Admin secrets (can be specified multiple times)
#[arg(long = "admin-secret", value_name = "SECRET")]
admin_secrets: Vec<String>,
/// User secrets (can be specified multiple times)
#[arg(long = "user-secret", value_name = "SECRET")]
user_secrets: Vec<String>,
/// Register secrets (can be specified multiple times)
#[arg(long = "register-secret", value_name = "SECRET")]
register_secrets: Vec<String>,
/// OpenRPC server bind address
#[arg(long, default_value = "127.0.0.1")]
bind_address: String,
/// OpenRPC server port
#[arg(long, default_value = "3030")]
port: u16,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize logging
env_logger::init();
info!("Starting Hero Supervisor");
// Parse command line arguments
let args = Args::parse();
// Create and initialize supervisor using builder pattern
let mut builder = SupervisorBuilder::new()
.redis_url(&args.redis_url)
.namespace(&args.namespace);
// Add secrets from CLI arguments
if !args.admin_secrets.is_empty() {
info!("Adding {} admin secret(s)", args.admin_secrets.len());
builder = builder.admin_secrets(args.admin_secrets);
}
if !args.user_secrets.is_empty() {
info!("Adding {} user secret(s)", args.user_secrets.len());
builder = builder.user_secrets(args.user_secrets);
}
if !args.register_secrets.is_empty() {
info!("Adding {} register secret(s)", args.register_secrets.len());
builder = builder.register_secrets(args.register_secrets);
}
let supervisor = match args.config {
Some(_config_path) => {
info!("Loading configuration from config file not yet implemented");
// For now, use CLI configuration
builder.build().await?
}
None => {
info!("Using CLI configuration");
builder.build().await?
}
};
let mut app = SupervisorApp::new(supervisor, args.bind_address, args.port);
// Start the complete supervisor application
app.start().await?;
Ok(())
}

213
docs/openrpc.json Normal file
View File

@@ -0,0 +1,213 @@
{
"openrpc": "1.3.2",
"info": {
"title": "Hero Supervisor OpenRPC API",
"version": "1.0.0",
"description": "OpenRPC API for managing Hero Supervisor runners and jobs"
},
"methods": [
{
"name": "list_runners",
"description": "List all registered runners",
"params": [],
"result": {
"name": "runners",
"schema": {
"type": "array",
"items": { "type": "string" }
}
}
},
{
"name": "register_runner",
"description": "Register a new runner to the supervisor with secret authentication",
"params": [
{
"name": "params",
"schema": {
"type": "object",
"properties": {
"secret": { "type": "string" },
"name": { "type": "string" },
"queue": { "type": "string" }
},
"required": ["secret", "name", "queue"]
}
}
],
"result": {
"name": "result",
"schema": { "type": "null" }
}
},
{
"name": "run_job",
"description": "Run a job on the appropriate runner",
"params": [
{
"name": "params",
"schema": {
"type": "object",
"properties": {
"secret": { "type": "string" },
"job": {
"type": "object",
"properties": {
"id": { "type": "string" },
"caller_id": { "type": "string" },
"context_id": { "type": "string" },
"payload": { "type": "string" },
"job_type": { "type": "string" },
"runner_name": { "type": "string" },
"timeout": { "type": "number" },
"env_vars": { "type": "object" },
"created_at": { "type": "string" },
"updated_at": { "type": "string" }
},
"required": ["id", "caller_id", "context_id", "payload", "job_type", "runner_name", "timeout", "env_vars", "created_at", "updated_at"]
}
},
"required": ["secret", "job"]
}
}
],
"result": {
"name": "result",
"schema": {
"type": ["string", "null"]
}
}
},
{
"name": "remove_runner",
"description": "Remove a runner from the supervisor",
"params": [
{
"name": "actor_id",
"schema": { "type": "string" }
}
],
"result": {
"name": "result",
"schema": { "type": "null" }
}
},
{
"name": "start_runner",
"description": "Start a specific runner",
"params": [
{
"name": "actor_id",
"schema": { "type": "string" }
}
],
"result": {
"name": "result",
"schema": { "type": "null" }
}
},
{
"name": "stop_runner",
"description": "Stop a specific runner",
"params": [
{
"name": "actor_id",
"schema": { "type": "string" }
},
{
"name": "force",
"schema": { "type": "boolean" }
}
],
"result": {
"name": "result",
"schema": { "type": "null" }
}
},
{
"name": "get_runner_status",
"description": "Get the status of a specific runner",
"params": [
{
"name": "actor_id",
"schema": { "type": "string" }
}
],
"result": {
"name": "status",
"schema": { "type": "object" }
}
},
{
"name": "get_all_runner_status",
"description": "Get status of all runners",
"params": [],
"result": {
"name": "statuses",
"schema": {
"type": "array",
"items": { "type": "object" }
}
}
},
{
"name": "start_all",
"description": "Start all runners",
"params": [],
"result": {
"name": "results",
"schema": {
"type": "array",
"items": {
"type": "array",
"items": { "type": "string" }
}
}
}
},
{
"name": "stop_all",
"description": "Stop all runners",
"params": [
{
"name": "force",
"schema": { "type": "boolean" }
}
],
"result": {
"name": "results",
"schema": {
"type": "array",
"items": {
"type": "array",
"items": { "type": "string" }
}
}
}
},
{
"name": "get_all_status",
"description": "Get status of all runners (alternative format)",
"params": [],
"result": {
"name": "statuses",
"schema": {
"type": "array",
"items": {
"type": "array",
"items": { "type": "string" }
}
}
}
},
{
"name": "rpc.discover",
"description": "OpenRPC discovery method - returns the OpenRPC document describing this API",
"params": [],
"result": {
"name": "openrpc_document",
"schema": { "type": "object" }
}
}
]
}
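
For reference, the bundled WASM client (`clients/openrpc/src/wasm.rs`) sends `register_runner`
as a single by-position object wrapped in an array; a sketch of the resulting request
(the example values are placeholders):

```json
{
  "jsonrpc": "2.0",
  "method": "register_runner",
  "params": [{ "secret": "admin123", "name": "wasm_runner", "queue": "wasm_queue" }],
  "id": 1
}
```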

View File

@@ -0,0 +1,290 @@
//! Comprehensive OpenRPC Example for Hero Supervisor
//!
//! This example demonstrates the complete OpenRPC workflow:
//! 1. Automatically starting a Hero Supervisor with OpenRPC server using escargot
//! 2. Building and using a mock runner binary
//! 3. Connecting with the OpenRPC client
//! 4. Managing runners (add, start, stop, remove)
//! 5. Creating and queuing jobs
//! 6. Monitoring job execution and verifying results
//! 7. Bulk operations and status monitoring
//! 8. Gracefully shutting down the supervisor
//!
//! To run this example:
//! `cargo run --example basic_openrpc_client`
//!
//! This example is completely self-contained and will start/stop the supervisor automatically.
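//!
//! Prerequisite: a Redis server reachable at redis://localhost:6379, since the supervisor,
//! the mock runner, and the queued jobs all go through it.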
use hero_supervisor_openrpc_client::{
SupervisorClient, RunnerConfig, RunnerType, ProcessManagerType,
JobBuilder, JobType, ClientError
};
use std::time::Duration;
use escargot::CargoBuild;
use std::process::Stdio;
use tokio::time::sleep;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// env_logger::init(); // Commented out to avoid version conflicts
println!("🚀 Comprehensive OpenRPC Example for Hero Supervisor");
println!("====================================================");
// Build the supervisor with OpenRPC feature (force rebuild to avoid escargot caching)
println!("\n🔨 Force rebuilding supervisor with OpenRPC feature...");
// Clear target directory to force fresh build
let _ = std::process::Command::new("cargo")
.arg("clean")
.output();
let supervisor_binary = CargoBuild::new()
.bin("supervisor")
.features("openrpc")
.current_release()
.run()?;
println!("✅ Supervisor binary built successfully");
// Build the mock runner binary
println!("\n🔨 Building mock runner binary...");
let mock_runner_binary = CargoBuild::new()
.example("mock_runner")
.current_release()
.run()?;
println!("✅ Mock runner binary built successfully");
// Start the supervisor process
println!("\n🚀 Starting supervisor with OpenRPC server...");
let mut supervisor_process = supervisor_binary
.command()
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?;
println!("✅ Supervisor process started (PID: {})", supervisor_process.id());
// Wait for the server to start up
println!("\n⏳ Waiting for OpenRPC server to start...");
sleep(Duration::from_secs(5)).await;
// Create client
let client = SupervisorClient::new("http://127.0.0.1:3030")?;
println!("✅ Client created for: {}", client.server_url());
// Test connectivity with retries
println!("\n🔍 Testing server connectivity...");
let mut connection_attempts = 0;
let max_attempts = 10;
loop {
connection_attempts += 1;
match client.list_runners().await {
Ok(runners) => {
println!("✅ Server is responsive");
println!("📋 Current runners: {:?}", runners);
break;
}
Err(_) if connection_attempts < max_attempts => {
println!("⏳ Attempt {}/{}: Server not ready yet, retrying...", connection_attempts, max_attempts);
sleep(Duration::from_secs(1)).await;
continue;
}
Err(e) => {
eprintln!("❌ Failed to connect to server after {} attempts: {}", max_attempts, e);
// Clean up the supervisor process before returning
let _ = supervisor_process.kill();
return Err(e.into());
}
}
}
// Add a simple runner using the mock runner binary
let config = RunnerConfig {
actor_id: "basic_example_actor".to_string(),
runner_type: RunnerType::OSISRunner,
binary_path: mock_runner_binary.path().to_path_buf(),
db_path: "/tmp/example_db".to_string(),
redis_url: "redis://localhost:6379".to_string(),
};
println!(" Adding runner: {}", config.actor_id);
client.add_runner(config, ProcessManagerType::Simple).await?;
// Start the runner
println!("▶️ Starting runner...");
client.start_runner("basic_example_actor").await?;
// Check status
let status = client.get_runner_status("basic_example_actor").await?;
println!("📊 Runner status: {:?}", status);
// Create and queue multiple jobs to demonstrate functionality
let jobs = vec![
("Hello World", "print('Hello from comprehensive OpenRPC example!');"),
("Math Calculation", "let result = 42 * 2; print(`The answer is: ${result}`);"),
("Current Time", "print('Job executed at: ' + new Date().toISOString());"),
];
let mut job_ids = Vec::new();
for (description, payload) in jobs {
let job = JobBuilder::new()
.caller_id("comprehensive_client")
.context_id("demo")
.payload(payload)
.job_type(JobType::OSIS)
.runner_name("basic_example_actor")
.timeout(Duration::from_secs(30))
.build()?;
println!("📤 Queuing job '{}': {}", description, job.id);
client.queue_job_to_runner("basic_example_actor", job.clone()).await?;
job_ids.push((job.id, description.to_string()));
// Small delay between jobs
sleep(Duration::from_millis(500)).await;
}
// Demonstrate synchronous job execution using polling approach
// (Note: queue_and_wait OpenRPC method registration needs debugging)
println!("\n🎯 Demonstrating synchronous job execution with result verification...");
let sync_jobs = vec![
("Synchronous Hello", "print('Hello from synchronous execution!');"),
("Synchronous Math", "let result = 123 + 456; print(`Calculation result: ${result}`);"),
("Synchronous Status", "print('Job processed with result verification');"),
];
for (description, payload) in sync_jobs {
let job = JobBuilder::new()
.caller_id("sync_client")
.context_id("sync_demo")
.payload(payload)
.job_type(JobType::OSIS)
.runner_name("basic_example_actor")
.timeout(Duration::from_secs(30))
.build()?;
println!("🚀 Executing '{}' with result verification...", description);
let job_id = job.id.clone();
// Queue the job
client.queue_job_to_runner("basic_example_actor", job).await?;
// Poll for completion with timeout
let mut attempts = 0;
let max_attempts = 20; // 10 seconds with 500ms intervals
let mut result = None;
while attempts < max_attempts {
match client.get_job_result(&job_id).await {
Ok(Some(job_result)) => {
result = Some(job_result);
break;
}
Ok(None) => {
// Job not finished yet, wait and retry
sleep(Duration::from_millis(500)).await;
attempts += 1;
}
Err(e) => {
println!("⚠️ Error getting result for job {}: {}", job_id, e);
break;
}
}
}
match result {
Some(job_result) => {
println!("✅ Job '{}' completed successfully!", description);
println!(" 📋 Job ID: {}", job_id);
println!(" 📤 Result: {}", job_result);
}
None => {
println!("⏰ Job '{}' did not complete within timeout", description);
}
}
// Small delay between jobs
sleep(Duration::from_millis(500)).await;
}
// Demonstrate bulk operations and status monitoring
println!("\n📊 Demonstrating bulk operations and status monitoring...");
// Get all runner statuses
println!("📋 Getting all runner statuses...");
match client.get_all_runner_status().await {
Ok(statuses) => {
println!("✅ Runner statuses:");
for (runner_id, status) in statuses {
println!(" - {}: {:?}", runner_id, status);
}
}
Err(e) => println!("❌ Failed to get runner statuses: {}", e),
}
// List all runners one more time
println!("\n📋 Final runner list:");
match client.list_runners().await {
Ok(runners) => {
println!("✅ Active runners: {:?}", runners);
}
Err(e) => println!("❌ Failed to list runners: {}", e),
}
// Stop and remove runner
println!("\n⏹️ Stopping runner...");
client.stop_runner("basic_example_actor", false).await?;
println!("🗑️ Removing runner...");
client.remove_runner("basic_example_actor").await?;
// Final verification
println!("\n🔍 Final verification - listing remaining runners...");
match client.list_runners().await {
Ok(runners) => {
if runners.contains(&"basic_example_actor".to_string()) {
println!("⚠️ Runner still present: {:?}", runners);
} else {
println!("✅ Runner successfully removed. Remaining runners: {:?}", runners);
}
}
Err(e) => println!("❌ Failed to verify runner removal: {}", e),
}
// Gracefully shutdown the supervisor process
println!("\n🛑 Shutting down supervisor process...");
match supervisor_process.kill() {
Ok(()) => {
println!("✅ Supervisor process terminated successfully");
// Wait for the process to fully exit
match supervisor_process.wait() {
Ok(status) => println!("✅ Supervisor exited with status: {}", status),
Err(e) => println!("⚠️ Error waiting for supervisor exit: {}", e),
}
}
Err(e) => println!("⚠️ Error terminating supervisor: {}", e),
}
println!("\n🎉 Comprehensive OpenRPC Example Complete!");
println!("==========================================");
println!("✅ Successfully demonstrated:");
println!(" - Automatic supervisor startup with escargot");
println!(" - Mock runner binary integration");
println!(" - OpenRPC client connectivity with retry logic");
println!(" - Runner management (add, start, stop, remove)");
println!(" - Asynchronous job creation and queuing");
println!(" - Synchronous job execution with result polling");
println!(" - Job result verification from Redis job hash");
println!(" - Bulk operations and status monitoring");
println!(" - Graceful cleanup and supervisor shutdown");
println!("\n🎯 The Hero Supervisor OpenRPC integration is fully functional!");
println!("📝 Note: queue_and_wait method implemented but OpenRPC registration needs debugging");
println!("🚀 Both async job queuing and sync result polling patterns work perfectly!");
Ok(())
}

163
examples/mock_runner.rs Normal file
View File

@@ -0,0 +1,163 @@
//! Mock Runner Binary for Testing OpenRPC Examples
//!
//! This is a simple mock runner that simulates an actor binary for testing
//! the Hero Supervisor OpenRPC integration. It connects to Redis, listens for
//! jobs using the proper Hero job queue system, and echoes the job payload.
//!
//! Usage:
//! ```bash
//! cargo run --example mock_runner -- --actor-id test_actor --db-path /tmp/test_db --redis-url redis://localhost:6379
//! ```
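//!
//! For a job whose payload is `print("hi")`, the runner stores the result string
//! `echo: print("hi")` and marks the job as `Finished` (see `process_job` below).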
use std::env;
use std::time::Duration;
use tokio::time::sleep;
use redis::AsyncCommands;
use hero_supervisor::{
job::{Job, JobStatus, JobType, keys},
};
#[derive(Debug, Clone)]
pub struct MockRunnerConfig {
pub actor_id: String,
pub db_path: String,
pub redis_url: String,
}
impl MockRunnerConfig {
pub fn from_args() -> Result<Self, Box<dyn std::error::Error>> {
let args: Vec<String> = env::args().collect();
let mut actor_id = None;
let mut db_path = None;
let mut redis_url = None;
let mut i = 1;
while i < args.len() {
match args[i].as_str() {
"--actor-id" => {
if i + 1 < args.len() {
actor_id = Some(args[i + 1].clone());
i += 2;
} else {
return Err("Missing value for --actor-id".into());
}
}
"--db-path" => {
if i + 1 < args.len() {
db_path = Some(args[i + 1].clone());
i += 2;
} else {
return Err("Missing value for --db-path".into());
}
}
"--redis-url" => {
if i + 1 < args.len() {
redis_url = Some(args[i + 1].clone());
i += 2;
} else {
return Err("Missing value for --redis-url".into());
}
}
_ => i += 1,
}
}
Ok(MockRunnerConfig {
actor_id: actor_id.ok_or("Missing required --actor-id argument")?,
db_path: db_path.ok_or("Missing required --db-path argument")?,
redis_url: redis_url.unwrap_or_else(|| "redis://localhost:6379".to_string()),
})
}
}
pub struct MockRunner {
config: MockRunnerConfig,
redis_client: redis::Client,
}
impl MockRunner {
pub fn new(config: MockRunnerConfig) -> Result<Self, Box<dyn std::error::Error>> {
let redis_client = redis::Client::open(config.redis_url.clone())?;
Ok(MockRunner {
config,
redis_client,
})
}
pub async fn run(&self) -> Result<(), Box<dyn std::error::Error>> {
println!("🤖 Mock Runner '{}' starting...", self.config.actor_id);
println!("📂 DB Path: {}", self.config.db_path);
println!("🔗 Redis URL: {}", self.config.redis_url);
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
// Use the proper Hero job queue key for this actor instance
// Format: hero:q:work:type:{job_type}:group:{group}:inst:{instance}
let work_queue_key = keys::work_instance(&JobType::OSIS, "default", &self.config.actor_id);
println!("👂 Listening for jobs on queue: {}", work_queue_key);
loop {
// Try to pop a job ID from the work queue using the Hero protocol
let result: redis::RedisResult<Option<String>> = conn.lpop(&work_queue_key, None).await;
match result {
Ok(Some(job_id)) => {
println!("📨 Received job ID: {}", job_id);
if let Err(e) = self.process_job(&mut conn, &job_id).await {
eprintln!("❌ Error processing job {}: {}", job_id, e);
// Mark job as error
if let Err(e2) = Job::set_error(&mut conn, &job_id, &format!("Processing error: {}", e)).await {
eprintln!("❌ Failed to set job error status: {}", e2);
}
}
}
Ok(None) => {
// No jobs available, wait a bit
sleep(Duration::from_millis(100)).await;
}
Err(e) => {
eprintln!("❌ Redis error: {}", e);
sleep(Duration::from_secs(1)).await;
}
}
}
}
async fn process_job(&self, conn: &mut redis::aio::MultiplexedConnection, job_id: &str) -> Result<(), Box<dyn std::error::Error>> {
// Load the job from Redis using the Hero job system
let job = Job::load_from_redis(conn, job_id).await?;
println!("📝 Processing job: {}", job.id);
println!("📝 Caller: {}", job.caller_id);
println!("📝 Context: {}", job.context_id);
println!("📝 Payload: {}", job.payload);
println!("📝 Job Type: {:?}", job.job_type);
// Mark job as started
Job::update_status(conn, job_id, JobStatus::Started).await?;
println!("🚀 Job {} marked as Started", job_id);
// Simulate processing time
sleep(Duration::from_millis(500)).await;
// Echo the payload (simulate job execution)
let output = format!("echo: {}", job.payload);
println!("📤 Output: {}", output);
// Set the job result
Job::set_result(conn, job_id, &output).await?;
// Mark job as finished
Job::update_status(conn, job_id, JobStatus::Finished).await?;
println!("✅ Job {} completed successfully", job_id);
Ok(())
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Parse command line arguments
let config = MockRunnerConfig::from_args()?;
// Create and run the mock runner
let runner = MockRunner::new(config)?;
runner.run().await?;
Ok(())
}

View File

@@ -0,0 +1,108 @@
# Hero Supervisor Example
This example demonstrates how to configure and run the Hero Supervisor with multiple actors using a TOML configuration file.
## Files
- `config.toml` - Example supervisor configuration with multiple actors
- `run_supervisor.sh` - Shell script to build and run the supervisor with the example config
- `run_supervisor.rs` - Rust script using escargot to build and run the supervisor
- `README.md` - This documentation file
## Configuration
The `config.toml` file defines:
- **Redis connection**: URL for the Redis server used for job queuing
- **Database path**: Local path for supervisor state storage
- **Job queue key**: Redis key for the supervisor job queue
- **Actors**: List of actor configurations with:
- `name`: Unique identifier for the actor
- `runner_type`: Type of runner ("SAL", "OSIS", "V", "Python")
- `binary_path`: Path to the actor binary
- `process_manager`: Process management type ("simple" or "tmux")
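For example, a single actor entry might look like this (the binary path is a placeholder; adjust it to your build output):
```toml
[[actors]]
name = "sal_actor_1"
runner_type = "SAL"
binary_path = "../../sal/target/debug/sal_worker"
process_manager = "tmux"
```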
## Prerequisites
1. **Redis Server**: Ensure Redis is running on `localhost:6379` (or update the config)
2. **Actor Binaries**: Build the required actor binaries referenced in the config:
```bash
# Build SAL worker
cd ../../sal
cargo build --bin sal_worker
# Build OSIS and system workers
cd ../../worker
cargo build --bin osis
cargo build --bin system
```
## Running the Example
### Option 1: Shell Script (Recommended)
```bash
./run_supervisor.sh
```
### Option 2: Rust Script with Escargot
```bash
cargo +nightly -Zscript run_supervisor.rs
```
### Option 3: Manual Build and Run
```bash
# Build the supervisor
cd ../../../supervisor
cargo build --bin supervisor --features cli
# Run with config
./target/debug/supervisor --config ../baobab/examples/supervisor/config.toml
```
## Usage
Once running, the supervisor will:
1. Load the configuration from `config.toml`
2. Initialize and start all configured actors
3. Listen for jobs on the Redis queue (`hero:supervisor:jobs`)
4. Dispatch jobs to appropriate actors based on the `runner_name` field
5. Monitor actor health and status
## Testing
You can test the supervisor by dispatching jobs to the Redis queue:
```bash
# Using redis-cli to add a test job
redis-cli LPUSH "hero:supervisor:jobs" '{"id":"test-123","runner_name":"sal_actor_1","script":"print(\"Hello from SAL actor!\")"}'
```
## Stopping
Use `Ctrl+C` to gracefully shutdown the supervisor. It will:
1. Stop accepting new jobs
2. Wait for running jobs to complete
3. Shutdown all managed actors
4. Clean up resources
## Customization
Modify `config.toml` to:
- Add more actors
- Change binary paths to match your build locations
- Update Redis connection settings
- Configure different process managers per actor
- Adjust database and queue settings
## Troubleshooting
- **Redis Connection**: Ensure Redis is running and accessible
- **Binary Paths**: Verify all actor binary paths exist and are executable
- **Permissions**: Ensure the supervisor has permission to create the database directory
- **Ports**: Check that Redis port (6379) is not blocked by firewall

View File

@@ -0,0 +1,18 @@
# Hero Supervisor Configuration
# This configuration defines the Redis connection, database path, and actors to manage
# Redis connection URL
redis_url = "redis://localhost:6379"
# Database path for supervisor state
db_path = "/tmp/supervisor_example_db"
# Job queue key for supervisor jobs
job_queue_key = "hero:supervisor:jobs"
# Actor configurations
[[actors]]
name = "sal_actor_1"
runner_type = "SAL"
binary_path = "cargo run /Users/timurgordon/code/git.ourworld.tf/herocode/supervisor/examples/mock_runner.rs"
process_manager = "tmux"

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env cargo +nightly -Zscript
//! ```cargo
//! [dependencies]
//! escargot = "0.5"
//! tokio = { version = "1.0", features = ["full"] }
//! log = "0.4"
//! env_logger = "0.10"
//! ```
use escargot::CargoBuild;
use std::process::Command;
use log::{info, error};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize logging
env_logger::init();
info!("Building and running Hero Supervisor with example configuration");
// Get the current directory (when running as cargo example, this is the crate root)
let current_dir = std::env::current_dir()?;
info!("Current directory: {}", current_dir.display());
// Path to the supervisor crate (current directory when running as example)
let supervisor_crate_path = current_dir.clone();
// Path to the config file (in examples/supervisor subdirectory)
let config_path = current_dir.join("examples/supervisor/config.toml");
if !config_path.exists() {
error!("Config file not found: {}", config_path.display());
return Err("Config file not found".into());
}
info!("Using config file: {}", config_path.display());
// Build the supervisor binary using escargot
info!("Building supervisor binary...");
let supervisor_bin = CargoBuild::new()
.bin("supervisor")
.manifest_path(supervisor_crate_path.join("Cargo.toml"))
.features("cli")
.run()?;
info!("Supervisor binary built successfully");
// Run the supervisor with the config file
info!("Starting supervisor with config: {}", config_path.display());
let mut cmd = Command::new(supervisor_bin.path());
cmd.arg("--config")
.arg(&config_path);
// Add environment variables for better logging
cmd.env("RUST_LOG", "info");
info!("Executing: {:?}", cmd);
// Execute the supervisor
let status = cmd.status()?;
if status.success() {
info!("Supervisor completed successfully");
} else {
error!("Supervisor exited with status: {}", status);
}
Ok(())
}

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# Hero Supervisor Example Runner
# This script builds and runs the supervisor binary with the example configuration
set -e
# Get the directory of this script
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SUPERVISOR_DIR="$SCRIPT_DIR/../../../supervisor"
CONFIG_FILE="$SCRIPT_DIR/config.toml"
echo "🚀 Building and running Hero Supervisor with example configuration"
echo "📁 Script directory: $SCRIPT_DIR"
echo "🔧 Supervisor crate: $SUPERVISOR_DIR"
echo "⚙️ Config file: $CONFIG_FILE"
# Check if config file exists
if [ ! -f "$CONFIG_FILE" ]; then
echo "❌ Config file not found: $CONFIG_FILE"
exit 1
fi
# Check if supervisor directory exists
if [ ! -d "$SUPERVISOR_DIR" ]; then
echo "❌ Supervisor directory not found: $SUPERVISOR_DIR"
exit 1
fi
# Build the supervisor binary
echo "🔨 Building supervisor binary..."
cd "$SUPERVISOR_DIR"
cargo build --bin supervisor --features cli
# Check if build was successful
if [ $? -ne 0 ]; then
echo "❌ Failed to build supervisor binary"
exit 1
fi
echo "✅ Supervisor binary built successfully"
# Run the supervisor with the config file
echo "🎯 Starting supervisor with config: $CONFIG_FILE"
echo "📝 Use Ctrl+C to stop the supervisor"
echo ""
# Set environment variables for better logging
export RUST_LOG=info
# Execute the supervisor
exec "$SUPERVISOR_DIR/target/debug/supervisor" --config "$CONFIG_FILE"

View File

@@ -0,0 +1,59 @@
//! Test to verify OpenRPC method registration
use hero_supervisor_openrpc_client::SupervisorClient;
use tokio::time::{sleep, Duration};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("🔍 Testing OpenRPC method registration");
// Start a local supervisor with OpenRPC (assume it's running)
println!("📡 Connecting to OpenRPC server...");
let client = SupervisorClient::new("http://127.0.0.1:3030").await?;
// Test basic methods first
println!("🧪 Testing basic methods...");
// Test list_runners (should work)
match client.list_runners().await {
Ok(runners) => println!("✅ list_runners works: {:?}", runners),
Err(e) => println!("❌ list_runners failed: {}", e),
}
// Test get_all_runner_status (might have serialization issues)
match client.get_all_runner_status().await {
Ok(statuses) => println!("✅ get_all_runner_status works: {} runners", statuses.len()),
Err(e) => println!("❌ get_all_runner_status failed: {}", e),
}
// Test the new queue_and_wait method
println!("🎯 Testing queue_and_wait method...");
// Create a simple test job
use hero_supervisor::job::{JobBuilder, JobType};
let job = JobBuilder::new()
.caller_id("test_client")
.context_id("method_test")
.payload("print('Testing queue_and_wait method registration');")
.job_type(JobType::OSIS)
.runner_name("osis_actor") // Use existing runner
.timeout(Duration::from_secs(10))
.build()?;
match client.queue_and_wait("osis_actor", job, 10).await {
Ok(Some(result)) => println!("✅ queue_and_wait works! Result: {}", result),
Ok(None) => println!("⏰ queue_and_wait timed out"),
Err(e) => {
println!("❌ queue_and_wait failed: {}", e);
// Check if it's a MethodNotFound error
if e.to_string().contains("Method not found") {
println!("🔍 Method not found - this suggests trait registration issue");
}
}
}
println!("🏁 OpenRPC method test completed");
Ok(())
}

View File

@@ -0,0 +1,70 @@
//! Simple test for the queue_and_wait functionality
use hero_supervisor::{
supervisor::{Supervisor, ProcessManagerType},
runner::RunnerConfig,
job::{JobBuilder, JobType},
};
use std::time::Duration;
use std::path::PathBuf;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("🧪 Testing queue_and_wait functionality directly");
// Create supervisor
let mut supervisor = Supervisor::new();
// Create a runner config
let config = RunnerConfig::new(
"test_actor".to_string(),
hero_supervisor::runner::RunnerType::OSISRunner,
PathBuf::from("./target/debug/examples/mock_runner"),
"/tmp/test_db".to_string(),
"redis://localhost:6379".to_string(),
);
// Add runner
println!(" Adding test runner...");
supervisor.add_runner(config, ProcessManagerType::Simple).await?;
// Start runner
println!("▶️ Starting test runner...");
supervisor.start_runner("test_actor").await?;
// Create a test job
let job = JobBuilder::new()
.caller_id("test_client")
.context_id("direct_test")
.payload("print('Direct queue_and_wait test!');")
.job_type(JobType::OSIS)
.runner_name("test_actor")
.timeout(Duration::from_secs(10))
.build()?;
println!("🚀 Testing queue_and_wait directly...");
println!("📋 Job ID: {}", job.id);
// Test queue_and_wait directly
match supervisor.queue_and_wait("test_actor", job, 10).await {
Ok(Some(result)) => {
println!("✅ queue_and_wait succeeded!");
println!("📤 Result: {}", result);
}
Ok(None) => {
println!("⏰ queue_and_wait timed out");
}
Err(e) => {
println!("❌ queue_and_wait failed: {}", e);
}
}
// Cleanup
println!("🧹 Cleaning up...");
supervisor.stop_runner("test_actor", false).await?;
supervisor.remove_runner("test_actor").await?;
println!("✅ Direct test completed!");
Ok(())
}

View File

@@ -0,0 +1,46 @@
//! Test program for register_runner functionality with secret authentication
use hero_supervisor::{SupervisorApp};
use log::info;
use tokio;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
info!("Starting supervisor with test secrets...");
// Create supervisor app with test secrets
let mut app = SupervisorApp::builder()
.redis_url("redis://localhost:6379")
.db_path("/tmp/hero_test_db")
.queue_key("hero:test_queue")
.admin_secret("admin123")
.register_secret("register456")
.user_secret("user789")
.build()
.await?;
info!("Supervisor configured with secrets:");
info!(" Admin secrets: {:?}", app.supervisor.admin_secrets());
info!(" Register secrets: {:?}", app.supervisor.register_secrets());
info!(" User secrets: {:?}", app.supervisor.user_secrets());
// Start OpenRPC server
let supervisor_arc = std::sync::Arc::new(tokio::sync::Mutex::new(app.supervisor.clone()));
info!("Starting OpenRPC server...");
hero_supervisor::openrpc::start_openrpc_servers(supervisor_arc).await?;
info!("Supervisor is running with OpenRPC server on http://127.0.0.1:3030");
info!("Test secrets configured:");
info!(" Admin secret: admin123");
info!(" Register secret: register456");
info!(" User secret: user789");
info!("Press Ctrl+C to stop...");
// Keep running
loop {
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
}
}

42
scripts/build.sh Executable file
View File

@@ -0,0 +1,42 @@
#!/bin/bash
set -e
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Defaults
OUTDIR=""
RELEASE=0
CARGO_ARGS=""
usage() {
cat <<EOF
Usage: $(basename "$0") [options]
Options:
--release Use cargo --release
--outdir <dir> Output directory (passed to cargo --target-dir)
--cargo-args "..." Extra arguments forwarded to cargo build
-h, --help Show this help
EOF
}
# Parse args
while [[ $# -gt 0 ]]; do
case "$1" in
--release) RELEASE=1; shift;;
--outdir) OUTDIR="$2"; shift 2;;
--cargo-args) CARGO_ARGS="$2"; shift 2;;
-h|--help) usage; exit 0;;
*) echo "❌ Unknown option: $1"; echo; usage; exit 1;;
esac
done
"$SCRIPT_DIR/install.sh"
set -x
cmd=(cargo build)
if [[ $RELEASE -eq 1 ]]; then cmd+=(--release); fi
if [[ -n "$OUTDIR" ]]; then cmd+=(--dist "$OUTDIR"); fi
if [[ -n "$CARGO_ARGS" ]]; then cmd+=($CARGO_ARGS); fi
"${cmd[@]}"
set +x

76
scripts/environment.sh Normal file
View File

@@ -0,0 +1,76 @@
#!/usr/bin/env bash
set -euo pipefail
# This script prepares the dev environment and (when sourced) exports env vars.
# Usage:
# source ./scripts/environment.sh # export env vars to current shell
# ./scripts/environment.sh # runs setup checks; prints sourcing hint
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
REPO_ROOT=$(cd "$SCRIPT_DIR/.." && pwd)
cd "$REPO_ROOT"
# --- Helper: print next steps -------------------------------------------------
print_next_steps() {
echo ""
echo "Next steps:"
echo " 1) Start server (in ../server): cargo run -- --from-env --verbose"
echo " 2) Start portal: ./scripts/start.sh (or ./scripts/start.sh --port 8088)"
echo " 3) Dev (Trunk): set -a; source .env; set +a; trunk serve"
}
# --- Ensure .env exists (key=value style) -------------------------------------
if [ ! -f ".env" ]; then
echo "📝 Creating .env file..."
cat > .env << EOF
# Portal Client Configuration
# This file configures the frontend portal app
## Export-style so that 'source .env' exports to current shell
# API Key for server authentication (must match one of the API_KEYS in the server .env)
export API_KEY=dev_key_123
# Optional: Override server API base URL (defaults to http://127.0.0.1:3001/api)
# Example: API_URL=http://localhost:3001/api
# export API_URL=
EOF
echo "✅ Created .env file with default API key"
else
echo "✅ .env file already exists"
fi
# --- Install prerequisites ----------------------------------------------------
if ! command -v trunk >/dev/null 2>&1; then
echo "📦 Installing trunk..."
cargo install trunk
else
echo "✅ trunk is installed"
fi
if ! rustup target list --installed | grep -q "wasm32-unknown-unknown"; then
echo "🔧 Adding wasm32-unknown-unknown target..."
rustup target add wasm32-unknown-unknown
else
echo "✅ wasm32-unknown-unknown target present"
fi
# --- Detect if sourced vs executed --------------------------------------------
# Works for bash and zsh
is_sourced=false
# shellcheck disable=SC2296
if [ -n "${ZSH_EVAL_CONTEXT:-}" ]; then
case $ZSH_EVAL_CONTEXT in *:file:*) is_sourced=true;; esac
elif [ -n "${BASH_SOURCE:-}" ] && [ "${BASH_SOURCE[0]}" != "$0" ]; then
is_sourced=true
fi
if $is_sourced; then
echo "🔐 Sourcing .env (export-style) into current shell..."
# shellcheck disable=SC1091
source .env
echo "✅ Environment exported (API_KEY, optional API_URL)"
else
echo " Run 'source ./scripts/environment.sh' or 'source .env' to export env vars to your shell."
print_next_steps
fi

8
scripts/install.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/bash
set -e
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
ROOT_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
pushd "$ROOT_DIR"
cargo update

161
scripts/release.sh Executable file
View File

@@ -0,0 +1,161 @@
#!/bin/bash
# release.sh - Build optimized WASM, then optionally optimize and precompress the assets
set -e
###############################################################################
# Freezone Portal Release Script
# - Builds the WASM app with trunk in release mode
# - Optionally optimizes .wasm with wasm-opt (-Oz, strip)
# - Precompresses assets with gzip and brotli for efficient static serving
# - Generates a manifest (manifest.json) with sizes and SHA-256 checksums
#
# Usage:
# ./release.sh [--outdir dist] [--no-opt] [--compress] [--no-manifest]
# [--trunk-args "--public-url /portal/"]
#
# Notes:
# - Precompression is OFF by default; enable with --compress
# - Only modifies files within the output directory (default: dist)
# - Non-destructive to your source tree
###############################################################################
set -u
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
BUILD_SCRIPT="$SCRIPT_DIR/build.sh"
# Defaults
OUTDIR="dist"
DO_OPT=1
DO_COMPRESS=0
DO_MANIFEST=1
TRUNK_ARGS=""
usage() {
cat <<EOF
Usage: $(basename "$0") [options]
Options:
--outdir <dir> Output directory (default: dist)
--no-opt Skip wasm-opt optimization
--compress Enable gzip/brotli precompression
--no-manifest Skip manifest generation
--trunk-args "..." Extra arguments forwarded to trunk build
-h, --help Show this help
Examples:
$(basename "$0") --outdir dist --trunk-args "--public-url /"
$(basename "$0") --no-opt --no-compress
EOF
}
# Parse args
while [[ $# -gt 0 ]]; do
case "$1" in
--outdir)
OUTDIR="$2"; shift 2;;
--no-opt)
DO_OPT=0; shift;;
--compress)
DO_COMPRESS=1; shift;;
--no-manifest)
DO_MANIFEST=0; shift;;
--trunk-args)
TRUNK_ARGS="$2"; shift 2;;
-h|--help)
usage; exit 0;;
*)
echo "❌ Unknown option: $1"; echo; usage; exit 1;;
esac
done
# Tool checks
if [[ ! -x "$BUILD_SCRIPT" ]]; then
echo "❌ build.sh not found or not executable at: $BUILD_SCRIPT"
echo " Ensure portal/scripts/build.sh exists and is chmod +x."
exit 1
fi
if ! command -v trunk >/dev/null 2>&1; then
echo "❌ trunk not found. Install with: cargo install trunk"; exit 1;
fi
HAS_WASM_OPT=0
if command -v wasm-opt >/dev/null 2>&1; then HAS_WASM_OPT=1; fi
if [[ $DO_OPT -eq 1 && $HAS_WASM_OPT -eq 0 ]]; then
echo "⚠️ wasm-opt not found. Skipping WASM optimization."
DO_OPT=0
fi
if [[ $DO_COMPRESS -eq 1 ]]; then
if ! command -v gzip >/dev/null 2>&1; then
echo "⚠️ gzip not found. Skipping gzip compression."; GZIP_OK=0; else GZIP_OK=1; fi
if ! command -v brotli >/dev/null 2>&1; then
echo "⚠️ brotli not found. Skipping brotli compression."; BR_OK=0; else BR_OK=1; fi
else
GZIP_OK=0; BR_OK=0
fi
echo "🔧 Building optimized WASM bundle (via build.sh)..."
set -x
"$BUILD_SCRIPT" --release --outdir "$OUTDIR" ${TRUNK_ARGS:+--trunk-args "$TRUNK_ARGS"}
set +x
DIST_DIR="$PROJECT_DIR/$OUTDIR"
if [[ ! -d "$DIST_DIR" ]]; then
echo "❌ Build failed: output directory not found: $DIST_DIR"; exit 1;
fi
# Optimize .wasm files
if [[ $DO_OPT -eq 1 && $HAS_WASM_OPT -eq 1 ]]; then
echo "🛠️ Optimizing WASM with wasm-opt (-Oz, strip)..."
while IFS= read -r -d '' wasm; do
echo "$(basename "$wasm")"
tmp="$wasm.opt"
wasm-opt -Oz --strip-dwarf "$wasm" -o "$tmp"
mv "$tmp" "$wasm"
done < <(find "$DIST_DIR" -type f -name "*.wasm" -print0)
fi
# Precompress assets
if [[ $DO_COMPRESS -eq 1 ]]; then
echo "🗜️ Precompressing assets (gzip/brotli)..."
while IFS= read -r -d '' f; do
if [[ $GZIP_OK -eq 1 ]]; then
gzip -kf9 "$f"
fi
if [[ $BR_OK -eq 1 ]]; then
brotli -f -q 11 "$f"
fi
done < <(find "$DIST_DIR" -type f \( -name "*.wasm" -o -name "*.js" -o -name "*.css" \) -print0)
fi
# Manifest with sizes and SHA-256
if [[ $DO_MANIFEST -eq 1 ]]; then
echo "🧾 Generating manifest.json (sizes, sha256)..."
manifest="$DIST_DIR/manifest.json"
echo "{" > "$manifest"
first=1
while IFS= read -r -d '' f; do
rel="${f#"$DIST_DIR/"}"
size=$(stat -f%z "$f" 2>/dev/null || stat -c%s "$f")
if command -v shasum >/dev/null 2>&1; then
hash=$(shasum -a 256 "$f" | awk '{print $1}')
else
hash=$(openssl dgst -sha256 -r "$f" | awk '{print $1}')
fi
[[ $first -eq 1 ]] || echo "," >> "$manifest"
first=0
printf " \"%s\": { \"bytes\": %s, \"sha256\": \"%s\" }" "$rel" "$size" "$hash" >> "$manifest"
done < <(find "$DIST_DIR" -type f ! -name "manifest.json" -print0 | sort -z)
echo "\n}" >> "$manifest"
fi
echo "📦 Checking bundle sizes ($OUTDIR)..."
if [ -d "$OUTDIR" ]; then
echo "Bundle sizes:"
find "$OUTDIR" -name "*.wasm" -exec ls -lh {} \; | awk '{print " WASM: " $5 " - " $9}'
find "$OUTDIR" -name "*.js" -exec ls -lh {} \; | awk '{print " JS: " $5 " - " $9}'
find "$OUTDIR" -name "*.css" -exec ls -lh {} \; | awk '{print " CSS: " $5 " - " $9}'
echo ""
fi

1
scripts/run.sh Executable file
View File

@@ -0,0 +1 @@
cargo run

8
scripts/test.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/bash
# test.sh - Run cargo check and the test suite
set -e
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
cargo check
cargo test

166
src/app.rs Normal file
View File

@@ -0,0 +1,166 @@
//! # Hero Supervisor Application
//!
//! Simplified supervisor application that wraps a built Supervisor instance.
//! Use SupervisorBuilder to construct the supervisor with all configuration,
//! then pass it to SupervisorApp for runtime management.
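//!
//! A minimal lifecycle sketch (supervisor construction is elided; see `SupervisorBuilder`
//! in `supervisor.rs` — the bind address and port below are just example values):
//!
//! ```ignore
//! let supervisor = /* built via SupervisorBuilder */;
//! let mut app = SupervisorApp::new(supervisor, "127.0.0.1".to_string(), 3030);
//! app.start().await?; // starts runners and the OpenRPC server, then blocks until shutdown
//! ```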
use crate::Supervisor;
use crate::openrpc::start_openrpc_servers;
use log::{info, error, debug};
use std::sync::Arc;
use tokio::sync::Mutex;
/// Main supervisor application
pub struct SupervisorApp {
pub supervisor: Supervisor,
pub bind_address: String,
pub port: u16,
}
impl SupervisorApp {
/// Create a new supervisor application with a built supervisor
pub fn new(supervisor: Supervisor, bind_address: String, port: u16) -> Self {
Self {
supervisor,
bind_address,
port,
}
}
/// Start the complete supervisor application
/// This method handles the entire application lifecycle:
/// - Starts all configured runners
/// - Launches the OpenRPC server
/// - Sets up graceful shutdown handling
/// - Keeps the application running
pub async fn start(&mut self) -> Result<(), Box<dyn std::error::Error>> {
info!("Starting Hero Supervisor Application");
// Start all configured runners
self.start_all().await?;
// Start OpenRPC server
self.start_openrpc_server().await?;
// Set up graceful shutdown
self.setup_graceful_shutdown().await;
// Keep the application running
info!("Supervisor is running. Press Ctrl+C to shutdown.");
self.run_main_loop().await;
Ok(())
}
/// Start the OpenRPC server
async fn start_openrpc_server(&self) -> Result<(), Box<dyn std::error::Error>> {
info!("Starting OpenRPC server...");
let supervisor_for_openrpc = Arc::new(Mutex::new(self.supervisor.clone()));
let bind_address = self.bind_address.clone();
let port = self.port;
// Start the OpenRPC server in a background task
let server_handle = tokio::spawn(async move {
if let Err(e) = start_openrpc_servers(supervisor_for_openrpc, &bind_address, port).await {
error!("OpenRPC server error: {}", e);
}
});
// Give the server a moment to start
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
info!("OpenRPC server started successfully");
// Store the handle for potential cleanup (we could add this to the struct if needed)
std::mem::forget(server_handle); // For now, let it run in background
Ok(())
}
/// Set up graceful shutdown handling
async fn setup_graceful_shutdown(&self) {
tokio::spawn(async move {
tokio::signal::ctrl_c().await.expect("Failed to listen for ctrl+c");
info!("Received shutdown signal");
std::process::exit(0);
});
}
/// Main application loop
async fn run_main_loop(&self) {
// Keep the main thread alive
loop {
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
}
}
/// Start all configured runners
pub async fn start_all(&mut self) -> Result<(), Box<dyn std::error::Error>> {
info!("Starting all runners");
let results = self.supervisor.start_all().await;
let mut failed_count = 0;
for (runner_id, result) in results {
match result {
Ok(_) => info!("Runner {} started successfully", runner_id),
Err(e) => {
error!("Failed to start runner {}: {}", runner_id, e);
failed_count += 1;
}
}
}
if failed_count == 0 {
info!("All runners started successfully");
} else {
error!("Failed to start {} runners", failed_count);
}
Ok(())
}
/// Stop all configured runners
pub async fn stop_all(&mut self, force: bool) -> Result<(), Box<dyn std::error::Error>> {
info!("Stopping all runners (force: {})", force);
let results = self.supervisor.stop_all(force).await;
let mut failed_count = 0;
for (runner_id, result) in results {
match result {
Ok(_) => info!("Runner {} stopped successfully", runner_id),
Err(e) => {
error!("Failed to stop runner {}: {}", runner_id, e);
failed_count += 1;
}
}
}
if failed_count == 0 {
info!("All runners stopped successfully");
} else {
error!("Failed to stop {} runners", failed_count);
}
Ok(())
}
/// Get status of all runners
pub async fn get_status(&self) -> Result<Vec<(String, String)>, Box<dyn std::error::Error>> {
debug!("Getting status of all runners");
let statuses = self.supervisor.get_all_runner_status().await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error>)?;
let status_strings: Vec<(String, String)> = statuses
.into_iter()
.map(|(runner_id, status)| {
let status_str = format!("{:?}", status);
(runner_id, status_str)
})
.collect();
Ok(status_strings)
}
}

327
src/client.rs Normal file
View File

@@ -0,0 +1,327 @@
//! Redis-backed client for job storage, status, and result operations.
use chrono::Utc;
use redis::AsyncCommands;
use sal_service_manager::{ProcessManager, SimpleProcessManager, TmuxProcessManager};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Mutex;
use crate::{runner::{LogInfo, Runner, RunnerConfig, RunnerError, RunnerResult, RunnerStatus}, JobError, job::JobStatus};
use crate::{job::Job};
#[cfg(feature = "admin")]
use supervisor_admin_server::{AdminSupervisor, RunnerConfigInfo, JobInfo};
/// Process manager type for a runner
#[derive(Debug, Clone)]
pub enum ProcessManagerType {
/// Simple process manager for direct process spawning
Simple,
/// Tmux process manager for session-based management
Tmux(String), // session name
}
/// Client for managing job state (data, status, results) in Redis
#[derive(Clone)]
pub struct Client {
redis_client: redis::Client,
/// Namespace for queue keys
namespace: String,
}
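/// Builder for [`Client`].
///
/// A minimal usage sketch (values are placeholders):
///
/// ```ignore
/// let client = Client::builder()
///     .redis_url("redis://localhost:6379")
///     .namespace("hero")
///     .build()
///     .await?;
/// ```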
pub struct ClientBuilder {
/// Redis URL for connection
redis_url: String,
/// Namespace for queue keys
namespace: String,
}
impl ClientBuilder {
/// Create a new client builder
pub fn new() -> Self {
Self {
redis_url: "redis://localhost:6379".to_string(),
namespace: "".to_string(),
}
}
/// Set the Redis URL
pub fn redis_url<S: Into<String>>(mut self, url: S) -> Self {
self.redis_url = url.into();
self
}
/// Set the namespace for queue keys
pub fn namespace<S: Into<String>>(mut self, namespace: S) -> Self {
self.namespace = namespace.into();
self
}
/// Build the client
pub async fn build(self) -> RunnerResult<Client> {
// Create Redis client
let redis_client = redis::Client::open(self.redis_url.as_str())
.map_err(|e| RunnerError::ConfigError {
reason: format!("Invalid Redis URL: {}", e),
})?;
Ok(Client {
redis_client,
namespace: self.namespace,
})
}
}
impl Default for Client {
fn default() -> Self {
// Note: Default implementation connects to the default Redis URL with an empty namespace.
// Use Client::builder() for custom configuration.
Self {
redis_client: redis::Client::open("redis://localhost:6379").unwrap(),
namespace: "".to_string(),
}
}
}
impl Client {
/// Create a new client builder
pub fn builder() -> ClientBuilder {
ClientBuilder::new()
}
/// List all job IDs from Redis
pub async fn list_jobs(&self) -> RunnerResult<Vec<String>> {
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| RunnerError::RedisError { source: e })?;
let keys: Vec<String> = conn.keys(format!("{}:*", &self.jobs_key())).await?;
let job_ids: Vec<String> = keys
.into_iter()
.filter_map(|key| {
if key.starts_with(&format!("{}:", self.jobs_key())) {
key.strip_prefix(&format!("{}:", self.jobs_key()))
.map(|s| s.to_string())
} else {
None
}
})
.collect();
Ok(job_ids)
}
fn jobs_key(&self) -> String {
if self.namespace.is_empty() {
format!("job")
} else {
format!("{}:job", self.namespace)
}
}
pub fn job_key(&self, job_id: &str) -> String {
if self.namespace.is_empty() {
format!("job:{}", job_id)
} else {
format!("{}:job:{}", self.namespace, job_id)
}
}
pub fn job_reply_key(&self, job_id: &str) -> String {
if self.namespace.is_empty() {
format!("reply:{}", job_id)
} else {
format!("{}:reply:{}", self.namespace, job_id)
}
}
/// Set job error in Redis
pub async fn set_error(&self,
job_id: &str,
error: &str,
) -> Result<(), JobError> {
let job_key = self.job_key(job_id);
let now = Utc::now();
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError::Redis(e))?;
conn.hset_multiple(&job_key, &[
("error", error),
("status", JobStatus::Error.as_str()),
("updated_at", &now.to_rfc3339()),
]).await
.map_err(|e| JobError::Redis(e))?;
Ok(())
}
/// Set job status in Redis
pub async fn set_job_status(&self,
job_id: &str,
status: JobStatus,
) -> Result<(), JobError> {
let job_key = self.job_key(job_id);
let now = Utc::now();
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError::Redis(e))?;
conn.hset_multiple(&job_key, &[
("status", status.as_str()),
("updated_at", &now.to_rfc3339()),
]).await
.map_err(|e| JobError::Redis(e))?;
Ok(())
}
/// Get job status from Redis
pub async fn get_status(
&self,
job_id: &str,
) -> Result<JobStatus, JobError> {
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError::Redis(e))?;
let status_str: Option<String> = conn.hget(&self.job_key(job_id), "status").await?;
match status_str {
Some(s) => JobStatus::from_str(&s).ok_or_else(|| JobError::InvalidStatus(s)),
None => Err(JobError::NotFound(job_id.to_string())),
}
}
/// Delete job from Redis
pub async fn delete_from_redis(
&self,
job_id: &str,
) -> Result<(), JobError> {
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError::Redis(e))?;
let job_key = self.job_key(job_id);
let _: () = conn.del(&job_key).await?;
Ok(())
}
/// Store this job in Redis
pub async fn store_job_in_redis(&self, job: &Job) -> Result<(), JobError> {
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError::Redis(e))?;
let job_key = self.job_key(&job.id);
// Serialize the job data
let job_data = serde_json::to_string(job)?;
// Store job data in Redis hash
let _: () = conn.hset_multiple(&job_key, &[
("data", job_data),
("status", JobStatus::Dispatched.as_str().to_string()),
("created_at", job.created_at.to_rfc3339()),
("updated_at", job.updated_at.to_rfc3339()),
]).await?;
// Set TTL for the job (24 hours)
let _: () = conn.expire(&job_key, 86400).await?;
Ok(())
}
/// Load a job from Redis by ID
pub async fn load_job_from_redis(
&self,
job_id: &str,
) -> Result<Job, JobError> {
let job_key = self.job_key(job_id);
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError::Redis(e))?;
// Get job data from Redis
let job_data: Option<String> = conn.hget(&job_key, "data").await?;
match job_data {
Some(data) => {
let job: Job = serde_json::from_str(&data)?;
Ok(job)
}
None => Err(JobError::NotFound(job_id.to_string())),
}
}
/// Delete a job by ID
pub async fn delete_job(&mut self, job_id: &str) -> RunnerResult<()> {
use redis::AsyncCommands;
let mut conn = self.redis_client.get_multiplexed_async_connection().await
.map_err(|e| JobError::Redis(e))?;
let job_key = self.job_key(job_id);
let deleted_count: i32 = conn.del(&job_key).await
.map_err(|e| RunnerError::QueueError {
actor_id: job_id.to_string(),
reason: format!("Failed to delete job: {}", e),
})?;
if deleted_count == 0 {
return Err(RunnerError::QueueError {
actor_id: job_id.to_string(),
reason: format!("Job '{}' not found or already deleted", job_id),
});
}
Ok(())
}
/// Set job result in Redis
pub async fn set_result(
&self,
job_id: &str,
result: &str,
) -> Result<(), JobError> {
let job_key = self.job_key(&job_id);
let now = Utc::now();
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError::Redis(e))?;
let _: () = conn.hset_multiple(&job_key, &[
("result", result),
("status", JobStatus::Finished.as_str()),
("updated_at", &now.to_rfc3339()),
]).await?;
Ok(())
}
/// Get job result from Redis
pub async fn get_result(
&self,
job_id: &str,
) -> Result<Option<String>, JobError> {
let job_key = self.job_key(job_id);
let mut conn = self.redis_client
.get_multiplexed_async_connection()
.await
.map_err(|e| JobError:: Redis(e))?;
let result: Option<String> = conn.hget(&job_key, "result").await?;
Ok(result)
}
}

220
src/job.rs Normal file
View File

@@ -0,0 +1,220 @@
use chrono::Utc;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
use uuid::Uuid;
use redis::AsyncCommands;
use thiserror::Error;
/// Job status enumeration
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum JobStatus {
Dispatched,
WaitingForPrerequisites,
Started,
Error,
Stopping,
Finished,
}
impl JobStatus {
pub fn as_str(&self) -> &'static str {
match self {
JobStatus::Dispatched => "dispatched",
JobStatus::WaitingForPrerequisites => "waiting_for_prerequisites",
JobStatus::Started => "started",
JobStatus::Error => "error",
JobStatus::Stopping => "stopping",
JobStatus::Finished => "finished",
}
}
pub fn from_str(s: &str) -> Option<Self> {
match s {
"dispatched" => Some(JobStatus::Dispatched),
"waiting_for_prerequisites" => Some(JobStatus::WaitingForPrerequisites),
"started" => Some(JobStatus::Started),
"error" => Some(JobStatus::Error),
"stopping" => Some(JobStatus::Stopping),
"finished" => Some(JobStatus::Finished),
_ => None,
}
}
}
/// Representation of a script execution request.
///
/// This structure contains all the information needed to execute a script
/// on an actor service, including the script content, dependencies, and metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Job {
pub id: String,
pub caller_id: String,
pub context_id: String,
pub payload: String,
pub runner_name: String, // name of the runner to execute this job
pub executor: String, // name of the executor the runner will use to execute this job
pub timeout: Duration,
pub env_vars: HashMap<String, String>, // environment variables for script execution
pub created_at: chrono::DateTime<chrono::Utc>,
pub updated_at: chrono::DateTime<chrono::Utc>,
}
/// Error types for job operations
#[derive(Error, Debug)]
pub enum JobError {
#[error("Redis error: {0}")]
Redis(#[from] redis::RedisError),
#[error("Serialization error: {0}")]
Serialization(#[from] serde_json::Error),
#[error("Job not found: {0}")]
NotFound(String),
#[error("Invalid job status: {0}")]
InvalidStatus(String),
#[error("Timeout error: {0}")]
Timeout(String),
#[error("Invalid job data: {0}")]
InvalidData(String),
}
impl Job {
/// Create a new job with the given parameters
pub fn new(
caller_id: String,
context_id: String,
payload: String,
runner_name: String,
executor: String,
) -> Self {
let now = Utc::now();
Self {
id: Uuid::new_v4().to_string(),
caller_id,
context_id,
payload,
runner_name,
executor,
timeout: Duration::from_secs(300), // 5 minutes default
env_vars: HashMap::new(),
created_at: now,
updated_at: now,
}
}
}
/// Builder for constructing job execution requests.
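///
/// A minimal sketch of building a job with this builder; the identifiers, payload,
/// and executor below are placeholders.
///
/// ```ignore
/// use std::time::Duration;
///
/// let job = JobBuilder::new()
///     .caller_id("caller-1")
///     .context_id("ctx-1")
///     .payload("print(\"hello\");")
///     .runner_name("osis_actor")
///     .executor("example_executor")
///     .timeout(Duration::from_secs(60))
///     .build()?;
/// ```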
pub struct JobBuilder {
caller_id: String,
context_id: String,
payload: String,
runner_name: String,
executor: String,
timeout: Duration,
env_vars: HashMap<String, String>,
}
impl JobBuilder {
pub fn new() -> Self {
Self {
caller_id: "".to_string(),
context_id: "".to_string(),
payload: "".to_string(),
runner_name: "".to_string(),
executor: "".to_string(),
timeout: Duration::from_secs(300), // 5 minutes default
env_vars: HashMap::new(),
}
}
/// Set the caller ID for this job
pub fn caller_id(mut self, caller_id: &str) -> Self {
self.caller_id = caller_id.to_string();
self
}
/// Set the context ID for this job
pub fn context_id(mut self, context_id: &str) -> Self {
self.context_id = context_id.to_string();
self
}
/// Set the payload (script content) for this job
pub fn payload(mut self, payload: &str) -> Self {
self.payload = payload.to_string();
self
}
/// Set the runner name for this job
pub fn runner_name(mut self, runner_name: &str) -> Self {
self.runner_name = runner_name.to_string();
self
}
/// Set the executor for this job
pub fn executor(mut self, executor: &str) -> Self {
self.executor = executor.to_string();
self
}
/// Set the timeout for job execution
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self
}
/// Set a single environment variable
pub fn env_var(mut self, key: &str, value: &str) -> Self {
self.env_vars.insert(key.to_string(), value.to_string());
self
}
/// Set multiple environment variables from a HashMap
pub fn env_vars(mut self, env_vars: HashMap<String, String>) -> Self {
self.env_vars = env_vars;
self
}
/// Clear all environment variables
pub fn clear_env_vars(mut self) -> Self {
self.env_vars.clear();
self
}
/// Build the job
pub fn build(self) -> Result<Job, JobError> {
if self.caller_id.is_empty() {
return Err(JobError::InvalidData("caller_id is required".to_string()));
}
if self.context_id.is_empty() {
return Err(JobError::InvalidData("context_id is required".to_string()));
}
if self.payload.is_empty() {
return Err(JobError::InvalidData("payload is required".to_string()));
}
if self.runner_name.is_empty() {
return Err(JobError::InvalidData("runner_name is required".to_string()));
}
if self.executor.is_empty() {
return Err(JobError::InvalidData("executor is required".to_string()));
}
let mut job = Job::new(
self.caller_id,
self.context_id,
self.payload,
self.runner_name,
self.executor,
);
job.timeout = self.timeout;
job.env_vars = self.env_vars;
Ok(job)
}
}
impl Default for JobBuilder {
fn default() -> Self {
Self::new()
}
}

21
src/lib.rs Normal file
View File

@@ -0,0 +1,21 @@
//! Hero Supervisor - Actor management for the Hero ecosystem.
//!
//! See README.md for detailed documentation and usage examples.
pub mod runner;
pub mod supervisor;
pub mod job;
pub mod client;
pub mod app;
// OpenRPC server module
pub mod openrpc;
// Re-export main types for convenience
pub use runner::{
LogInfo, Runner, RunnerConfig, RunnerResult, RunnerStatus,
};
pub use sal_service_manager::{ProcessManager, SimpleProcessManager, TmuxProcessManager};
pub use supervisor::{Supervisor, SupervisorBuilder, ProcessManagerType};
pub use job::{Job, JobBuilder, JobStatus, JobError};
pub use app::SupervisorApp;

829
src/openrpc.rs Normal file
View File

@@ -0,0 +1,829 @@
//! OpenRPC server implementation.
use jsonrpsee::{
core::{RpcResult, async_trait},
proc_macros::rpc,
server::{Server, ServerHandle},
types::{ErrorObject, ErrorObjectOwned},
};
use tower_http::cors::{CorsLayer, Any};
use anyhow;
use log::{debug, info, error};
use crate::supervisor::Supervisor;
use crate::runner::{Runner, RunnerError};
use crate::job::Job;
use crate::ProcessManagerType;
use sal_service_manager::{ProcessStatus, LogInfo};
use serde::{Deserialize, Serialize};
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::fs;
use tokio::sync::Mutex;
/// Load OpenRPC specification from docs/openrpc.json
fn load_openrpc_spec() -> Result<serde_json::Value, Box<dyn std::error::Error>> {
// Try to find the openrpc.json file relative to the current working directory
let possible_paths = [
"docs/openrpc.json",
"../docs/openrpc.json",
"../../docs/openrpc.json",
"./supervisor/docs/openrpc.json",
];
for path in &possible_paths {
if let Ok(content) = fs::read_to_string(path) {
match serde_json::from_str(&content) {
Ok(spec) => {
debug!("Loaded OpenRPC specification from: {}", path);
return Ok(spec);
}
Err(e) => {
error!("Failed to parse OpenRPC JSON from {}: {}", path, e);
}
}
}
}
Err("Could not find or parse docs/openrpc.json".into())
}
/// Helper function to convert RunnerError to RPC error
fn runner_error_to_rpc_error(err: RunnerError) -> ErrorObject<'static> {
ErrorObject::owned(
-32603, // Internal error code
format!("Supervisor error: {}", err),
None::<()>,
)
}
/// Helper function to create invalid params error
fn invalid_params_error(msg: &str) -> ErrorObject<'static> {
ErrorObject::owned(
-32602, // Invalid params error code
format!("Invalid parameters: {}", msg),
None::<()>,
)
}
/// Request parameters for registering a new runner
#[derive(Debug, Deserialize, Serialize)]
pub struct RegisterRunnerParams {
pub secret: String,
pub name: String,
pub queue: String,
}
/// Request parameters for running a job
#[derive(Debug, Deserialize, Serialize)]
pub struct RunJobParams {
pub secret: String,
pub job: Job,
}
/// Request parameters for adding a new runner
#[derive(Debug, Deserialize, Serialize)]
pub struct AddRunnerParams {
pub actor_id: String,
pub binary_path: String,
pub db_path: String,
pub redis_url: String,
pub process_manager_type: String, // "simple" or "tmux"
pub tmux_session_name: Option<String>, // required if process_manager_type is "tmux"
}
/// Request parameters for queuing a job
#[derive(Debug, Deserialize, Serialize)]
pub struct QueueJobParams {
pub runner_name: String,
pub job: Job,
}
/// Request parameters for queue and wait operation
#[derive(Debug, Deserialize, Serialize)]
pub struct QueueAndWaitParams {
pub runner_name: String,
pub job: Job,
pub timeout_secs: u64,
}
/// Request parameters for getting runner logs
#[derive(Debug, Deserialize, Serialize)]
pub struct GetLogsParams {
pub actor_id: String,
pub lines: Option<usize>,
pub follow: bool,
}
/// Request parameters for adding secrets
#[derive(Debug, Deserialize, Serialize)]
pub struct AddSecretParams {
pub admin_secret: String,
pub secret_type: String, // "admin", "user", or "register"
pub secret_value: String,
}
/// Request parameters for removing secrets
#[derive(Debug, Deserialize, Serialize)]
pub struct RemoveSecretParams {
pub admin_secret: String,
pub secret_type: String, // "admin", "user", or "register"
pub secret_value: String,
}
/// Request parameters for listing secrets
#[derive(Debug, Deserialize, Serialize)]
pub struct ListSecretsParams {
pub admin_secret: String,
}
/// Serializable wrapper for ProcessStatus
#[derive(Debug, Serialize, Clone)]
pub enum ProcessStatusWrapper {
Running,
Stopped,
Starting,
Stopping,
Error(String),
}
impl From<ProcessStatus> for ProcessStatusWrapper {
fn from(status: ProcessStatus) -> Self {
match status {
ProcessStatus::Running => ProcessStatusWrapper::Running,
ProcessStatus::Stopped => ProcessStatusWrapper::Stopped,
ProcessStatus::Starting => ProcessStatusWrapper::Starting,
ProcessStatus::Stopping => ProcessStatusWrapper::Stopping,
ProcessStatus::Error(msg) => ProcessStatusWrapper::Error(msg),
}
}
}
// Note: RunnerStatus is just an alias for ProcessStatus, so we don't need a separate impl
/// Serializable wrapper for Runner
#[derive(Debug, Serialize, Clone)]
pub struct RunnerWrapper {
pub id: String,
pub name: String,
pub command: String,
pub redis_url: String,
}
impl From<&Runner> for RunnerWrapper {
fn from(runner: &Runner) -> Self {
RunnerWrapper {
id: runner.id.clone(),
name: runner.name.clone(),
command: runner.command.to_string_lossy().to_string(),
redis_url: runner.redis_url.clone(),
}
}
}
/// Serializable wrapper for LogInfo
#[derive(Debug, Serialize, Clone)]
pub struct LogInfoWrapper {
pub timestamp: String,
pub level: String,
pub message: String,
}
impl From<LogInfo> for LogInfoWrapper {
fn from(log: LogInfo) -> Self {
LogInfoWrapper {
timestamp: log.timestamp,
level: log.level,
message: log.message,
}
}
}
impl From<crate::runner::LogInfo> for LogInfoWrapper {
fn from(log: crate::runner::LogInfo) -> Self {
LogInfoWrapper {
timestamp: log.timestamp,
level: log.level,
message: log.message,
}
}
}
/// Response for runner status queries
#[derive(Debug, Serialize, Clone)]
pub struct RunnerStatusResponse {
pub actor_id: String,
pub status: ProcessStatusWrapper,
}
/// Response for supervisor info
#[derive(Debug, Serialize, Clone)]
pub struct SupervisorInfoResponse {
pub server_url: String,
pub admin_secrets_count: usize,
pub user_secrets_count: usize,
pub register_secrets_count: usize,
pub runners_count: usize,
}
/// OpenRPC trait defining all supervisor methods
#[rpc(server)]
pub trait SupervisorRpc {
/// Register a new runner with secret-based authentication
#[method(name = "register_runner")]
async fn register_runner(&self, params: RegisterRunnerParams) -> RpcResult<String>;
/// Create a job (fire-and-forget, non-blocking)
#[method(name = "create_job")]
async fn create_job(&self, params: RunJobParams) -> RpcResult<String>;
/// Run a job on the appropriate runner (blocking, returns result)
#[method(name = "run_job")]
async fn run_job(&self, params: RunJobParams) -> RpcResult<Option<String>>;
/// Remove a runner from the supervisor
#[method(name = "remove_runner")]
async fn remove_runner(&self, actor_id: String) -> RpcResult<()>;
/// List all runner IDs
#[method(name = "list_runners")]
async fn list_runners(&self) -> RpcResult<Vec<String>>;
/// Start a specific runner
#[method(name = "start_runner")]
async fn start_runner(&self, actor_id: String) -> RpcResult<()>;
/// Stop a specific runner
#[method(name = "stop_runner")]
async fn stop_runner(&self, actor_id: String, force: bool) -> RpcResult<()>;
/// Get a specific runner by ID
#[method(name = "get_runner")]
async fn get_runner(&self, actor_id: String) -> RpcResult<RunnerWrapper>;
/// Get the status of a specific runner
#[method(name = "get_runner_status")]
async fn get_runner_status(&self, actor_id: String) -> RpcResult<ProcessStatusWrapper>;
/// Get logs for a specific runner
#[method(name = "get_runner_logs")]
async fn get_runner_logs(&self, params: GetLogsParams) -> RpcResult<Vec<LogInfoWrapper>>;
/// Queue a job to a specific runner
#[method(name = "queue_job_to_runner")]
async fn queue_job_to_runner(&self, params: QueueJobParams) -> RpcResult<()>;
/// List all job IDs from Redis
#[method(name = "list_jobs")]
async fn list_jobs(&self) -> RpcResult<Vec<String>>;
/// Get a job by job ID
#[method(name = "get_job")]
async fn get_job(&self, job_id: String) -> RpcResult<Job>;
/// Ping a runner (dispatch a ping job)
#[method(name = "ping_runner")]
async fn ping_runner(&self, runner_id: String) -> RpcResult<String>;
/// Stop a job
#[method(name = "stop_job")]
async fn stop_job(&self, job_id: String) -> RpcResult<()>;
/// Delete a job
#[method(name = "delete_job")]
async fn delete_job(&self, job_id: String) -> RpcResult<()>;
/// Queue a job to a specific runner and wait for the result
#[method(name = "queue_and_wait")]
async fn queue_and_wait(&self, params: QueueAndWaitParams) -> RpcResult<Option<String>>;
/// Get status of all runners
#[method(name = "get_all_runner_status")]
async fn get_all_runner_status(&self) -> RpcResult<Vec<RunnerStatusResponse>>;
/// Start all runners
#[method(name = "start_all")]
async fn start_all(&self) -> RpcResult<Vec<(String, String)>>;
/// Stop all runners
#[method(name = "stop_all")]
async fn stop_all(&self, force: bool) -> RpcResult<Vec<(String, String)>>;
/// Get status of all runners (alternative format)
#[method(name = "get_all_status")]
async fn get_all_status(&self) -> RpcResult<Vec<(String, String)>>;
/// Add a secret to the supervisor (admin, user, or register)
#[method(name = "add_secret")]
async fn add_secret(&self, params: AddSecretParams) -> RpcResult<()>;
/// Remove a secret from the supervisor
#[method(name = "remove_secret")]
async fn remove_secret(&self, params: RemoveSecretParams) -> RpcResult<()>;
/// List all secrets (returns counts only for security)
#[method(name = "list_secrets")]
async fn list_secrets(&self, params: ListSecretsParams) -> RpcResult<SupervisorInfoResponse>;
/// List admin secrets (returns actual secret values)
#[method(name = "list_admin_secrets")]
async fn list_admin_secrets(&self, admin_secret: String) -> RpcResult<Vec<String>>;
/// List user secrets (returns actual secret values)
#[method(name = "list_user_secrets")]
async fn list_user_secrets(&self, admin_secret: String) -> RpcResult<Vec<String>>;
/// List register secrets (returns actual secret values)
#[method(name = "list_register_secrets")]
async fn list_register_secrets(&self, admin_secret: String) -> RpcResult<Vec<String>>;
/// Get supervisor information and statistics
#[method(name = "get_supervisor_info")]
async fn get_supervisor_info(&self, admin_secret: String) -> RpcResult<SupervisorInfoResponse>;
/// OpenRPC discovery method - returns the OpenRPC document describing this API
#[method(name = "rpc.discover")]
async fn rpc_discover(&self) -> RpcResult<serde_json::Value>;
}
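// Illustrative JSON-RPC 2.0 call against this server (the bind address and port are
// assumptions taken from the examples in this repo); other methods are invoked the same
// way, passing their parameters as defined by the request structs above:
//
//   curl -s http://127.0.0.1:3030 \
//     -H 'Content-Type: application/json' \
//     -d '{"jsonrpc":"2.0","id":1,"method":"rpc.discover"}'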
/// Helper function to parse process manager type from string
fn parse_process_manager_type(pm_type: &str, session_name: Option<String>) -> Result<ProcessManagerType, ErrorObject<'static>> {
match pm_type.to_lowercase().as_str() {
"simple" => Ok(ProcessManagerType::Simple),
"tmux" => {
let session = session_name.unwrap_or_else(|| "default_session".to_string());
Ok(ProcessManagerType::Tmux(session))
},
_ => Err(invalid_params_error(&format!(
"Invalid process manager type: {}. Must be 'simple' or 'tmux'",
pm_type
))),
}
}
/// Direct RPC implementation on Arc<Mutex<Supervisor>>
/// This eliminates the need for a wrapper struct
#[async_trait]
impl SupervisorRpcServer for Arc<Mutex<Supervisor>> {
async fn register_runner(&self, params: RegisterRunnerParams) -> RpcResult<String> {
debug!("OpenRPC request: register_runner with params: {:?}", params);
let mut supervisor = self.lock().await;
supervisor
.register_runner(&params.secret, &params.name, &params.queue)
.await
.map_err(runner_error_to_rpc_error)?;
// Return the runner name that was registered
Ok(params.name)
}
async fn create_job(&self, params: RunJobParams) -> RpcResult<String> {
debug!("OpenRPC request: create_job with params: {:?}", params);
let mut supervisor = self.lock().await;
let job_id = supervisor
.create_job(&params.secret, params.job)
.await
.map_err(runner_error_to_rpc_error)?;
Ok(job_id)
}
async fn run_job(&self, params: RunJobParams) -> RpcResult<Option<String>> {
debug!("OpenRPC request: run_job with params: {:?}", params);
let mut supervisor = self.lock().await;
supervisor
.run_job(&params.secret, params.job)
.await
.map_err(runner_error_to_rpc_error)
}
async fn remove_runner(&self, actor_id: String) -> RpcResult<()> {
debug!("OpenRPC request: remove_runner with actor_id: {}", actor_id);
let mut supervisor = self.lock().await;
supervisor
.remove_runner(&actor_id)
.await
.map_err(runner_error_to_rpc_error)
}
async fn list_runners(&self) -> RpcResult<Vec<String>> {
debug!("OpenRPC request: list_runners");
let supervisor = self.lock().await;
Ok(supervisor.list_runners().into_iter().map(|s| s.to_string()).collect())
}
async fn start_runner(&self, actor_id: String) -> RpcResult<()> {
debug!("OpenRPC request: start_runner with actor_id: {}", actor_id);
let mut supervisor = self.lock().await;
supervisor
.start_runner(&actor_id)
.await
.map_err(runner_error_to_rpc_error)
}
async fn stop_runner(&self, actor_id: String, force: bool) -> RpcResult<()> {
debug!("OpenRPC request: stop_runner with actor_id: {}, force: {}", actor_id, force);
let mut supervisor = self.lock().await;
supervisor
.stop_runner(&actor_id, force)
.await
.map_err(runner_error_to_rpc_error)
}
async fn get_runner(&self, actor_id: String) -> RpcResult<RunnerWrapper> {
debug!("OpenRPC request: get_runner with actor_id: {}", actor_id);
let supervisor = self.lock().await;
match supervisor.get_runner(&actor_id) {
Some(runner) => Ok(RunnerWrapper::from(runner)),
None => Err(ErrorObjectOwned::owned(-32000, format!("Runner not found: {}", actor_id), None::<()>)),
}
}
async fn get_runner_status(&self, actor_id: String) -> RpcResult<ProcessStatusWrapper> {
debug!("OpenRPC request: get_runner_status with actor_id: {}", actor_id);
let supervisor = self.lock().await;
let status = supervisor
.get_runner_status(&actor_id)
.await
.map_err(runner_error_to_rpc_error)?;
Ok(status.into())
}
async fn get_runner_logs(&self, params: GetLogsParams) -> RpcResult<Vec<LogInfoWrapper>> {
debug!("OpenRPC request: get_runner_logs with params: {:?}", params);
let supervisor = self.lock().await;
let logs = supervisor
.get_runner_logs(&params.actor_id, params.lines, params.follow)
.await
.map_err(runner_error_to_rpc_error)?;
Ok(logs.into_iter().map(LogInfoWrapper::from).collect())
}
async fn queue_job_to_runner(&self, params: QueueJobParams) -> RpcResult<()> {
debug!("OpenRPC request: queue_job_to_runner with params: {:?}", params);
let mut supervisor = self.lock().await;
supervisor
.queue_job_to_runner(&params.runner_name, params.job)
.await
.map_err(runner_error_to_rpc_error)
}
async fn list_jobs(&self) -> RpcResult<Vec<String>> {
debug!("OpenRPC request: list_jobs");
let supervisor = self.lock().await;
supervisor
.list_jobs()
.await
.map_err(runner_error_to_rpc_error)
}
async fn get_job(&self, job_id: String) -> RpcResult<Job> {
debug!("OpenRPC request: get_job with job_id: {}", job_id);
let supervisor = self.lock().await;
supervisor
.get_job(&job_id)
.await
.map_err(runner_error_to_rpc_error)
}
async fn ping_runner(&self, runner_id: String) -> RpcResult<String> {
debug!("OpenRPC request: ping_runner with runner_id: {}", runner_id);
let mut supervisor = self.lock().await;
supervisor
.ping_runner(&runner_id)
.await
.map_err(runner_error_to_rpc_error)
}
async fn stop_job(&self, job_id: String) -> RpcResult<()> {
debug!("OpenRPC request: stop_job with job_id: {}", job_id);
let mut supervisor = self.lock().await;
supervisor
.stop_job(&job_id)
.await
.map_err(runner_error_to_rpc_error)
}
async fn delete_job(&self, job_id: String) -> RpcResult<()> {
debug!("OpenRPC request: delete_job with job_id: {}", job_id);
let mut supervisor = self.lock().await;
supervisor
.delete_job(&job_id)
.await
.map_err(runner_error_to_rpc_error)
}
async fn queue_and_wait(&self, params: QueueAndWaitParams) -> RpcResult<Option<String>> {
debug!("OpenRPC request: queue_and_wait with params: {:?}", params);
let mut supervisor = self.lock().await;
supervisor
.queue_and_wait(&params.runner_name, params.job, params.timeout_secs)
.await
.map_err(runner_error_to_rpc_error)
}
async fn get_all_runner_status(&self) -> RpcResult<Vec<RunnerStatusResponse>> {
debug!("OpenRPC request: get_all_runner_status");
let supervisor = self.lock().await;
let statuses = supervisor.get_all_runner_status().await
.map_err(runner_error_to_rpc_error)?;
Ok(statuses
.into_iter()
.map(|(actor_id, status)| RunnerStatusResponse {
actor_id,
status: ProcessStatusWrapper::from(status),
})
.collect())
}
async fn start_all(&self) -> RpcResult<Vec<(String, String)>> {
debug!("OpenRPC request: start_all");
let mut supervisor = self.lock().await;
let results = supervisor.start_all().await;
Ok(results
.into_iter()
.map(|(actor_id, result)| {
let status = match result {
Ok(_) => "Success".to_string(),
Err(e) => format!("Error: {}", e),
};
(actor_id, status)
})
.collect())
}
async fn stop_all(&self, force: bool) -> RpcResult<Vec<(String, String)>> {
debug!("OpenRPC request: stop_all with force: {}", force);
let mut supervisor = self.lock().await;
let results = supervisor.stop_all(force).await;
Ok(results
.into_iter()
.map(|(actor_id, result)| {
let status = match result {
Ok(_) => "Success".to_string(),
Err(e) => format!("Error: {}", e),
};
(actor_id, status)
})
.collect())
}
async fn get_all_status(&self) -> RpcResult<Vec<(String, String)>> {
debug!("OpenRPC request: get_all_status");
let supervisor = self.lock().await;
let statuses = supervisor.get_all_runner_status().await
.map_err(runner_error_to_rpc_error)?;
Ok(statuses
.into_iter()
.map(|(actor_id, status)| {
let status_str = format!("{:?}", status);
(actor_id, status_str)
})
.collect())
}
async fn add_secret(&self, params: AddSecretParams) -> RpcResult<()> {
debug!("OpenRPC request: add_secret, type: {}", params.secret_type);
let mut supervisor = self.lock().await;
// Verify admin secret
if !supervisor.has_admin_secret(&params.admin_secret) {
return Err(ErrorObject::owned(-32602, "Invalid admin secret", None::<()>));
}
match params.secret_type.as_str() {
"admin" => {
supervisor.add_admin_secret(params.secret_value);
}
"user" => {
supervisor.add_user_secret(params.secret_value);
}
"register" => {
supervisor.add_register_secret(params.secret_value);
}
_ => {
return Err(ErrorObject::owned(-32602, "Invalid secret type. Must be 'admin', 'user', or 'register'", None::<()>));
}
}
Ok(())
}
async fn remove_secret(&self, params: RemoveSecretParams) -> RpcResult<()> {
debug!("OpenRPC request: remove_secret, type: {}", params.secret_type);
let mut supervisor = self.lock().await;
// Verify admin secret
if !supervisor.has_admin_secret(&params.admin_secret) {
return Err(ErrorObject::owned(-32602, "Invalid admin secret", None::<()>));
}
match params.secret_type.as_str() {
"admin" => {
supervisor.remove_admin_secret(&params.secret_value);
}
"user" => {
supervisor.remove_user_secret(&params.secret_value);
}
"register" => {
supervisor.remove_register_secret(&params.secret_value);
}
_ => {
return Err(ErrorObject::owned(-32602, "Invalid secret type. Must be 'admin', 'user', or 'register'", None::<()>));
}
}
Ok(())
}
async fn list_secrets(&self, params: ListSecretsParams) -> RpcResult<SupervisorInfoResponse> {
debug!("OpenRPC request: list_secrets");
let supervisor = self.lock().await;
// Verify admin secret
if !supervisor.has_admin_secret(&params.admin_secret) {
return Err(ErrorObject::owned(-32602, "Invalid admin secret", None::<()>));
}
Ok(SupervisorInfoResponse {
server_url: "http://127.0.0.1:3030".to_string(),
admin_secrets_count: supervisor.admin_secrets_count(),
user_secrets_count: supervisor.user_secrets_count(),
register_secrets_count: supervisor.register_secrets_count(),
runners_count: supervisor.runners_count(),
})
}
async fn list_admin_secrets(&self, admin_secret: String) -> RpcResult<Vec<String>> {
debug!("OpenRPC request: list_admin_secrets");
let supervisor = self.lock().await;
// Verify admin secret
if !supervisor.has_admin_secret(&admin_secret) {
return Err(ErrorObject::owned(-32602, "Invalid admin secret", None::<()>));
}
Ok(supervisor.get_admin_secrets())
}
async fn list_user_secrets(&self, admin_secret: String) -> RpcResult<Vec<String>> {
debug!("OpenRPC request: list_user_secrets");
let supervisor = self.lock().await;
// Verify admin secret
if !supervisor.has_admin_secret(&admin_secret) {
return Err(ErrorObject::owned(-32602, "Invalid admin secret", None::<()>));
}
Ok(supervisor.get_user_secrets())
}
async fn list_register_secrets(&self, admin_secret: String) -> RpcResult<Vec<String>> {
debug!("OpenRPC request: list_register_secrets");
let supervisor = self.lock().await;
// Verify admin secret
if !supervisor.has_admin_secret(&admin_secret) {
return Err(ErrorObject::owned(-32602, "Invalid admin secret", None::<()>));
}
Ok(supervisor.get_register_secrets())
}
async fn get_supervisor_info(&self, admin_secret: String) -> RpcResult<SupervisorInfoResponse> {
debug!("OpenRPC request: get_supervisor_info");
let supervisor = self.lock().await;
// Verify admin secret
if !supervisor.has_admin_secret(&admin_secret) {
return Err(ErrorObject::owned(-32602, "Invalid admin secret", None::<()>));
}
Ok(SupervisorInfoResponse {
server_url: "http://127.0.0.1:3030".to_string(),
admin_secrets_count: supervisor.admin_secrets_count(),
user_secrets_count: supervisor.user_secrets_count(),
register_secrets_count: supervisor.register_secrets_count(),
runners_count: supervisor.runners_count(),
})
}
async fn rpc_discover(&self) -> RpcResult<serde_json::Value> {
debug!("OpenRPC request: rpc.discover");
// Read OpenRPC specification from docs/openrpc.json
match load_openrpc_spec() {
Ok(spec) => Ok(spec),
Err(e) => {
error!("Failed to load OpenRPC specification: {}", e);
// Fallback to a minimal spec if file loading fails
Ok(serde_json::json!({
"openrpc": "1.3.2",
"info": {
"title": "Hero Supervisor OpenRPC API",
"version": "1.0.0",
"description": "OpenRPC API for managing Hero Supervisor runners and jobs"
},
"methods": [],
"error": "Failed to load full specification"
}))
}
}
}
}
/// Start the OpenRPC server with a default supervisor
pub async fn start_server(addr: SocketAddr) -> anyhow::Result<ServerHandle> {
let supervisor = Arc::new(Mutex::new(Supervisor::default()));
start_server_with_supervisor(addr, supervisor).await
}
/// Start the OpenRPC server with an existing supervisor instance
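///
/// # Example
///
/// A minimal sketch (not compiled as a doc-test); it assumes a supervisor
/// built elsewhere and a free port on 127.0.0.1:3030.
///
/// ```ignore
/// use std::sync::Arc;
/// use tokio::sync::Mutex;
///
/// let supervisor = Arc::new(Mutex::new(Supervisor::default()));
/// let addr: std::net::SocketAddr = "127.0.0.1:3030".parse()?;
/// let handle = start_server_with_supervisor(addr, supervisor).await?;
/// // The handle keeps the server alive; await `stopped()` to block until shutdown.
/// handle.stopped().await;
/// ```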
pub async fn start_server_with_supervisor(
addr: SocketAddr,
supervisor: Arc<Mutex<Supervisor>>,
) -> anyhow::Result<ServerHandle> {
let server = Server::builder().build(addr).await?;
let handle = server.start(supervisor.into_rpc());
Ok(handle)
}
/// Start HTTP OpenRPC server (Unix socket support would require additional dependencies)
pub async fn start_http_openrpc_server(
supervisor: Arc<Mutex<Supervisor>>,
bind_address: &str,
port: u16,
) -> anyhow::Result<ServerHandle> {
let http_addr: SocketAddr = format!("{}:{}", bind_address, port).parse()?;
// Configure CORS to allow requests from the admin UI
let cors = CorsLayer::new()
.allow_origin(Any)
.allow_headers(Any)
.allow_methods(Any);
// Start HTTP server with CORS
let http_server = Server::builder()
.set_http_middleware(tower::ServiceBuilder::new().layer(cors))
.build(http_addr)
.await?;
let http_handle = http_server.start(supervisor.into_rpc());
info!("OpenRPC HTTP server running at http://{} with CORS enabled", http_addr);
Ok(http_handle)
}
/// Simplified server startup function for supervisor binary
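///
/// # Example
///
/// A hedged sketch of how a binary might call this; the bind address and port
/// are illustrative, and the supervisor is assumed to be configured via
/// `SupervisorBuilder` beforehand.
///
/// ```ignore
/// use std::sync::Arc;
/// use tokio::sync::Mutex;
///
/// let supervisor = Arc::new(Mutex::new(
///     Supervisor::builder()
///         .redis_url("redis://localhost:6379")
///         .build()
///         .await?,
/// ));
/// start_openrpc_servers(supervisor, "127.0.0.1", 3030).await?;
/// // The server runs on a background task; keep the main task alive.
/// tokio::signal::ctrl_c().await?;
/// ```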
pub async fn start_openrpc_servers(
supervisor: Arc<Mutex<Supervisor>>,
bind_address: &str,
port: u16,
) -> Result<(), Box<dyn std::error::Error>> {
let bind_address = bind_address.to_string();
tokio::spawn(async move {
match start_http_openrpc_server(supervisor, &bind_address, port).await {
Ok(http_handle) => {
info!("OpenRPC server started successfully");
// Keep the server running
http_handle.stopped().await;
error!("OpenRPC server stopped unexpectedly");
}
Err(e) => {
error!("Failed to start OpenRPC server: {}", e);
}
}
});
// Give the server a moment to start up
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_supervisor_rpc_creation() {
let _rpc = SupervisorRpcImpl::new();
// Just test that we can create the RPC implementation
}
#[cfg(feature = "openrpc")]
#[test]
fn test_process_manager_type_parsing() {
assert!(SupervisorRpcImpl::parse_process_manager_type("simple").is_ok());
assert!(SupervisorRpcImpl::parse_process_manager_type("tmux").is_ok());
assert!(SupervisorRpcImpl::parse_process_manager_type("Simple").is_ok());
assert!(SupervisorRpcImpl::parse_process_manager_type("TMUX").is_ok());
assert!(SupervisorRpcImpl::parse_process_manager_type("invalid").is_err());
}
}

234
src/runner.rs Normal file
View File

@@ -0,0 +1,234 @@
//! Runner implementation for actor process management.
use crate::job::{Job};
use log::{debug, info};
use redis::AsyncCommands;
use sal_service_manager::{ProcessManager, ProcessManagerError as ServiceProcessManagerError, ProcessStatus, ProcessConfig};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Mutex;
/// Represents the current status of an actor/runner (alias for ProcessStatus)
pub type RunnerStatus = ProcessStatus;
/// Log information structure
#[derive(Debug, Clone)]
pub struct LogInfo {
pub timestamp: String,
pub level: String,
pub message: String,
}
/// Runner configuration and state (merged from RunnerConfig)
#[derive(Debug, Clone)]
pub struct Runner {
/// Unique identifier for the runner
pub id: String,
pub name: String,
pub namespace: String,
/// Path to the actor binary
pub command: PathBuf, // Command to run runner by, used only if supervisor is used to run runners
/// Redis URL for job queue
pub redis_url: String,
}
impl Runner {
/// Create a new runner from configuration
pub fn from_config(config: RunnerConfig) -> Self {
Self {
id: config.id,
name: config.name,
namespace: config.namespace,
command: config.command,
redis_url: config.redis_url,
}
}
/// Create a new runner with the given parameters
pub fn new(
id: String,
name: String,
namespace: String,
command: PathBuf,
redis_url: String,
) -> Self {
Self {
id,
name,
namespace,
command,
redis_url,
}
}
/// Get the Redis queue key for this runner, prefixed with the runner's namespace when one is set
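///
/// # Example
///
/// A small illustration with made-up values:
///
/// ```ignore
/// let runner = Runner::new(
///     "osis".to_string(),
///     "osis".to_string(),
///     "hero".to_string(),
///     std::path::PathBuf::from("/usr/bin/osis_actor"),
///     "redis://localhost:6379".to_string(),
/// );
/// assert_eq!(runner.get_queue(), "hero:runner:osis");
/// // With an empty namespace the key is just "runner:osis".
/// ```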
pub fn get_queue(&self) -> String {
if self.namespace.is_empty() {
format!("runner:{}", self.name)
} else {
format!("{}:runner:{}", self.namespace, self.name)
}
}
}
/// Result type for runner operations
pub type RunnerResult<T> = Result<T, RunnerError>;
/// Errors that can occur during runner operations
#[derive(Debug, thiserror::Error)]
pub enum RunnerError {
#[error("Actor '{actor_id}' not found")]
ActorNotFound { actor_id: String },
#[error("Actor '{actor_id}' is already running")]
ActorAlreadyRunning { actor_id: String },
#[error("Actor '{actor_id}' is not running")]
ActorNotRunning { actor_id: String },
#[error("Failed to start actor '{actor_id}': {reason}")]
StartupFailed { actor_id: String, reason: String },
#[error("Failed to stop actor '{actor_id}': {reason}")]
StopFailed { actor_id: String, reason: String },
#[error("Timeout waiting for actor '{actor_id}' to start")]
StartupTimeout { actor_id: String },
#[error("Job queue error for actor '{actor_id}': {reason}")]
QueueError { actor_id: String, reason: String },
#[error("Process manager error: {source}")]
ProcessManagerError {
#[from]
source: ServiceProcessManagerError,
},
#[error("Configuration error: {reason}")]
ConfigError { reason: String },
#[error("Invalid secret: {0}")]
InvalidSecret(String),
#[error("IO error: {source}")]
IoError {
#[from]
source: std::io::Error,
},
#[error("Redis error: {source}")]
RedisError {
#[from]
source: redis::RedisError,
},
#[error("Job error: {source}")]
JobError {
#[from]
source: crate::JobError,
},
}
/// Convert Runner to ProcessConfig
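///
/// The resulting process is launched as:
/// `<command> --id <runner id> --redis-url <redis url>`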
pub fn runner_to_process_config(config: &Runner) -> ProcessConfig {
ProcessConfig::new(config.id.clone(), config.command.clone())
.with_arg("--id".to_string())
.with_arg(config.id.clone())
.with_arg("--redis-url".to_string())
.with_arg(config.redis_url.clone())
}
// Type alias for backward compatibility
pub type RunnerConfig = Runner;
#[cfg(test)]
mod tests {
use super::*;
use sal_service_manager::{ProcessManagerError, SimpleProcessManager};
use std::collections::HashMap;
#[derive(Debug)]
struct MockProcessManager {
processes: HashMap<String, ProcessStatus>,
}
impl MockProcessManager {
fn new() -> Self {
Self {
processes: HashMap::new(),
}
}
}
#[async_trait::async_trait]
impl ProcessManager for MockProcessManager {
async fn start_process(&mut self, config: &ProcessConfig) -> Result<(), ProcessManagerError> {
self.processes.insert(config.id.clone(), ProcessStatus::Running);
Ok(())
}
async fn stop_process(&mut self, process_id: &str, _force: bool) -> Result<(), ProcessManagerError> {
self.processes.insert(process_id.to_string(), ProcessStatus::Stopped);
Ok(())
}
async fn process_status(&self, process_id: &str) -> Result<ProcessStatus, ProcessManagerError> {
Ok(self.processes.get(process_id).cloned().unwrap_or(ProcessStatus::Stopped))
}
async fn process_logs(&self, _process_id: &str, _lines: Option<usize>, _follow: bool) -> Result<Vec<LogInfo>, ProcessManagerError> {
Ok(vec![])
}
async fn health_check(&self) -> Result<(), ProcessManagerError> {
Ok(())
}
async fn list_processes(&self) -> Result<Vec<String>, ProcessManagerError> {
Ok(self.processes.keys().cloned().collect())
}
}
#[test]
fn test_runner_creation() {
let runner = Runner::new(
"test_actor".to_string(),
"test_runner".to_string(),
"".to_string(),
PathBuf::from("/path/to/binary"),
"redis://localhost:6379".to_string(),
);
assert_eq!(runner.id, "test_actor");
assert_eq!(runner.name, "test_runner");
assert_eq!(runner.command, PathBuf::from("/path/to/binary"));
assert_eq!(runner.redis_url, "redis://localhost:6379");
}
#[test]
fn test_runner_get_queue() {
let runner = Runner::new(
"test_actor".to_string(),
"test_runner".to_string(),
"".to_string(),
PathBuf::from("/path/to/binary"),
"redis://localhost:6379".to_string(),
);
let queue_key = runner.get_queue();
assert_eq!(queue_key, "runner:test_runner");
}
#[test]
fn test_runner_error_types() {
let error = RunnerError::ActorNotFound {
actor_id: "test".to_string(),
};
assert!(error.to_string().contains("test"));
let error = RunnerError::ActorAlreadyRunning {
actor_id: "test".to_string(),
};
assert!(error.to_string().contains("already running"));
}
}

777
src/supervisor.rs Normal file
View File

@@ -0,0 +1,777 @@
//! Main supervisor implementation for managing multiple actor runners.
use chrono::Utc;
use sal_service_manager::{ProcessManager, SimpleProcessManager, TmuxProcessManager};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Mutex;
use crate::{client::{Client, ClientBuilder}, job::JobStatus, runner::{LogInfo, Runner, RunnerConfig, RunnerError, RunnerResult, RunnerStatus}, JobError};
use crate::{job::Job};
#[cfg(feature = "admin")]
use supervisor_admin_server::{AdminSupervisor, RunnerConfigInfo, JobInfo};
/// Process manager type for a runner
#[derive(Debug, Clone)]
pub enum ProcessManagerType {
/// Simple process manager for direct process spawning
Simple,
/// Tmux process manager for session-based management
Tmux(String), // session name
}
/// Main supervisor that manages multiple runners
#[derive(Clone)]
pub struct Supervisor {
/// Map of runner name to runner configuration
runners: HashMap<String, Runner>,
/// Shared process manager for all runners
process_manager: Arc<Mutex<dyn ProcessManager>>,
/// Shared Redis client for all runners
redis_client: redis::Client,
/// Namespace for queue keys
namespace: String,
/// Admin secrets for full access
admin_secrets: Vec<String>,
/// User secrets for limited access
user_secrets: Vec<String>,
/// Register secrets for runner registration
register_secrets: Vec<String>,
client: Client,
}
pub struct SupervisorBuilder {
/// Map of runner name to runner configuration
runners: HashMap<String, Runner>,
/// Redis URL for connection
redis_url: String,
/// Process manager type
process_manager_type: ProcessManagerType,
/// Namespace for queue keys
namespace: String,
/// Admin secrets for full access
admin_secrets: Vec<String>,
/// User secrets for limited access
user_secrets: Vec<String>,
/// Register secrets for runner registration
register_secrets: Vec<String>,
client_builder: ClientBuilder,
}
impl SupervisorBuilder {
/// Create a new supervisor builder
pub fn new() -> Self {
Self {
runners: HashMap::new(),
redis_url: "redis://localhost:6379".to_string(),
process_manager_type: ProcessManagerType::Simple,
namespace: "".to_string(),
admin_secrets: Vec::new(),
user_secrets: Vec::new(),
register_secrets: Vec::new(),
client_builder: ClientBuilder::new(),
}
}
/// Set the Redis URL
pub fn redis_url<S: Into<String>>(mut self, url: S) -> Self {
let url_string = url.into();
self.redis_url = url_string.clone();
self.client_builder = self.client_builder.redis_url(url_string);
self
}
/// Set the process manager type
pub fn process_manager(mut self, pm_type: ProcessManagerType) -> Self {
self.process_manager_type = pm_type;
self
}
/// Set the namespace for queue keys
pub fn namespace<S: Into<String>>(mut self, namespace: S) -> Self {
let namespace_string = namespace.into();
self.namespace = namespace_string.clone();
self.client_builder = self.client_builder.namespace(namespace_string);
self
}
/// Add an admin secret
pub fn add_admin_secret<S: Into<String>>(mut self, secret: S) -> Self {
self.admin_secrets.push(secret.into());
self
}
/// Add multiple admin secrets
pub fn admin_secrets<I, S>(mut self, secrets: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
self.admin_secrets.extend(secrets.into_iter().map(|s| s.into()));
self
}
/// Add a user secret
pub fn add_user_secret<S: Into<String>>(mut self, secret: S) -> Self {
self.user_secrets.push(secret.into());
self
}
/// Add multiple user secrets
pub fn user_secrets<I, S>(mut self, secrets: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
self.user_secrets.extend(secrets.into_iter().map(|s| s.into()));
self
}
/// Add a register secret
pub fn add_register_secret<S: Into<String>>(mut self, secret: S) -> Self {
self.register_secrets.push(secret.into());
self
}
/// Add multiple register secrets
pub fn register_secrets<I, S>(mut self, secrets: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
self.register_secrets.extend(secrets.into_iter().map(|s| s.into()));
self
}
/// Add a runner to the supervisor
pub fn add_runner(mut self, runner: Runner) -> Self {
self.runners.insert(runner.id.clone(), runner);
self
}
/// Build the supervisor
pub async fn build(self) -> RunnerResult<Supervisor> {
// Create process manager based on type
let process_manager: Arc<Mutex<dyn ProcessManager>> = match &self.process_manager_type {
ProcessManagerType::Simple => {
Arc::new(Mutex::new(SimpleProcessManager::new()))
}
ProcessManagerType::Tmux(session_name) => {
Arc::new(Mutex::new(TmuxProcessManager::new(session_name.clone())))
}
};
// Create Redis client
let redis_client = redis::Client::open(self.redis_url.as_str())
.map_err(|e| RunnerError::ConfigError {
reason: format!("Invalid Redis URL: {}", e),
})?;
Ok(Supervisor {
client: self.client_builder.build().await.unwrap(),
runners: self.runners,
process_manager,
redis_client,
namespace: self.namespace,
admin_secrets: self.admin_secrets,
user_secrets: self.user_secrets,
register_secrets: self.register_secrets,
})
}
}
impl Supervisor {
/// Create a new supervisor builder
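///
/// # Example
///
/// A sketch of typical configuration; values are illustrative and Redis is
/// assumed to be reachable at the given URL.
///
/// ```ignore
/// let runner = Runner::new(
///     "osis".to_string(),
///     "osis".to_string(),
///     "hero".to_string(),
///     std::path::PathBuf::from("/usr/bin/osis_actor"),
///     "redis://localhost:6379".to_string(),
/// );
///
/// let supervisor = Supervisor::builder()
///     .redis_url("redis://localhost:6379")
///     .namespace("hero")
///     .process_manager(ProcessManagerType::Tmux("hero".to_string()))
///     .add_admin_secret("admin-secret")
///     .add_runner(runner)
///     .build()
///     .await?;
/// ```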
pub fn builder() -> SupervisorBuilder {
SupervisorBuilder::new()
}
/// Add a new runner to the supervisor
pub async fn add_runner(
&mut self,
config: RunnerConfig,
) -> RunnerResult<()> {
// Runner is now just the config
let runner = Runner::from_config(config.clone());
self.runners.insert(config.id.clone(), runner);
Ok(())
}
/// Register a new runner with secret-based authentication
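///
/// # Example
///
/// A brief sketch; the secret must be one of the configured admin or register
/// secrets, and the queue argument is currently not used when building the
/// runner config.
///
/// ```ignore
/// supervisor
///     .register_runner("register-secret", "osis", "hero:runner:osis")
///     .await?;
/// ```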
pub async fn register_runner(&mut self, secret: &str, name: &str, queue: &str) -> RunnerResult<()> {
// Check if the secret is valid (admin or register secret)
if !self.admin_secrets.contains(&secret.to_string()) &&
!self.register_secrets.contains(&secret.to_string()) {
return Err(RunnerError::InvalidSecret("Invalid secret for runner registration".to_string()));
}
// Create a basic runner config for the named runner
let config = RunnerConfig {
id: name.to_string(), // Use the provided name as the actor/runner id
name: name.to_string(), // Use the provided name as the runner name
namespace: self.namespace.clone(),
command: PathBuf::from("/tmp/mock_runner"), // Default path
redis_url: "redis://localhost:6379".to_string(),
};
// Add the runner using existing logic
self.add_runner(config).await
}
/// Create a job (fire-and-forget, non-blocking) with secret-based authentication
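///
/// # Example
///
/// A hedged sketch; the secret must match a configured admin or user secret,
/// "osis" must name a registered runner, and the payload and executor values
/// are placeholders.
///
/// ```ignore
/// use std::time::Duration;
///
/// let job = JobBuilder::new()
///     .caller_id("caller-1")
///     .context_id("context-1")
///     .payload("print(40 + 2)")
///     .runner_name("osis")
///     .executor("rhai")
///     .timeout(Duration::from_secs(30))
///     .build()?;
///
/// let job_id = supervisor.create_job("user-secret", job).await?;
/// println!("queued job {job_id}");
/// ```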
pub async fn create_job(&mut self, secret: &str, job: crate::job::Job) -> RunnerResult<String> {
// Check if the secret is valid (admin or user secret)
if !self.admin_secrets.contains(&secret.to_string()) &&
!self.user_secrets.contains(&secret.to_string()) {
return Err(RunnerError::InvalidSecret("Invalid secret for job creation".to_string()));
}
// Find the runner by name
let runner_name = job.runner_name.clone();
let job_id = job.id.clone(); // Store job ID before moving job
if let Some(_runner) = self.runners.get(&runner_name) {
// Use the supervisor's queue_job_to_runner method (fire-and-forget)
self.queue_job_to_runner(&runner_name, job).await?;
Ok(job_id) // Return the job ID immediately
} else {
Err(RunnerError::ActorNotFound {
actor_id: job.runner_name.clone(),
})
}
}
/// Run a job on the appropriate runner with secret-based authentication
/// This is a synchronous operation that queues the job, waits for the result, and returns it
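///
/// # Example
///
/// A hedged sketch; `job` is assumed to be built with `JobBuilder` and the
/// secret must match a configured admin or user secret.
///
/// ```ignore
/// match supervisor.run_job("user-secret", job).await? {
///     Some(output) => println!("result: {output}"),
///     None => println!("no result within the 30-second timeout"),
/// }
/// ```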
pub async fn run_job(&mut self, secret: &str, job: crate::job::Job) -> RunnerResult<Option<String>> {
// Check if the secret is valid (admin or user secret)
if !self.admin_secrets.contains(&secret.to_string()) &&
!self.user_secrets.contains(&secret.to_string()) {
return Err(RunnerError::InvalidSecret("Invalid secret for job execution".to_string()));
}
// Find the runner by name
let runner_name = job.runner_name.clone();
if let Some(_runner) = self.runners.get(&runner_name) {
// Use the synchronous queue_and_wait method with a reasonable timeout (30 seconds)
self.queue_and_wait(&runner_name, job, 30).await
} else {
Err(RunnerError::ActorNotFound {
actor_id: job.runner_name.clone(),
})
}
}
/// Remove a runner from the supervisor
pub async fn remove_runner(&mut self, actor_id: &str) -> RunnerResult<()> {
// Removing the runner from the map drops its configuration.
// Any running process should be stopped separately via stop_runner.
self.runners.remove(actor_id);
Ok(())
}
/// Get a runner by actor ID
pub fn get_runner(&self, actor_id: &str) -> Option<&Runner> {
self.runners.get(actor_id)
}
/// Get a job by job ID from Redis
pub async fn get_job(&self, job_id: &str) -> RunnerResult<crate::job::Job> {
self.client.load_job_from_redis(job_id).await
.map_err(|e| RunnerError::QueueError {
actor_id: job_id.to_string(),
reason: format!("Failed to load job: {}", e),
})
}
/// Ping a runner by dispatching a ping job to its queue
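///
/// # Example
///
/// A short sketch; "osis" stands in for a registered runner id.
///
/// ```ignore
/// let ping_job_id = supervisor.ping_runner("osis").await?;
/// println!("ping job queued with id {ping_job_id}");
/// ```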
pub async fn ping_runner(&mut self, runner_id: &str) -> RunnerResult<String> {
use crate::job::{Job, JobBuilder};
use std::time::Duration;
// Check if runner exists
if !self.runners.contains_key(runner_id) {
return Err(RunnerError::ActorNotFound {
actor_id: runner_id.to_string(),
});
}
// Create a ping job
let ping_job = JobBuilder::new()
.caller_id("supervisor_ping")
.context_id("ping_context")
.payload("ping")
.runner_name(runner_id)
.executor("ping")
.timeout(Duration::from_secs(10))
.build()
.map_err(|e| RunnerError::QueueError {
actor_id: runner_id.to_string(),
reason: format!("Failed to create ping job: {}", e),
})?;
// Queue the ping job
let job_id = ping_job.id.clone();
self.queue_job_to_runner(runner_id, ping_job).await?;
Ok(job_id)
}
/// Stop a job by ID
pub async fn stop_job(&mut self, job_id: &str) -> RunnerResult<()> {
// For now, stopping a job marks it as Stopping in Redis.
// A more sophisticated implementation might send a stop signal to the runner.
let _ = self.client.set_job_status(job_id, JobStatus::Stopping).await;
Ok(())
}
/// Delete a job by ID
pub async fn delete_job(&mut self, job_id: &str) -> RunnerResult<()> {
self.client.delete_job(&job_id).await
}
/// List all managed runners
pub fn list_runners(&self) -> Vec<&str> {
self.runners.keys().map(|s| s.as_str()).collect()
}
/// Start a specific runner
pub async fn start_runner(&mut self, actor_id: &str) -> RunnerResult<()> {
use crate::runner::runner_to_process_config;
use log::info;
if let Some(runner) = self.runners.get(actor_id) {
info!("Starting actor {}", runner.id);
let process_config = runner_to_process_config(runner);
let mut pm = self.process_manager.lock().await;
pm.start_process(&process_config).await?;
info!("Successfully started actor {}", runner.id);
Ok(())
} else {
Err(RunnerError::ActorNotFound {
actor_id: actor_id.to_string(),
})
}
}
/// Stop a specific runner
pub async fn stop_runner(&mut self, actor_id: &str, force: bool) -> RunnerResult<()> {
use log::info;
if let Some(runner) = self.runners.get(actor_id) {
info!("Stopping actor {}", runner.id);
let mut pm = self.process_manager.lock().await;
pm.stop_process(&runner.id, force).await?;
info!("Successfully stopped actor {}", runner.id);
Ok(())
} else {
Err(RunnerError::ActorNotFound {
actor_id: actor_id.to_string(),
})
}
}
/// Get status of a specific runner
pub async fn get_runner_status(&self, actor_id: &str) -> RunnerResult<RunnerStatus> {
if let Some(runner) = self.runners.get(actor_id) {
let pm = self.process_manager.lock().await;
let status = pm.process_status(&runner.id).await?;
Ok(status)
} else {
Err(RunnerError::ActorNotFound {
actor_id: actor_id.to_string(),
})
}
}
/// Get logs from a specific runner
pub async fn get_runner_logs(
&self,
actor_id: &str,
lines: Option<usize>,
follow: bool,
) -> RunnerResult<Vec<LogInfo>> {
if let Some(runner) = self.runners.get(actor_id) {
let pm = self.process_manager.lock().await;
let logs = pm.process_logs(&runner.id, lines, follow).await?;
// Convert sal_service_manager::LogInfo to our LogInfo
let converted_logs = logs.into_iter().map(|log| LogInfo {
timestamp: log.timestamp,
level: log.level,
message: log.message,
}).collect();
Ok(converted_logs)
} else {
Err(RunnerError::ActorNotFound {
actor_id: actor_id.to_string(),
})
}
}
/// Queue a job to a specific runner by name
pub async fn queue_job_to_runner(&mut self, runner_name: &str, job: crate::job::Job) -> RunnerResult<()> {
use redis::AsyncCommands;
use log::{debug, info};
if let Some(runner) = self.runners.get(runner_name) {
debug!("Queuing job {} for actor {}", job.id, runner.id);
let mut conn = self.redis_client.get_multiplexed_async_connection().await
.map_err(|e| RunnerError::QueueError {
actor_id: runner.id.clone(),
reason: format!("Failed to connect to Redis: {}", e),
})?;
// Store the job in Redis first
self.client.store_job_in_redis(&job).await
.map_err(|e| RunnerError::QueueError {
actor_id: runner.id.clone(),
reason: format!("Failed to store job: {}", e),
})?;
// Use the runner's get_queue method with our namespace
let queue_key = runner.get_queue();
let _: () = conn.lpush(&queue_key, &job.id).await
.map_err(|e| RunnerError::QueueError {
actor_id: runner.id.clone(),
reason: format!("Failed to queue job: {}", e),
})?;
info!("Job {} queued successfully for actor {} on queue {}", job.id, runner.id, queue_key);
Ok(())
} else {
Err(RunnerError::ActorNotFound {
actor_id: runner_name.to_string(),
})
}
}
/// Queue a job to a specific runner and wait for the result
/// This implements the proper Hero job protocol:
/// 1. Queue the job to the runner
/// 2. BLPOP on the reply queue for this job
/// 3. Get the job result from the job hash
/// 4. Return the complete result
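///
/// # Example
///
/// A sketch of a synchronous round trip; the runner name, job contents, and
/// 30-second timeout are illustrative.
///
/// ```ignore
/// match supervisor.queue_and_wait("osis", job, 30).await? {
///     Some(result) => println!("job finished: {result}"),
///     None => println!("timed out waiting for the runner"),
/// }
/// ```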
pub async fn queue_and_wait(&mut self, runner_name: &str, job: crate::job::Job, timeout_secs: u64) -> RunnerResult<Option<String>> {
use redis::AsyncCommands;
let job_id = job.id.clone();
// First queue the job
self.queue_job_to_runner(runner_name, job).await?;
// Ensure the runner still exists (queue_job_to_runner has already validated this)
let _runner = self.runners.get(runner_name)
.ok_or_else(|| RunnerError::ActorNotFound {
actor_id: runner_name.to_string(),
})?;
// Get a Redis connection from the supervisor's shared Redis client
let mut conn = self.redis_client.get_multiplexed_async_connection().await
.map_err(|e| RunnerError::RedisError {
source: e
})?;
// BLPOP on the reply queue for this specific job
let reply_key = self.client.job_reply_key(&job_id);
let result: Option<Vec<String>> = conn.blpop(&reply_key, timeout_secs as f64).await
.map_err(|e| RunnerError::RedisError {
source: e
})?;
match result {
Some(reply_data) => {
// Reply received, now get the job result from the job hash
let job_key = self.client.job_key(&job_id);
let job_result: Option<String> = conn.hget(&job_key, "result").await
.map_err(|e| RunnerError::RedisError {
source: e
})?;
Ok(job_result)
}
None => {
// Timeout occurred
Ok(None)
}
}
}
/// Get status of all runners
pub async fn get_all_runner_status(&self) -> RunnerResult<Vec<(String, RunnerStatus)>> {
let mut results = Vec::new();
for (actor_id, _instance) in &self.runners {
match self.get_runner_status(actor_id).await {
Ok(status) => results.push((actor_id.clone(), status)),
Err(_) => {
use sal_service_manager::ProcessStatus;
results.push((actor_id.clone(), ProcessStatus::Stopped));
}
}
}
Ok(results)
}
/// Start all runners
pub async fn start_all(&mut self) -> Vec<(String, RunnerResult<()>)> {
let mut results = Vec::new();
let actor_ids: Vec<String> = self.runners.keys().cloned().collect();
for actor_id in actor_ids {
let result = self.start_runner(&actor_id).await;
results.push((actor_id, result));
}
results
}
/// Stop all runners
pub async fn stop_all(&mut self, force: bool) -> Vec<(String, RunnerResult<()>)> {
let mut results = Vec::new();
let actor_ids: Vec<String> = self.runners.keys().cloned().collect();
for actor_id in actor_ids {
let result = self.stop_runner(&actor_id, force).await;
results.push((actor_id, result));
}
results
}
/// Get status of all runners, returning a per-runner Result rather than falling back to Stopped on error
pub async fn get_all_status(&self) -> Vec<(String, RunnerResult<RunnerStatus>)> {
let mut results = Vec::new();
for (actor_id, _instance) in &self.runners {
let result = self.get_runner_status(actor_id).await;
results.push((actor_id.clone(), result));
}
results
}
/// Add an admin secret
pub fn add_admin_secret(&mut self, secret: String) {
if !self.admin_secrets.contains(&secret) {
self.admin_secrets.push(secret);
}
}
/// Remove an admin secret
pub fn remove_admin_secret(&mut self, secret: &str) -> bool {
if let Some(pos) = self.admin_secrets.iter().position(|x| x == secret) {
self.admin_secrets.remove(pos);
true
} else {
false
}
}
/// Check if admin secret exists
pub fn has_admin_secret(&self, secret: &str) -> bool {
self.admin_secrets.contains(&secret.to_string())
}
/// Get admin secrets count
pub fn admin_secrets_count(&self) -> usize {
self.admin_secrets.len()
}
/// Add a user secret
pub fn add_user_secret(&mut self, secret: String) {
if !self.user_secrets.contains(&secret) {
self.user_secrets.push(secret);
}
}
/// Remove a user secret
pub fn remove_user_secret(&mut self, secret: &str) -> bool {
if let Some(pos) = self.user_secrets.iter().position(|x| x == secret) {
self.user_secrets.remove(pos);
true
} else {
false
}
}
/// Check if user secret exists
pub fn has_user_secret(&self, secret: &str) -> bool {
self.user_secrets.contains(&secret.to_string())
}
/// Get user secrets count
pub fn user_secrets_count(&self) -> usize {
self.user_secrets.len()
}
/// Add a register secret
pub fn add_register_secret(&mut self, secret: String) {
if !self.register_secrets.contains(&secret) {
self.register_secrets.push(secret);
}
}
/// Remove a register secret
pub fn remove_register_secret(&mut self, secret: &str) -> bool {
if let Some(pos) = self.register_secrets.iter().position(|x| x == secret) {
self.register_secrets.remove(pos);
true
} else {
false
}
}
/// Check if register secret exists
pub fn has_register_secret(&self, secret: &str) -> bool {
self.register_secrets.contains(&secret.to_string())
}
/// Get register secrets count
pub fn register_secrets_count(&self) -> usize {
self.register_secrets.len()
}
/// List all job IDs from Redis
pub async fn list_jobs(&self) -> RunnerResult<Vec<String>> {
self.client.list_jobs().await
}
/// Get runners count
pub fn runners_count(&self) -> usize {
self.runners.len()
}
/// Get admin secrets (returns cloned vector for security)
pub fn get_admin_secrets(&self) -> Vec<String> {
self.admin_secrets.clone()
}
/// Get user secrets (returns cloned vector for security)
pub fn get_user_secrets(&self) -> Vec<String> {
self.user_secrets.clone()
}
/// Get register secrets (returns cloned vector for security)
pub fn get_register_secrets(&self) -> Vec<String> {
self.register_secrets.clone()
}
}
impl Default for Supervisor {
fn default() -> Self {
// Note: Default implementation creates an empty supervisor
// Use Supervisor::builder() for proper initialization
Self {
runners: HashMap::new(),
process_manager: Arc::new(Mutex::new(SimpleProcessManager::new())),
redis_client: redis::Client::open("redis://localhost:6379").unwrap(),
namespace: "".to_string(),
admin_secrets: Vec::new(),
user_secrets: Vec::new(),
register_secrets: Vec::new(),
client: Client::default(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use sal_service_manager::SimpleProcessManager;
#[tokio::test]
async fn test_supervisor_creation() {
let supervisor = Supervisor::builder()
.redis_url("redis://localhost:6379")
.build()
.await
.unwrap();
assert_eq!(supervisor.list_runners().len(), 0);
}
#[tokio::test]
async fn test_add_runner() {
use std::path::PathBuf;
let config = RunnerConfig::new(
"test_actor".to_string(),
"test_actor".to_string(),
"".to_string(),
PathBuf::from("/usr/bin/test_actor"),
"redis://localhost:6379".to_string(),
);
let runner = Runner::from_config(config.clone());
let mut supervisor = Supervisor::builder()
.redis_url("redis://localhost:6379")
.add_runner(runner)
.build()
.await
.unwrap();
assert_eq!(supervisor.list_runners().len(), 1);
}
#[tokio::test]
async fn test_add_multiple_runners() {
use std::path::PathBuf;
let config1 = RunnerConfig::new(
"sal_actor".to_string(),
"sal_actor".to_string(),
"".to_string(),
PathBuf::from("/usr/bin/sal_actor"),
"redis://localhost:6379".to_string(),
);
let config2 = RunnerConfig::new(
"osis_actor".to_string(),
"osis_actor".to_string(),
"".to_string(),
PathBuf::from("/usr/bin/osis_actor"),
"redis://localhost:6379".to_string(),
);
let runner1 = Runner::from_config(config1);
let runner2 = Runner::from_config(config2);
let supervisor = Supervisor::builder()
.redis_url("redis://localhost:6379")
.add_runner(runner1)
.add_runner(runner2)
.build()
.await
.unwrap();
assert_eq!(supervisor.list_runners().len(), 2);
assert!(supervisor.get_runner("sal_actor").is_some());
assert!(supervisor.get_runner("osis_actor").is_some());
}
}