Compare commits

..

8 Commits

Author SHA1 Message Date
Timur Gordon
43ad9b60aa Fix auth_verify to accept admin/user/register secrets directly
- Check secrets (admin_secrets, user_secrets, register_secrets) before API keys
- Allow UI to authenticate with the secrets provided in .env
- Secrets now work as expected for authentication
- API keys still supported as fallback for backward compatibility
2025-11-07 00:33:09 +01:00
Timur Gordon
43750b32d7 Fix Admin UI status detection to catch port binding errors
- Increase sleep time to 2 seconds to allow error detection
- Check log file for 'Address already in use' error
- Show helpful message to run --kill-ports when port is in use
- Kill the failed process to prevent zombie processes
2025-11-07 00:26:07 +01:00
Timur Gordon
4f4d1c0832 Add --kill-ports flag and improve summary output
- Added --kill-ports flag to kill processes on supervisor and admin UI ports
- Simplified summary: show Admin UI status inline (green URL or red error)
- Combined log viewing into single command
- Removed redundant error message during startup
2025-11-07 00:24:46 +01:00
Timur Gordon
0df5956575 Improve run script output with better error messages and summary
- Show helpful error message when Admin UI fails with log location
- Add summary section with all URLs and log file locations
- Better visual separation with dividers
- Clear instructions for viewing logs
2025-11-07 00:21:30 +01:00
Timur Gordon
278ba18d20 Add support for pre-configured runners from environment variables
- Added RUNNERS env var to .env.example (comma-separated runner names)
- Updated supervisor binary to accept --runners flag
- Runners are automatically registered on startup using first admin secret
- Updated run.sh to pass RUNNERS from env to supervisor binary
2025-11-07 00:15:22 +01:00
Timur Gordon
609af6ec15 Refactor supervisor to use environment variables and simplify binary
- Created scripts/generate_secret.sh to generate supervisor secrets
- Added .env.example with all configuration options
- Created scripts/environment.sh to load env vars from .env
- Updated scripts/run.sh to use env vars and pass as flags
- Simplified supervisor binary:
  - Removed bootstrap-admin-key and config file support
  - Made admin-secret required
  - Minimal output (only URLs)
  - Clean startup with no verbose logging
- Updated .gitignore for .env and log files
2025-11-07 00:08:32 +01:00
Timur Gordon
8a02fffcca Update build/test scripts with clean progress indicators and minimal output 2025-11-06 23:55:17 +01:00
Timur Gordon
bbced35996 Update build and test scripts to handle all three crates (core, client, ui) 2025-11-06 23:40:09 +01:00
21 changed files with 427 additions and 1839 deletions

23
.env.example Normal file
View File

@@ -0,0 +1,23 @@
# Hero Supervisor Configuration
# Redis connection URL
REDIS_URL=redis://127.0.0.1:6379
# OpenRPC Server Configuration
BIND_ADDRESS=127.0.0.1
PORT=3030
# Authentication Secrets (generate with: ./scripts/generate_secret.sh)
# At least one admin secret is required
ADMIN_SECRETS=your_admin_secret_here
# Optional: Additional secrets for different access levels
# USER_SECRETS=user_secret_1,user_secret_2
# REGISTER_SECRETS=register_secret_1
# Optional: Pre-configured runners (comma-separated names)
# These runners will be automatically registered on startup
# RUNNERS=runner1,runner2,runner3
# Optional: Mycelium network URL (requires mycelium feature)
# MYCELIUM_URL=http://127.0.0.1:8989

2
.gitignore vendored
View File

@@ -1,2 +1,4 @@
target
.bin
.env
/tmp/supervisor-*.log

747
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,12 @@
[workspace]
members = ["core", "client", "ui"]
members = ["core", "client"]
exclude = ["ui"]
resolver = "2"
[workspace.package]
version = "0.1.0"
edition = "2021"
# Note: The UI crate is excluded from the workspace because it targets wasm32-unknown-unknown
# and requires WASM-specific features. Build it separately with: cd ui && trunk build

View File

@@ -1,78 +0,0 @@
/// Generate test secp256k1 keypairs for supervisor authentication testing
///
/// Run with: cargo run --example generate_test_keys
use secp256k1::{Secp256k1, SecretKey, PublicKey};
use hex;
fn main() {
// secp256k1 signing/verification context used for all key derivations below.
let secp = Secp256k1::new();
println!("\n╔════════════════════════════════════════════════════════════╗");
println!("║ Test Keypairs for Supervisor Auth ║");
println!("╚════════════════════════════════════════════════════════════╝\n");
println!("⚠️ WARNING: These are TEST keypairs only! Never use in production!\n");
// Generate 5 keypairs with simple private keys for testing
// Each entry is (display name, 32-byte private key as 64 hex chars); the
// patterned/repeated-byte keys make them trivially recognizable as test-only.
let test_keys = vec![
("Alice (Admin)", "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"),
("Bob (User)", "fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"),
("Charlie (Register)", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
("Dave (Test)", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
("Eve (Test)", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
];
for (i, (name, privkey_hex)) in test_keys.iter().enumerate() {
println!("## Keypair {} - {}", i + 1, name);
println!("─────────────────────────────────────────────────────────────");
// Parse private key
// `expect` is acceptable here: the keys are hard-coded constants, so a
// failure is a bug in this example, not a runtime condition.
let privkey_bytes = hex::decode(privkey_hex).expect("Invalid hex");
let secret_key = SecretKey::from_slice(&privkey_bytes).expect("Invalid private key");
// Derive public key
let public_key = PublicKey::from_secret_key(&secp, &secret_key);
// Serialize keys
// Uncompressed form is 65 bytes (0x04-prefixed); compressed form is 33 bytes.
let pubkey_uncompressed = hex::encode(public_key.serialize_uncompressed());
let pubkey_compressed = hex::encode(public_key.serialize());
println!("Private Key (hex): 0x{}", privkey_hex);
println!("Public Key (uncomp): 0x{}", pubkey_uncompressed);
println!("Public Key (comp): 0x{}", pubkey_compressed);
println!();
}
// Everything below is static usage documentation printed to stdout for
// copy-paste; it performs no computation.
println!("\n╔════════════════════════════════════════════════════════════╗");
println!("║ Usage Examples ║");
println!("╚════════════════════════════════════════════════════════════╝\n");
println!("### Using with OpenRPC Client (Rust)\n");
println!("```rust");
println!("use secp256k1::{{Secp256k1, SecretKey}};");
println!("use hex;");
println!();
println!("// Alice's private key for admin access");
println!("let privkey_hex = \"1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\";");
println!("let privkey_bytes = hex::decode(privkey_hex).unwrap();");
println!("let secret_key = SecretKey::from_slice(&privkey_bytes).unwrap();");
println!();
println!("// Use with client");
println!("let client = SupervisorClient::new_with_keypair(");
println!(" \"http://127.0.0.1:3030\",");
println!(" secret_key");
println!(");");
println!("```\n");
println!("### Testing Different Scopes\n");
println!("1. **Admin Scope** - Use Alice's keypair for full admin access");
println!("2. **User Scope** - Use Bob's keypair for limited user access");
println!("3. **Register Scope** - Use Charlie's keypair for runner registration\n");
println!("### Quick Copy-Paste Keys\n");
println!("Alice (Admin): 1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef");
println!("Bob (User): fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321");
println!("Charlie (Reg): aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
println!("Dave (Test): bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb");
println!("Eve (Test): cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\n");
}

View File

@@ -630,347 +630,3 @@ impl SupervisorClient {
Ok(info)
}
}
// Unit tests for client construction and the job-builder contract.
#[cfg(test)]
mod tests {
    use super::*;

    /// A well-formed URL yields a usable client that reports the same server URL back.
    #[test]
    fn test_client_creation() {
        let created = SupervisorClient::new("http://127.0.0.1:3030");
        assert!(created.is_ok());
        assert_eq!(created.unwrap().server_url(), "http://127.0.0.1:3030");
    }

    /// A fully-specified builder produces a job carrying every field verbatim.
    #[test]
    fn test_job_builder() {
        let built = JobBuilder::new()
            .caller_id("test_client")
            .context_id("test_context")
            .payload("print('Hello, World!');")
            .executor("osis")
            .runner("test_runner")
            .timeout(60)
            .env_var("TEST_VAR", "test_value")
            .build();
        assert!(built.is_ok());

        let job = built.unwrap();
        assert_eq!(job.caller_id, "test_client");
        assert_eq!(job.context_id, "test_context");
        assert_eq!(job.payload, "print('Hello, World!');");
        assert_eq!(job.executor, "osis");
        assert_eq!(job.runner, "test_runner");
        assert_eq!(job.timeout, 60);
        assert_eq!(job.env_vars.get("TEST_VAR"), Some(&"test_value".to_string()));
    }

    /// Omitting any one required field must make `build()` return an error.
    #[test]
    fn test_job_builder_validation() {
        // Missing caller_id
        assert!(JobBuilder::new()
            .context_id("test")
            .payload("test")
            .runner("test")
            .build()
            .is_err());
        // Missing context_id
        assert!(JobBuilder::new()
            .caller_id("test")
            .payload("test")
            .runner("test")
            .build()
            .is_err());
        // Missing payload
        assert!(JobBuilder::new()
            .caller_id("test")
            .context_id("test")
            .runner("test")
            .executor("test")
            .build()
            .is_err());
        // Missing runner
        assert!(JobBuilder::new()
            .caller_id("test")
            .context_id("test")
            .payload("test")
            .executor("test")
            .build()
            .is_err());
        // Missing executor
        assert!(JobBuilder::new()
            .caller_id("test")
            .context_id("test")
            .payload("test")
            .runner("test")
            .build()
            .is_err());
    }
}
// Additional client test suites: native-only (HTTP) tests, WASM-only
// (wasm-bindgen) tests, and shared variant/serialization tests that run on
// both targets.
#[cfg(test)]
mod client_tests {
    use super::*;

    /// Tests that only compile and run on native (non-wasm32) targets.
    #[cfg(not(target_arch = "wasm32"))]
    mod native_tests {
        use super::*;

        #[test]
        fn test_client_creation() {
            let client = SupervisorClient::new("http://localhost:3030");
            assert!(client.is_ok());
            let client = client.unwrap();
            assert_eq!(client.server_url(), "http://localhost:3030");
        }

        #[test]
        fn test_client_creation_invalid_url() {
            let client = SupervisorClient::new("invalid-url");
            // HTTP client builder validates URLs and should fail on invalid ones
            assert!(client.is_err());
        }

        #[test]
        fn test_process_status_wrapper_serialization() {
            // Unit variants serialize as bare strings.
            let status = ProcessStatusWrapper::Running;
            let serialized = serde_json::to_string(&status).unwrap();
            assert_eq!(serialized, "\"Running\"");
            // Payload-carrying variants keep both tag and message.
            let status = ProcessStatusWrapper::Error("test error".to_string());
            let serialized = serde_json::to_string(&status).unwrap();
            assert!(serialized.contains("Error"));
            assert!(serialized.contains("test error"));
        }

        #[test]
        fn test_log_info_wrapper_serialization() {
            let log = LogInfoWrapper {
                timestamp: "2023-01-01T00:00:00Z".to_string(),
                level: "INFO".to_string(),
                message: "test message".to_string(),
            };
            let serialized = serde_json::to_string(&log).unwrap();
            assert!(serialized.contains("2023-01-01T00:00:00Z"));
            assert!(serialized.contains("INFO"));
            assert!(serialized.contains("test message"));
            // Round-trip: deserializing must restore every field unchanged.
            let deserialized: LogInfoWrapper = serde_json::from_str(&serialized).unwrap();
            assert_eq!(deserialized.timestamp, log.timestamp);
            assert_eq!(deserialized.level, log.level);
            assert_eq!(deserialized.message, log.message);
        }

        #[test]
        fn test_runner_type_serialization() {
            let runner_type = RunnerType::SALRunner;
            let serialized = serde_json::to_string(&runner_type).unwrap();
            assert_eq!(serialized, "\"SALRunner\"");
            let deserialized: RunnerType = serde_json::from_str(&serialized).unwrap();
            assert_eq!(deserialized, RunnerType::SALRunner);
        }

        #[test]
        fn test_job_type_conversion() {
            assert_eq!(JobType::SAL, JobType::SAL);
            assert_eq!(JobType::OSIS, JobType::OSIS);
            assert_eq!(JobType::V, JobType::V);
            assert_eq!(JobType::Python, JobType::Python);
        }

        #[test]
        fn test_job_status_serialization() {
            let status = JobStatus::Started;
            let serialized = serde_json::to_string(&status).unwrap();
            assert_eq!(serialized, "\"Started\"");
            let deserialized: JobStatus = serde_json::from_str(&serialized).unwrap();
            assert_eq!(deserialized, JobStatus::Started);
        }
    }

    /// Tests that only run under wasm32 in a browser via wasm-bindgen-test.
    #[cfg(target_arch = "wasm32")]
    mod wasm_tests {
        use super::*;
        use wasm_bindgen_test::*;
        wasm_bindgen_test_configure!(run_in_browser);

        #[wasm_bindgen_test]
        fn test_wasm_client_creation() {
            let client = crate::wasm::WasmSupervisorClient::new("http://localhost:3030".to_string());
            assert_eq!(client.server_url(), "http://localhost:3030");
        }

        #[wasm_bindgen_test]
        fn test_wasm_job_creation() {
            let job = crate::wasm::hero_job::Job::new(
                "test-id".to_string(),
                "test payload".to_string(),
                "SAL".to_string(),
                "test-runner".to_string(),
            );
            assert_eq!(job.id(), "test-id");
            assert_eq!(job.payload(), "test payload");
            assert_eq!(job.job_type(), "SAL");
            assert_eq!(job.runner(), "test-runner");
            // Defaults filled in by the constructor for fields not passed above.
            assert_eq!(job.caller_id(), "wasm_client");
            assert_eq!(job.context_id(), "wasm_context");
            assert_eq!(job.timeout_secs(), 30);
        }

        #[wasm_bindgen_test]
        fn test_wasm_job_setters() {
            let mut job = crate::wasm::hero_job::Job::new(
                "test-id".to_string(),
                "test payload".to_string(),
                "SAL".to_string(),
                "test-runner".to_string(),
            );
            job.set_caller_id("custom-caller".to_string());
            job.set_context_id("custom-context".to_string());
            job.set_timeout_secs(60);
            job.set_env_vars("{\"KEY\":\"VALUE\"}".to_string());
            assert_eq!(job.caller_id(), "custom-caller");
            assert_eq!(job.context_id(), "custom-context");
            assert_eq!(job.timeout_secs(), 60);
            assert_eq!(job.env_vars(), "{\"KEY\":\"VALUE\"}");
        }

        #[wasm_bindgen_test]
        fn test_wasm_job_id_generation() {
            let mut job = crate::wasm::hero_job::Job::new(
                "original-id".to_string(),
                "test payload".to_string(),
                "SAL".to_string(),
                "test-runner".to_string(),
            );
            let original_id = job.id();
            job.generate_id();
            let new_id = job.id();
            // The regenerated id must differ from the original and be non-empty.
            assert_ne!(original_id, new_id);
            assert!(new_id.len() > 0);
        }

        #[wasm_bindgen_test]
        fn test_create_job_function() {
            let job = crate::wasm::create_job(
                "func-test-id".to_string(),
                "func test payload".to_string(),
                "OSIS".to_string(),
                "func-test-runner".to_string(),
            );
            assert_eq!(job.id(), "func-test-id");
            assert_eq!(job.payload(), "func test payload");
            assert_eq!(job.job_type(), "OSIS");
            assert_eq!(job.runner(), "func-test-runner");
        }

        #[wasm_bindgen_test]
        fn test_wasm_job_type_enum() {
            use crate::wasm::hero_job::JobType;
            // Test that enum variants exist and can be created.
            // FIX: use the imported `JobType` name directly — the original
            // referenced `hero_job::JobType::…`, which is not brought into
            // scope by the `use` above and left that import unused.
            let sal = JobType::SAL;
            let osis = JobType::OSIS;
            let v = JobType::V;
            // Test equality
            assert_eq!(sal, JobType::SAL);
            assert_eq!(osis, JobType::OSIS);
            assert_eq!(v, JobType::V);
            // Test inequality
            assert_ne!(sal, osis);
            assert_ne!(osis, v);
            assert_ne!(v, sal);
        }
    }

    // Common tests that work on both native and WASM

    #[test]
    fn test_process_status_wrapper_variants() {
        let running = ProcessStatusWrapper::Running;
        let stopped = ProcessStatusWrapper::Stopped;
        let starting = ProcessStatusWrapper::Starting;
        let stopping = ProcessStatusWrapper::Stopping;
        let error = ProcessStatusWrapper::Error("test".to_string());
        // Test that all variants can be created
        assert_eq!(running, ProcessStatusWrapper::Running);
        assert_eq!(stopped, ProcessStatusWrapper::Stopped);
        assert_eq!(starting, ProcessStatusWrapper::Starting);
        assert_eq!(stopping, ProcessStatusWrapper::Stopping);
        if let ProcessStatusWrapper::Error(msg) = error {
            assert_eq!(msg, "test");
        } else {
            panic!("Expected Error variant");
        }
    }

    #[test]
    fn test_job_type_variants() {
        assert_eq!(JobType::SAL, JobType::SAL);
        assert_eq!(JobType::OSIS, JobType::OSIS);
        assert_eq!(JobType::V, JobType::V);
        assert_eq!(JobType::Python, JobType::Python);
        assert_ne!(JobType::SAL, JobType::OSIS);
        assert_ne!(JobType::OSIS, JobType::V);
        assert_ne!(JobType::V, JobType::Python);
    }

    #[test]
    fn test_job_status_variants() {
        assert_eq!(JobStatus::Created, JobStatus::Created);
        assert_eq!(JobStatus::Dispatched, JobStatus::Dispatched);
        assert_eq!(JobStatus::Started, JobStatus::Started);
        assert_eq!(JobStatus::Finished, JobStatus::Finished);
        assert_eq!(JobStatus::Error, JobStatus::Error);
        assert_ne!(JobStatus::Created, JobStatus::Dispatched);
        assert_ne!(JobStatus::Started, JobStatus::Finished);
    }

    #[test]
    fn test_runner_type_variants() {
        assert_eq!(RunnerType::SALRunner, RunnerType::SALRunner);
        assert_eq!(RunnerType::OSISRunner, RunnerType::OSISRunner);
        assert_eq!(RunnerType::VRunner, RunnerType::VRunner);
        assert_eq!(RunnerType::PyRunner, RunnerType::PyRunner);
        assert_ne!(RunnerType::SALRunner, RunnerType::OSISRunner);
        assert_ne!(RunnerType::VRunner, RunnerType::PyRunner);
    }

    #[test]
    fn test_process_manager_type_variants() {
        let simple = ProcessManagerType::Simple;
        let tmux = ProcessManagerType::Tmux("test-session".to_string());
        assert_eq!(simple, ProcessManagerType::Simple);
        if let ProcessManagerType::Tmux(session) = tmux {
            assert_eq!(session, "test-session");
        } else {
            panic!("Expected Tmux variant");
        }
    }
}

View File

@@ -1,65 +0,0 @@
/// Generate test secp256k1 keypairs for supervisor authentication testing
///
/// Run with: cargo run --example generate_keypairs
use secp256k1::{Secp256k1, SecretKey, PublicKey};
use hex;
fn main() {
// secp256k1 signing/verification context used for all key derivations below.
let secp = Secp256k1::new();
println!("# Test Keypairs for Supervisor Auth\n");
println!("These are secp256k1 keypairs for testing the supervisor authentication system.\n");
println!("⚠️ WARNING: These are TEST keypairs only! Never use these in production!\n");
// Generate 5 keypairs with simple private keys for testing
// Each entry is (display name, 32-byte private key as 64 hex chars); the
// patterned/repeated-byte keys make them trivially recognizable as test-only.
let test_keys = vec![
("Alice (Admin)", "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"),
("Bob (User)", "fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"),
("Charlie (Register)", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
("Dave (Test)", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
("Eve (Test)", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
];
for (i, (name, privkey_hex)) in test_keys.iter().enumerate() {
println!("## Keypair {} ({})", i + 1, name);
println!("```");
// Parse private key
// `expect` is acceptable here: the keys are hard-coded constants, so a
// failure is a bug in this example, not a runtime condition.
let privkey_bytes = hex::decode(privkey_hex).expect("Invalid hex");
let secret_key = SecretKey::from_slice(&privkey_bytes).expect("Invalid private key");
// Derive public key
let public_key = PublicKey::from_secret_key(&secp, &secret_key);
// Serialize keys
// Uncompressed SEC1 encoding: 65 bytes with a 0x04 prefix.
let pubkey_hex = hex::encode(public_key.serialize_uncompressed());
println!("Private Key: 0x{}", privkey_hex);
println!("Public Key: 0x{}", pubkey_hex);
println!("```\n");
}
// Everything below is static usage documentation printed to stdout for
// copy-paste; it performs no computation.
println!("\n## Usage Examples\n");
println!("### Using with OpenRPC Client\n");
println!("```rust");
println!("use secp256k1::{{Secp256k1, SecretKey}};");
println!("use hex;");
println!();
println!("// Alice's private key");
println!("let alice_privkey = SecretKey::from_slice(");
println!(" &hex::decode(\"1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\").unwrap()");
println!(").unwrap();");
println!();
println!("// Create client with signature");
println!("let client = WasmSupervisorClient::new_with_keypair(");
println!(" \"http://127.0.0.1:3030\",");
println!(" alice_privkey");
println!(");");
println!("```\n");
println!("### Testing Different Scopes\n");
println!("1. **Admin Scope** - Use Alice's keypair for full admin access");
println!("2. **User Scope** - Use Bob's keypair for limited user access");
println!("3. **Register Scope** - Use Charlie's keypair for runner registration only\n");
}

View File

@@ -1,38 +1,24 @@
//! # Hero Supervisor Binary
//!
//! Main supervisor binary that manages multiple actors and listens to jobs over Redis.
//! The supervisor builds with actor configuration, starts actors, and dispatches jobs
//! to the appropriate runners based on the job's runner field.
//! Hero Supervisor Binary
use hero_supervisor::{SupervisorApp, SupervisorBuilder};
use clap::Parser;
use log::{info, error};
use std::path::PathBuf;
use log::error;
/// Command line arguments for the supervisor
/// Hero Supervisor - manages actors and dispatches jobs
#[derive(Parser, Debug)]
#[command(name = "supervisor")]
#[command(about = "Hero Supervisor - manages multiple actors and dispatches jobs")]
#[command(about = "Hero Supervisor - manages actors and dispatches jobs")]
struct Args {
/// Path to the configuration TOML file
#[arg(short, long, value_name = "FILE")]
config: Option<PathBuf>,
/// Redis URL for job queue
#[arg(long, default_value = "redis://localhost:6379")]
#[arg(long, default_value = "redis://127.0.0.1:6379")]
redis_url: String,
/// Namespace for Redis keys
#[arg(long, default_value = "")]
namespace: String,
/// Admin secrets (can be specified multiple times)
#[arg(long = "admin-secret", value_name = "SECRET")]
/// Admin secrets (required, can be specified multiple times)
#[arg(long = "admin-secret", value_name = "SECRET", required = true)]
admin_secrets: Vec<String>,
/// User secrets (can be specified multiple times)
@@ -43,14 +29,6 @@ struct Args {
#[arg(long = "register-secret", value_name = "SECRET")]
register_secrets: Vec<String>,
/// Mycelium daemon URL
#[arg(long, default_value = "http://127.0.0.1:8990")]
mycelium_url: String,
/// Mycelium topic for supervisor RPC messages
#[arg(long, default_value = "supervisor.rpc")]
topic: String,
/// Port for OpenRPC HTTP server
#[arg(long, default_value = "3030")]
port: u16,
@@ -59,88 +37,54 @@ struct Args {
#[arg(long, default_value = "127.0.0.1")]
bind_address: String,
/// Bootstrap an initial admin API key with the given name
#[arg(long = "bootstrap-admin-key", value_name = "NAME")]
bootstrap_admin_key: Option<String>,
/// Mycelium daemon URL (optional)
#[arg(long, default_value = "")]
mycelium_url: String,
/// Mycelium topic for supervisor RPC messages
#[arg(long, default_value = "supervisor.rpc")]
topic: String,
/// Pre-configured runner names (comma-separated)
#[arg(long, value_name = "NAMES", value_delimiter = ',')]
runners: Vec<String>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize logging
env_logger::init();
info!("Starting Hero Supervisor");
// Parse command line arguments
let args = Args::parse();
// Store first admin secret for runner registration
let admin_secret = args.admin_secrets[0].clone();
// Create and initialize supervisor using builder pattern
// Build supervisor
let mut builder = SupervisorBuilder::new()
.redis_url(&args.redis_url)
.namespace(&args.namespace);
// Add secrets from CLI arguments
if !args.admin_secrets.is_empty() {
info!("Adding {} admin secret(s)", args.admin_secrets.len());
builder = builder.admin_secrets(args.admin_secrets);
}
.namespace(&args.namespace)
.admin_secrets(args.admin_secrets);
if !args.user_secrets.is_empty() {
info!("Adding {} user secret(s)", args.user_secrets.len());
builder = builder.user_secrets(args.user_secrets);
}
if !args.register_secrets.is_empty() {
info!("Adding {} register secret(s)", args.register_secrets.len());
builder = builder.register_secrets(args.register_secrets);
}
let supervisor = match args.config {
Some(_config_path) => {
info!("Loading configuration from config file not yet implemented");
// For now, use CLI configuration
builder.build().await?
}
None => {
info!("Using CLI configuration");
builder.build().await?
}
};
let mut supervisor = builder.build().await?;
// Bootstrap admin key if requested
if let Some(admin_key_name) = args.bootstrap_admin_key {
info!("Bootstrapping admin API key: {}", admin_key_name);
let admin_key = supervisor.bootstrap_admin_key(admin_key_name).await;
println!("\n╔════════════════════════════════════════════════════════════╗");
println!("║ 🔑 Admin API Key Created ║");
println!("╚════════════════════════════════════════════════════════════╝");
println!(" Name: {}", admin_key.name);
println!(" Key: {}", admin_key.key);
println!(" Scope: {}", admin_key.scope.as_str());
println!(" ⚠️ SAVE THIS KEY - IT WILL NOT BE SHOWN AGAIN!");
println!("╚════════════════════════════════════════════════════════════╝\n");
// Register pre-configured runners using first admin secret
if !args.runners.is_empty() {
for runner_name in &args.runners {
match supervisor.register_runner(&admin_secret, runner_name, &format!("queue:{}", runner_name)).await {
Ok(_) => {},
Err(e) => error!("Failed to register runner '{}': {}", runner_name, e),
}
}
}
// Print startup information
let server_url = format!("http://{}:{}", args.bind_address, args.port);
println!("\n╔════════════════════════════════════════════════════════════╗");
println!("║ Hero Supervisor Started ║");
println!("╚════════════════════════════════════════════════════════════╝");
println!(" 📡 OpenRPC Server: {}", server_url);
println!(" 🔗 Redis: {}", args.redis_url);
#[cfg(feature = "mycelium")]
if !args.mycelium_url.is_empty() {
println!(" 🌐 Mycelium: {}", args.mycelium_url);
} else {
println!(" 🌐 Mycelium: Disabled");
}
#[cfg(not(feature = "mycelium"))]
println!(" 🌐 Mycelium: Not compiled (use --features mycelium)");
println!("╚════════════════════════════════════════════════════════════╝\n");
// Start OpenRPC server in background
// Start OpenRPC server
use std::sync::Arc;
use tokio::sync::Mutex;
use hero_supervisor::openrpc::start_http_openrpc_server;
@@ -150,11 +94,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let port = args.port;
tokio::spawn(async move {
info!("Starting OpenRPC server on {}:{}", bind_addr, port);
match start_http_openrpc_server(supervisor_arc, &bind_addr, port).await {
Ok(handle) => {
info!("OpenRPC server started successfully");
// Keep the server running by holding the handle
handle.stopped().await;
error!("OpenRPC server stopped unexpectedly");
}
@@ -164,12 +105,16 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
}
});
// Give the server a moment to start
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
let mut app = SupervisorApp::new(supervisor, args.mycelium_url, args.topic);
// Print minimal startup info
println!("📡 http://{}:{}", args.bind_address, args.port);
#[cfg(feature = "mycelium")]
if !args.mycelium_url.is_empty() {
println!("🌐 {}", args.mycelium_url);
}
// Start the complete supervisor application
let mut app = SupervisorApp::new(supervisor, args.mycelium_url, args.topic);
app.start().await?;
Ok(())

View File

@@ -964,6 +964,34 @@ impl SupervisorRpcServer for Arc<Mutex<Supervisor>> {
let key = get_current_api_key()
.ok_or_else(|| ErrorObject::owned(-32602, "Missing Authorization header", None::<()>))?;
// Check if it's an admin secret
if supervisor.has_admin_secret(&key) {
return Ok(crate::auth::AuthVerifyResponse {
valid: true,
name: "Admin Secret".to_string(),
scope: "admin".to_string(),
});
}
// Check if it's a user secret
if supervisor.has_user_secret(&key) {
return Ok(crate::auth::AuthVerifyResponse {
valid: true,
name: "User Secret".to_string(),
scope: "user".to_string(),
});
}
// Check if it's a register secret
if supervisor.has_register_secret(&key) {
return Ok(crate::auth::AuthVerifyResponse {
valid: true,
name: "Register Secret".to_string(),
scope: "register".to_string(),
});
}
// Check if it's an API key
match supervisor.verify_api_key(&key).await {
Some(api_key) => {
Ok(crate::auth::AuthVerifyResponse {
@@ -1188,112 +1216,3 @@ pub async fn start_openrpc_servers(
Ok(())
}
// Unit tests for the OpenRPC server layer: supervisor construction, process
// manager parsing, the job RPC methods, and response serialization.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::supervisor::Supervisor;

    /// Smoke test: the supervisor builder can be driven from the RPC layer.
    /// Deliberately accepts either outcome — with no Redis available the build
    /// may fail, and that is fine; the test only proves it does not panic.
    #[tokio::test]
    async fn test_supervisor_rpc_creation() {
        use crate::supervisor::SupervisorBuilder;
        let supervisor = SupervisorBuilder::new()
            .redis_url("redis://localhost:6379")
            .namespace("test")
            .build()
            .await;
        // Just test that we can build a supervisor (either outcome is fine).
        assert!(supervisor.is_ok() || supervisor.is_err());
    }

    /// Parsing is case-insensitive; unknown manager names are rejected.
    #[test]
    fn test_process_manager_type_parsing() {
        assert!(parse_process_manager_type("simple", None).is_ok());
        assert!(parse_process_manager_type("tmux", Some("session".to_string())).is_ok());
        assert!(parse_process_manager_type("Simple", None).is_ok());
        assert!(parse_process_manager_type("TMUX", Some("session".to_string())).is_ok());
        assert!(parse_process_manager_type("invalid", None).is_err());
    }

    /// Exercises jobs.create and job.start through the RPC trait, including
    /// the invalid-secret rejection path.
    #[tokio::test]
    async fn test_job_api_methods() {
        let supervisor = Arc::new(Mutex::new(Supervisor::default()));
        let mut sup = supervisor.lock().await;
        sup.add_user_secret("test-secret".to_string());
        drop(sup);
        // Test jobs.create
        let job = crate::job::JobBuilder::new()
            .caller_id("test")
            .context_id("test")
            .payload("test")
            .runner("test_runner")
            .executor("osis")
            .build()
            .unwrap();
        let params = RunJobParams {
            job: job.clone(),
        };
        // Set the API key in thread-local for the test
        set_current_api_key(Some("test-secret".to_string()));
        let result = supervisor.jobs_create(params).await;
        // Should work or fail gracefully without Redis
        assert!(result.is_ok() || result.is_err());
        // Test job.start
        let start_params = StartJobParams {
            job_id: "test-job".to_string(),
        };
        let result = supervisor.job_start(start_params).await;
        // Should fail gracefully without Redis/job
        assert!(result.is_err());
        // Test invalid secret.
        // FIX: the original re-ran the call with the valid thread-local key
        // still installed, so the invalid-secret branch was never exercised.
        // Install a key that is not registered before calling.
        set_current_api_key(Some("not-a-registered-secret".to_string()));
        let invalid_params = StartJobParams {
            job_id: "test-job".to_string(),
        };
        let result = supervisor.job_start(invalid_params).await;
        assert!(result.is_err());
    }

    /// Both JobResult variants serialize with their tag and payload visible.
    #[test]
    fn test_job_result_serialization() {
        let success = JobResult::Success { success: "test output".to_string() };
        let json = serde_json::to_string(&success).unwrap();
        assert!(json.contains("success"));
        assert!(json.contains("test output"));
        let error = JobResult::Error { error: "test error".to_string() };
        let json = serde_json::to_string(&error).unwrap();
        assert!(json.contains("error"));
        assert!(json.contains("test error"));
    }

    /// JobStatusResponse round-trips through JSON with all fields intact.
    #[test]
    fn test_job_status_response_serialization() {
        let status = JobStatusResponse {
            job_id: "test-job".to_string(),
            status: "running".to_string(),
            created_at: "2023-01-01T00:00:00Z".to_string(),
            started_at: Some("2023-01-01T00:00:05Z".to_string()),
            completed_at: None,
        };
        let json = serde_json::to_string(&status).unwrap();
        assert!(json.contains("test-job"));
        assert!(json.contains("running"));
        assert!(json.contains("2023-01-01T00:00:00Z"));
        let deserialized: JobStatusResponse = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized.job_id, "test-job");
        assert_eq!(deserialized.status, "running");
    }
}

View File

@@ -267,46 +267,3 @@ impl Default for Services {
Self::new()
}
}
// Unit tests for the in-memory API-key and runner registries.
#[cfg(test)]
mod tests {
    use super::*;

    /// Store → get → list → remove round-trip for API keys.
    #[tokio::test]
    async fn test_api_key_service() {
        let svc = ApiKeyService::new();
        let stored = ApiKey {
            key: "test-key".to_string(),
            name: "test".to_string(),
            scope: ApiKeyScope::User,
        };
        svc.store(stored.clone()).await.unwrap();
        // The key must be retrievable and visible in the listing.
        assert_eq!(svc.get("test-key").await.unwrap().name, "test");
        assert_eq!(svc.list().await.len(), 1);
        // After removal the lookup must come back empty.
        svc.remove("test-key").await;
        assert!(svc.get("test-key").await.is_none());
    }

    /// Store → get → count → remove round-trip for runner metadata.
    #[tokio::test]
    async fn test_runner_service() {
        let svc = RunnerService::new();
        let meta = RunnerMetadata {
            id: "runner1".to_string(),
            name: "runner1".to_string(),
            queue: "queue1".to_string(),
            registered_at: "2024-01-01".to_string(),
            registered_by: "admin".to_string(),
        };
        svc.store(meta.clone()).await.unwrap();
        // The runner must be retrievable and counted.
        assert_eq!(svc.get("runner1").await.unwrap().name, "runner1");
        assert_eq!(svc.count().await, 1);
        // After removal the lookup must come back empty.
        svc.remove("runner1").await;
        assert!(svc.get("runner1").await.is_none());
    }
}

View File

@@ -1031,77 +1031,3 @@ impl Default for Supervisor {
}
}
}
// Unit tests for supervisor construction and runner registration.
// FIX: the module was missing `#[cfg(test)]`, unlike every other test module
// in this crate, so these tests (and their `use super::*`) were compiled into
// non-test builds.
#[cfg(test)]
mod tests {
    #[allow(unused_imports)]
    use super::*;

    /// A freshly built supervisor starts with no registered runners.
    #[tokio::test]
    async fn test_supervisor_creation() {
        let supervisor = Supervisor::builder()
            .redis_url("redis://localhost:6379")
            .build()
            .await
            .unwrap();
        assert_eq!(supervisor.list_runners().len(), 0);
    }

    /// Adding one runner at build time makes it visible in the listing.
    #[tokio::test]
    async fn test_add_runner() {
        use std::path::PathBuf;
        let config = RunnerConfig::new(
            "test_actor".to_string(),
            "test_actor".to_string(),
            "".to_string(),
            PathBuf::from("/usr/bin/test_actor"),
            "redis://localhost:6379".to_string(),
        );
        // FIX: was `config.clone()` — `config` is never used again, so the
        // clone was a pointless allocation.
        let runner = Runner::from_config(config);
        let mut supervisor = Supervisor::builder()
            .redis_url("redis://localhost:6379")
            .add_runner(runner)
            .build()
            .await
            .unwrap();
        assert_eq!(supervisor.list_runners().len(), 1);
    }

    /// Multiple runners registered at build time are all retrievable by name.
    #[tokio::test]
    async fn test_add_multiple_runners() {
        use std::path::PathBuf;
        let config1 = RunnerConfig::new(
            "sal_actor".to_string(),
            "sal_actor".to_string(),
            "".to_string(),
            PathBuf::from("/usr/bin/sal_actor"),
            "redis://localhost:6379".to_string(),
        );
        let config2 = RunnerConfig::new(
            "osis_actor".to_string(),
            "osis_actor".to_string(),
            "".to_string(),
            PathBuf::from("/usr/bin/osis_actor"),
            "redis://localhost:6379".to_string(),
        );
        let runner1 = Runner::from_config(config1);
        let runner2 = Runner::from_config(config2);
        let supervisor = Supervisor::builder()
            .redis_url("redis://localhost:6379")
            .add_runner(runner1)
            .add_runner(runner2)
            .build()
            .await
            .unwrap();
        assert_eq!(supervisor.list_runners().len(), 2);
        assert!(supervisor.get_runner("sal_actor").is_some());
        assert!(supervisor.get_runner("osis_actor").is_some());
    }
}

View File

@@ -29,251 +29,3 @@ async fn is_supervisor_available() -> bool {
Err(_) => false,
}
}
/// End-to-end check of `jobs.create` followed by `job.start`.
#[tokio::test]
async fn test_jobs_create_and_start() {
    if !is_supervisor_available().await {
        println!("Skipping test - supervisor not available");
        return;
    }

    let client = SupervisorClient::new("http://localhost:3030").unwrap();
    let secret = "user-secret-456";

    // jobs.create must return a non-empty job id.
    let job = create_test_job("create_and_start").unwrap();
    let job_id = client.jobs_create(secret, job).await.unwrap();
    assert!(!job_id.is_empty());

    // job.start on the freshly created job must succeed.
    assert!(client.job_start(secret, &job_id).await.is_ok());
}
/// Starts a job and polls `job.status` until a terminal state is reached
/// or the attempt budget runs out, validating the payload on every poll.
#[tokio::test]
async fn test_job_status_monitoring() {
    if !is_supervisor_available().await {
        println!("Skipping test - supervisor not available");
        return;
    }

    let client = SupervisorClient::new("http://localhost:3030").unwrap();
    let secret = "user-secret-456";

    let job = create_test_job("status_monitoring").unwrap();
    let job_id = client.jobs_create(secret, job).await.unwrap();
    client.job_start(secret, &job_id).await.unwrap();

    // Poll at 1s intervals, giving the job up to 10 attempts to finish.
    for _ in 0..10 {
        let status = client.job_status(&job_id).await.unwrap();

        // Every status response must carry the core fields.
        assert!(!status.job_id.is_empty());
        assert!(!status.status.is_empty());
        assert!(!status.created_at.is_empty());

        if matches!(status.status.as_str(), "completed" | "failed") {
            break;
        }
        sleep(Duration::from_secs(1)).await;
    }
}
#[tokio::test]
async fn test_job_result_retrieval() {
if !is_supervisor_available().await {
println!("Skipping test - supervisor not available");
return;
}
let client = SupervisorClient::new("http://localhost:3030").unwrap();
let secret = "user-secret-456";
let job = create_test_job("result_retrieval").unwrap();
let job_id = client.jobs_create(secret, job).await.unwrap();
client.job_start(secret, &job_id).await.unwrap();
// Wait a bit for job to complete
sleep(Duration::from_secs(3)).await;
// Test job.result
let result = client.job_result(&job_id).await.unwrap();
match result {
JobResult::Success { success } => {
assert!(!success.is_empty());
},
JobResult::Error { error } => {
assert!(!error.is_empty());
}
}
}
#[tokio::test]
async fn test_job_run_immediate() {
if !is_supervisor_available().await {
println!("Skipping test - supervisor not available");
return;
}
let client = SupervisorClient::new("http://localhost:3030").unwrap();
let secret = "user-secret-456";
let job = create_test_job("immediate_run").unwrap();
// Test job.run (immediate execution)
let result = client.job_run(secret, job).await.unwrap();
match result {
JobResult::Success { success } => {
assert!(!success.is_empty());
},
JobResult::Error { error } => {
assert!(!error.is_empty());
}
}
}
/// Verifies `jobs.list` responds successfully and returns well-formed ids.
#[tokio::test]
async fn test_jobs_list() {
    if !is_supervisor_available().await {
        println!("Skipping test - supervisor not available");
        return;
    }
    let client = SupervisorClient::new("http://localhost:3030").unwrap();

    // The list may legitimately be empty; the unwrap above already proves
    // the call succeeds. The previous `assert!(job_ids.len() >= 0)` was
    // vacuously true for a usize (fires the `unused_comparisons` lint), so
    // assert something meaningful instead: no returned id may be empty.
    // NOTE(review): assumes ids are string-like as elsewhere in this file —
    // confirm the element type of `jobs_list`.
    let job_ids = client.jobs_list().await.unwrap();
    for id in &job_ids {
        assert!(!id.is_empty());
    }
}
/// Every job mutation must be rejected when an unknown secret is supplied.
#[tokio::test]
async fn test_authentication_failures() {
    if !is_supervisor_available().await {
        println!("Skipping test - supervisor not available");
        return;
    }

    let client = SupervisorClient::new("http://localhost:3030").unwrap();
    let bad_secret = "invalid-secret-123";
    let job = create_test_job("auth_failure").unwrap();

    // Each authenticated endpoint should refuse the bogus secret.
    assert!(client.jobs_create(bad_secret, job.clone()).await.is_err());
    assert!(client.job_run(bad_secret, job.clone()).await.is_err());
    assert!(client.job_start(bad_secret, "fake-job-id").await.is_err());
}
/// Status and result lookups for an id that was never created must fail.
#[tokio::test]
async fn test_nonexistent_job_operations() {
    if !is_supervisor_available().await {
        println!("Skipping test - supervisor not available");
        return;
    }

    let client = SupervisorClient::new("http://localhost:3030").unwrap();

    // A UUID suffix guarantees the id cannot collide with a real job.
    let missing_id = format!("nonexistent-{}", Uuid::new_v4());

    assert!(client.job_status(&missing_id).await.is_err());
    assert!(client.job_result(&missing_id).await.is_err());
}
/// Full lifecycle: create → start → poll to a terminal state → fetch the
/// result, then check the result variant agrees with the observed status.
#[tokio::test]
async fn test_complete_workflow() {
    if !is_supervisor_available().await {
        println!("Skipping test - supervisor not available");
        return;
    }

    let client = SupervisorClient::new("http://localhost:3030").unwrap();
    let secret = "user-secret-456";

    let job = create_test_job("complete_workflow").unwrap();
    let job_id = client.jobs_create(secret, job).await.unwrap();
    client.job_start(secret, &job_id).await.unwrap();

    // Poll once a second, up to 15 times, for a terminal status.
    let mut final_status = String::new();
    for _ in 0..15 {
        final_status = client.job_status(&job_id).await.unwrap().status;
        if matches!(final_status.as_str(), "completed" | "failed" | "timeout") {
            break;
        }
        sleep(Duration::from_secs(1)).await;
    }

    // The result variant must be consistent with the last observed status.
    match client.job_result(&job_id).await.unwrap() {
        JobResult::Success { .. } => assert_eq!(final_status, "completed"),
        JobResult::Error { .. } => {
            assert!(final_status == "failed" || final_status == "timeout");
        }
    }
}
/// Creates several jobs, starts them all, then confirms a result can be
/// fetched for each one.
#[tokio::test]
async fn test_batch_job_processing() {
    if !is_supervisor_available().await {
        println!("Skipping test - supervisor not available");
        return;
    }

    let client = SupervisorClient::new("http://localhost:3030").unwrap();
    let secret = "user-secret-456";
    let job_count = 3;

    // Create the whole batch up front so every job exists before any starts.
    let mut job_ids = Vec::new();
    for i in 0..job_count {
        let job = JobBuilder::new()
            .caller_id("integration_test")
            .context_id(&format!("batch_job_{}", i))
            .payload(&format!("echo 'Batch job {}'", i))
            .executor("osis")
            .runner("osis_runner_1")
            .timeout(30)
            .build()
            .unwrap();
        job_ids.push(client.jobs_create(secret, job).await.unwrap());
    }

    // Kick off every job in the batch.
    for job_id in &job_ids {
        client.job_start(secret, job_id).await.unwrap();
    }

    // Allow the batch time to complete before collecting results.
    sleep(Duration::from_secs(5)).await;

    // Every job must yield a result.
    let mut results = Vec::new();
    for job_id in &job_ids {
        results.push(client.job_result(job_id).await.unwrap());
    }
    assert_eq!(results.len(), job_count);
}

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# Run Hero Supervisor with OpenRPC server only (no Mycelium)
#
# This starts the supervisor with:
# - OpenRPC HTTP server on port 3030
# - Redis connection for job queuing
# - No Mycelium integration
#
# Usage:
# ./run_supervisor_simple.sh
#
# Requirements (review notes):
# - presumably needs a Redis server reachable at localhost:6379 — confirm
# - invokes `cargo run`, so the Rust toolchain must be installed

# Banner: show the endpoints this configuration will use.
echo "🚀 Starting Hero Supervisor (OpenRPC only)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " OpenRPC Server: http://localhost:3030"
echo " Redis: redis://localhost:6379"
echo " Mycelium: Disabled"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Set environment variables
export RUST_LOG=info
# An empty value is used here to disable Mycelium — presumably the binary
# treats empty as "off"; confirm against the supervisor's config parsing.
export MYCELIUM_URL="" # Disable Mycelium
# Build and run
# `--no-default-features --features cli` builds the CLI-only variant;
# everything after `--` is passed to the supervisor binary itself.
cargo run --bin supervisor --no-default-features --features cli -- \
--redis-url redis://localhost:6379 \
--port 3030

View File

@@ -1,11 +1,53 @@
#!/bin/bash
set -e
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
echo "Building Hero Supervisor..."
cd "$PROJECT_DIR"
RUSTFLAGS="-A warnings" cargo build --release
# Spinner function: animate a braille spinner while process $1 is alive.
spinner() {
    local pid=$1
    local delay=0.1
    local spinstr='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
    while ps -p "$pid" > /dev/null 2>&1; do
        local temp=${spinstr#?}
        # FIX: print the first CHARACTER with %s. The original used
        # `printf " [%c] " "$spinstr"`, and bash's printf %c emits only the
        # first BYTE of a multibyte braille glyph, garbling the output.
        printf " [%s] " "${spinstr%"$temp"}"
        # Rotate the frame string one character to the left.
        spinstr=$temp${spinstr%"$temp"}
        sleep "$delay"
        # Erase the 6 characters just printed.
        printf "\b\b\b\b\b\b"
    done
    # Clear any spinner residue after the process exits.
    printf "    \b\b\b\b"
}
echo " Hero Supervisor built successfully"
echo "Building Hero Supervisor Workspace"
echo ""
# Build core and client
printf "📦 Core & Client... "
cd "$PROJECT_DIR"
if RUSTFLAGS="-A warnings" cargo build --release --workspace > /tmp/supervisor-build-core.log 2>&1 & spinner $!; wait $!; then
echo "✅"
else
echo "❌"
echo " Error: Build failed. Run 'cd $PROJECT_DIR && cargo build --release --workspace' for details"
exit 1
fi
# Build UI
printf "📦 UI (WASM)... "
cd "$PROJECT_DIR/ui"
if ! command -v trunk &> /dev/null; then
echo "⚠️ (trunk not installed)"
echo " Install with: cargo install trunk"
else
if trunk build --release > /tmp/supervisor-build-ui.log 2>&1 & spinner $!; wait $!; then
echo "✅"
else
echo "❌"
echo " Error: Build failed. Run 'cd $PROJECT_DIR/ui && trunk build --release' for details"
exit 1
fi
fi
echo ""
echo "✅ All builds completed"

18
scripts/environment.sh Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Load environment variables from .env file
#
# Sourced/executed by the other scripts in this directory so that settings
# defined in the project-root .env (e.g. ADMIN_SECRETS, PORT, REDIS_URL —
# see run.sh) become exported variables. Exits non-zero when no .env
# exists, so callers running under `set -e` stop early.

# Resolve the project root relative to this script's own location.
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
ENV_FILE="$PROJECT_DIR/.env"
if [ -f "$ENV_FILE" ]; then
# Export variables from .env file
# `set -a` marks every variable assigned while sourcing for export;
# `set +a` restores normal assignment behavior afterwards.
set -a
source "$ENV_FILE"
set +a
echo "✅ Loaded environment from .env"
else
echo "⚠️ No .env file found at $ENV_FILE"
echo " Copy .env.example to .env and configure your settings"
exit 1
fi

11
scripts/generate_secret.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Generate a supervisor secret key in the correct format
#
# Prints a freshly generated secret plus the .env line to copy.
# Requires the `openssl` CLI.

# 32 random bytes, rendered as a 64-character hex string.
SECRET=$(openssl rand -hex 32)
echo "Generated supervisor secret:"
echo "$SECRET"
echo ""
echo "Add this to your .env file:"
# run.sh validates ADMIN_SECRETS (comma-separated) and points users at this
# script, so suggest that variable name; the old SUPERVISOR_ADMIN_SECRET
# does not appear to be read anywhere — confirm before removing entirely.
echo "ADMIN_SECRETS=$SECRET"

View File

@@ -1,64 +1,185 @@
#!/bin/bash
set -e
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
# Build first
"$SCRIPT_DIR/build.sh"
# Check for --kill-ports flag
if [ "$1" = "--kill-ports" ]; then
echo "Killing processes on ports..."
PORT="${PORT:-3030}"
ADMIN_UI_PORT="${ADMIN_UI_PORT:-8080}"
# Configuration
REDIS_URL="${REDIS_URL:-redis://localhost:6379}"
# Kill process on supervisor port
SUPERVISOR_PID=$(lsof -ti:$PORT)
if [ ! -z "$SUPERVISOR_PID" ]; then
kill -9 $SUPERVISOR_PID 2>/dev/null
echo " ✅ Killed process on port $PORT"
fi
# Kill process on admin UI port
UI_PID=$(lsof -ti:$ADMIN_UI_PORT)
if [ ! -z "$UI_PID" ]; then
kill -9 $UI_PID 2>/dev/null
echo " ✅ Killed process on port $ADMIN_UI_PORT"
fi
echo "Done"
exit 0
fi
# Load environment variables
source "$SCRIPT_DIR/environment.sh"
# Spinner function: animate a braille spinner while process $1 is alive.
spinner() {
    local pid=$1
    local delay=0.1
    local spinstr='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
    while ps -p "$pid" > /dev/null 2>&1; do
        local temp=${spinstr#?}
        # FIX: print the first CHARACTER with %s. The original used
        # `printf " [%c] " "$spinstr"`, and bash's printf %c emits only the
        # first BYTE of a multibyte braille glyph, garbling the output.
        printf " [%s] " "${spinstr%"$temp"}"
        # Rotate the frame string one character to the left.
        spinstr=$temp${spinstr%"$temp"}
        sleep "$delay"
        # Erase the 6 characters just printed.
        printf "\b\b\b\b\b\b"
    done
    # Clear any spinner residue after the process exits.
    printf "    \b\b\b\b"
}
echo "Starting Hero Supervisor"
echo ""
# Build
printf "🔨 Building... "
if "$SCRIPT_DIR/build.sh" > /tmp/supervisor-run-build.log 2>&1 & spinner $!; wait $!; then
echo "✅"
else
echo "❌"
echo " Error: Build failed. Check /tmp/supervisor-run-build.log"
exit 1
fi
# Validate required environment variables
if [ -z "$ADMIN_SECRETS" ]; then
echo "❌ Error: ADMIN_SECRETS not set in .env"
echo " Generate a secret with: ./scripts/generate_secret.sh"
exit 1
fi
# Set defaults
REDIS_URL="${REDIS_URL:-redis://127.0.0.1:6379}"
PORT="${PORT:-3030}"
BIND_ADDRESS="${BIND_ADDRESS:-127.0.0.1}"
BOOTSTRAP_ADMIN_KEY="${BOOTSTRAP_ADMIN_KEY:-admin}"
ADMIN_UI_PORT="${ADMIN_UI_PORT:-8080}"
LOG_LEVEL="${LOG_LEVEL:-info}"
LOG_LEVEL="${LOG_LEVEL:-error}"
# Cleanup function
cleanup() {
echo "Shutting down..."
echo ""
printf "🛑 Stopping... "
kill $(jobs -p) 2>/dev/null || true
echo "✅"
exit 0
}
trap cleanup SIGINT SIGTERM
echo "Starting Hero Supervisor..."
# Start supervisor
printf "📡 Supervisor... "
cd "$PROJECT_DIR"
# Start supervisor in background
RUST_LOG="$LOG_LEVEL" RUST_LOG_STYLE=never \
target/release/supervisor \
--bootstrap-admin-key "$BOOTSTRAP_ADMIN_KEY" \
--redis-url "$REDIS_URL" \
--port "$PORT" \
--bind-address "$BIND_ADDRESS" &
# Build command with flags
SUPERVISOR_CMD="target/release/supervisor --redis-url $REDIS_URL --port $PORT --bind-address $BIND_ADDRESS"
SUPERVISOR_PID=$!
# Add admin secrets
IFS=',' read -ra SECRETS <<< "$ADMIN_SECRETS"
for secret in "${SECRETS[@]}"; do
SUPERVISOR_CMD="$SUPERVISOR_CMD --admin-secret $secret"
done
# Wait for supervisor to start
sleep 2
# Check if supervisor is running
if ! ps -p $SUPERVISOR_PID > /dev/null 2>&1; then
echo "Failed to start supervisor"
exit 1
# Add user secrets if provided
if [ ! -z "$USER_SECRETS" ]; then
IFS=',' read -ra SECRETS <<< "$USER_SECRETS"
for secret in "${SECRETS[@]}"; do
SUPERVISOR_CMD="$SUPERVISOR_CMD --user-secret $secret"
done
fi
# Start admin UI
echo "Starting Admin UI on port $ADMIN_UI_PORT..."
cd "$PROJECT_DIR/ui"
trunk serve --port "$ADMIN_UI_PORT" &
# Add register secrets if provided
if [ ! -z "$REGISTER_SECRETS" ]; then
IFS=',' read -ra SECRETS <<< "$REGISTER_SECRETS"
for secret in "${SECRETS[@]}"; do
SUPERVISOR_CMD="$SUPERVISOR_CMD --register-secret $secret"
done
fi
# Add mycelium URL if provided
if [ ! -z "$MYCELIUM_URL" ]; then
SUPERVISOR_CMD="$SUPERVISOR_CMD --mycelium-url $MYCELIUM_URL"
fi
# Add runners if provided
if [ ! -z "$RUNNERS" ]; then
SUPERVISOR_CMD="$SUPERVISOR_CMD --runners $RUNNERS"
fi
RUST_LOG="$LOG_LEVEL" RUST_LOG_STYLE=never $SUPERVISOR_CMD > /tmp/supervisor-run.log 2>&1 &
SUPERVISOR_PID=$!
sleep 2
if ! ps -p $SUPERVISOR_PID > /dev/null 2>&1; then
echo "❌"
echo " Error: Supervisor failed to start. Check /tmp/supervisor-run.log"
exit 1
fi
echo "✅"
# Start admin UI
printf "🎨 Admin UI... "
cd "$PROJECT_DIR/ui"
UI_STARTED=false
UI_ERROR=""
if ! command -v trunk &> /dev/null; then
echo "⚠️"
UI_ERROR="Trunk not installed. Run: cargo install trunk"
else
trunk serve --port "$ADMIN_UI_PORT" > /tmp/supervisor-ui.log 2>&1 &
ADMIN_UI_PID=$!
sleep 2
# Check if process is still running
if ps -p $ADMIN_UI_PID > /dev/null 2>&1; then
# Check for port binding errors in log
if grep -q "Address already in use" /tmp/supervisor-ui.log 2>/dev/null; then
echo "❌"
UI_ERROR="Port $ADMIN_UI_PORT already in use. Run: ./scripts/run.sh --kill-ports"
kill $ADMIN_UI_PID 2>/dev/null
else
echo "✅"
UI_STARTED=true
fi
else
echo "❌"
UI_ERROR="Failed to start"
fi
fi
echo ""
echo "✅ Hero Supervisor system started"
echo " 📡 Supervisor API: http://$BIND_ADDRESS:$PORT"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Hero Supervisor Running"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📡 OpenRPC Server: http://$BIND_ADDRESS:$PORT"
if [ "$UI_STARTED" = true ]; then
echo "🎨 Admin UI: http://127.0.0.1:$ADMIN_UI_PORT"
else
echo "🎨 Admin UI: ❌ $UI_ERROR"
fi
echo ""
echo "Logs: tail -f /tmp/supervisor-run.log /tmp/supervisor-ui.log"
echo ""
echo "Press Ctrl+C to stop all services"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Wait for both processes
# Wait for processes
wait

View File

@@ -1,8 +1,53 @@
#!/bin/bash
# serve.sh - Build optimized WASM and serve with Caddy + Brotli compression
set -e
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
PROJECT_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
cargo check
cargo test
# Spinner function: animate a braille spinner while process $1 is alive.
spinner() {
    local pid=$1
    local delay=0.1
    local spinstr='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
    while ps -p "$pid" > /dev/null 2>&1; do
        local temp=${spinstr#?}
        # FIX: print the first CHARACTER with %s. The original used
        # `printf " [%c] " "$spinstr"`, and bash's printf %c emits only the
        # first BYTE of a multibyte braille glyph, garbling the output.
        printf " [%s] " "${spinstr%"$temp"}"
        # Rotate the frame string one character to the left.
        spinstr=$temp${spinstr%"$temp"}
        sleep "$delay"
        # Erase the 6 characters just printed.
        printf "\b\b\b\b\b\b"
    done
    # Clear any spinner residue after the process exits.
    printf "    \b\b\b\b"
}
echo "Testing Hero Supervisor Workspace"
echo ""
# Test core and client
printf "🧪 Core & Client... "
cd "$PROJECT_DIR"
if cargo test --workspace > /tmp/supervisor-test-core.log 2>&1 & spinner $!; wait $!; then
echo "✅"
else
echo "❌"
echo " Error: Tests failed. Run 'cd $PROJECT_DIR && cargo test --workspace' for details"
exit 1
fi
# Test UI
printf "🧪 UI (WASM)... "
cd "$PROJECT_DIR/ui"
if ! command -v wasm-pack &> /dev/null; then
echo "⚠️ (wasm-pack not installed)"
echo " Install with: cargo install wasm-pack"
else
if wasm-pack test --headless --firefox > /tmp/supervisor-test-ui.log 2>&1 & spinner $!; wait $!; then
echo "✅"
else
echo "❌"
echo " Error: Tests failed. Run 'cd $PROJECT_DIR/ui && wasm-pack test --headless --firefox' for details"
exit 1
fi
fi
echo ""
echo "✅ All tests completed"

2
ui/Cargo.lock generated
View File

@@ -985,6 +985,7 @@ checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
[[package]]
name = "hero-job"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/job.git#7b9420f3e67802e34de1337bac4e2728ed321657"
dependencies = [
"chrono",
"hex",
@@ -1002,6 +1003,7 @@ dependencies = [
[[package]]
name = "hero-job-client"
version = "0.1.0"
source = "git+https://git.ourworld.tf/herocode/job.git#7b9420f3e67802e34de1337bac4e2728ed321657"
dependencies = [
"chrono",
"hero-job",

View File

@@ -1,7 +1,7 @@
[package]
name = "supervisor-admin-ui"
version.workspace = true
edition.workspace = true
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]