Merge branch 'development' into development_heropods

This commit is contained in:
Mahmoud-Emad
2025-11-23 13:06:50 +02:00
75 changed files with 11421 additions and 389 deletions

examples/builder/zosbuilder.vsh Executable file

@@ -0,0 +1,391 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.builder
import incubaid.herolib.core.pathlib
// Configuration for the remote builder
// Update these values for your remote machine
const remote_host = 'root@65.109.31.171' // Change to your remote host
const remote_port = 22 // SSH port
// Build configuration
const build_dir = '/root/zosbuilder'
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'
// Optional: Set to true to upload kernel to S3
const upload_kernel = false
fn main() {
println('=== Zero OS Builder - Remote Build System ===\n')
// Initialize builder
mut b := builder.new() or {
eprintln('Failed to initialize builder: ${err}')
exit(1)
}
// Connect to remote node
println('Connecting to remote builder: ${remote_host}:${remote_port}')
mut node := b.node_new(
ipaddr: '${remote_host}:${remote_port}'
name: 'zosbuilder'
) or {
eprintln('Failed to connect to remote node: ${err}')
exit(1)
}
// Run the build process
build_zos(mut node) or {
eprintln('Build failed: ${err}')
exit(1)
}
println('\n=== Build completed successfully! ===')
}
fn build_zos(mut node builder.Node) ! {
println('\n--- Step 1: Installing prerequisites ---')
install_prerequisites(mut node)!
println('\n--- Step 2: Cloning zosbuilder repository ---')
clone_repository(mut node)!
println('\n--- Step 3: Creating RFS configuration ---')
create_rfs_config(mut node)!
println('\n--- Step 4: Running build ---')
run_build(mut node)!
println('\n--- Step 5: Checking build artifacts ---')
check_artifacts(mut node)!
}
fn install_prerequisites(mut node builder.Node) ! {
println('Detecting platform...')
// Check platform type
if node.platform == .ubuntu {
println('Installing Ubuntu/Debian prerequisites...')
// Update package list and install all required packages
node.exec_cmd(
cmd: '
apt-get update
apt-get install -y \\
build-essential \\
upx-ucl \\
binutils \\
git \\
wget \\
curl \\
qemu-system-x86 \\
podman \\
musl-tools \\
cpio \\
xz-utils \\
bc \\
flex \\
bison \\
libelf-dev \\
libssl-dev
# Install rustup and Rust toolchain
if ! command -v rustup > /dev/null 2>&1; then
echo "Installing rustup..."
curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
source "\$HOME/.cargo/env"
fi
# Add Rust musl target
source "\$HOME/.cargo/env"
rustup target add x86_64-unknown-linux-musl
'
name: 'install_ubuntu_packages'
reset: true
)!
} else if node.platform == .alpine {
println('Installing Alpine prerequisites...')
node.exec_cmd(
cmd: '
apk add --no-cache \\
build-base \\
rust \\
cargo \\
upx \\
git \\
wget \\
qemu-system-x86 \\
podman
# Add Rust musl target
rustup target add x86_64-unknown-linux-musl || echo "rustup not available"
'
name: 'install_alpine_packages'
reset: true
)!
} else {
return error('Unsupported platform: ${node.platform}. Only Ubuntu/Debian and Alpine are supported.')
}
println('Prerequisites installed successfully')
}
fn clone_repository(mut node builder.Node) ! {
// Clean up disk space first
println('Cleaning up disk space...')
node.exec_cmd(
cmd: '
# Remove old build directories if they exist
rm -rf ${build_dir} || true
# Clean up podman/docker cache to free space
podman system prune -af || true
# Clean up package manager cache
if command -v apt-get > /dev/null 2>&1; then
apt-get clean || true
fi
# Show disk space
df -h /
'
name: 'cleanup_disk_space'
stdout: true
)!
// Clone the repository
println('Cloning from ${repo_url}...')
node.exec_cmd(
cmd: '
git clone ${repo_url} ${build_dir}
cd ${build_dir}
git log -1 --oneline
'
name: 'clone_zosbuilder'
stdout: true
)!
println('Repository cloned successfully')
}
fn create_rfs_config(mut node builder.Node) ! {
println('Creating config/rfs.conf...')
rfs_config := 'S3_ENDPOINT="http://wizenoze.grid.tf:3900"
S3_REGION="garage"
S3_BUCKET="zos"
S3_PREFIX="store"
S3_ACCESS_KEY="<put key here>"
S3_SECRET_KEY="<put key here>"
WEB_ENDPOINT=""
MANIFESTS_SUBPATH="flists"
READ_ACCESS_KEY="<put key here>"
READ_SECRET_KEY="<put key here>"
ROUTE_ENDPOINT="http://wizenoze.grid.tf:3900"
ROUTE_PATH="/zos/store"
ROUTE_REGION="garage"
KEEP_S3_FALLBACK="false"
UPLOAD_MANIFESTS="true"
'
// Create config directory if it doesn't exist
node.exec_cmd(
cmd: 'mkdir -p ${build_dir}/config'
name: 'create_config_dir'
stdout: false
)!
// Write the RFS configuration file
node.file_write('${build_dir}/config/rfs.conf', rfs_config)!
// Verify the file was created
result := node.exec(
cmd: 'cat ${build_dir}/config/rfs.conf'
stdout: false
)!
println('RFS configuration created successfully')
println('Config preview:')
println(result)
// Skip youki component by removing it from sources.conf
println('\nRemoving youki from sources.conf (requires SSH keys)...')
node.exec_cmd(
cmd: '
# Remove any line containing youki from sources.conf
grep -v "youki" ${build_dir}/config/sources.conf > ${build_dir}/config/sources.conf.tmp
mv ${build_dir}/config/sources.conf.tmp ${build_dir}/config/sources.conf
# Verify it was removed
echo "Updated sources.conf:"
cat ${build_dir}/config/sources.conf
'
name: 'remove_youki'
stdout: true
)!
println('youki component skipped')
}
fn run_build(mut node builder.Node) ! {
println('Starting build process...')
println('This may take 15-30 minutes depending on your system...')
println('Status updates will be printed every 2 minutes...\n')
// Check disk space before building
println('Checking disk space...')
disk_info := node.exec(
cmd: 'df -h ${build_dir}'
stdout: false
)!
println(disk_info)
// Clean up any previous build artifacts and corrupted databases
println('Cleaning up previous build artifacts...')
node.exec_cmd(
cmd: '
cd ${build_dir}
# Remove dist directory to clean up any corrupted databases
rm -rf dist/
# Clean up any temporary files
rm -rf /tmp/rfs-* || true
# Show available disk space after cleanup
df -h ${build_dir}
'
name: 'cleanup_before_build'
stdout: true
)!
// Make scripts executable and run build with periodic status messages
mut build_cmd := '
cd ${build_dir}
# Source Rust environment
source "\$HOME/.cargo/env"
# Make scripts executable
chmod +x scripts/build.sh scripts/clean.sh
# Set environment variables
export UPLOAD_KERNEL=${upload_kernel}
export UPLOAD_MANIFESTS=false
# Create a wrapper script that prints status every 2 minutes
cat > /tmp/build_with_status.sh << "EOF"
#!/bin/bash
set -e
# Source Rust environment
source "\$HOME/.cargo/env"
# Start the build in background
./scripts/build.sh &
BUILD_PID=\$!
# Print status every 2 minutes while build is running
COUNTER=0
while kill -0 \$BUILD_PID 2>/dev/null; do
sleep 120
COUNTER=\$((COUNTER + 2))
echo ""
echo "=== Build still in progress... (\${COUNTER} minutes elapsed) ==="
echo ""
done
# Wait for build to complete and get exit code
# (the || guard keeps set -e from aborting before we can report failure)
EXIT_CODE=0
wait \$BUILD_PID || EXIT_CODE=\$?
if [ \$EXIT_CODE -eq 0 ]; then
echo ""
echo "=== Build completed successfully after \${COUNTER} minutes ==="
else
echo ""
echo "=== Build failed after \${COUNTER} minutes with exit code \$EXIT_CODE ==="
fi
exit \$EXIT_CODE
EOF
chmod +x /tmp/build_with_status.sh
/tmp/build_with_status.sh
'
// Execute build with output
result := node.exec_cmd(
cmd: build_cmd
name: 'zos_build'
stdout: true
reset: true
period: 0 // Don't cache, always rebuild
)!
println('\nBuild completed!')
println(result)
}
fn check_artifacts(mut node builder.Node) ! {
println('Checking build artifacts in ${build_dir}/dist/...')
// List the dist directory
result := node.exec(
cmd: 'ls -lh ${build_dir}/dist/'
stdout: true
)!
println('\nBuild artifacts:')
println(result)
// Check for expected files
vmlinuz_exists := node.file_exists('${build_dir}/dist/vmlinuz.efi')
initramfs_exists := node.file_exists('${build_dir}/dist/initramfs.cpio.xz')
if vmlinuz_exists && initramfs_exists {
println('\nBuild artifacts created successfully:')
println(' - vmlinuz.efi (Kernel with embedded initramfs)')
println(' - initramfs.cpio.xz (Standalone initramfs archive)')
// Get file sizes
size_info := node.exec(
cmd: 'du -h ${build_dir}/dist/vmlinuz.efi ${build_dir}/dist/initramfs.cpio.xz'
stdout: false
)!
println('\nFile sizes:')
println(size_info)
} else {
return error('Build artifacts not found. Build may have failed.')
}
}
// Download artifacts to local machine
fn download_artifacts(mut node builder.Node, local_dest string) ! {
println('Downloading artifacts to local machine...')
mut dest_path := pathlib.get_dir(path: local_dest, create: true)!
println('Downloading to ${dest_path.path}...')
// Download the entire dist directory
node.download(
source: '${build_dir}/dist/'
dest: dest_path.path
)!
println('\nArtifacts downloaded successfully to ${dest_path.path}')
// List downloaded files
println('\nDownloaded files:')
result := node.exec(
cmd: 'ls -lh ${dest_path.path}'
stdout: false
) or {
println('Could not list local files')
return
}
println(result)
}


@@ -0,0 +1,224 @@
# Zero OS Builder - Remote Build System
This example demonstrates how to build [Zero OS (zosbuilder)](https://git.ourworld.tf/tfgrid/zosbuilder) on a remote machine using the herolib builder module.
## Overview
The zosbuilder creates a Zero OS Alpine Initramfs with:
- Alpine Linux 3.22 base
- Custom kernel with embedded initramfs
- ThreeFold components (zinit, rfs, mycelium, zosstorage)
- Optimized size with UPX compression
- Two-stage module loading
## Prerequisites
### Local Machine
- V compiler installed
- SSH access to a remote build machine
- herolib installed
### Remote Build Machine
The script will automatically install these on the remote machine:
- **Ubuntu/Debian**: build-essential, upx-ucl, binutils, git, wget, curl, qemu-system-x86, podman, musl-tools, cpio, xz-utils, bc, flex, bison, libelf-dev, libssl-dev, plus the Rust toolchain via rustup
- **Alpine Linux**: build-base, rust, cargo, upx, git, wget, qemu-system-x86, podman
- Rust musl target (x86_64-unknown-linux-musl)
## Configuration
Edit the constants in `zosbuilder.vsh`:
```v
// Remote machine connection
const remote_host = 'root@195.192.213.2' // Your remote host
const remote_port = 22 // SSH port
// Build configuration
const build_dir = '/root/zosbuilder' // Build directory on remote
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'
// Optional: Upload kernel to S3
const upload_kernel = false
```
## Usage
### Basic Build
```bash
# Make the script executable
chmod +x zosbuilder.vsh
# Run the build
./zosbuilder.vsh
```
### What the Script Does
1. **Connects to Remote Machine**: Establishes SSH connection to the build server (see the minimal sketch after this list)
2. **Installs Prerequisites**: Automatically installs all required build tools
3. **Clones Repository**: Fetches the latest zosbuilder code
4. **Runs Build**: Executes the build process (takes 15-30 minutes)
5. **Verifies Artifacts**: Checks that build outputs were created successfully
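The connect-and-run pattern in steps 1 and 2 is only a few lines of builder API code. A minimal sketch, using the same calls as `zosbuilder.vsh` (the host is a placeholder):
```v
import incubaid.herolib.builder

mut b := builder.new()!
mut node := b.node_new(
	ipaddr: 'root@your-build-host:22' // placeholder: your build machine
	name:   'zosbuilder'
)!
// exec_cmd caches results under `name`; reset: true forces re-execution
node.exec_cmd(cmd: 'uname -a', name: 'check_remote', reset: true, stdout: true)!
```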
### Build Output
The build creates two main artifacts in `${build_dir}/dist/`:
- `vmlinuz.efi` - Kernel with embedded initramfs (bootable)
- `initramfs.cpio.xz` - Standalone initramfs archive
## Build Process Details
The zosbuilder follows these phases:
### Phase 1: Environment Setup
- Creates build directories
- Installs build dependencies
- Sets up Rust musl target
### Phase 2: Alpine Base
- Downloads Alpine 3.22 miniroot
- Extracts to initramfs directory
- Installs packages from config/packages.list
### Phase 3: Component Building
- Builds zinit (init system)
- Builds rfs (remote filesystem)
- Builds mycelium (networking)
- Builds zosstorage (storage orchestration)
### Phase 4: System Configuration
- Replaces /sbin/init with zinit
- Copies zinit configuration
- Sets up 2-stage module loading
- Configures system services
### Phase 5: Optimization
- Removes docs, man pages, locales
- Strips executables and libraries
- UPX compresses all binaries
- Aggressive cleanup
### Phase 6: Packaging
- Creates initramfs.cpio.xz with XZ compression
- Builds kernel with embedded initramfs
- Generates vmlinuz.efi
- Optionally uploads to S3
## Advanced Usage
### Download Artifacts to Local Machine
Add this to your script after the build completes:
```v
// Download artifacts to local machine
download_artifacts(mut node, '/tmp/zos-artifacts') or {
eprintln('Failed to download artifacts: ${err}')
}
```
### Custom Build Configuration
You can modify the build by editing files on the remote machine before building:
```v
// After cloning, before building
node.file_write('${build_dir}/config/packages.list', 'your custom packages')!
```
### Rebuild Without Re-cloning
To rebuild without re-cloning the repository, modify the script to skip the clone step:
```v
// Comment out the clone_repository call
// clone_repository(mut node)!
// Or just run the build directly
node.exec_cmd(
cmd: 'cd ${build_dir} && ./scripts/build.sh'
name: 'zos_rebuild'
)!
```
## Testing the Build
After building, you can test the kernel with QEMU:
```bash
# On the remote machine
cd /root/zosbuilder
./scripts/test-qemu.sh
```
## Troubleshooting
### Build Fails
1. Check the build output for specific errors
2. Verify all prerequisites are installed
3. Ensure sufficient disk space (at least 5GB free); a pre-flight check is sketched after this list
4. Check internet connectivity for downloading components
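Because the script holds a `builder.Node` handle, a pre-flight check is easy to script. A sketch using the same `node.exec` call the script already relies on:
```v
// Pre-flight checks before starting a long build
fn preflight(mut node builder.Node) ! {
	// the build needs roughly 5GB of free space
	disk := node.exec(cmd: 'df -h /root', stdout: false)!
	println('Disk space on builder:\n${disk}')
	// verify the source host is reachable from the builder
	node.exec(cmd: 'curl -sI https://git.ourworld.tf >/dev/null && echo reachable', stdout: false)!
}
```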
### SSH Connection Issues
1. Verify SSH access: `ssh root@195.192.213.2`
2. Check SSH key authentication is set up
3. Verify the remote host and port are correct
### Missing Dependencies
The script automatically installs dependencies, but if manual installation is needed:
**Ubuntu/Debian:**
```bash
sudo apt-get update
sudo apt-get install -y build-essential rustc cargo upx-ucl binutils git wget qemu-system-x86 podman musl-tools
rustup target add x86_64-unknown-linux-musl
```
**Alpine Linux:**
```bash
apk add --no-cache build-base rust cargo upx git wget qemu-system-x86 podman
rustup target add x86_64-unknown-linux-musl
```
## Integration with CI/CD
This builder can be integrated into CI/CD pipelines:
```v
// Example: Build and upload to artifact storage
fn ci_build() ! {
mut b := builder.new()!
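// ci_builder_host is assumed to be defined elsewhere (e.g. a const holding your CI build host)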
mut node := b.node_new(ipaddr: '${ci_builder_host}')!
build_zos(mut node)!
// Upload to artifact storage
node.exec_cmd(
cmd: 's3cmd put ${build_dir}/dist/* s3://artifacts/zos/'
name: 'upload_artifacts'
)!
}
```
## Related Examples
- `simple.vsh` - Basic builder usage
- `remote_executor/` - Remote code execution
- `simple_ip4.vsh` - IPv4 connection example
- `simple_ip6.vsh` - IPv6 connection example
## References
- [zosbuilder Repository](https://git.ourworld.tf/tfgrid/zosbuilder)
- [herolib Builder Documentation](../../lib/builder/readme.md)
- [Zero OS Documentation](https://manual.grid.tf/)
## License
This example follows the same license as herolib.


@@ -5,7 +5,7 @@ import incubaid.herolib.schemas.openrpc
import os
// 1. Create a new server instance
-mut server := heroserver.new(port: 8080, auth_enabled: false)!
+mut server := heroserver.new(port: 8081, auth_enabled: false)!
// 2. Create and register your OpenRPC handlers
// These handlers must conform to the `openrpc.OpenRPCHandler` interface.

File diff suppressed because it is too large


@@ -0,0 +1,69 @@
{
"openrpc": "1.2.6",
"info": {
"title": "Comment Service",
"description": "A simple service for managing comments.",
"version": "1.0.0"
},
"methods": [
{
"name": "add_comment",
"summary": "Add a new comment",
"params": [
{
"name": "text",
"description": "The content of the comment.",
"required": true,
"schema": {
"type": "string"
}
}
],
"result": {
"name": "comment_id",
"description": "The ID of the newly created comment.",
"schema": {
"type": "string"
}
}
},
{
"name": "get_comment",
"summary": "Get a comment by ID",
"description": "Retrieves a specific comment using its unique identifier.",
"params": [
{
"name": "id",
"description": "The unique identifier of the comment to retrieve.",
"required": true,
"schema": {
"type": "number",
"example": "1"
}
},
{
"name": "include_metadata",
"description": "Whether to include metadata in the response.",
"required": false,
"schema": {
"type": "boolean",
"example": true
}
}
],
"result": {
"name": "comment",
"description": "The requested comment object.",
"schema": {
"type": "object",
"example": {
"id": 1,
"text": "This is a sample comment",
"created_at": "2024-01-15T10:30:00Z"
}
}
}
}
],
"components": {}
}
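A spec like this is exercised with plain JSON-RPC 2.0 requests. As an illustration, a sketch of calling `get_comment` from V over HTTP — the `/rpc` path and port 8081 are assumptions, not taken from the spec:
```v
import net.http

// JSON-RPC 2.0 request for the get_comment method defined above
body := '{"jsonrpc":"2.0","id":1,"method":"get_comment","params":{"id":1,"include_metadata":true}}'
// endpoint path and port are assumptions; adjust to your heroserver setup
resp := http.post_json('http://127.0.0.1:8081/rpc', body)!
println(resp.body)
```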


@@ -0,0 +1,46 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.base.redis
println('=== Redis Installer Example ===\n')
// Create configuration
// You can customize port, datadir, and ipaddr as needed
config := redis.RedisInstall{
port: 6379 // Redis port
datadir: '/var/lib/redis' // Data directory (standard location)
ipaddr: 'localhost' // Bind address
}
// Check if Redis is already running
if redis.check(config) {
println('INFO: Redis is already running on port ${config.port}')
println(' To reinstall, stop Redis first: redis.stop()!')
} else {
// Install and start Redis
println('Installing and starting Redis...')
println(' Port: ${config.port}')
println(' Data directory: ${config.datadir}')
println(' Bind address: ${config.ipaddr}\n')
redis.redis_install(config)!
// Verify installation
if redis.check(config) {
println('\nSUCCESS: Redis installed and started successfully!')
println(' You can now connect to Redis on port ${config.port}')
println(' Test with: redis-cli ping')
} else {
println('\nERROR: Redis installation completed but failed to start')
println(' Check logs: journalctl -u redis-server -n 20')
}
}
println('\n=== Available Functions ===')
println(' redis.redis_install(config)! - Install and start Redis')
println(' redis.start(config)! - Start Redis')
println(' redis.stop()! - Stop Redis')
println(' redis.restart(config)! - Restart Redis')
println(' redis.check(config) - Check if running')
println('\nDone!')


@@ -0,0 +1,209 @@
# Horus Installation Examples
This directory contains example scripts for installing and managing all Horus components using the herolib installer framework.
## Components
The Horus ecosystem consists of the following components:
1. **Coordinator** - Central coordination service (HTTP: 8081, WS: 9653)
2. **Supervisor** - Supervision and monitoring service (HTTP: 8082, WS: 9654)
3. **Hero Runner** - Command execution runner for Hero jobs
4. **Osiris Runner** - Database-backed runner
5. **SAL Runner** - System Abstraction Layer runner
## Quick Start
### Full Installation and Start
To install and start all Horus components:
```bash
# 1. Install all components (this will take several minutes)
./horus_full_install.vsh
# 2. Start all services
./horus_start_all.vsh
# 3. Check status
./horus_status.vsh
```
### Stop All Services
```bash
./horus_stop_all.vsh
```
## Available Scripts
### `horus_full_install.vsh`
Installs all Horus components:
- Checks and installs Redis if needed
- Checks and installs Rust if needed
- Clones the horus repository
- Builds all binaries from source
**Note:** This script can take 10-30 minutes depending on your system, as it compiles Rust code.
### `horus_start_all.vsh`
Starts all Horus services in the correct order:
1. Coordinator
2. Supervisor
3. Hero Runner
4. Osiris Runner
5. SAL Runner
### `horus_stop_all.vsh`
Stops all running Horus services in reverse order.
### `horus_status.vsh`
Checks and displays the status of all Horus services.
## Prerequisites
- **Operating System**: Linux or macOS
- **Dependencies** (automatically installed):
- Redis (required for all components)
- Rust toolchain (for building from source)
- Git (for cloning repositories)
## Configuration
All components use default configurations:
### Coordinator
- Binary: `/hero/var/bin/coordinator`
- HTTP Port: `8081`
- WebSocket Port: `9653`
- Redis: `127.0.0.1:6379`
### Supervisor
- Binary: `/hero/var/bin/supervisor`
- HTTP Port: `8082`
- WebSocket Port: `9654`
- Redis: `127.0.0.1:6379`
### Runners
- Hero Runner: `/hero/var/bin/herorunner`
- Osiris Runner: `/hero/var/bin/runner_osiris`
- SAL Runner: `/hero/var/bin/runner_sal`
## Custom Configuration
To customize the configuration, you can set the fields from V code:
```v
import incubaid.herolib.installers.horus.coordinator
mut coordinator_inst := coordinator.get(create: true)!
coordinator_inst.http_port = 9000
coordinator_inst.ws_port = 9001
coordinator_inst.log_level = 'debug'
coordinator.set(coordinator_inst)!
coordinator_inst.install()!
coordinator_inst.start()!
```
## Testing
After starting the services, you can test them:
```bash
# Test Coordinator HTTP endpoint
curl http://127.0.0.1:8081
# Test Supervisor HTTP endpoint
curl http://127.0.0.1:8082
# Check running processes
pgrep -f coordinator
pgrep -f supervisor
pgrep -f herorunner
pgrep -f runner_osiris
pgrep -f runner_sal
```
## Troubleshooting
### Redis Not Running
If you get Redis connection errors:
```bash
# Check if Redis is running
redis-cli ping
# Start Redis (Ubuntu/Debian)
sudo systemctl start redis-server
# Start Redis (macOS with Homebrew)
brew services start redis
```
### Build Failures
If the build fails:
1. Ensure you have enough disk space (at least 5GB free)
2. Check that Rust is properly installed: `rustc --version`
3. Try cleaning the build: `cd /root/code/git.ourworld.tf/herocode/horus && cargo clean`
### Port Conflicts
If ports 8081 or 8082 are already in use, you can customize the ports in the configuration.
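A minimal sketch of overriding the coordinator ports, following the same get/set pattern as the Custom Configuration section above (the substitute port numbers are arbitrary examples):
```v
import incubaid.herolib.installers.horus.coordinator

mut coordinator_inst := coordinator.get(create: true)!
coordinator_inst.http_port = 9081 // any free port
coordinator_inst.ws_port = 9755 // any free port
coordinator.set(coordinator_inst)!
coordinator_inst.install()!
coordinator_inst.start()!
```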
## Advanced Usage
### Individual Component Installation
You can install components individually:
```bash
# Install only coordinator
v run coordinator_only.vsh
# Install only supervisor
v run supervisor_only.vsh
```
### Using with Heroscript
You can also use heroscript files for configuration:
```heroscript
!!coordinator.configure
name:'production'
http_port:8081
ws_port:9653
log_level:'info'
!!coordinator.install
!!coordinator.start
```
## Service Management
Services are managed using the system's startup manager (zinit or systemd):
```bash
# Check service status with systemd
systemctl status coordinator
# View logs
journalctl -u coordinator -f
```
## Cleanup
To completely remove all Horus components:
```bash
# Stop all services
./horus_stop_all.vsh
# Destroy all components (removes binaries)
v run horus_destroy_all.vsh
```
## Support
For issues or questions:
- Check the main Horus repository: https://git.ourworld.tf/herocode/horus
- Review the installer code in `lib/installers/horus/`


@@ -0,0 +1,36 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
// Example usage of coordinator installer
// This will:
// 1. Check if Rust is installed (installs if not present)
// 2. Clone the horus repository
// 3. Build the coordinator binary
//
// Note: Redis must be pre-installed and running before using the coordinator
println('Building coordinator from horus repository...')
println('(This will install Rust if not already installed)\n')
// Create coordinator instance
mut coord := coordinator.new()!
// Build and install
// Note: This will skip the build if the binary already exists
coord.install()!
// To force a rebuild even if binary exists, use:
// coord.install(reset: true)!
println('\nCoordinator built and installed successfully!')
println('Binary location: ${coord.binary_path}')
// Note: To start the service, uncomment the lines below
// (requires proper zinit or screen session setup and Redis running)
// coord.start()!
// if coord.running()! {
// println('Coordinator is running!')
// }
// coord.stop()!
// coord.destroy()!


@@ -0,0 +1,60 @@
// Horus Configuration Heroscript
// This file demonstrates how to configure all Horus components using heroscript
// Configure Coordinator
!!coordinator.configure
name:'default'
binary_path:'/hero/var/bin/coordinator'
redis_addr:'127.0.0.1:6379'
http_port:8081
ws_port:9653
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Supervisor
!!supervisor.configure
name:'default'
binary_path:'/hero/var/bin/supervisor'
redis_addr:'127.0.0.1:6379'
http_port:8082
ws_port:9654
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Hero Runner
!!herorunner.configure
name:'default'
binary_path:'/hero/var/bin/herorunner'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Osiris Runner
!!osirisrunner.configure
name:'default'
binary_path:'/hero/var/bin/runner_osiris'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure SAL Runner
!!salrunner.configure
name:'default'
binary_path:'/hero/var/bin/runner_sal'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Install all components
!!coordinator.install
!!supervisor.install
!!herorunner.install
!!osirisrunner.install
!!salrunner.install
// Start all services
!!coordinator.start name:'default'
!!supervisor.start name:'default'
!!herorunner.start name:'default'
!!osirisrunner.start name:'default'
!!salrunner.start name:'default'


@@ -0,0 +1,60 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Full Horus Installation Example
// This script installs and configures all Horus components:
// - Coordinator (port 8081)
// - Supervisor (port 8082)
// - Hero Runner
// - Osiris Runner
// - SAL Runner
println('🚀 Starting Full Horus Installation')
// Step 1: Install Coordinator
println('\n📦 Step 1/5: Installing Coordinator...')
mut coordinator_installer := coordinator.get(create: true)!
coordinator_installer.install()!
println(' Coordinator installed at ${coordinator_installer.binary_path}')
// Step 2: Install Supervisor
println('\n📦 Step 2/5: Installing Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.install()!
println(' Supervisor installed at ${supervisor_inst.binary_path}')
// Step 3: Install Hero Runner
println('\n📦 Step 3/5: Installing Hero Runner...')
mut hero_runner := herorunner.get(create: true)!
hero_runner.install()!
println(' Hero Runner installed at ${hero_runner.binary_path}')
// Step 4: Install Osiris Runner
println('\n📦 Step 4/5: Installing Osiris Runner...')
mut osiris_runner := osirisrunner.get(create: true)!
osiris_runner.install()!
println(' Osiris Runner installed at ${osiris_runner.binary_path}')
// Step 5: Install SAL Runner
println('\n📦 Step 5/5: Installing SAL Runner...')
mut sal_runner := salrunner.get(create: true)!
sal_runner.install()!
println(' SAL Runner installed at ${sal_runner.binary_path}')
println('🎉 All Horus components installed successfully!')
println('\n📋 Installation Summary:')
println(' Coordinator: ${coordinator_installer.binary_path} (HTTP: ${coordinator_installer.http_port}, WS: ${coordinator_installer.ws_port})')
println(' Supervisor: ${supervisor_inst.binary_path} (HTTP: ${supervisor_inst.http_port}, WS: ${supervisor_inst.ws_port})')
println(' Hero Runner: ${hero_runner.binary_path}')
println(' Osiris Runner: ${osiris_runner.binary_path}')
println(' SAL Runner: ${sal_runner.binary_path}')
println('\n💡 Next Steps:')
println(' To start services, run: ./horus_start_all.vsh')
println(' To test individual components, see the other example scripts')


@@ -0,0 +1,85 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
import time
// Start All Horus Services
// This script starts all Horus components in the correct order
println('🚀 Starting All Horus Services')
// Step 1: Start Coordinator
println('\n Step 1/5: Starting Coordinator...')
mut coordinator_installer := coordinator.get(create: true)!
coordinator_installer.start()!
if coordinator_installer.running()! {
println(' Coordinator is running on HTTP:${coordinator_installer.http_port} WS:${coordinator_installer.ws_port}')
} else {
println(' Coordinator failed to start')
}
// Step 2: Start Supervisor
println('\n Step 2/5: Starting Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.start()!
if supervisor_inst.running()! {
println(' Supervisor is running on HTTP:${supervisor_inst.http_port} WS:${supervisor_inst.ws_port}')
} else {
println(' Supervisor failed to start')
}
// Step 3: Start Hero Runner
println('\n Step 3/5: Starting Hero Runner...')
mut hero_runner := herorunner.get(create: true)!
hero_runner.start()!
if hero_runner.running()! {
println(' Hero Runner is running')
} else {
println(' Hero Runner failed to start')
}
// Step 4: Start Osiris Runner
println('\n Step 4/5: Starting Osiris Runner...')
mut osiris_runner := osirisrunner.get(create: true)!
osiris_runner.start()!
if osiris_runner.running()! {
println(' Osiris Runner is running')
} else {
println(' Osiris Runner failed to start')
}
// Step 5: Start SAL Runner
println('\n Step 5/5: Starting SAL Runner...')
mut sal_runner := salrunner.get(create: true)!
sal_runner.start()!
if sal_runner.running()! {
println(' SAL Runner is running')
} else {
println(' SAL Runner failed to start')
}
println('🎉 All Horus services started!')
println('\n📊 Service Status:')
coordinator_status := if coordinator_installer.running()! { 'Running' } else { 'Stopped' }
println(' Coordinator: ${coordinator_status} (http://127.0.0.1:${coordinator_installer.http_port})')
supervisor_status := if supervisor_inst.running()! { 'Running' } else { 'Stopped' }
println(' Supervisor: ${supervisor_status} (http://127.0.0.1:${supervisor_inst.http_port})')
hero_runner_status := if hero_runner.running()! { 'Running' } else { 'Stopped' }
println(' Hero Runner: ${hero_runner_status}')
osiris_runner_status := if osiris_runner.running()! { 'Running' } else { 'Stopped' }
println(' Osiris Runner: ${osiris_runner_status}')
sal_runner_status := if sal_runner.running()! { 'Running' } else { 'Stopped' }
println(' SAL Runner: ${sal_runner_status}')
println('\n💡 Next Steps:')
println(' To stop services, run: ./horus_stop_all.vsh')
println(' To check status, run: ./horus_status.vsh')


@@ -0,0 +1,56 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Check Status of All Horus Services
println('📊 Horus Services Status')
println('='.repeat(60))
// Get all services
mut coordinator_inst := coordinator.get()!
mut supervisor_inst := supervisor.get()!
mut hero_runner := herorunner.get()!
mut osiris_runner := osirisrunner.get()!
mut sal_runner := salrunner.get()!
// Check status
println('\n🔍 Checking service status...\n')
coord_running := coordinator_inst.running()!
super_running := supervisor_inst.running()!
hero_running := hero_runner.running()!
osiris_running := osiris_runner.running()!
sal_running := sal_runner.running()!
println('Service Status Details')
println('-'.repeat(60))
println('Coordinator    ${if coord_running { "Running" } else { "Stopped" }}   http://127.0.0.1:${coordinator_inst.http_port}')
println('Supervisor     ${if super_running { "Running" } else { "Stopped" }}   http://127.0.0.1:${supervisor_inst.http_port}')
println('Hero Runner    ${if hero_running { "Running" } else { "Stopped" }}')
println('Osiris Runner  ${if osiris_running { "Running" } else { "Stopped" }}')
println('SAL Runner     ${if sal_running { "Running" } else { "Stopped" }}')
println('\n' + '='.repeat(60))
// Count running services
mut running_count := 0
if coord_running { running_count++ }
if super_running { running_count++ }
if hero_running { running_count++ }
if osiris_running { running_count++ }
if sal_running { running_count++ }
println('Summary: ${running_count}/5 services running')
if running_count == 5 {
println('🎉 All services are running!')
} else if running_count == 0 {
println('💤 All services are stopped')
} else {
println('Some services are not running')
}


@@ -0,0 +1,43 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Stop All Horus Services
// This script stops all running Horus components
println('🛑 Stopping All Horus Services')
println('='.repeat(60))
// Stop in reverse order
println('\n Stopping SAL Runner...')
mut sal_runner := salrunner.get()!
sal_runner.stop()!
println(' SAL Runner stopped')
println('\n Stopping Osiris Runner...')
mut osiris_runner := osirisrunner.get()!
osiris_runner.stop()!
println(' Osiris Runner stopped')
println('\n Stopping Hero Runner...')
mut hero_runner := herorunner.get()!
hero_runner.stop()!
println(' Hero Runner stopped')
println('\n Stopping Supervisor...')
mut supervisor_inst := supervisor.get()!
supervisor_inst.stop()!
println(' Supervisor stopped')
println('\n Stopping Coordinator...')
mut coordinator_inst := coordinator.get()!
coordinator_inst.stop()!
println(' Coordinator stopped')
println('\n' + '='.repeat(60))
println('All Horus services stopped!')
println('='.repeat(60))


@@ -0,0 +1,52 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
// Quick Start Example - Install and Start Coordinator and Supervisor
// This is a minimal example to get started with Horus
println('🚀 Horus Quick Start')
println('='.repeat(60))
println('This will install and start Coordinator and Supervisor')
println('(Runners can be added later using the full install script)')
println('='.repeat(60))
// Install Coordinator
println('\n📦 Installing Coordinator...')
mut coordinator_inst := coordinator.get(create: true)!
coordinator_inst.install()!
println(' Coordinator installed')
// Install Supervisor
println('\n📦 Installing Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.install()!
println(' Supervisor installed')
// Start services
println('\n Starting Coordinator...')
coordinator_inst.start()!
if coordinator_inst.running()! {
println(' Coordinator is running on http://127.0.0.1:${coordinator_inst.http_port}')
}
println('\n Starting Supervisor...')
supervisor_inst.start()!
if supervisor_inst.running()! {
println(' Supervisor is running on http://127.0.0.1:${supervisor_inst.http_port}')
}
println('\n' + '='.repeat(60))
println('🎉 Quick Start Complete!')
println('='.repeat(60))
println('\n📊 Services Running:')
println(' Coordinator: http://127.0.0.1:${coordinator_inst.http_port}')
println(' Supervisor: http://127.0.0.1:${supervisor_inst.http_port}')
println('\n💡 Next Steps:')
println(' Test coordinator: curl http://127.0.0.1:${coordinator_inst.http_port}')
println(' Test supervisor: curl http://127.0.0.1:${supervisor_inst.http_port}')
println(' Install runners: ./horus_full_install.vsh')
println(' Check status: ./horus_status.vsh')
println(' Stop services: ./horus_stop_all.vsh')


@@ -0,0 +1,11 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.virt.crun_installer
mut crun := crun_installer.get()!
// To install
crun.install()!
// To remove again, uncomment:
// crun.destroy()!