Merge branch 'development' into development_heropods

Mahmoud-Emad
2025-11-23 13:06:50 +02:00
75 changed files with 11421 additions and 389 deletions

compare_dirs.sh Executable file

@@ -0,0 +1,47 @@
#!/bin/bash
# Usage: ./compare_dirs.sh <branch1> <branch2> <dir_path>
# Example: ./compare_dirs.sh main feature-branch src
if [ "$#" -ne 3 ]; then
    echo "Usage: $0 <branch1> <branch2> <dir_path>"
    exit 1
fi
BRANCH1=$1
BRANCH2=$2
DIR_PATH=$3
TMP_DIR1=$(mktemp -d)
TMP_DIR2=$(mktemp -d)
# Ensure we're in a Git repo
if ! git rev-parse --is-inside-work-tree > /dev/null 2>&1; then
    echo "Error: Not inside a Git repository"
    exit 1
fi
# Remove the temporary worktrees on exit, including early error exits
cleanup() {
    git worktree remove "$TMP_DIR1" --force 2>/dev/null
    git worktree remove "$TMP_DIR2" --force 2>/dev/null
}
trap cleanup EXIT
# Fetch branch contents without switching branches
git worktree add "$TMP_DIR1" "$BRANCH1" > /dev/null 2>&1
git worktree add "$TMP_DIR2" "$BRANCH2" > /dev/null 2>&1
# Check that the directory exists in both branches
if [ ! -d "$TMP_DIR1/$DIR_PATH" ]; then
    echo "Error: $DIR_PATH does not exist in $BRANCH1"
    exit 1
fi
if [ ! -d "$TMP_DIR2/$DIR_PATH" ]; then
    echo "Error: $DIR_PATH does not exist in $BRANCH2"
    exit 1
fi
# Compare directories
echo "Comparing $DIR_PATH between $BRANCH1 and $BRANCH2..."
# Summary: which files differ or exist only on one side
diff -qr "$TMP_DIR1/$DIR_PATH" "$TMP_DIR2/$DIR_PATH"
# Detailed, line-by-line differences
diff -u -r "$TMP_DIR1/$DIR_PATH" "$TMP_DIR2/$DIR_PATH"

examples/builder/zosbuilder.vsh Executable file

@@ -0,0 +1,391 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.builder
import incubaid.herolib.core.pathlib
// Configuration for the remote builder
// Update these values for your remote machine
const remote_host = 'root@65.109.31.171' // Change to your remote host
const remote_port = 22 // SSH port
// Build configuration
const build_dir = '/root/zosbuilder'
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'
// Optional: Set to true to upload kernel to S3
const upload_kernel = false
fn main() {
println('=== Zero OS Builder - Remote Build System ===\n')
// Initialize builder
mut b := builder.new() or {
eprintln('Failed to initialize builder: ${err}')
exit(1)
}
// Connect to remote node
println('Connecting to remote builder: ${remote_host}:${remote_port}')
mut node := b.node_new(
ipaddr: '${remote_host}:${remote_port}'
name: 'zosbuilder'
) or {
eprintln('Failed to connect to remote node: ${err}')
exit(1)
}
// Run the build process
build_zos(mut node) or {
eprintln('Build failed: ${err}')
exit(1)
}
println('\n=== Build completed successfully! ===')
}
fn build_zos(mut node builder.Node) ! {
println('\n--- Step 1: Installing prerequisites ---')
install_prerequisites(mut node)!
println('\n--- Step 2: Cloning zosbuilder repository ---')
clone_repository(mut node)!
println('\n--- Step 3: Creating RFS configuration ---')
create_rfs_config(mut node)!
println('\n--- Step 4: Running build ---')
run_build(mut node)!
println('\n--- Step 5: Checking build artifacts ---')
check_artifacts(mut node)!
println('\n=== Build completed successfully! ===')
}
fn install_prerequisites(mut node builder.Node) ! {
println('Detecting platform...')
// Check platform type
if node.platform == .ubuntu {
println('Installing Ubuntu/Debian prerequisites...')
// Update package list and install all required packages
node.exec_cmd(
cmd: '
apt-get update
apt-get install -y \\
build-essential \\
upx-ucl \\
binutils \\
git \\
wget \\
curl \\
qemu-system-x86 \\
podman \\
musl-tools \\
cpio \\
xz-utils \\
bc \\
flex \\
bison \\
libelf-dev \\
libssl-dev
# Install rustup and Rust toolchain
if ! command -v rustup &> /dev/null; then
echo "Installing rustup..."
curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
source "\$HOME/.cargo/env"
fi
# Add Rust musl target
source "\$HOME/.cargo/env"
rustup target add x86_64-unknown-linux-musl
'
name: 'install_ubuntu_packages'
reset: true
)!
} else if node.platform == .alpine {
println('Installing Alpine prerequisites...')
node.exec_cmd(
cmd: '
apk add --no-cache \\
build-base \\
rust \\
cargo \\
upx \\
git \\
wget \\
qemu-system-x86 \\
podman
# Add Rust musl target
rustup target add x86_64-unknown-linux-musl || echo "rustup not available"
'
name: 'install_alpine_packages'
reset: true
)!
} else {
return error('Unsupported platform: ${node.platform}. Only Ubuntu/Debian and Alpine are supported.')
}
println('Prerequisites installed successfully')
}
fn clone_repository(mut node builder.Node) ! {
// Clean up disk space first
println('Cleaning up disk space...')
node.exec_cmd(
cmd: '
# Remove old build directories if they exist
rm -rf ${build_dir} || true
# Clean up podman/docker cache to free space
podman system prune -af || true
# Clean up package manager cache
if command -v apt-get &> /dev/null; then
apt-get clean || true
fi
# Show disk space
df -h /
'
name: 'cleanup_disk_space'
stdout: true
)!
// Clone the repository
println('Cloning from ${repo_url}...')
node.exec_cmd(
cmd: '
git clone ${repo_url} ${build_dir}
cd ${build_dir}
git log -1 --oneline
'
name: 'clone_zosbuilder'
stdout: true
)!
println('Repository cloned successfully')
}
fn create_rfs_config(mut node builder.Node) ! {
println('Creating config/rfs.conf...')
rfs_config := 'S3_ENDPOINT="http://wizenoze.grid.tf:3900"
S3_REGION="garage"
S3_BUCKET="zos"
S3_PREFIX="store"
S3_ACCESS_KEY="<put key here>"
S3_SECRET_KEY="<put key here>"
WEB_ENDPOINT=""
MANIFESTS_SUBPATH="flists"
READ_ACCESS_KEY="<put key here>"
READ_SECRET_KEY="<put key here>"
ROUTE_ENDPOINT="http://wizenoze.grid.tf:3900"
ROUTE_PATH="/zos/store"
ROUTE_REGION="garage"
KEEP_S3_FALLBACK="false"
UPLOAD_MANIFESTS="true"
'
// Create config directory if it doesn't exist
node.exec_cmd(
cmd: 'mkdir -p ${build_dir}/config'
name: 'create_config_dir'
stdout: false
)!
// Write the RFS configuration file
node.file_write('${build_dir}/config/rfs.conf', rfs_config)!
// Verify the file was created
result := node.exec(
cmd: 'cat ${build_dir}/config/rfs.conf'
stdout: false
)!
println('RFS configuration created successfully')
println('Config preview:')
println(result)
// Skip youki component by removing it from sources.conf
println('\nRemoving youki from sources.conf (requires SSH keys)...')
node.exec_cmd(
cmd: '
# Remove any line containing youki from sources.conf
grep -v "youki" ${build_dir}/config/sources.conf > ${build_dir}/config/sources.conf.tmp
mv ${build_dir}/config/sources.conf.tmp ${build_dir}/config/sources.conf
# Verify it was removed
echo "Updated sources.conf:"
cat ${build_dir}/config/sources.conf
'
name: 'remove_youki'
stdout: true
)!
println('youki component skipped')
}
fn run_build(mut node builder.Node) ! {
println('Starting build process...')
println('This may take 15-30 minutes depending on your system...')
println('Status updates will be printed every 2 minutes...\n')
// Check disk space before building
println('Checking disk space...')
disk_info := node.exec(
cmd: 'df -h ${build_dir}'
stdout: false
)!
println(disk_info)
// Clean up any previous build artifacts and corrupted databases
println('Cleaning up previous build artifacts...')
node.exec_cmd(
cmd: '
cd ${build_dir}
# Remove dist directory to clean up any corrupted databases
rm -rf dist/
# Clean up any temporary files
rm -rf /tmp/rfs-* || true
# Show available disk space after cleanup
df -h ${build_dir}
'
name: 'cleanup_before_build'
stdout: true
)!
// Make scripts executable and run build with periodic status messages
mut build_cmd := '
cd ${build_dir}
# Source Rust environment
source "\$HOME/.cargo/env"
# Make scripts executable
chmod +x scripts/build.sh scripts/clean.sh
# Set environment variables
export UPLOAD_KERNEL=${upload_kernel}
export UPLOAD_MANIFESTS=false
# Create a wrapper script that prints status every 2 minutes
cat > /tmp/build_with_status.sh << "EOF"
#!/bin/bash
set -e
# Source Rust environment
source "\$HOME/.cargo/env"
# Start the build in background
./scripts/build.sh &
BUILD_PID=\$!
# Print status every 2 minutes while build is running
COUNTER=0
while kill -0 \$BUILD_PID 2>/dev/null; do
sleep 120
COUNTER=\$((COUNTER + 2))
echo ""
echo "=== Build still in progress... (\${COUNTER} minutes elapsed) ==="
echo ""
done
# Wait for build to complete and get exit code
wait \$BUILD_PID
EXIT_CODE=\$?
if [ \$EXIT_CODE -eq 0 ]; then
echo ""
echo "=== Build completed successfully after \${COUNTER} minutes ==="
else
echo ""
echo "=== Build failed after \${COUNTER} minutes with exit code \$EXIT_CODE ==="
fi
exit \$EXIT_CODE
EOF
chmod +x /tmp/build_with_status.sh
/tmp/build_with_status.sh
' // Execute build with output
result := node.exec_cmd(
cmd: build_cmd
name: 'zos_build'
stdout: true
reset: true
period: 0 // Don't cache, always rebuild
)!
println('\nBuild completed!')
println(result)
}
fn check_artifacts(mut node builder.Node) ! {
println('Checking build artifacts in ${build_dir}/dist/...')
// List the dist directory
result := node.exec(
cmd: 'ls -lh ${build_dir}/dist/'
stdout: true
)!
println('\nBuild artifacts:')
println(result)
// Check for expected files
vmlinuz_exists := node.file_exists('${build_dir}/dist/vmlinuz.efi')
initramfs_exists := node.file_exists('${build_dir}/dist/initramfs.cpio.xz')
if vmlinuz_exists && initramfs_exists {
println('\n Build artifacts created successfully:')
println(' - vmlinuz.efi (Kernel with embedded initramfs)')
println(' - initramfs.cpio.xz (Standalone initramfs archive)')
// Get file sizes
size_info := node.exec(
cmd: 'du -h ${build_dir}/dist/vmlinuz.efi ${build_dir}/dist/initramfs.cpio.xz'
stdout: false
)!
println('\nFile sizes:')
println(size_info)
} else {
return error('Build artifacts not found. Build may have failed.')
}
}
// Download artifacts to local machine
fn download_artifacts(mut node builder.Node, local_dest string) ! {
println('Downloading artifacts to local machine...')
mut dest_path := pathlib.get_dir(path: local_dest, create: true)!
println('Downloading to ${dest_path.path}...')
// Download the entire dist directory
node.download(
source: '${build_dir}/dist/'
dest: dest_path.path
)!
println('\n Artifacts downloaded successfully to ${dest_path.path}')
// List downloaded files
println('\nDownloaded files:')
result := node.exec(
cmd: 'ls -lh ${dest_path.path}'
stdout: false
) or {
println('Could not list local files')
return
}
println(result)
}


@@ -0,0 +1,224 @@
# Zero OS Builder - Remote Build System
This example demonstrates how to build [Zero OS (zosbuilder)](https://git.ourworld.tf/tfgrid/zosbuilder) on a remote machine using the herolib builder module.
## Overview
The zosbuilder creates a Zero OS Alpine Initramfs with:
- Alpine Linux 3.22 base
- Custom kernel with embedded initramfs
- ThreeFold components (zinit, rfs, mycelium, zosstorage)
- Optimized size with UPX compression
- Two-stage module loading
## Prerequisites
### Local Machine
- V compiler installed
- SSH access to a remote build machine
- herolib installed
### Remote Build Machine
The script will automatically install these on the remote machine:
- **Ubuntu/Debian**: build-essential, upx-ucl, binutils, git, wget, curl, qemu-system-x86, podman, musl-tools, cpio, xz-utils, bc, flex, bison, libelf-dev, libssl-dev, plus the Rust toolchain installed via rustup
- **Alpine Linux**: build-base, rust, cargo, upx, git, wget, qemu-system-x86, podman
- Rust musl target (x86_64-unknown-linux-musl)
## Configuration
Edit the constants in `zosbuilder.vsh`:
```v
// Remote machine connection
const remote_host = 'root@195.192.213.2' // Your remote host
const remote_port = 22 // SSH port
// Build configuration
const build_dir = '/root/zosbuilder' // Build directory on remote
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'
// Optional: Upload kernel to S3
const upload_kernel = false
```
## Usage
### Basic Build
```bash
# Make the script executable
chmod +x zosbuilder.vsh
# Run the build
./zosbuilder.vsh
```
### What the Script Does
1. **Connects to Remote Machine**: Establishes SSH connection to the build server
2. **Installs Prerequisites**: Automatically installs all required build tools
3. **Clones Repository**: Fetches the latest zosbuilder code
4. **Runs Build**: Executes the build process (takes 15-30 minutes)
5. **Verifies Artifacts**: Checks that build outputs were created successfully
### Build Output
The build creates two main artifacts in `${build_dir}/dist/`:
- `vmlinuz.efi` - Kernel with embedded initramfs (bootable)
- `initramfs.cpio.xz` - Standalone initramfs archive
## Build Process Details
The zosbuilder follows these phases:
### Phase 1: Environment Setup
- Creates build directories
- Installs build dependencies
- Sets up Rust musl target
### Phase 2: Alpine Base
- Downloads Alpine 3.22 miniroot
- Extracts to initramfs directory
- Installs packages from config/packages.list
### Phase 3: Component Building
- Builds zinit (init system)
- Builds rfs (remote filesystem)
- Builds mycelium (networking)
- Builds zosstorage (storage orchestration)
### Phase 4: System Configuration
- Replaces /sbin/init with zinit
- Copies zinit configuration
- Sets up 2-stage module loading
- Configures system services
### Phase 5: Optimization
- Removes docs, man pages, locales
- Strips executables and libraries
- UPX compresses all binaries
- Aggressive cleanup
### Phase 6: Packaging
- Creates initramfs.cpio.xz with XZ compression
- Builds kernel with embedded initramfs
- Generates vmlinuz.efi
- Optionally uploads to S3
## Advanced Usage
### Download Artifacts to Local Machine
Add this to your script after the build completes:
```v
// Download artifacts to local machine
download_artifacts(mut node, '/tmp/zos-artifacts') or {
eprintln('Failed to download artifacts: ${err}')
}
```
### Custom Build Configuration
You can modify the build by editing files on the remote machine before building:
```v
// After cloning, before building
node.file_write('${build_dir}/config/packages.list', 'your custom packages')!
```
### Rebuild Without Re-cloning
To rebuild without re-cloning the repository, modify the script to skip the clone step:
```v
// Comment out the clone_repository call
// clone_repository(mut node)!
// Or just run the build directly
node.exec_cmd(
cmd: 'cd ${build_dir} && ./scripts/build.sh'
name: 'zos_rebuild'
)!
```
## Testing the Build
After building, you can test the kernel with QEMU:
```bash
# On the remote machine
cd /root/zosbuilder
./scripts/test-qemu.sh
```
## Troubleshooting
### Build Fails
1. Check the build output for specific errors
2. Verify all prerequisites are installed
3. Ensure sufficient disk space (at least 5GB)
4. Check internet connectivity for downloading components
### SSH Connection Issues
1. Verify SSH access: `ssh root@195.192.213.2`
2. Check SSH key authentication is set up
3. Verify the remote host and port are correct (a connectivity-check sketch follows below)
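If `zosbuilder.vsh` still cannot connect, a minimal sketch along these lines (reusing the same `builder.new()` and `node_new()` calls as the script above; the host address is a placeholder) can help isolate whether the problem is SSH connectivity or the build itself:
```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.builder

// Open an SSH session to the build machine and run a trivial command.
// Replace the address below with your own remote host.
mut b := builder.new() or { panic('failed to initialize builder: ${err}') }
mut node := b.node_new(
	ipaddr: 'root@195.192.213.2:22'
	name:   'connectivity_check'
) or { panic('SSH connection failed: ${err}') }

// If this prints the remote kernel info, SSH access and the builder setup are fine.
result := node.exec(cmd: 'uname -a', stdout: false) or { panic('remote command failed: ${err}') }
println(result)
```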
### Missing Dependencies
The script automatically installs dependencies, but if manual installation is needed:
**Ubuntu/Debian:**
```bash
sudo apt-get update
sudo apt-get install -y build-essential upx-ucl binutils git wget curl qemu-system-x86 podman musl-tools cpio xz-utils bc flex bison libelf-dev libssl-dev
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
rustup target add x86_64-unknown-linux-musl
```
**Alpine Linux:**
```bash
apk add --no-cache build-base rust cargo upx git wget qemu-system-x86 podman
rustup target add x86_64-unknown-linux-musl || echo "rustup not available"
```
## Integration with CI/CD
This builder can be integrated into CI/CD pipelines:
```v
// Example: Build and upload to artifact storage
fn ci_build() ! {
mut b := builder.new()!
mut node := b.node_new(ipaddr: '${ci_builder_host}')!
build_zos(mut node)!
// Upload to artifact storage
node.exec_cmd(
cmd: 's3cmd put ${build_dir}/dist/* s3://artifacts/zos/'
name: 'upload_artifacts'
)!
}
```
## Related Examples
- `simple.vsh` - Basic builder usage
- `remote_executor/` - Remote code execution
- `simple_ip4.vsh` - IPv4 connection example
- `simple_ip6.vsh` - IPv6 connection example
## References
- [zosbuilder Repository](https://git.ourworld.tf/tfgrid/zosbuilder)
- [herolib Builder Documentation](../../lib/builder/readme.md)
- [Zero OS Documentation](https://manual.grid.tf/)
## License
This example follows the same license as herolib.


@@ -5,7 +5,7 @@ import incubaid.herolib.schemas.openrpc
import os
// 1. Create a new server instance
mut server := heroserver.new(port: 8080, auth_enabled: false)!
mut server := heroserver.new(port: 8081, auth_enabled: false)!
// 2. Create and register your OpenRPC handlers
// These handlers must conform to the `openrpc.OpenRPCHandler` interface.

File diff suppressed because it is too large


@@ -0,0 +1,69 @@
{
"openrpc": "1.2.6",
"info": {
"title": "Comment Service",
"description": "A simple service for managing comments.",
"version": "1.0.0"
},
"methods": [
{
"name": "add_comment",
"summary": "Add a new comment",
"params": [
{
"name": "text",
"description": "The content of the comment.",
"required": true,
"schema": {
"type": "string"
}
}
],
"result": {
"name": "comment_id",
"description": "The ID of the newly created comment.",
"schema": {
"type": "string"
}
}
},
{
"name": "get_comment",
"summary": "Get a comment by ID",
"description": "Retrieves a specific comment using its unique identifier.",
"params": [
{
"name": "id",
"description": "The unique identifier of the comment to retrieve.",
"required": true,
"schema": {
"type": "number",
"example": "1"
}
},
{
"name": "include_metadata",
"description": "Whether to include metadata in the response.",
"required": false,
"schema": {
"type": "boolean",
"example": true
}
}
],
"result": {
"name": "comment",
"description": "The requested comment object.",
"schema": {
"type": "object",
"example": {
"id": 1,
"text": "This is a sample comment",
"created_at": "2024-01-15T10:30:00Z"
}
}
}
}
],
"components": {}
}


@@ -0,0 +1,46 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.base.redis
println('=== Redis Installer Example ===\n')
// Create configuration
// You can customize port, datadir, and ipaddr as needed
config := redis.RedisInstall{
port: 6379 // Redis port
datadir: '/var/lib/redis' // Data directory (standard location)
ipaddr: 'localhost' // Bind address
}
// Check if Redis is already running
if redis.check(config) {
println('INFO: Redis is already running on port ${config.port}')
println(' To reinstall, stop Redis first: redis.stop()!')
} else {
// Install and start Redis
println('Installing and starting Redis...')
println(' Port: ${config.port}')
println(' Data directory: ${config.datadir}')
println(' Bind address: ${config.ipaddr}\n')
redis.redis_install(config)!
// Verify installation
if redis.check(config) {
println('\nSUCCESS: Redis installed and started successfully!')
println(' You can now connect to Redis on port ${config.port}')
println(' Test with: redis-cli ping')
} else {
println('\nERROR: Redis installation completed but failed to start')
println(' Check logs: journalctl -u redis-server -n 20')
}
}
println('\n=== Available Functions ===')
println(' redis.redis_install(config)! - Install and start Redis')
println(' redis.start(config)! - Start Redis')
println(' redis.stop()! - Stop Redis')
println(' redis.restart(config)! - Restart Redis')
println(' redis.check(config) - Check if running')
println('\nDone!')


@@ -0,0 +1,209 @@
# Horus Installation Examples
This directory contains example scripts for installing and managing all Horus components using the herolib installer framework.
## Components
The Horus ecosystem consists of the following components:
1. **Coordinator** - Central coordination service (HTTP: 8081, WS: 9653)
2. **Supervisor** - Supervision and monitoring service (HTTP: 8082, WS: 9654)
3. **Hero Runner** - Command execution runner for Hero jobs
4. **Osiris Runner** - Database-backed runner
5. **SAL Runner** - System Abstraction Layer runner
## Quick Start
### Full Installation and Start
To install and start all Horus components:
```bash
# 1. Install all components (this will take several minutes)
./horus_full_install.vsh
# 2. Start all services
./horus_start_all.vsh
# 3. Check status
./horus_status.vsh
```
### Stop All Services
```bash
./horus_stop_all.vsh
```
## Available Scripts
### `horus_full_install.vsh`
Installs all Horus components:
- Checks and installs Redis if needed
- Checks and installs Rust if needed
- Clones the horus repository
- Builds all binaries from source
**Note:** This script can take 10-30 minutes depending on your system, as it compiles Rust code.
### `horus_start_all.vsh`
Starts all Horus services in the correct order:
1. Coordinator
2. Supervisor
3. Hero Runner
4. Osiris Runner
5. SAL Runner
### `horus_stop_all.vsh`
Stops all running Horus services in reverse order.
### `horus_status.vsh`
Checks and displays the status of all Horus services.
## Prerequisites
- **Operating System**: Linux or macOS
- **Dependencies** (automatically installed):
- Redis (required for all components)
- Rust toolchain (for building from source)
- Git (for cloning repositories)
## Configuration
All components use default configurations:
### Coordinator
- Binary: `/hero/var/bin/coordinator`
- HTTP Port: `8081`
- WebSocket Port: `9653`
- Redis: `127.0.0.1:6379`
### Supervisor
- Binary: `/hero/var/bin/supervisor`
- HTTP Port: `8082`
- WebSocket Port: `9654`
- Redis: `127.0.0.1:6379`
### Runners
- Hero Runner: `/hero/var/bin/herorunner`
- Osiris Runner: `/hero/var/bin/runner_osiris`
- SAL Runner: `/hero/var/bin/runner_sal`
## Custom Configuration
To customize the configuration, you can set fields on the installer object directly in V:
```v
import incubaid.herolib.installers.horus.coordinator
mut coordinator_inst := coordinator.get(create: true)!
coordinator_inst.http_port = 9000
coordinator_inst.ws_port = 9001
coordinator_inst.log_level = 'debug'
coordinator.set(coordinator_inst)!
coordinator_inst.install()!
coordinator_inst.start()!
```
## Testing
After starting the services, you can test them:
```bash
# Test Coordinator HTTP endpoint
curl http://127.0.0.1:8081
# Test Supervisor HTTP endpoint
curl http://127.0.0.1:8082
# Check running processes
pgrep -f coordinator
pgrep -f supervisor
pgrep -f herorunner
pgrep -f runner_osiris
pgrep -f runner_sal
```
## Troubleshooting
### Redis Not Running
If you get Redis connection errors:
```bash
# Check if Redis is running
redis-cli ping
# Start Redis (Ubuntu/Debian)
sudo systemctl start redis-server
# Start Redis (macOS with Homebrew)
brew services start redis
```
### Build Failures
If the build fails:
1. Ensure you have enough disk space (at least 5GB free)
2. Check that Rust is properly installed: `rustc --version`
3. Try cleaning the build: `cd /root/code/git.ourworld.tf/herocode/horus && cargo clean`
### Port Conflicts
If ports 8081 or 8082 are already in use, you can customize the ports in the configuration.
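For example, a minimal sketch of moving the Supervisor to different ports, assuming the supervisor installer exposes the same `get`/`set` factory API as the coordinator example above (the replacement ports are arbitrary):
```v
import incubaid.herolib.installers.horus.supervisor

// Load (or create) the default supervisor configuration.
mut supervisor_inst := supervisor.get(create: true)!

// Hypothetical replacement ports, chosen to avoid the 8082/9654 defaults.
supervisor_inst.http_port = 8182
supervisor_inst.ws_port = 9754

// Persist the changed configuration, then install and start with it.
supervisor.set(supervisor_inst)!
supervisor_inst.install()!
supervisor_inst.start()!
```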
## Advanced Usage
### Individual Component Installation
You can install components individually:
```bash
# Install only coordinator
v run coordinator_only.vsh
# Install only supervisor
v run supervisor_only.vsh
```
### Using with Heroscript
You can also use heroscript files for configuration:
```heroscript
!!coordinator.configure
name:'production'
http_port:8081
ws_port:9653
log_level:'info'
!!coordinator.install
!!coordinator.start
```
## Service Management
Services are managed using the system's startup manager (zinit or systemd):
```bash
# Check service status with systemd
systemctl status coordinator
# View logs
journalctl -u coordinator -f
```
## Cleanup
To completely remove all Horus components:
```bash
# Stop all services
./horus_stop_all.vsh
# Destroy all components (removes binaries)
v run horus_destroy_all.vsh
```
## Support
For issues or questions:
- Check the main Horus repository: https://git.ourworld.tf/herocode/horus
- Review the installer code in `lib/installers/horus/`


@@ -0,0 +1,36 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
// Example usage of coordinator installer
// This will:
// 1. Check if Rust is installed (installs if not present)
// 2. Clone the horus repository
// 3. Build the coordinator binary
//
// Note: Redis must be pre-installed and running before using the coordinator
println('Building coordinator from horus repository...')
println('(This will install Rust if not already installed)\n')
// Create coordinator instance
mut coord := coordinator.new()!
// Build and install
// Note: This will skip the build if the binary already exists
coord.install()!
// To force a rebuild even if binary exists, use:
// coord.install(reset: true)!
println('\nCoordinator built and installed successfully!')
println('Binary location: ${coord.binary_path}')
// Note: To start the service, uncomment the lines below
// (requires proper zinit or screen session setup and Redis running)
// coord.start()!
// if coord.running()! {
// println('Coordinator is running!')
// }
// coord.stop()!
// coord.destroy()!


@@ -0,0 +1,60 @@
// Horus Configuration Heroscript
// This file demonstrates how to configure all Horus components using heroscript
// Configure Coordinator
!!coordinator.configure
name:'default'
binary_path:'/hero/var/bin/coordinator'
redis_addr:'127.0.0.1:6379'
http_port:8081
ws_port:9653
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Supervisor
!!supervisor.configure
name:'default'
binary_path:'/hero/var/bin/supervisor'
redis_addr:'127.0.0.1:6379'
http_port:8082
ws_port:9654
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Hero Runner
!!herorunner.configure
name:'default'
binary_path:'/hero/var/bin/herorunner'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Osiris Runner
!!osirisrunner.configure
name:'default'
binary_path:'/hero/var/bin/runner_osiris'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure SAL Runner
!!salrunner.configure
name:'default'
binary_path:'/hero/var/bin/runner_sal'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Install all components
!!coordinator.install
!!supervisor.install
!!herorunner.install
!!osirisrunner.install
!!salrunner.install
// Start all services
!!coordinator.start name:'default'
!!supervisor.start name:'default'
!!herorunner.start name:'default'
!!osirisrunner.start name:'default'
!!salrunner.start name:'default'


@@ -0,0 +1,60 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Full Horus Installation Example
// This script installs and configures all Horus components:
// - Coordinator (port 8081)
// - Supervisor (port 8082)
// - Hero Runner
// - Osiris Runner
// - SAL Runner
println('🚀 Starting Full Horus Installation')
// Step 1: Install Coordinator
println('\n📦 Step 1/5: Installing Coordinator...')
mut coordinator_installer := coordinator.get(create: true)!
coordinator_installer.install()!
println(' Coordinator installed at ${coordinator_installer.binary_path}')
// Step 2: Install Supervisor
println('\n📦 Step 2/5: Installing Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.install()!
println(' Supervisor installed at ${supervisor_inst.binary_path}')
// Step 3: Install Hero Runner
println('\n📦 Step 3/5: Installing Hero Runner...')
mut hero_runner := herorunner.get(create: true)!
hero_runner.install()!
println(' Hero Runner installed at ${hero_runner.binary_path}')
// Step 4: Install Osiris Runner
println('\n📦 Step 4/5: Installing Osiris Runner...')
mut osiris_runner := osirisrunner.get(create: true)!
osiris_runner.install()!
println(' Osiris Runner installed at ${osiris_runner.binary_path}')
// Step 5: Install SAL Runner
println('\n📦 Step 5/5: Installing SAL Runner...')
mut sal_runner := salrunner.get(create: true)!
sal_runner.install()!
println(' SAL Runner installed at ${sal_runner.binary_path}')
println('🎉 All Horus components installed successfully!')
println('\n📋 Installation Summary:')
println(' Coordinator: ${coordinator_installer.binary_path} (HTTP: ${coordinator_installer.http_port}, WS: ${coordinator_installer.ws_port})')
println(' Supervisor: ${supervisor_inst.binary_path} (HTTP: ${supervisor_inst.http_port}, WS: ${supervisor_inst.ws_port})')
println(' Hero Runner: ${hero_runner.binary_path}')
println(' Osiris Runner: ${osiris_runner.binary_path}')
println(' SAL Runner: ${sal_runner.binary_path}')
println('\n💡 Next Steps:')
println(' To start services, run: ./horus_start_all.vsh')
println(' To test individual components, see the other example scripts')


@@ -0,0 +1,85 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
import time
// Start All Horus Services
// This script starts all Horus components in the correct order
println('🚀 Starting All Horus Services')
// Step 1: Start Coordinator
println('\n Step 1/5: Starting Coordinator...')
mut coordinator_installer := coordinator.get(create: true)!
coordinator_installer.start()!
if coordinator_installer.running()! {
println(' Coordinator is running on HTTP:${coordinator_installer.http_port} WS:${coordinator_installer.ws_port}')
} else {
println(' Coordinator failed to start')
}
// Step 2: Start Supervisor
println('\n Step 2/5: Starting Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.start()!
if supervisor_inst.running()! {
println(' Supervisor is running on HTTP:${supervisor_inst.http_port} WS:${supervisor_inst.ws_port}')
} else {
println(' Supervisor failed to start')
}
// Step 3: Start Hero Runner
println('\n Step 3/5: Starting Hero Runner...')
mut hero_runner := herorunner.get(create: true)!
hero_runner.start()!
if hero_runner.running()! {
println(' Hero Runner is running')
} else {
println(' Hero Runner failed to start')
}
// Step 4: Start Osiris Runner
println('\n Step 4/5: Starting Osiris Runner...')
mut osiris_runner := osirisrunner.get(create: true)!
osiris_runner.start()!
if osiris_runner.running()! {
println(' Osiris Runner is running')
} else {
println(' Osiris Runner failed to start')
}
// Step 5: Start SAL Runner
println('\n Step 5/5: Starting SAL Runner...')
mut sal_runner := salrunner.get(create: true)!
sal_runner.start()!
if sal_runner.running()! {
println(' SAL Runner is running')
} else {
println(' SAL Runner failed to start')
}
println('🎉 All Horus services started!')
println('\n📊 Service Status:')
coordinator_status := if coordinator_installer.running()! { ' Running' } else { ' Stopped' }
println(' Coordinator: ${coordinator_status} (http://127.0.0.1:${coordinator_installer.http_port})')
supervisor_status := if supervisor_inst.running()! { ' Running' } else { ' Stopped' }
println(' Supervisor: ${supervisor_status} (http://127.0.0.1:${supervisor_inst.http_port})')
hero_runner_status := if hero_runner.running()! { ' Running' } else { ' Stopped' }
println(' Hero Runner: ${hero_runner_status}')
osiris_runner_status := if osiris_runner.running()! { ' Running' } else { ' Stopped' }
println(' Osiris Runner: ${osiris_runner_status}')
sal_runner_status := if sal_runner.running()! { ' Running' } else { ' Stopped' }
println(' SAL Runner: ${sal_runner_status}')
println('\n💡 Next Steps:')
println(' To stop services, run: ./horus_stop_all.vsh')
println(' To check status, run: ./horus_status.vsh')


@@ -0,0 +1,56 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Check Status of All Horus Services
println('📊 Horus Services Status')
println('='.repeat(60))
// Get all services
mut coordinator_inst := coordinator.get()!
mut supervisor_inst := supervisor.get()!
mut hero_runner := herorunner.get()!
mut osiris_runner := osirisrunner.get()!
mut sal_runner := salrunner.get()!
// Check status
println('\n🔍 Checking service status...\n')
coord_running := coordinator_inst.running()!
super_running := supervisor_inst.running()!
hero_running := hero_runner.running()!
osiris_running := osiris_runner.running()!
sal_running := sal_runner.running()!
println('Service Status Details')
println('-'.repeat(60))
println('Coordinator ${if coord_running { " Running" } else { " Stopped" }} http://127.0.0.1:${coordinator_inst.http_port}')
println('Supervisor ${if super_running { " Running" } else { " Stopped" }} http://127.0.0.1:${supervisor_inst.http_port}')
println('Hero Runner ${if hero_running { " Running" } else { " Stopped" }}')
println('Osiris Runner ${if osiris_running { " Running" } else { " Stopped" }}')
println('SAL Runner ${if sal_running { " Running" } else { " Stopped" }}')
println('\n' + '='.repeat(60))
// Count running services
mut running_count := 0
if coord_running { running_count++ }
if super_running { running_count++ }
if hero_running { running_count++ }
if osiris_running { running_count++ }
if sal_running { running_count++ }
println('Summary: ${running_count}/5 services running')
if running_count == 5 {
println('🎉 All services are running!')
} else if running_count == 0 {
println('💤 All services are stopped')
} else {
println(' Some services are not running')
}


@@ -0,0 +1,43 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Stop All Horus Services
// This script stops all running Horus components
println('🛑 Stopping All Horus Services')
println('='.repeat(60))
// Stop in reverse order
println('\n Stopping SAL Runner...')
mut sal_runner := salrunner.get()!
sal_runner.stop()!
println(' SAL Runner stopped')
println('\n Stopping Osiris Runner...')
mut osiris_runner := osirisrunner.get()!
osiris_runner.stop()!
println(' Osiris Runner stopped')
println('\n Stopping Hero Runner...')
mut hero_runner := herorunner.get()!
hero_runner.stop()!
println(' Hero Runner stopped')
println('\n Stopping Supervisor...')
mut supervisor_inst := supervisor.get()!
supervisor_inst.stop()!
println(' Supervisor stopped')
println('\n Stopping Coordinator...')
mut coordinator_inst := coordinator.get()!
coordinator_inst.stop()!
println(' Coordinator stopped')
println('\n' + '='.repeat(60))
println(' All Horus services stopped!')
println('='.repeat(60))


@@ -0,0 +1,52 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
// Quick Start Example - Install and Start Coordinator and Supervisor
// This is a minimal example to get started with Horus
println('🚀 Horus Quick Start')
println('='.repeat(60))
println('This will install and start Coordinator and Supervisor')
println('(Runners can be added later using the full install script)')
println('='.repeat(60))
// Install Coordinator
println('\n📦 Installing Coordinator...')
mut coordinator_inst := coordinator.get(create: true)!
coordinator_inst.install()!
println(' Coordinator installed')
// Install Supervisor
println('\n📦 Installing Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.install()!
println(' Supervisor installed')
// Start services
println('\n Starting Coordinator...')
coordinator_inst.start()!
if coordinator_inst.running()! {
println(' Coordinator is running on http://127.0.0.1:${coordinator_inst.http_port}')
}
println('\n Starting Supervisor...')
supervisor_inst.start()!
if supervisor_inst.running()! {
println(' Supervisor is running on http://127.0.0.1:${supervisor_inst.http_port}')
}
println('\n' + '='.repeat(60))
println('🎉 Quick Start Complete!')
println('='.repeat(60))
println('\n📊 Services Running:')
println(' Coordinator: http://127.0.0.1:${coordinator_inst.http_port}')
println(' Supervisor: http://127.0.0.1:${supervisor_inst.http_port}')
println('\n💡 Next Steps:')
println(' Test coordinator: curl http://127.0.0.1:${coordinator_inst.http_port}')
println(' Test supervisor: curl http://127.0.0.1:${supervisor_inst.http_port}')
println(' Install runners: ./horus_full_install.vsh')
println(' Check status: ./horus_status.vsh')
println(' Stop services: ./horus_stop_all.vsh')


@@ -0,0 +1,11 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.virt.crun_installer
mut crun := crun_installer.get()!
// To install
crun.install()!
// To remove
crun.destroy()!


@@ -17,7 +17,7 @@ pub mut:
pub fn (b BizModel) export(args ExportArgs) ! {
name := if args.name != '' { args.name } else { texttools.snake_case(args.title) }
path := pathlib.get_dir(
path: os.join_path(os.home_dir(), '/hero/var/bizmodel/exports/${name}')
path: os.join_path(os.home_dir(), 'hero/var/bizmodel/exports/${name}')
create: true
empty: true
)!


@@ -20,8 +20,7 @@ import incubaid.herolib.installers.lang.python
import os
@if args.startupmanager
fn startupcmd () ![]startupmanager.ZProcessNewArgs{
mut installer := get()!
fn (self &${args.classname}) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
// res << startupmanager.ZProcessNewArgs{
@@ -36,8 +35,7 @@ fn startupcmd () ![]startupmanager.ZProcessNewArgs{
}
fn running() !bool {
mut installer := get()!
fn (self &${args.classname}) running_check() !bool {
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
// this checks health of ${args.name}
// curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
@@ -58,19 +56,19 @@ fn running() !bool {
return false
}
fn start_pre()!{
fn (self &${args.classname}) start_pre() ! {
}
fn start_post()!{
fn (self &${args.classname}) start_post() ! {
}
fn stop_pre()!{
fn (self &${args.classname}) stop_pre() ! {
}
fn stop_post()!{
fn (self &${args.classname}) stop_post() ! {
}
@@ -80,7 +78,7 @@ fn stop_post()!{
@if args.cat == .installer
// checks if a certain version or above is installed
fn installed() !bool {
fn (self &${args.classname}) installed() !bool {
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
// res := os.execute('??{osal.profile_path_source_and()!} ${args.name} version')
// if res.exit_code != 0 {
@@ -111,7 +109,14 @@ fn upload() ! {
}
fn install() ! {
@@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
fn (mut self ${args.classname}) install(args InstallArgs) ! {
console.print_header('install ${args.name}')
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
// mut url := ''
@@ -143,7 +148,7 @@ fn install() ! {
}
@if args.build
fn build() ! {
fn (mut self ${args.classname}) build() ! {
//url := 'https://github.com/threefoldtech/${args.name}'
// make sure we install base on the node
@@ -174,7 +179,8 @@ fn build() ! {
}
@end
fn destroy() ! {
fn (mut self ${args.classname}) destroy() ! {
self.stop()!
// mut systemdfactory := systemd.new()!
// systemdfactory.destroy("zinit")!


@@ -169,29 +169,34 @@ pub fn play(mut plbook PlayBook) ! {
@if args.cat == .installer
mut other_actions := plbook.find(filter: '${args.name}.')!
for mut other_action in other_actions {
@if args.startupmanager
if other_action.name in ["destroy","install","build","start","stop","restart","start_pre","start_post","stop_pre","stop_post"]{
@else
if other_action.name in ["destroy","install","build"]{
@end
mut p := other_action.params
name := p.get_default('name', 'default')!
reset:=p.get_default_false("reset")
mut ${args.name}_obj:=get(name:name)!
console.print_debug("action object:\n??{${args.name}_obj}")
if other_action.name == "destroy" || reset{
console.print_debug("install action ${args.name}.destroy")
destroy()!
${args.name}_obj.destroy()!
}
if other_action.name == "install"{
console.print_debug("install action ${args.name}.install")
install()!
${args.name}_obj.install(reset: reset)!
}
if other_action.name == "build"{
console.print_debug("install action ${args.name}.build")
${args.name}_obj.build()!
}
@if args.startupmanager
if other_action.name in ["start","stop","restart"]{
mut p := other_action.params
name := p.get('name')!
mut ${args.name}_obj:=get(name:name)!
console.print_debug("action object:\n??{${args.name}_obj}")
if other_action.name == "start"{
console.print_debug("install action ${args.name}.??{other_action.name}")
${args.name}_obj.start()!
}
if other_action.name == "stop"{
console.print_debug("install action ${args.name}.??{other_action.name}")
${args.name}_obj.stop()!
@@ -200,8 +205,24 @@ pub fn play(mut plbook PlayBook) ! {
console.print_debug("install action ${args.name}.??{other_action.name}")
${args.name}_obj.restart()!
}
if other_action.name == "start_pre"{
console.print_debug("install action ${args.name}.??{other_action.name}")
${args.name}_obj.start_pre()!
}
if other_action.name == "start_post"{
console.print_debug("install action ${args.name}.??{other_action.name}")
${args.name}_obj.start_post()!
}
if other_action.name == "stop_pre"{
console.print_debug("install action ${args.name}.??{other_action.name}")
${args.name}_obj.stop_pre()!
}
if other_action.name == "stop_post"{
console.print_debug("install action ${args.name}.??{other_action.name}")
${args.name}_obj.stop_post()!
}
@end
}
other_action.done = true
}
@end
@@ -262,15 +283,13 @@ pub fn (mut self ${args.classname}) start() ! {
console.print_header('installer: ${args.name} start')
if ! installed()!{
install()!
if ! self.installed()!{
self.install()!
}
configure()!
self.start_pre()!
start_pre()!
for zprocess in startupcmd()!{
for zprocess in self.startupcmd()!{
mut sm:=startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: ${args.name} starting with ??{zprocess.startuptype}...')
@@ -280,7 +299,7 @@ pub fn (mut self ${args.classname}) start() ! {
sm.start(zprocess.name)!
}
start_post()!
self.start_post()!
for _ in 0 .. 50 {
if self.running()! {
@@ -300,12 +319,12 @@ pub fn (mut self ${args.classname}) install_start(args InstallArgs) ! {
pub fn (mut self ${args.classname}) stop() ! {
switch(self.name)
stop_pre()!
for zprocess in startupcmd()!{
self.stop_pre()!
for zprocess in self.startupcmd()!{
mut sm:=startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
stop_post()!
self.stop_post()!
}
pub fn (mut self ${args.classname}) restart() ! {
@@ -318,7 +337,7 @@ pub fn (mut self ${args.classname}) running() !bool {
switch(self.name)
//walk over the generic processes, if not running return
for zprocess in startupcmd()!{
for zprocess in self.startupcmd()!{
if zprocess.startuptype != .screen{
mut sm:=startupmanager_get(zprocess.startuptype)!
r:=sm.running(zprocess.name)!
@@ -327,37 +346,10 @@ pub fn (mut self ${args.classname}) running() !bool {
}
}
}
return running()!
return self.running_check()!
}
@end
@@[params]
pub struct InstallArgs{
pub mut:
reset bool
}
pub fn (mut self ${args.classname}) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
@if args.build
pub fn (mut self ${args.classname}) build() ! {
switch(self.name)
build()!
}
@end
pub fn (mut self ${args.classname}) destroy() ! {
switch(self.name)
@if args.startupmanager
self.stop() or {}
@end
destroy()!
}
@end


@@ -14,6 +14,11 @@ import incubaid.herolib.osal.tmux
import incubaid.herolib.installers.base
import incubaid.herolib.installers.lang.vlang
import incubaid.herolib.installers.lang.herolib
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
import incubaid.herolib.installers.virt.podman
import incubaid.herolib.installers.infra.gitea
import incubaid.herolib.builder
@@ -79,6 +84,13 @@ pub fn run(args_ PlayArgs) ! {
giteaclient.play(mut plbook)!
// Horus
coordinator.play(mut plbook)!
supervisor.play(mut plbook)!
herorunner.play(mut plbook)!
osirisrunner.play(mut plbook)!
salrunner.play(mut plbook)!
if args.emptycheck {
// Ensure we did not leave any actions unprocessed
plbook.empty_check()!


@@ -21,6 +21,11 @@ import incubaid.herolib.clients.zerodb_client
import incubaid.herolib.clients.zinit
import incubaid.herolib.develop.heroprompt
import incubaid.herolib.installers.db.meilisearch_installer
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
import incubaid.herolib.installers.infra.coredns
import incubaid.herolib.installers.infra.gitea
import incubaid.herolib.installers.infra.livekit
@@ -38,7 +43,6 @@ import incubaid.herolib.installers.sysadmintools.garage_s3
import incubaid.herolib.installers.threefold.griddriver
import incubaid.herolib.installers.virt.cloudhypervisor
import incubaid.herolib.installers.virt.docker
import incubaid.herolib.installers.virt.herorunner
import incubaid.herolib.installers.virt.kubernetes_installer
import incubaid.herolib.installers.virt.lima
import incubaid.herolib.installers.virt.pacman
@@ -109,4 +113,9 @@ pub fn run_all(args_ PlayArgs) ! {
zola.play(mut plbook)!
hetznermanager.play(mut plbook)!
kubernetes.play(mut plbook)!
coordinator.play(mut plbook)!
supervisor.play(mut plbook)!
herorunner.play(mut plbook)!
osirisrunner.play(mut plbook)!
salrunner.play(mut plbook)!
}


@@ -192,6 +192,11 @@ fn test_prd_list() ! {
mut db_prd := DBPrd{
db: &mydb
}
// Clear any existing PRDs before running the test
existing_prds := db_prd.list()!
for prd in existing_prds {
db_prd.delete(prd.id)!
}
// Create multiple PRDs
for i in 0 .. 3 {


@@ -0,0 +1,12 @@
!!hero_code.generate_installer
name:''
classname:'RedisInstall'
singleton:0
templates:1
default:1
title:''
supported_platforms:''
startupmanager:1
hasconfig:1
build:0


@@ -0,0 +1,119 @@
# Redis Installer
A modular Redis installer that works across multiple platforms (Ubuntu, Debian, Alpine, Arch, macOS, containers).
## Features
- Cross-platform support (systemd and non-systemd systems)
- Automatic package installation via package managers
- Configurable data directory, port, and IP address
- Smart startup (uses systemctl when available, falls back to direct start)
- No circular dependencies (works without Redis being pre-installed)
## Quick Start
### Simple Installation
```v
import incubaid.herolib.installers.base.redis
// Create configuration
config := redis.RedisInstall{
port: 6379
datadir: '/var/lib/redis'
ipaddr: 'localhost'
}
// Install and start Redis
redis.redis_install(config)!
// Check if running
if redis.check(config) {
println('Redis is running!')
}
```
### Using Individual Functions
```v
import incubaid.herolib.installers.base.redis
config := redis.RedisInstall{
port: 6379
datadir: '/var/lib/redis'
ipaddr: 'localhost'
}
// Install and start Redis (no-op if it is already running)
redis.redis_install(config)!
// Start Redis
redis.start(config)!
// Stop Redis
redis.stop()!
// Restart Redis
redis.restart(config)!
// Check if running
is_running := redis.check(config)
```
## Configuration Options
```v
pub struct RedisInstall {
pub mut:
name string = 'default' // Instance name
port int = 6379 // Redis port
datadir string = '/var/lib/redis' // Data directory
ipaddr string = 'localhost' // Bind address (space-separated for multiple)
}
```
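As an illustration, a second, named instance with non-default settings (the port and data directory below are hypothetical) is configured the same way:
```v
import incubaid.herolib.installers.base.redis

// A named instance on its own port and data directory.
config := redis.RedisInstall{
	name:    'cache'
	port:    6380
	datadir: '/var/lib/redis-cache'
	ipaddr:  '127.0.0.1'
}

// Installs the Redis package if needed, writes the config file, and starts it.
redis.redis_install(config)!
```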
## Platform Support
| Platform | Package Manager | Startup Method |
|----------|----------------|----------------|
| Ubuntu/Debian | apt (redis-server) | systemctl |
| Alpine | apk (redis) | direct start |
| Arch | pacman (redis) | systemctl |
| Fedora | dnf (redis) | systemctl |
| macOS | brew (redis) | direct start |
| Containers | varies | direct start |
## Using with Factory (Advanced)
For applications that need Redis state management:
```v
import incubaid.herolib.installers.base.redis
// Create and store in factory
mut installer := redis.new(name: 'myredis')!
// Install and start
installer.install(reset: false)!
installer.start()!
// Check status
if installer.running()! {
println('Redis is running')
}
// Stop
installer.stop()!
```
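The factory also exposes `list`, `exists`, and `delete` helpers (defined in the factory code of this module); a short sketch of inspecting and cleaning up stored configurations:
```v
import incubaid.herolib.installers.base.redis

// List all configurations, reloading them from the context database.
configs := redis.list(fromdb: true)!
for cfg in configs {
	println('redis instance ${cfg.name} -> port ${cfg.port}, datadir ${cfg.datadir}')
}

// Check whether a named configuration exists, and remove it if it does.
if redis.exists(name: 'myredis')! {
	redis.delete(name: 'myredis')!
}
```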
## Example Script
See `examples/installers/base/redis.vsh` for a complete working example.
## Notes
- Default data directory is `/var/lib/redis` (standard location)
- On systemd systems, uses the package's systemd service
- On non-systemd systems, starts Redis directly with `--daemonize yes`
- Automatically handles permissions for the Redis user
- Config file location: `/etc/redis/redis.conf` (Linux) or `${datadir}/redis.conf` (macOS)


@@ -0,0 +1,209 @@
module redis
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
import incubaid.herolib.core
import time
import os
fn startupcmd() ![]startupmanager.ZProcessNewArgs {
mut cfg := get()!
mut res := []startupmanager.ZProcessNewArgs{}
res << startupmanager.ZProcessNewArgs{
name: 'redis'
cmd: 'redis-server ${configfilepath(cfg)}'
env: {
'HOME': os.home_dir()
}
}
return res
}
fn running() !bool {
mut cfg := get()!
res := os.execute('redis-cli -c -p ${cfg.port} ping > /dev/null 2>&1')
if res.exit_code == 0 {
return true
}
return false
}
fn start_pre() ! {
// Check if already running
if running()! {
return
}
mut cfg := get()!
// Ensure data directory exists with proper permissions before configuring
osal.execute_silent('mkdir -p ${cfg.datadir}')!
if core.is_linux()! {
// On Linux, ensure redis user can access the directory
osal.execute_silent('chown -R redis:redis ${cfg.datadir}')!
osal.execute_silent('chmod 755 ${cfg.datadir}')!
}
// Configure redis before starting (applies template)
configure()!
// Kill any existing redis processes
osal.process_kill_recursive(name: 'redis-server')!
// On macOS, start redis with daemonize (not via startupmanager)
if core.platform()! == .osx {
osal.exec(cmd: 'redis-server ${configfilepath(cfg)} --daemonize yes')!
}
}
fn start_post() ! {
// Wait for redis to be ready
for _ in 0 .. 100 {
if running()! {
console.print_debug('redis started.')
return
}
time.sleep(100 * time.millisecond)
}
return error("Redis did not start properly: could not run 'redis-cli -c ping'")
}
fn stop_pre() ! {
osal.execute_silent('redis-cli shutdown') or {}
}
fn stop_post() ! {
}
//////////////////// following actions are not specific to instance of the object
// checks if redis-server is installed
fn installed() !bool {
return osal.cmd_exists_profile('redis-server')
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
// installers.upload(
// cmdname: 'redis'
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/redis'
// )!
}
// Install and start Redis with the given configuration
// This is the main entry point for installing Redis without using the factory
pub fn redis_install(args RedisInstall) ! {
// Check if already running
if check(args) {
console.print_debug('Redis already running on port ${args.port}')
return
}
console.print_header('install redis')
// Install Redis package if not already installed
if !installed()! {
if core.is_linux()! {
osal.package_install('redis-server')! // Debian/Ubuntu package name; most other Linux distros call it 'redis'
} else {
osal.package_install('redis')! // macOS (brew) and other non-Linux platforms
}
}
// Create data directory with correct permissions
osal.execute_silent('mkdir -p ${args.datadir}')!
osal.execute_silent('chown -R redis:redis ${args.datadir}') or {}
osal.execute_silent('chmod 755 ${args.datadir}') or {}
// Configure and start Redis
start(args)!
}
// Check if Redis is running
pub fn check(args RedisInstall) bool {
res := os.execute('redis-cli -c -p ${args.port} ping > /dev/null 2>&1')
if res.exit_code == 0 {
return true
}
return false
}
// Start Redis with the given configuration
// Writes config file, kills any existing processes, and starts Redis
pub fn start(args RedisInstall) ! {
if check(args) {
console.print_debug('Redis already running on port ${args.port}')
return
}
// Write Redis configuration file
configure_with_args(args)!
// Kill any existing Redis processes (including package auto-started ones)
osal.process_kill_recursive(name: 'redis-server')!
if core.platform()! == .osx {
// macOS: start directly with daemonize
osal.exec(cmd: 'redis-server ${configfilepath(args)} --daemonize yes')!
} else {
// Linux: prefer systemctl if available, otherwise start directly
if osal.cmd_exists('systemctl') {
// Ensure permissions are correct for systemd-managed Redis
osal.execute_silent('chown -R redis:redis ${args.datadir}') or {}
osal.execute_silent('chmod 755 ${args.datadir}') or {}
// Reset any failed state from previous kills
osal.execute_silent('systemctl reset-failed redis-server') or {}
osal.exec(cmd: 'systemctl start redis-server')!
} else {
// No systemctl (Alpine, containers, etc.)
// Set permissions for redis user before starting
osal.execute_silent('chown -R redis:redis ${args.datadir}') or {}
osal.execute_silent('chmod 755 ${args.datadir}') or {}
osal.exec(cmd: 'redis-server ${configfilepath(args)} --daemonize yes')!
}
}
// Wait for Redis to be ready
for _ in 0 .. 100 {
if check(args) {
console.print_debug('Redis started successfully')
return
}
time.sleep(100 * time.millisecond)
}
return error('Redis did not start properly after 10 seconds - could not ping on port ${args.port}')
}
// Stop Redis
pub fn stop() ! {
osal.execute_silent('redis-cli shutdown')!
}
// Restart Redis
pub fn restart(args RedisInstall) ! {
stop()!
time.sleep(500 * time.millisecond) // Give Redis time to shut down
start(args)!
}
// Private install function for factory-based usage
fn install() ! {
mut cfg := get()!
redis_install(cfg)!
}
fn destroy() ! {
stop()!
osal.process_kill_recursive(name: 'redis-server')!
}


@@ -0,0 +1,307 @@
module redis
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import time
__global (
redis_global map[string]&RedisInstall
redis_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
fromdb bool // will load from filesystem
create bool // default will not create if not exist
}
pub fn new(args ArgsGet) !&RedisInstall {
mut obj := RedisInstall{
name: args.name
}
set(obj)!
return get(name: args.name)!
}
pub fn get(args ArgsGet) !&RedisInstall {
mut context := base.context()!
redis_default = args.name
if args.fromdb || args.name !in redis_global {
mut r := context.redis()!
if r.hexists('context:redis', args.name)! {
data := r.hget('context:redis', args.name)!
if data.len == 0 {
print_backtrace()
return error('RedisInstall with name: ${args.name} does not exist, prob bug.')
}
mut obj := json.decode(RedisInstall, data)!
set_in_mem(obj)!
} else {
if args.create {
new(args)!
} else {
print_backtrace()
return error("RedisInstall with name '${args.name}' does not exist")
}
}
return get(name: args.name)! // no longer from db nor create
}
return redis_global[args.name] or {
print_backtrace()
return error('could not get config for redis with name:${args.name}')
}
}
// register the config for the future
pub fn set(o RedisInstall) ! {
mut o2 := set_in_mem(o)!
redis_default = o2.name
mut context := base.context()!
mut r := context.redis()!
r.hset('context:redis', o2.name, json.encode(o2))!
}
// does the config exists?
pub fn exists(args ArgsGet) !bool {
mut context := base.context()!
mut r := context.redis()!
return r.hexists('context:redis', args.name)!
}
pub fn delete(args ArgsGet) ! {
mut context := base.context()!
mut r := context.redis()!
r.hdel('context:redis', args.name)!
}
@[params]
pub struct ArgsList {
pub mut:
fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&RedisInstall {
mut res := []&RedisInstall{}
mut context := base.context()!
if args.fromdb {
// reset what is in mem
redis_global = map[string]&RedisInstall{}
redis_default = ''
}
if args.fromdb {
mut r := context.redis()!
mut l := r.hkeys('context:redis')!
for name in l {
res << get(name: name, fromdb: true)!
}
return res
} else {
// load from memory
for _, client in redis_global {
res << client
}
}
return res
}
// only sets in mem, does not set as config
fn set_in_mem(o RedisInstall) !RedisInstall {
mut o2 := obj_init(o)!
redis_global[o2.name] = &o2
redis_default = o2.name
return o2
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'redis.') {
return
}
mut install_actions := plbook.find(filter: 'redis.configure')!
if install_actions.len > 0 {
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'redis.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build'] {
mut p := other_action.params
reset := p.get_default_false('reset')
if other_action.name == 'destroy' || reset {
console.print_debug('install action redis.destroy')
destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action redis.install')
install()!
}
}
if other_action.name in ['start', 'stop', 'restart'] {
mut p := other_action.params
name := p.get('name')!
mut redis_obj := get(name: name)!
console.print_debug('action object:\n${redis_obj}')
if other_action.name == 'start' {
console.print_debug('install action redis.${other_action.name}')
redis_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action redis.${other_action.name}')
redis_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action redis.${other_action.name}')
redis_obj.restart()!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.screen {
console.print_debug("installer: redis' startupmanager get screen")
return startupmanager.get(.screen)!
}
.zinit {
console.print_debug("installer: redis' startupmanager get zinit")
return startupmanager.get(.zinit)!
}
.systemd {
console.print_debug("installer: redis' startupmanager get systemd")
return startupmanager.get(.systemd)!
}
else {
console.print_debug("installer: redis' startupmanager get auto")
return startupmanager.get(.auto)!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self RedisInstall) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self RedisInstall) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('installer: redis start')
if !installed()! {
install()!
}
configure()!
start_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: redis starting with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
	return error('redis did not start properly.')
}
pub fn (mut self RedisInstall) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self RedisInstall) stop() ! {
switch(self.name)
stop_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
stop_post()!
}
pub fn (mut self RedisInstall) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self RedisInstall) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in startupcmd()! {
if zprocess.startuptype != .screen {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
}
return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self RedisInstall) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn (mut self RedisInstall) destroy() ! {
switch(self.name)
self.stop() or {}
destroy()!
}
// switch instance to be used for redis
pub fn switch(name string) {
redis_default = name
}

View File

@@ -0,0 +1,68 @@
module redis
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import incubaid.herolib.core.pathlib
import incubaid.herolib.core
import os
pub const version = '7.0.0'
const singleton = true
const default = true
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct RedisInstall {
pub mut:
name string = 'default'
port int = 6379
datadir string = '/var/lib/redis'
ipaddr string = 'localhost' // can be more than 1, space separated
}
// your checking & initialization code if needed
fn obj_init(mycfg_ RedisInstall) !RedisInstall {
mut mycfg := mycfg_
if mycfg.name == '' {
mycfg.name = 'default'
}
if mycfg.port == 0 {
mycfg.port = 6379
}
if mycfg.datadir == '' {
mycfg.datadir = '/var/lib/redis'
}
if mycfg.ipaddr == '' {
mycfg.ipaddr = 'localhost'
}
return mycfg
}
fn configfilepath(args RedisInstall) string {
if core.is_linux() or { panic(err) } {
return '/etc/redis/redis.conf'
} else {
return '${args.datadir}/redis.conf'
}
}
// Configure with args passed directly (like old installer)
fn configure_with_args(args RedisInstall) ! {
// Use V's template macro like the old installer
c := $tmpl('templates/redis_config.conf')
pathlib.template_write(c, configfilepath(args), true)!
}
// called before start if needed (uses factory)
fn configure() ! {
mut args := get()!
configure_with_args(args)!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_loads(heroscript string) !RedisInstall {
mut obj := encoderhero.decode[RedisInstall](heroscript)!
return obj
}

View File

@@ -0,0 +1,5 @@
name: ${cfg.configpath}

File diff suppressed because it is too large

View File

@@ -414,7 +414,7 @@ proc-title-template "{title} {listen-addr} {server-mode}"
# Set the local environment which is used for string comparison operations, and
# also affect the performance of Lua scripts. Empty String indicates the locale
# is derived from the environment variables.
locale-collate ""
# locale-collate "" # Not supported in Redis 7.0.15
################################ SNAPSHOTTING ################################
@@ -1973,14 +1973,14 @@ set-max-intset-entries 512
# data structure when they have a small number of entries, and the biggest entry
# does not exceed a given threshold. These thresholds can be configured using
# the following directives.
set-max-listpack-entries 128
set-max-listpack-value 64
# set-max-listpack-entries 128 # Not supported in Redis 7.0.15
# set-max-listpack-value 64 # Not supported in Redis 7.0.15
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-listpack-entries 128
zset-max-listpack-value 64
# zset-max-listpack-entries 128 # Not supported in Redis 7.0.15
# zset-max-listpack-value 64 # Not supported in Redis 7.0.15
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses

View File

@@ -0,0 +1,12 @@
!!hero_code.generate_installer
name:''
classname:'Coordinator'
singleton:0
templates:1
default:1
title:''
supported_platforms:''
startupmanager:1
hasconfig:1
build:1

View File

@@ -0,0 +1,235 @@
module coordinator
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.pathlib
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
import incubaid.herolib.installers.lang.rust
import incubaid.herolib.develop.gittools
import os
fn (self &Coordinator) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
res << startupmanager.ZProcessNewArgs{
name: 'coordinator'
cmd: '${self.binary_path} --redis-addr ${self.redis_addr} --api-http-port ${self.http_port} --api-ws-port ${self.ws_port}'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level
'RUST_LOG_STYLE': 'never'
}
}
return res
}
fn (self &Coordinator) running_check() !bool {
// Check if the process is running by checking the HTTP port
// The coordinator returns 405 for GET requests (requires POST), so we check if we get any response
res := osal.exec(
cmd: 'curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:${self.http_port}'
stdout: false
raise_error: false
)!
// Any HTTP response code (including 405) means the server is running
return res.output.len > 0 && res.output.int() > 0
}
fn (self &Coordinator) start_pre() ! {
}
fn (self &Coordinator) start_post() ! {
}
fn (self &Coordinator) stop_pre() ! {
}
fn (self &Coordinator) stop_post() ! {
}
//////////////////// following actions are not specific to instance of the object
// checks if a certain version or above is installed
fn (self &Coordinator) installed() !bool {
// Check if the binary exists
mut binary := pathlib.get(self.binary_path)
if !binary.exists() {
return false
}
return true
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
// installers.upload(
// cmdname: 'coordinator'
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/coordinator'
// )!
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
fn (mut self Coordinator) install(args InstallArgs) ! {
console.print_header('install coordinator')
// For coordinator, we build from source instead of downloading
self.build()!
}
// Public function to build coordinator without requiring factory/redis
pub fn build_coordinator() ! {
console.print_header('build coordinator')
println('📦 Starting coordinator build process...\n')
// Use default config instead of getting from factory
println(' Initializing configuration...')
mut cfg := Coordinator{}
println(' Configuration initialized')
println(' - Binary path: ${cfg.binary_path}')
println(' - Redis address: ${cfg.redis_addr}')
println(' - HTTP port: ${cfg.http_port}')
println(' - WS port: ${cfg.ws_port}\n')
// Ensure rust is installed
println('Step 1/3: Checking Rust dependency...')
if !osal.cmd_exists('rustc') {
println('Rust not found, installing...')
mut rust_installer := rust.get()!
rust_installer.install()!
println('Rust installed successfully\n')
} else {
res := osal.exec(cmd: 'rustc --version', stdout: false, raise_error: false)!
println('Rust is already installed: ${res.output.trim_space()}\n')
}
// Clone or get the repository
println('Step 2/3: Cloning/updating horus repository...')
// Use the configured repo_path or default coderoot
mut gs := gittools.new(coderoot: '/root/code')!
mut repo := gs.get_repo(
url: 'https://git.ourworld.tf/herocode/horus.git'
pull: true
reset: false
)!
// Update the path to the actual cloned repo
cfg.repo_path = repo.path()
println(' Repository ready at: ${cfg.repo_path}\n')
// Build the coordinator binary from the horus workspace
println('Step 3/3: Building coordinator binary...')
println('WARNING: This may take several minutes (compiling Rust code)...')
println('Running: cargo build -p hero-coordinator --release\n')
cmd := 'cd ${cfg.repo_path} && . ~/.cargo/env && RUSTFLAGS="-A warnings" cargo build -p hero-coordinator --release'
osal.execute_stdout(cmd)!
println('\n Build completed successfully')
// Ensure binary directory exists and copy the binary
println('📁 Preparing binary directory: ${cfg.binary_path}')
mut binary_path_obj := pathlib.get(cfg.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
// Copy the built binary to the configured location
source_binary := '${cfg.repo_path}/target/release/coordinator'
println('📋 Copying binary from: ${source_binary}')
println('📋 Copying binary to: ${cfg.binary_path}')
mut source_file := pathlib.get_file(path: source_binary)!
source_file.copy(dest: cfg.binary_path, rsync: false)!
println('\n🎉 Coordinator built successfully!')
println('📍 Binary location: ${cfg.binary_path}')
}
fn (mut self Coordinator) build() ! {
console.print_header('build coordinator')
println('Building coordinator binary from ${self}')
// Ensure Redis is installed and running (required for coordinator)
console.print_debug('Checking if Redis is installed and running...')
redis_check := osal.exec(cmd: 'redis-cli -c -p 6379 ping', stdout: false, raise_error: false)!
if redis_check.exit_code != 0 {
console.print_header('Redis is not running, checking if installed...')
if !osal.cmd_exists_profile('redis-server') {
console.print_header('Installing Redis...')
osal.package_install('redis-server')!
}
console.print_header('Starting Redis...')
osal.exec(cmd: 'systemctl start redis-server')!
console.print_debug('Redis started successfully')
} else {
console.print_debug('Redis is already running')
}
// Ensure rust is installed
console.print_debug('Checking if Rust is installed...')
mut rust_installer := rust.get()!
res := osal.exec(cmd: 'rustc -V', stdout: false, raise_error: false)!
if res.exit_code != 0 {
console.print_header('Installing Rust first...')
rust_installer.install()!
} else {
console.print_debug('Rust is already installed: ${res.output.trim_space()}')
}
// Clone or get the repository
console.print_debug('Cloning/updating horus repository...')
mut gs := gittools.new()!
mut repo := gs.get_repo(
url: 'https://git.ourworld.tf/herocode/horus.git'
pull: true
reset: false
)!
// Update the path to the actual cloned repo
self.repo_path = repo.path()
set(self)!
console.print_debug('Repository path: ${self.repo_path}')
// Build the coordinator binary from the horus workspace
console.print_header('Building coordinator binary (this may take several minutes ${self.repo_path})...')
console.print_debug('Running: cargo build -p hero-coordinator --release')
console.print_debug('Build output:')
cmd := 'cd ${self.repo_path} && . ~/.cargo/env && RUSTFLAGS="-A warnings" cargo build -p hero-coordinator --release'
osal.execute_stdout(cmd)!
console.print_debug('Build completed successfully')
// Ensure binary directory exists and copy the binary
console.print_header('Preparing binary directory: ${self.binary_path}')
mut binary_path_obj := pathlib.get(self.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
// Copy the built binary to the configured location
source_binary := '${self.repo_path}/target/release/coordinator'
console.print_debug('Copying binary from: ${source_binary}')
console.print_debug('Copying binary to: ${self.binary_path}')
mut source_file := pathlib.get_file(path: source_binary)!
source_file.copy(dest: self.binary_path, rsync: false)!
console.print_header('coordinator built successfully at ${self.binary_path}')
}
fn (mut self Coordinator) destroy() ! {
self.stop()!
osal.process_kill_recursive(name: 'coordinator')!
// Remove the built binary
osal.rm(self.binary_path)!
}

View File

@@ -0,0 +1,325 @@
module coordinator
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import time
__global (
coordinator_global map[string]&Coordinator
coordinator_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
binary_path string
redis_addr string
http_port int
ws_port int
log_level string
repo_path string
fromdb bool // will load from filesystem
create bool // default will not create if not exist
}
pub fn new(args ArgsGet) !&Coordinator {
mut obj := Coordinator{
name: args.name
binary_path: args.binary_path
redis_addr: args.redis_addr
http_port: args.http_port
ws_port: args.ws_port
log_level: args.log_level
repo_path: args.repo_path
}
// Try to set in Redis, if it fails (Redis not available), use in-memory config
set(obj) or {
console.print_debug('Redis not available, using in-memory configuration')
set_in_mem(obj)!
}
return get(name: args.name)!
}
pub fn get(args ArgsGet) !&Coordinator {
mut context := base.context()!
coordinator_default = args.name
if args.fromdb || args.name !in coordinator_global {
mut r := context.redis()!
if r.hexists('context:coordinator', args.name)! {
data := r.hget('context:coordinator', args.name)!
if data.len == 0 {
print_backtrace()
return error('Coordinator with name: ${args.name} does not exist, prob bug.')
}
mut obj := json.decode(Coordinator, data)!
set_in_mem(obj)!
} else {
if args.create {
new(args)!
} else {
print_backtrace()
return error("Coordinator with name '${args.name}' does not exist")
}
}
return get(name: args.name)! // no longer from db nor create
}
return coordinator_global[args.name] or {
print_backtrace()
return error('could not get config for coordinator with name:${args.name}')
}
}
// register the config for the future
pub fn set(o Coordinator) ! {
mut o2 := set_in_mem(o)!
coordinator_default = o2.name
mut context := base.context()!
mut r := context.redis()!
r.hset('context:coordinator', o2.name, json.encode(o2))!
}
// does the config exist?
pub fn exists(args ArgsGet) !bool {
mut context := base.context()!
mut r := context.redis()!
return r.hexists('context:coordinator', args.name)!
}
pub fn delete(args ArgsGet) ! {
mut context := base.context()!
mut r := context.redis()!
r.hdel('context:coordinator', args.name)!
}
@[params]
pub struct ArgsList {
pub mut:
fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&Coordinator {
mut res := []&Coordinator{}
mut context := base.context()!
if args.fromdb {
// reset what is in mem
coordinator_global = map[string]&Coordinator{}
coordinator_default = ''
}
if args.fromdb {
mut r := context.redis()!
mut l := r.hkeys('context:coordinator')!
for name in l {
res << get(name: name, fromdb: true)!
}
return res
} else {
// load from memory
for _, client in coordinator_global {
res << client
}
}
return res
}
// only sets in mem, does not set as config
fn set_in_mem(o Coordinator) !Coordinator {
mut o2 := obj_init(o)!
coordinator_global[o2.name] = &o2
coordinator_default = o2.name
return o2
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'coordinator.') {
return
}
mut install_actions := plbook.find(filter: 'coordinator.configure')!
if install_actions.len > 0 {
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'coordinator.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut coordinator_obj := get(name: name, create: true)!
console.print_debug('action object:\n${coordinator_obj}')
if other_action.name == 'destroy' || reset {
console.print_debug('install action coordinator.destroy')
coordinator_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action coordinator.install')
coordinator_obj.install(reset: reset)!
}
if other_action.name == 'build' {
console.print_debug('install action coordinator.build')
coordinator_obj.build()!
}
if other_action.name == 'start' {
console.print_debug('install action coordinator.${other_action.name}')
coordinator_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action coordinator.${other_action.name}')
coordinator_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action coordinator.${other_action.name}')
coordinator_obj.restart()!
}
if other_action.name == 'start_pre' {
console.print_debug('install action coordinator.${other_action.name}')
coordinator_obj.start_pre()!
}
if other_action.name == 'start_post' {
console.print_debug('install action coordinator.${other_action.name}')
coordinator_obj.start_post()!
}
if other_action.name == 'stop_pre' {
console.print_debug('install action coordinator.${other_action.name}')
coordinator_obj.stop_pre()!
}
if other_action.name == 'stop_post' {
console.print_debug('install action coordinator.${other_action.name}')
coordinator_obj.stop_post()!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.screen {
console.print_debug("installer: coordinator' startupmanager get screen")
return startupmanager.get(.screen)!
}
.zinit {
console.print_debug("installer: coordinator' startupmanager get zinit")
return startupmanager.get(.zinit)!
}
.systemd {
console.print_debug("installer: coordinator' startupmanager get systemd")
return startupmanager.get(.systemd)!
}
else {
// default to zinit
console.print_debug("installer: coordinator' startupmanager get auto")
return startupmanager.get(.zinit)!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self Coordinator) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self Coordinator) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('installer: coordinator start')
if !self.installed()! {
self.install()!
}
self.configure()!
self.start_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: coordinator starting with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
self.start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
	return error('coordinator did not start properly.')
}
pub fn (mut self Coordinator) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self Coordinator) stop() ! {
switch(self.name)
self.stop_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
self.stop_post()!
}
pub fn (mut self Coordinator) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self Coordinator) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in self.startupcmd()! {
if zprocess.startuptype != .screen {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
}
return self.running_check()!
}
// switch instance to be used for coordinator
pub fn switch(name string) {
coordinator_default = name
}

View File

@@ -0,0 +1,69 @@
module coordinator
import os
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import incubaid.herolib.core.pathlib
const version = '0.1.0'
const singleton = true
const default = true
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct Coordinator {
pub mut:
name string = 'default'
binary_path string = os.join_path(os.home_dir(), 'hero/bin/coordinator')
redis_addr string = '127.0.0.1:6379'
http_port int = 8081
ws_port int = 9653
log_level string = 'info'
repo_path string = '/root/code/git.ourworld.tf/herocode/horus'
}
// your checking & initialization code if needed
fn obj_init(mycfg_ Coordinator) !Coordinator {
mut mycfg := mycfg_
if mycfg.name == '' {
mycfg.name = 'default'
}
if mycfg.binary_path == '' {
mycfg.binary_path = os.join_path(os.home_dir(), 'hero/bin/coordinator')
}
if mycfg.redis_addr == '' {
mycfg.redis_addr = '127.0.0.1:6379'
}
if mycfg.http_port == 0 {
mycfg.http_port = 8081
}
if mycfg.ws_port == 0 {
mycfg.ws_port = 9653
}
if mycfg.log_level == '' {
mycfg.log_level = 'info'
}
if mycfg.repo_path == '' {
mycfg.repo_path = '/root/code/git.ourworld.tf/herocode/horus'
}
return mycfg
}
// called before start if needed
fn (self &Coordinator) configure() ! {
// Ensure the binary directory exists
mut binary_path_obj := pathlib.get(self.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_dumps(obj Coordinator) !string {
return encoderhero.encode[Coordinator](obj)!
}
pub fn heroscript_loads(heroscript string) !Coordinator {
mut obj := encoderhero.decode[Coordinator](heroscript)!
return obj
}

View File

@@ -0,0 +1,166 @@
# Coordinator Installer
A V language installer module for building and managing the Coordinator service. This installer handles the complete lifecycle of the Coordinator binary from the Horus workspace.
## Features
- **Automatic Rust Installation**: Installs Rust toolchain if not present
- **Git Repository Management**: Clones and manages the horus repository
- **Binary Building**: Compiles the coordinator binary from the horus workspace
- **Service Management**: Start/stop/restart via zinit
- **Configuration**: Customizable Redis, HTTP, and WebSocket ports
## Quick Start
### Using the Example Script
```bash
cd /root/code/github/incubaid/herolib/examples/installers/horus
./coordinator.vsh
```
### Manual Usage
```v
import incubaid.herolib.installers.horus.coordinator as coordinator_installer
mut coordinator := coordinator_installer.get()!
coordinator.install()!
coordinator.start()!
```
## Configuration
```bash
!!coordinator.configure
name:'default'
binary_path:'/hero/var/bin/coordinator'
redis_addr:'127.0.0.1:6379'
http_port:8081
ws_port:9653
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
```
### Configuration Fields
- **name**: Instance name (default: 'default')
- **binary_path**: Path where the coordinator binary will be installed (default: '~/hero/bin/coordinator')
- **redis_addr**: Redis server address (default: '127.0.0.1:6379')
- **http_port**: HTTP API port (default: 8081)
- **ws_port**: WebSocket API port (default: 9653)
- **log_level**: Rust log level - trace, debug, info, warn, error (default: 'info')
- **repo_path**: Path to clone the horus repository (default: '/root/code/git.ourworld.tf/herocode/horus')
## Commands
### Install
Builds the coordinator binary from the horus workspace. This will:
1. Check if Rust is installed (installs if not present)
2. Clone the horus repository from git.ourworld.tf
3. Build the coordinator binary with `cargo build -p hero-coordinator --release`
**Note**: The installer skips the build if the binary already exists at the configured path.
```bash
hero coordinator.install
```
### Force Reinstall
To force a rebuild even if the binary already exists, use the `reset` flag:
```v
import incubaid.herolib.installers.horus.coordinator as coordinator_installer
mut coordinator := coordinator_installer.get()!
coordinator.install(reset: true)! // Force reinstall
```
Or manually delete the binary before running install:
```bash
rm /hero/var/bin/coordinator
hero coordinator.install
```
### Start
Starts the coordinator service using zinit:
```bash
hero coordinator.start
```
### Stop
Stops the running service:
```bash
hero coordinator.stop
```
### Restart
Restarts the service:
```bash
hero coordinator.restart
```
### Destroy
Stops the service and removes all files:
```bash
hero coordinator.destroy
```
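These per-action commands can also be combined in a single HeroScript playbook, which `hero` dispatches through the installer's `play()` handler. A minimal sketch (parameter names follow the configure example above; adjust values for your setup):
```bash
!!coordinator.configure
name:'default'
redis_addr:'127.0.0.1:6379'
http_port:8081
ws_port:9653
log_level:'info'
!!coordinator.install
name:'default'
!!coordinator.start
name:'default'
```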
## Requirements
- **Dependencies**:
- Rust toolchain (automatically installed if not present)
- Git (for cloning repository)
- Redis (must be pre-installed and running)
- Mycelium (must be installed and running separately)
## Architecture
The installer follows the standard herolib installer pattern:
- **coordinator_model.v**: Configuration structure and initialization
- **coordinator_actions.v**: Build, install, start, stop, destroy logic
- **coordinator_factory_.v**: Factory pattern for instance management
## Notes
- The installer builds from source rather than downloading pre-built binaries
- **Redis must be pre-installed and running** - the installer does not install Redis
- The installer checks if the binary already exists and skips rebuild unless `reset: true` is used
- Rust is automatically installed if not present (checks for `rustc` command)
- The binary is built with `RUSTFLAGS="-A warnings"` to suppress warnings
- Service management uses zinit by default
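To verify the service after a start, you can probe the HTTP port the same way the installer's `running_check()` does: the coordinator answers plain GET requests with a 405, so any HTTP status code means it is listening. A quick sketch (assumes the default `http_port` of 8081):
```bash
# any status code (including 405) means the coordinator is up
curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1:8081
```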
## Example Workflow
```v
import incubaid.herolib.installers.horus.coordinator as hc
// Get installer instance
mut coordinator := hc.get()!
// Customize configuration
coordinator.redis_addr = '127.0.0.1:6379'
coordinator.http_port = 8081
coordinator.log_level = 'debug'
hc.set(coordinator)!
// Build and start
coordinator.install()!
coordinator.start()!
// Check status
if coordinator.running()! {
println('Coordinator is running on port ${coordinator.http_port}')
}
// Later: cleanup
coordinator.destroy()!
```

View File

@@ -0,0 +1,12 @@
!!hero_code.generate_installer
name:''
classname:'Herorunner'
singleton:0
templates:1
default:1
title:''
supported_platforms:''
startupmanager:1
hasconfig:1
build:1

View File

@@ -0,0 +1,146 @@
module herorunner
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.pathlib
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
import incubaid.herolib.installers.lang.rust
import incubaid.herolib.develop.gittools
import os
fn (self &Herorunner) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'herorunner'
cmd: '${self.binary_path} --redis-url ${redis_url} 12001'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level
'RUST_LOG_STYLE': 'never'
}
}
return res
}
fn (self &Herorunner) running_check() !bool {
// Check if the process is running
res := osal.exec(cmd: 'pgrep -f herorunner', stdout: false, raise_error: false)!
return res.exit_code == 0
}
fn (self &Herorunner) start_pre() ! {
}
fn (self &Herorunner) start_post() ! {
}
fn (self &Herorunner) stop_pre() ! {
}
fn (self &Herorunner) stop_post() ! {
}
//////////////////// following actions are not specific to instance of the object
// checks if a certain version or above is installed
fn (self &Herorunner) installed() !bool {
// Check if the binary exists
mut binary := pathlib.get(self.binary_path)
if !binary.exists() {
return false
}
return true
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
fn (mut self Herorunner) install(args InstallArgs) ! {
console.print_header('install herorunner')
// For herorunner, we build from source instead of downloading
self.build()!
}
fn (mut self Herorunner) build() ! {
console.print_header('build herorunner')
// Ensure rust is installed
console.print_debug('Checking if Rust is installed...')
mut rust_installer := rust.get()!
res := osal.exec(cmd: 'rustc -V', stdout: false, raise_error: false)!
if res.exit_code != 0 {
console.print_header('Installing Rust first...')
rust_installer.install()!
} else {
console.print_debug('Rust is already installed: ${res.output.trim_space()}')
}
// Clone or get the repository
console.print_debug('Cloning/updating horus repository...')
mut gs := gittools.new()!
mut repo := gs.get_repo(
url: 'https://git.ourworld.tf/herocode/horus.git'
pull: true
reset: false
)!
repo_path := repo.path()
console.print_debug('Repository path: ${repo_path}')
// Build the herorunner binary from the horus workspace
console.print_header('Building herorunner binary (this may take several minutes)...')
console.print_debug('Running: cargo build -p runner-hero --release')
console.print_debug('Build output:')
cmd := 'cd ${repo_path} && . ~/.cargo/env && RUSTFLAGS="-A warnings" cargo build -p runner-hero --release'
osal.execute_stdout(cmd)!
console.print_debug('Build completed successfully')
// Ensure binary directory exists and copy the binary
console.print_debug('Preparing binary directory: ${self.binary_path}')
mut binary_path_obj := pathlib.get(self.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
// Copy the built binary to the configured location
source_binary := '${repo_path}/target/release/herorunner'
console.print_debug('Copying binary from: ${source_binary}')
console.print_debug('Copying binary to: ${self.binary_path}')
mut source_file := pathlib.get_file(path: source_binary)!
source_file.copy(dest: self.binary_path, rsync: false)!
console.print_header('herorunner built successfully at ${self.binary_path}')
}
fn (mut self Herorunner) destroy() ! {
self.stop()!
osal.process_kill_recursive(name: 'herorunner')!
// Remove the built binary
osal.rm(self.binary_path)!
}

View File

@@ -0,0 +1,310 @@
module herorunner
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import time
__global (
herorunner_global map[string]&Herorunner
herorunner_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
binary_path string
redis_addr string
log_level string
fromdb bool // will load from filesystem
create bool // default will not create if not exist
}
pub fn new(args ArgsGet) !&Herorunner {
mut obj := Herorunner{
name: args.name
binary_path: args.binary_path
redis_addr: args.redis_addr
log_level: args.log_level
}
set(obj)!
return get(name: args.name)!
}
pub fn get(args ArgsGet) !&Herorunner {
mut context := base.context()!
herorunner_default = args.name
if args.fromdb || args.name !in herorunner_global {
mut r := context.redis()!
if r.hexists('context:herorunner', args.name)! {
data := r.hget('context:herorunner', args.name)!
if data.len == 0 {
print_backtrace()
return error('Herorunner with name: ${args.name} does not exist, prob bug.')
}
mut obj := json.decode(Herorunner, data)!
set_in_mem(obj)!
} else {
if args.create {
new(args)!
} else {
print_backtrace()
return error("Herorunner with name '${args.name}' does not exist")
}
}
return get(name: args.name)! // no longer from db nor create
}
return herorunner_global[args.name] or {
print_backtrace()
return error('could not get config for herorunner with name:${args.name}')
}
}
// register the config for the future
pub fn set(o Herorunner) ! {
mut o2 := set_in_mem(o)!
herorunner_default = o2.name
mut context := base.context()!
mut r := context.redis()!
r.hset('context:herorunner', o2.name, json.encode(o2))!
}
// does the config exist?
pub fn exists(args ArgsGet) !bool {
mut context := base.context()!
mut r := context.redis()!
return r.hexists('context:herorunner', args.name)!
}
pub fn delete(args ArgsGet) ! {
mut context := base.context()!
mut r := context.redis()!
r.hdel('context:herorunner', args.name)!
}
@[params]
pub struct ArgsList {
pub mut:
fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&Herorunner {
mut res := []&Herorunner{}
mut context := base.context()!
if args.fromdb {
// reset what is in mem
herorunner_global = map[string]&Herorunner{}
herorunner_default = ''
}
if args.fromdb {
mut r := context.redis()!
mut l := r.hkeys('context:herorunner')!
for name in l {
res << get(name: name, fromdb: true)!
}
return res
} else {
// load from memory
for _, client in herorunner_global {
res << client
}
}
return res
}
// only sets in mem, does not set as config
fn set_in_mem(o Herorunner) !Herorunner {
mut o2 := obj_init(o)!
herorunner_global[o2.name] = &o2
herorunner_default = o2.name
return o2
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'herorunner.') {
return
}
mut install_actions := plbook.find(filter: 'herorunner.configure')!
if install_actions.len > 0 {
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'herorunner.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut herorunner_obj := get(name: name, create: true)!
console.print_debug('action object:\n${herorunner_obj}')
if other_action.name == 'destroy' || reset {
console.print_debug('install action herorunner.destroy')
herorunner_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action herorunner.install')
herorunner_obj.install(reset: reset)!
}
if other_action.name == 'build' {
console.print_debug('install action herorunner.build')
herorunner_obj.build()!
}
if other_action.name == 'start' {
console.print_debug('install action herorunner.${other_action.name}')
herorunner_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action herorunner.${other_action.name}')
herorunner_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action herorunner.${other_action.name}')
herorunner_obj.restart()!
}
if other_action.name == 'start_pre' {
console.print_debug('install action herorunner.${other_action.name}')
herorunner_obj.start_pre()!
}
if other_action.name == 'start_post' {
console.print_debug('install action herorunner.${other_action.name}')
herorunner_obj.start_post()!
}
if other_action.name == 'stop_pre' {
console.print_debug('install action herorunner.${other_action.name}')
herorunner_obj.stop_pre()!
}
if other_action.name == 'stop_post' {
console.print_debug('install action herorunner.${other_action.name}')
herorunner_obj.stop_post()!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.screen {
console.print_debug("installer: herorunner' startupmanager get screen")
return startupmanager.get(.screen)!
}
.zinit {
console.print_debug("installer: herorunner' startupmanager get zinit")
return startupmanager.get(.zinit)!
}
.systemd {
console.print_debug("installer: herorunner' startupmanager get systemd")
return startupmanager.get(.systemd)!
}
else {
console.print_debug("installer: herorunner' startupmanager get auto")
return startupmanager.get(.auto)!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self Herorunner) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self Herorunner) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('installer: herorunner start')
if !self.installed()! {
self.install()!
}
self.start_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: herorunner starting with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
self.start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
	return error('herorunner did not start properly.')
}
pub fn (mut self Herorunner) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self Herorunner) stop() ! {
switch(self.name)
self.stop_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
self.stop_post()!
}
pub fn (mut self Herorunner) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self Herorunner) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in self.startupcmd()! {
if zprocess.startuptype != .screen {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
}
return self.running_check()!
}
// switch instance to be used for herorunner
pub fn switch(name string) {
herorunner_default = name
}

View File

@@ -0,0 +1,58 @@
module herorunner
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import incubaid.herolib.core.pathlib
import os
const version = '0.1.0'
const singleton = true
const default = true
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct Herorunner {
pub mut:
name string = 'default'
binary_path string = os.join_path(os.home_dir(), 'hero/bin/herorunner')
redis_addr string = '127.0.0.1:6379'
log_level string = 'info'
}
// your checking & initialization code if needed
fn obj_init(mycfg_ Herorunner) !Herorunner {
mut mycfg := mycfg_
if mycfg.name == '' {
mycfg.name = 'default'
}
if mycfg.binary_path == '' {
mycfg.binary_path = os.join_path(os.home_dir(), 'hero/bin/herorunner')
}
if mycfg.redis_addr == '' {
mycfg.redis_addr = '127.0.0.1:6379'
}
if mycfg.log_level == '' {
mycfg.log_level = 'info'
}
return mycfg
}
// called before start if needed
fn configure() ! {
mut server := get()!
// Ensure the binary directory exists
mut binary_path_obj := pathlib.get(server.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_dumps(obj Herorunner) !string {
return encoderhero.encode[Herorunner](obj)!
}
pub fn heroscript_loads(heroscript string) !Herorunner {
mut obj := encoderhero.decode[Herorunner](heroscript)!
return obj
}

View File

@@ -0,0 +1,104 @@
# Herorunner Installer
A V language installer module for building and managing the Hero Runner service. This installer handles the complete lifecycle of the Herorunner binary from the Horus workspace.
## Features
- **Automatic Rust Installation**: Installs Rust toolchain if not present
- **Git Repository Management**: Clones and manages the horus repository
- **Binary Building**: Compiles the herorunner binary from the horus workspace
- **Service Management**: Start/stop/restart via zinit
- **Configuration**: Customizable Redis connection
## Quick Start
### Manual Usage
```v
import incubaid.herolib.installers.horus.herorunner as herorunner_installer
mut herorunner := herorunner_installer.get()!
herorunner.install()!
herorunner.start()!
```
## Configuration
```bash
!!herorunner.configure
name:'default'
binary_path:'/hero/var/bin/herorunner'
redis_addr:'127.0.0.1:6379'
log_level:'info'
```
### Configuration Fields
- **name**: Instance name (default: 'default')
- **binary_path**: Path where the herorunner binary will be installed (default: '~/hero/bin/herorunner')
- **redis_addr**: Redis server address (default: '127.0.0.1:6379')
- **log_level**: Rust log level - trace, debug, info, warn, error (default: 'info')
## Commands
### Install
Builds the herorunner binary from the horus workspace. This will:
1. Install Rust if not present
2. Clone the horus repository from git.ourworld.tf
3. Build the herorunner binary with `cargo build -p runner-hero --release`
```bash
hero herorunner.install
```
### Start
Starts the herorunner service using zinit:
```bash
hero herorunner.start
```
### Stop
Stops the running service:
```bash
hero herorunner.stop
```
### Restart
Restarts the service:
```bash
hero herorunner.restart
```
### Destroy
Stops the service and removes all files:
```bash
hero herorunner.destroy
```
## Requirements
- **Dependencies**:
- Rust toolchain (automatically installed)
- Git (for cloning repository)
- Redis (must be running separately)
## Architecture
The installer follows the standard herolib installer pattern:
- **herorunner_model.v**: Configuration structure and initialization
- **herorunner_actions.v**: Build, install, start, stop, destroy logic
- **herorunner_factory_.v**: Factory pattern for instance management
## Notes
- The installer builds from source rather than downloading pre-built binaries
- Redis must be running and accessible at the configured address
- The binary is built with `RUSTFLAGS="-A warnings"` to suppress warnings
- Service management uses zinit by default
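## Example Workflow
A minimal usage sketch (field names follow `herorunner_model.v`; `start()` builds the binary first if it is not installed yet):
```v
import incubaid.herolib.installers.horus.herorunner as hr
// get (or create) the default instance
mut runner := hr.get(create: true)!
// adjust configuration before registering it
runner.redis_addr = '127.0.0.1:6379'
runner.log_level = 'debug'
hr.set(runner)!
// builds the binary if needed, then starts it via the startup manager
runner.start()!
if runner.running()! {
	println('herorunner is running')
}
```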

View File

@@ -0,0 +1,5 @@
name: ${cfg.configpath}

View File

@@ -0,0 +1,12 @@
!!hero_code.generate_installer
name:''
classname:'Osirisrunner'
singleton:0
templates:1
default:1
title:''
supported_platforms:''
startupmanager:1
hasconfig:1
build:1

View File

@@ -0,0 +1,148 @@
module osirisrunner
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.pathlib
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
import incubaid.herolib.installers.lang.rust
import incubaid.herolib.develop.gittools
import os
fn (self &Osirisrunner) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'runner_osiris'
cmd: '${self.binary_path} --redis-url ${redis_url} 12002'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level
'RUST_LOG_STYLE': 'never'
}
}
return res
}
fn (self &Osirisrunner) running_check() !bool {
// Check if the process is running
res := osal.exec(cmd: 'pgrep -f runner_osiris', stdout: false, raise_error: false)!
return res.exit_code == 0
}
fn (self &Osirisrunner) start_pre() ! {
}
fn (self &Osirisrunner) start_post() ! {
}
fn (self &Osirisrunner) stop_pre() ! {
}
fn (self &Osirisrunner) stop_post() ! {
}
//////////////////// following actions are not specific to instance of the object
// checks if a certain version or above is installed
fn (self &Osirisrunner) installed() !bool {
// Check if the binary exists
mut binary := pathlib.get(self.binary_path)
if !binary.exists() {
return false
}
return true
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
fn (mut self Osirisrunner) install(args InstallArgs) ! {
console.print_header('install osirisrunner')
// For osirisrunner, we build from source instead of downloading
self.build()!
}
fn (mut self Osirisrunner) build() ! {
console.print_header('build osirisrunner')
// Ensure rust is installed
console.print_debug('Checking if Rust is installed...')
mut rust_installer := rust.get()!
res := osal.exec(cmd: 'rustc -V', stdout: false, raise_error: false)!
if res.exit_code != 0 {
console.print_header('Installing Rust first...')
rust_installer.install()!
} else {
console.print_debug('Rust is already installed: ${res.output.trim_space()}')
}
// Clone or get the repository
console.print_debug('Cloning/updating horus repository...')
mut gs := gittools.new()!
mut repo := gs.get_repo(
url: 'https://git.ourworld.tf/herocode/horus.git'
pull: true
reset: false
)!
// Update the path to the actual cloned repo
self.repo_path = repo.path()
set(self)!
console.print_debug('Repository path: ${self.repo_path}')
// Build the osirisrunner binary from the horus workspace
console.print_header('Building osirisrunner binary (this may take several minutes)...')
console.print_debug('Running: cargo build -p runner-osiris --release')
console.print_debug('Build output:')
cmd := 'cd ${self.repo_path} && . ~/.cargo/env && RUSTFLAGS="-A warnings" cargo build -p runner-osiris --release'
osal.execute_stdout(cmd)!
console.print_debug('Build completed successfully')
// Ensure binary directory exists and copy the binary
console.print_debug('Preparing binary directory: ${self.binary_path}')
mut binary_path_obj := pathlib.get(self.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
// Copy the built binary to the configured location
source_binary := '${self.repo_path}/target/release/runner_osiris'
console.print_debug('Copying binary from: ${source_binary}')
console.print_debug('Copying binary to: ${self.binary_path}')
mut source_file := pathlib.get_file(path: source_binary)!
source_file.copy(dest: self.binary_path, rsync: false)!
console.print_header('osirisrunner built successfully at ${self.binary_path}')
}
fn (mut self Osirisrunner) destroy() ! {
self.stop()!
osal.process_kill_recursive(name: 'runner_osiris')!
// Remove the built binary
osal.rm(self.binary_path)!
}

View File

@@ -0,0 +1,312 @@
module osirisrunner
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import time
__global (
osirisrunner_global map[string]&Osirisrunner
osirisrunner_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
binary_path string
redis_addr string
log_level string
repo_path string
fromdb bool // will load from filesystem
create bool // default will not create if not exist
}
pub fn new(args ArgsGet) !&Osirisrunner {
mut obj := Osirisrunner{
name: args.name
binary_path: args.binary_path
redis_addr: args.redis_addr
log_level: args.log_level
repo_path: args.repo_path
}
set(obj)!
return get(name: args.name)!
}
pub fn get(args ArgsGet) !&Osirisrunner {
mut context := base.context()!
osirisrunner_default = args.name
if args.fromdb || args.name !in osirisrunner_global {
mut r := context.redis()!
if r.hexists('context:osirisrunner', args.name)! {
data := r.hget('context:osirisrunner', args.name)!
if data.len == 0 {
print_backtrace()
return error('Osirisrunner with name: ${args.name} does not exist, prob bug.')
}
mut obj := json.decode(Osirisrunner, data)!
set_in_mem(obj)!
} else {
if args.create {
new(args)!
} else {
print_backtrace()
return error("Osirisrunner with name '${args.name}' does not exist")
}
}
return get(name: args.name)! // no longer from db nor create
}
return osirisrunner_global[args.name] or {
print_backtrace()
return error('could not get config for osirisrunner with name:${args.name}')
}
}
// register the config for the future
pub fn set(o Osirisrunner) ! {
mut o2 := set_in_mem(o)!
osirisrunner_default = o2.name
mut context := base.context()!
mut r := context.redis()!
r.hset('context:osirisrunner', o2.name, json.encode(o2))!
}
// does the config exist?
pub fn exists(args ArgsGet) !bool {
mut context := base.context()!
mut r := context.redis()!
return r.hexists('context:osirisrunner', args.name)!
}
pub fn delete(args ArgsGet) ! {
mut context := base.context()!
mut r := context.redis()!
r.hdel('context:osirisrunner', args.name)!
}
@[params]
pub struct ArgsList {
pub mut:
fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&Osirisrunner {
mut res := []&Osirisrunner{}
mut context := base.context()!
if args.fromdb {
// reset what is in mem
osirisrunner_global = map[string]&Osirisrunner{}
osirisrunner_default = ''
}
if args.fromdb {
mut r := context.redis()!
mut l := r.hkeys('context:osirisrunner')!
for name in l {
res << get(name: name, fromdb: true)!
}
return res
} else {
// load from memory
for _, client in osirisrunner_global {
res << client
}
}
return res
}
// only sets in mem, does not set as config
fn set_in_mem(o Osirisrunner) !Osirisrunner {
mut o2 := obj_init(o)!
osirisrunner_global[o2.name] = &o2
osirisrunner_default = o2.name
return o2
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'osirisrunner.') {
return
}
mut install_actions := plbook.find(filter: 'osirisrunner.configure')!
if install_actions.len > 0 {
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'osirisrunner.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut osirisrunner_obj := get(name: name, create: true)!
console.print_debug('action object:\n${osirisrunner_obj}')
if other_action.name == 'destroy' || reset {
console.print_debug('install action osirisrunner.destroy')
osirisrunner_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action osirisrunner.install')
osirisrunner_obj.install(reset: reset)!
}
if other_action.name == 'build' {
console.print_debug('install action osirisrunner.build')
osirisrunner_obj.build()!
}
if other_action.name == 'start' {
console.print_debug('install action osirisrunner.${other_action.name}')
osirisrunner_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action osirisrunner.${other_action.name}')
osirisrunner_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action osirisrunner.${other_action.name}')
osirisrunner_obj.restart()!
}
if other_action.name == 'start_pre' {
console.print_debug('install action osirisrunner.${other_action.name}')
osirisrunner_obj.start_pre()!
}
if other_action.name == 'start_post' {
console.print_debug('install action osirisrunner.${other_action.name}')
osirisrunner_obj.start_post()!
}
if other_action.name == 'stop_pre' {
console.print_debug('install action osirisrunner.${other_action.name}')
osirisrunner_obj.stop_pre()!
}
if other_action.name == 'stop_post' {
console.print_debug('install action osirisrunner.${other_action.name}')
osirisrunner_obj.stop_post()!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.screen {
console.print_debug("installer: osirisrunner' startupmanager get screen")
return startupmanager.get(.screen)!
}
.zinit {
console.print_debug("installer: osirisrunner' startupmanager get zinit")
return startupmanager.get(.zinit)!
}
.systemd {
console.print_debug("installer: osirisrunner' startupmanager get systemd")
return startupmanager.get(.systemd)!
}
else {
console.print_debug("installer: osirisrunner' startupmanager get auto")
return startupmanager.get(.auto)!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self Osirisrunner) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self Osirisrunner) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('installer: osirisrunner start')
if !self.installed()! {
self.install()!
}
self.start_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: osirisrunner starting with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
self.start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
	return error('osirisrunner did not start properly.')
}
pub fn (mut self Osirisrunner) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self Osirisrunner) stop() ! {
switch(self.name)
self.stop_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
self.stop_post()!
}
pub fn (mut self Osirisrunner) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self Osirisrunner) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in self.startupcmd()! {
if zprocess.startuptype != .screen {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
}
return self.running_check()!
}
// switch instance to be used for osirisrunner
pub fn switch(name string) {
osirisrunner_default = name
}

View File

@@ -0,0 +1,62 @@
module osirisrunner
import os
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import incubaid.herolib.core.pathlib
const version = '0.1.0'
const singleton = true
const default = true
// THIS IS THE SOURCE OF INFORMATION FOR THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct Osirisrunner {
pub mut:
name string = 'default'
binary_path string = os.join_path(os.home_dir(), 'hero/bin/runner_osiris')
redis_addr string = '127.0.0.1:6379'
log_level string = 'info'
repo_path string = '/root/code/git.ourworld.tf/herocode/horus'
}
// your checking & initialization code if needed
fn obj_init(mycfg_ Osirisrunner) !Osirisrunner {
mut mycfg := mycfg_
if mycfg.name == '' {
mycfg.name = 'default'
}
if mycfg.binary_path == '' {
mycfg.binary_path = os.join_path(os.home_dir(), 'hero/bin/runner_osiris')
}
if mycfg.redis_addr == '' {
mycfg.redis_addr = '127.0.0.1:6379'
}
if mycfg.log_level == '' {
mycfg.log_level = 'info'
}
if mycfg.repo_path == '' {
mycfg.repo_path = '/root/code/git.ourworld.tf/herocode/horus'
}
return mycfg
}
// called before start if needed
fn configure() ! {
mut server := get()!
// Ensure the binary directory exists
mut binary_path_obj := pathlib.get(server.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_dumps(obj Osirisrunner) !string {
return encoderhero.encode[Osirisrunner](obj)!
}
pub fn heroscript_loads(heroscript string) !Osirisrunner {
mut obj := encoderhero.decode[Osirisrunner](heroscript)!
return obj
}

View File

@@ -0,0 +1,104 @@
# Osirisrunner Installer
A V language installer module for building and managing the Hero Runner service. This installer handles the complete lifecycle of the Osirisrunner binary from the Horus workspace.
## Features
- **Automatic Rust Installation**: Installs Rust toolchain if not present
- **Git Repository Management**: Clones and manages the horus repository
- **Binary Building**: Compiles the osirisrunner binary from the horus workspace
- **Service Management**: Start/stop/restart via zinit
- **Configuration**: Customizable Redis connection
## Quick Start
### Manual Usage
```v
import incubaid.herolib.installers.horus.osirisrunner as osirisrunner_installer
mut osirisrunner := osirisrunner_installer.get()!
osirisrunner.install()!
osirisrunner.start()!
```
## Configuration
```bash
!!osirisrunner.configure
name:'default'
binary_path:'/hero/var/bin/osirisrunner'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
```
### Configuration Fields
- **name**: Instance name (default: 'default')
- **binary_path**: Path where the osirisrunner binary will be installed (default: '/hero/var/bin/osirisrunner')
- **redis_addr**: Redis server address (default: '127.0.0.1:6379')
- **log_level**: Rust log level - trace, debug, info, warn, error (default: 'info')
- **repo_path**: Path to clone the horus repository (default: '/root/code/git.ourworld.tf/herocode/horus')
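These fields can also be set from V before installing, in the same script style as the Quick Start above. The sketch below is illustrative and assumes the module lives under the `incubaid.herolib` namespace used by the rest of the code in this commit; adapt the import path to your checkout.
```v
import incubaid.herolib.installers.horus.osirisrunner as osirisrunner_installer

// get (or create) the default instance
mut runner := osirisrunner_installer.get(create: true)!

// adjust configuration, then persist it for later runs
runner.redis_addr = '127.0.0.1:6379'
runner.log_level = 'debug'
osirisrunner_installer.set(runner)!

// build from the horus workspace and start it under the startup manager
runner.install()!
runner.start()!
if runner.running()! {
	println('osirisrunner is running')
}
```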
## Commands
### Install
Builds the osirisrunner binary from the horus workspace. This will:
1. Install Rust if not present
2. Clone the horus repository from git.ourworld.tf
3. Build the osirisrunner binary with `cargo build -p runner-osiris --release`
```bash
hero osirisrunner.install
```
### Start
Starts the osirisrunner service using zinit:
```bash
hero osirisrunner.start
```
### Stop
Stops the running service:
```bash
hero osirisrunner.stop
```
### Restart
Restarts the service:
```bash
hero osirisrunner.restart
```
### Destroy
Stops the service and removes all files:
```bash
hero osirisrunner.destroy
```
## Requirements
- **Dependencies**:
- Rust toolchain (automatically installed)
- Git (for cloning repository)
- Redis (must be running separately)
## Architecture
The installer follows the standard herolib installer pattern:
- **osirisrunner_model.v**: Configuration structure and initialization
- **osirisrunner_actions.v**: Build, install, start, stop, destroy logic
- **osirisrunner_factory_.v**: Factory pattern for instance management
## Notes
- The installer builds from source rather than downloading pre-built binaries
- Redis must be running and accessible at the configured address
- The binary is built with `RUSTFLAGS="-A warnings"` to suppress warnings
- Service management uses zinit by default
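Since the service runs under zinit by default, its state can also be inspected through the generic startup manager. This is a rough sketch only; the process name `runner_osiris` is an assumption based on the binary name, so check `startupcmd()` in the actions file for the real name.
```v
import incubaid.herolib.osal.startupmanager

// ask zinit (via the startupmanager wrapper) whether the runner process is up
mut sm := startupmanager.get(.zinit)!
is_up := sm.running('runner_osiris')! // assumed process name
println('runner_osiris running: ${is_up}')
```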

View File

@@ -0,0 +1,5 @@
name: ${cfg.configpath}

View File

@@ -0,0 +1,12 @@
!!hero_code.generate_installer
name:''
classname:'Salrunner'
singleton:0
templates:1
default:1
title:''
supported_platforms:''
startupmanager:1
hasconfig:1
build:1

View File

@@ -0,0 +1,104 @@
# Salrunner Installer
A V language installer module for building and managing the Hero Runner service. This installer handles the complete lifecycle of the Salrunner binary from the Horus workspace.
## Features
- **Automatic Rust Installation**: Installs Rust toolchain if not present
- **Git Repository Management**: Clones and manages the horus repository
- **Binary Building**: Compiles the salrunner binary from the horus workspace
- **Service Management**: Start/stop/restart via zinit
- **Configuration**: Customizable Redis connection
## Quick Start
### Manual Usage
```v
import incubaid.herolib.installers.horus.salrunner as salrunner_installer
mut salrunner := salrunner_installer.get()!
salrunner.install()!
salrunner.start()!
```
## Configuration
```bash
!!salrunner.configure
name:'default'
binary_path:'/hero/var/bin/salrunner'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
```
### Configuration Fields
- **name**: Instance name (default: 'default')
- **binary_path**: Path where the salrunner binary will be installed (default: '/hero/var/bin/salrunner')
- **redis_addr**: Redis server address (default: '127.0.0.1:6379')
- **log_level**: Rust log level - trace, debug, info, warn, error (default: 'info')
- **repo_path**: Path to clone the horus repository (default: '/root/code/git.ourworld.tf/herocode/horus')
## Commands
### Install
Builds the salrunner binary from the horus workspace. This will:
1. Install Rust if not present
2. Clone the horus repository from git.ourworld.tf
3. Build the salrunner binary with `cargo build -p runner-sal --release`
```bash
hero salrunner.install
```
### Start
Starts the salrunner service using zinit:
```bash
hero salrunner.start
```
### Stop
Stops the running service:
```bash
hero salrunner.stop
```
### Restart
Restarts the service:
```bash
hero salrunner.restart
```
### Destroy
Stops the service and removes all files:
```bash
hero salrunner.destroy
```
## Requirements
- **Dependencies**:
- Rust toolchain (automatically installed)
- Git (for cloning repository)
- Redis (must be running separately)
## Architecture
The installer follows the standard herolib installer pattern:
- **salrunner_model.v**: Configuration structure and initialization
- **salrunner_actions.v**: Build, install, start, stop, destroy logic
- **salrunner_factory_.v**: Factory pattern for instance management
## Notes
- The installer builds from source rather than downloading pre-built binaries
- Redis must be running and accessible at the configured address
- The binary is built with `RUSTFLAGS="-A warnings"` to suppress warnings
- Service management uses zinit by default
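Because the service is managed through zinit, `start()` is also safe to call repeatedly: it returns immediately when the service is already running, installs the binary first if it is missing, and only then registers and starts the zinit process. A minimal sketch of using that from V, with the same import-path assumptions as the Quick Start above:
```v
import incubaid.herolib.installers.horus.salrunner as salrunner_installer

mut runner := salrunner_installer.get()!
// start() checks running() first, installs if needed, then launches via zinit
runner.start()!
is_up := runner.running()!
println('salrunner running: ${is_up}')
```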

View File

@@ -0,0 +1,148 @@
module salrunner
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.pathlib
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
import incubaid.herolib.installers.lang.rust
import incubaid.herolib.develop.gittools
import os
fn (self &Salrunner) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'runner_sal'
cmd: '${self.binary_path} --redis-url ${redis_url} 12003'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level
'RUST_LOG_STYLE': 'never'
}
}
return res
}
fn (self &Salrunner) running_check() !bool {
// Check if the process is running
res := osal.exec(cmd: 'pgrep -f runner_sal', stdout: false, raise_error: false)!
return res.exit_code == 0
}
fn (self &Salrunner) start_pre() ! {
}
fn (self &Salrunner) start_post() ! {
}
fn (self &Salrunner) stop_pre() ! {
}
fn (self &Salrunner) stop_post() ! {
}
//////////////////// following actions are not specific to instance of the object
// checks if a certain version or above is installed
fn (self &Salrunner) installed() !bool {
// Check if the binary exists
mut binary := pathlib.get(self.binary_path)
if !binary.exists() {
return false
}
return true
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
// optionally build a UList with all paths that result from building; it is then used e.g. in upload
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
fn (mut self Salrunner) install(args InstallArgs) ! {
console.print_header('install salrunner')
// For salrunner, we build from source instead of downloading
self.build()!
}
fn (mut self Salrunner) build() ! {
console.print_header('build salrunner')
// Ensure rust is installed
console.print_debug('Checking if Rust is installed...')
mut rust_installer := rust.get()!
res := osal.exec(cmd: 'rustc -V', stdout: false, raise_error: false)!
if res.exit_code != 0 {
console.print_header('Installing Rust first...')
rust_installer.install()!
} else {
console.print_debug('Rust is already installed: ${res.output.trim_space()}')
}
// Clone or get the repository
console.print_debug('Cloning/updating horus repository...')
mut gs := gittools.new()!
mut repo := gs.get_repo(
url: 'https://git.ourworld.tf/herocode/horus.git'
pull: true
reset: false
)!
// Update the path to the actual cloned repo
self.repo_path = repo.path()
set(self)!
console.print_debug('Repository path: ${self.repo_path}')
// Build the salrunner binary from the horus workspace
console.print_header('Building salrunner binary (this may take several minutes)...')
console.print_debug('Running: cargo build -p runner-sal --release')
console.print_debug('Build output:')
cmd := 'cd ${self.repo_path} && . ~/.cargo/env && RUSTFLAGS="-A warnings" cargo build -p runner-sal --release'
osal.execute_stdout(cmd)!
console.print_debug('Build completed successfully')
// Ensure binary directory exists and copy the binary
console.print_debug('Preparing binary directory: ${self.binary_path}')
mut binary_path_obj := pathlib.get(self.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
// Copy the built binary to the configured location
source_binary := '${self.repo_path}/target/release/runner_sal'
console.print_debug('Copying binary from: ${source_binary}')
console.print_debug('Copying binary to: ${self.binary_path}')
mut source_file := pathlib.get_file(path: source_binary)!
source_file.copy(dest: self.binary_path, rsync: false)!
console.print_header('salrunner built successfully at ${self.binary_path}')
}
fn (mut self Salrunner) destroy() ! {
self.stop()!
osal.process_kill_recursive(name: 'runner_sal')!
// Remove the built binary
osal.rm(self.binary_path)!
}

View File

@@ -0,0 +1,312 @@
module salrunner
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import time
__global (
salrunner_global map[string]&Salrunner
salrunner_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
binary_path string
redis_addr string
log_level string
repo_path string
fromdb bool // will load from filesystem
create bool // default will not create if not exist
}
pub fn new(args ArgsGet) !&Salrunner {
mut obj := Salrunner{
name: args.name
binary_path: args.binary_path
redis_addr: args.redis_addr
log_level: args.log_level
repo_path: args.repo_path
}
set(obj)!
return get(name: args.name)!
}
pub fn get(args ArgsGet) !&Salrunner {
mut context := base.context()!
salrunner_default = args.name
if args.fromdb || args.name !in salrunner_global {
mut r := context.redis()!
if r.hexists('context:salrunner', args.name)! {
data := r.hget('context:salrunner', args.name)!
if data.len == 0 {
print_backtrace()
return error('Salrunner with name: ${args.name} does not exist, probably a bug.')
}
mut obj := json.decode(Salrunner, data)!
set_in_mem(obj)!
} else {
if args.create {
new(args)!
} else {
print_backtrace()
return error("Salrunner with name '${args.name}' does not exist")
}
}
return get(name: args.name)! // no longer from db nor create
}
return salrunner_global[args.name] or {
print_backtrace()
return error('could not get config for salrunner with name:${args.name}')
}
}
// register the config for the future
pub fn set(o Salrunner) ! {
mut o2 := set_in_mem(o)!
salrunner_default = o2.name
mut context := base.context()!
mut r := context.redis()!
r.hset('context:salrunner', o2.name, json.encode(o2))!
}
// does the config exists?
pub fn exists(args ArgsGet) !bool {
mut context := base.context()!
mut r := context.redis()!
return r.hexists('context:salrunner', args.name)!
}
pub fn delete(args ArgsGet) ! {
mut context := base.context()!
mut r := context.redis()!
r.hdel('context:salrunner', args.name)!
}
@[params]
pub struct ArgsList {
pub mut:
fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&Salrunner {
mut res := []&Salrunner{}
mut context := base.context()!
if args.fromdb {
// reset what is in mem
salrunner_global = map[string]&Salrunner{}
salrunner_default = ''
}
if args.fromdb {
mut r := context.redis()!
mut l := r.hkeys('context:salrunner')!
for name in l {
res << get(name: name, fromdb: true)!
}
return res
} else {
// load from memory
for _, client in salrunner_global {
res << client
}
}
return res
}
// only sets in mem, does not set as config
fn set_in_mem(o Salrunner) !Salrunner {
mut o2 := obj_init(o)!
salrunner_global[o2.name] = &o2
salrunner_default = o2.name
return o2
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'salrunner.') {
return
}
mut install_actions := plbook.find(filter: 'salrunner.configure')!
if install_actions.len > 0 {
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'salrunner.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut salrunner_obj := get(name: name, create: true)!
console.print_debug('action object:\n${salrunner_obj}')
if other_action.name == 'destroy' || reset {
console.print_debug('install action salrunner.destroy')
salrunner_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action salrunner.install')
salrunner_obj.install(reset: reset)!
}
if other_action.name == 'build' {
console.print_debug('install action salrunner.build')
salrunner_obj.build()!
}
if other_action.name == 'start' {
console.print_debug('install action salrunner.${other_action.name}')
salrunner_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action salrunner.${other_action.name}')
salrunner_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action salrunner.${other_action.name}')
salrunner_obj.restart()!
}
if other_action.name == 'start_pre' {
console.print_debug('install action salrunner.${other_action.name}')
salrunner_obj.start_pre()!
}
if other_action.name == 'start_post' {
console.print_debug('install action salrunner.${other_action.name}')
salrunner_obj.start_post()!
}
if other_action.name == 'stop_pre' {
console.print_debug('install action salrunner.${other_action.name}')
salrunner_obj.stop_pre()!
}
if other_action.name == 'stop_post' {
console.print_debug('install action salrunner.${other_action.name}')
salrunner_obj.stop_post()!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.screen {
console.print_debug("installer: salrunner' startupmanager get screen")
return startupmanager.get(.screen)!
}
.zinit {
console.print_debug("installer: salrunner' startupmanager get zinit")
return startupmanager.get(.zinit)!
}
.systemd {
console.print_debug("installer: salrunner' startupmanager get systemd")
return startupmanager.get(.systemd)!
}
else {
console.print_debug("installer: salrunner' startupmanager get auto")
return startupmanager.get(.auto)!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self Salrunner) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self Salrunner) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('installer: salrunner start')
if !self.installed()! {
self.install()!
}
self.start_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: salrunner starting with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
self.start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
return error('salrunner did not start properly.')
}
pub fn (mut self Salrunner) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self Salrunner) stop() ! {
switch(self.name)
self.stop_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
self.stop_post()!
}
pub fn (mut self Salrunner) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self Salrunner) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in self.startupcmd()! {
if zprocess.startuptype != .screen {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
}
return self.running_check()!
}
// switch instance to be used for salrunner
pub fn switch(name string) {
salrunner_default = name
}

View File

@@ -0,0 +1,62 @@
module salrunner
import os
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import incubaid.herolib.core.pathlib
const version = '0.1.0'
const singleton = true
const default = true
// THIS IS THE SOURCE OF INFORMATION FOR THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct Salrunner {
pub mut:
name string = 'default'
binary_path string = os.join_path(os.home_dir(), 'hero/bin/runner_sal')
redis_addr string = '127.0.0.1:6379'
log_level string = 'info'
repo_path string = '/root/code/git.ourworld.tf/herocode/horus'
}
// your checking & initialization code if needed
fn obj_init(mycfg_ Salrunner) !Salrunner {
mut mycfg := mycfg_
if mycfg.name == '' {
mycfg.name = 'default'
}
if mycfg.binary_path == '' {
mycfg.binary_path = os.join_path(os.home_dir(), 'hero/var/bin/runner_sal')
}
if mycfg.redis_addr == '' {
mycfg.redis_addr = '127.0.0.1:6379'
}
if mycfg.log_level == '' {
mycfg.log_level = 'info'
}
if mycfg.repo_path == '' {
mycfg.repo_path = '/root/code/git.ourworld.tf/herocode/horus'
}
return mycfg
}
// called before start if needed
fn configure() ! {
mut server := get()!
// Ensure the binary directory exists
mut binary_path_obj := pathlib.get(server.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_dumps(obj Salrunner) !string {
return encoderhero.encode[Salrunner](obj)!
}
pub fn heroscript_loads(heroscript string) !Salrunner {
mut obj := encoderhero.decode[Salrunner](heroscript)!
return obj
}

View File

@@ -0,0 +1,5 @@
name: ${cfg.configpath}

View File

@@ -0,0 +1,12 @@
!!hero_code.generate_installer
name:''
classname:'Supervisor'
singleton:0
templates:1
default:1
title:''
supported_platforms:''
startupmanager:1
hasconfig:1
build:1

View File

@@ -0,0 +1,144 @@
# Supervisor Installer
A V language installer module for building and managing the Supervisor service. This installer handles the complete lifecycle of the Supervisor binary from the Horus workspace.
## Features
- **Automatic Rust Installation**: Installs Rust toolchain if not present
- **Git Repository Management**: Clones and manages the horus repository
- **Binary Building**: Compiles the supervisor binary from the horus workspace
- **Service Management**: Start/stop/restart via zinit
- **Configuration**: Customizable Redis, HTTP, and WebSocket ports
## Quick Start
### Using the Example Script
```bash
cd /root/code/github/freeflowuniverse/herolib/examples/installers/horus
./supervisor.vsh
```
### Manual Usage
```v
import incubaid.herolib.installers.horus.supervisor as supervisor_installer
mut supervisor := supervisor_installer.get()!
supervisor.install()!
supervisor.start()!
```
## Configuration
```bash
!!supervisor.configure
name:'default'
binary_path:'/hero/var/bin/supervisor'
redis_addr:'127.0.0.1:6379'
http_port:8082
ws_port:9654
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
```
### Configuration Fields
- **name**: Instance name (default: 'default')
- **binary_path**: Path where the supervisor binary will be installed (default: '/hero/var/bin/supervisor')
- **redis_addr**: Redis server address (default: '127.0.0.1:6379')
- **http_port**: HTTP API port (default: 8082)
- **ws_port**: WebSocket API port (default: 9654)
- **log_level**: Rust log level - trace, debug, info, warn, error (default: 'info')
- **repo_path**: Path to clone the horus repository (default: '/root/code/git.ourworld.tf/herocode/horus')
## Commands
### Install
Builds the supervisor binary from the horus workspace. This will:
1. Install Rust if not present
2. Clone the horus repository from git.ourworld.tf
3. Build the supervisor binary with `cargo build -p hero-supervisor --release`
```bash
hero supervisor.install
```
### Start
Starts the supervisor service using zinit:
```bash
hero supervisor.start
```
### Stop
Stops the running service:
```bash
hero supervisor.stop
```
### Restart
Restarts the service:
```bash
hero supervisor.restart
```
### Destroy
Stops the service and removes all files:
```bash
hero supervisor.destroy
```
## Requirements
- **Dependencies**:
- Rust toolchain (automatically installed)
- Git (for cloning repository)
- Redis (must be running separately)
- Mycelium (must be installed and running separately)
## Architecture
The installer follows the standard herolib installer pattern:
- **supervisor_model.v**: Configuration structure and initialization
- **supervisor_actions.v**: Build, install, start, stop, destroy logic
- **supervisor_factory_.v**: Factory pattern for instance management
## Notes
- The installer builds from source rather than downloading pre-built binaries
- Mycelium is expected to be already installed and running in the environment
- Redis must be running and accessible at the configured address
- The binary is built with `RUSTFLAGS="-A warnings"` to suppress warnings
- Service management uses zinit by default
## Example Workflow
```v
import incubaid.herolib.installers.horus.supervisor as sv
// Get installer instance
mut supervisor := sv.get()!
// Customize configuration
supervisor.redis_addr = '127.0.0.1:6379'
supervisor.http_port = 8082
supervisor.log_level = 'debug'
sv.set(supervisor)!
// Build and start
supervisor.install()!
supervisor.start()!
// Check status
if supervisor.running()! {
println('Supervisor is running on port ${supervisor.http_port}')
}
// Later: cleanup
supervisor.destroy()!
```

View File

@@ -0,0 +1,265 @@
module supervisor
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core.pathlib
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
import incubaid.herolib.installers.lang.rust
import incubaid.herolib.develop.gittools
import os
fn (self &Supervisor) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'supervisor'
cmd: '${self.binary_path} --redis-url ${redis_url} --port ${self.http_port} --admin-secret mysecret'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level
'RUST_LOG_STYLE': 'never'
}
}
return res
}
fn (self &Supervisor) running_check() !bool {
// Check if the process is running by checking the HTTP port
// The supervisor returns 405 for GET requests (requires POST), so we check if we get any response
res := osal.exec(
cmd: 'curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:${self.http_port}'
stdout: false
raise_error: false
)!
// Any HTTP response code (including 405) means the server is running
return res.output.len > 0 && res.output.int() > 0
}
fn (self &Supervisor) start_pre() ! {
}
fn (self &Supervisor) start_post() ! {
}
fn (self &Supervisor) stop_pre() ! {
}
fn (self &Supervisor) stop_post() ! {
}
//////////////////// following actions are not specific to instance of the object
// checks if a certain version or above is installed
fn (self &Supervisor) installed() !bool {
// Check if the binary exists
mut binary := pathlib.get(self.binary_path)
if !binary.exists() {
return false
}
return true
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
// optionally build a UList with all paths that result from building; it is then used e.g. in upload
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
// installers.upload(
// cmdname: 'supervisor'
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/supervisor'
// )!
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
fn (mut self Supervisor) install(args InstallArgs) ! {
console.print_header('install supervisor')
// For supervisor, we build from source instead of downloading
self.build()!
}
// Public function to build supervisor without requiring factory/redis
pub fn build_supervisor() ! {
console.print_header('build supervisor')
println('📦 Starting supervisor build process...\n')
// Use default config instead of getting from factory
println(' Initializing configuration...')
mut cfg := Supervisor{}
println(' Configuration initialized')
println(' - Binary path: ${cfg.binary_path}')
println(' - Redis address: ${cfg.redis_addr}')
println(' - HTTP port: ${cfg.http_port}')
println(' - WS port: ${cfg.ws_port}\n')
// Ensure Redis is installed and running (required for supervisor)
println('🔍 Step 1/4: Checking Redis dependency...')
// First check if redis-server is installed
if !osal.cmd_exists_profile('redis-server') {
println(' Redis is not installed')
println('📥 Installing Redis...')
osal.package_install('redis-server')!
println(' Redis installed')
} else {
println(' Redis is already installed')
}
// Now check if it's running
println('🔍 Checking if Redis is running...')
redis_check := osal.exec(cmd: 'redis-cli -c -p 6379 ping', stdout: false, raise_error: false)!
if redis_check.exit_code != 0 {
println(' Redis is not running')
println('🚀 Starting Redis...')
osal.exec(cmd: 'systemctl start redis-server')!
println(' Redis started successfully\n')
} else {
println(' Redis is already running\n')
}
// Ensure rust is installed
println('🔍 Step 2/4: Checking Rust dependency...')
mut rust_installer := rust.get()!
res := osal.exec(cmd: 'rustc -V', stdout: false, raise_error: false)!
if res.exit_code != 0 {
println('📥 Installing Rust...')
rust_installer.install()!
println(' Rust installed\n')
} else {
println(' Rust is already installed: ${res.output.trim_space()}\n')
}
// Clone or get the repository
println('🔍 Step 3/4: Cloning/updating horus repository...')
mut gs := gittools.new()!
mut repo := gs.get_repo(
url: 'https://git.ourworld.tf/herocode/horus.git'
pull: true
reset: false
)!
// Update the path to the actual cloned repo
cfg.repo_path = repo.path()
println(' Repository ready at: ${cfg.repo_path}\n')
// Build the supervisor binary from the horus workspace
println('🔍 Step 4/4: Building supervisor binary...')
println(' This may take several minutes (compiling Rust code)...')
println('📝 Running: cargo build -p hero-supervisor --release\n')
cmd := 'cd ${cfg.repo_path} && . ~/.cargo/env && RUSTFLAGS="-A warnings" cargo build -p hero-supervisor --release'
osal.execute_stdout(cmd)!
println('\n Build completed successfully')
// Ensure binary directory exists and copy the binary
println('📁 Preparing binary directory: ${cfg.binary_path}')
mut binary_path_obj := pathlib.get(cfg.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
// Copy the built binary to the configured location
source_binary := '${cfg.repo_path}/target/release/supervisor'
println('📋 Copying binary from: ${source_binary}')
println('📋 Copying binary to: ${cfg.binary_path}')
mut source_file := pathlib.get_file(path: source_binary)!
source_file.copy(dest: cfg.binary_path, rsync: false)!
println('\n🎉 Supervisor built successfully!')
println('📍 Binary location: ${cfg.binary_path}')
}
fn (mut self Supervisor) build() ! {
console.print_header('build supervisor')
// Ensure Redis is installed and running (required for supervisor)
console.print_debug('Checking if Redis is installed and running...')
redis_check := osal.exec(cmd: 'redis-cli -c -p 6379 ping', stdout: false, raise_error: false)!
if redis_check.exit_code != 0 {
console.print_header('Redis is not running, checking if installed...')
if !osal.cmd_exists_profile('redis-server') {
console.print_header('Installing Redis...')
osal.package_install('redis-server')!
}
console.print_header('Starting Redis...')
osal.exec(cmd: 'systemctl start redis-server')!
console.print_debug('Redis started successfully')
} else {
console.print_debug('Redis is already running')
}
// Ensure rust is installed
console.print_debug('Checking if Rust is installed...')
mut rust_installer := rust.get()!
res := osal.exec(cmd: 'rustc -V', stdout: false, raise_error: false)!
if res.exit_code != 0 {
console.print_header('Installing Rust first...')
rust_installer.install()!
} else {
console.print_debug('Rust is already installed: ${res.output.trim_space()}')
}
// Clone or get the repository
console.print_debug('Cloning/updating horus repository...')
mut gs := gittools.new()!
mut repo := gs.get_repo(
url: 'https://git.ourworld.tf/herocode/horus.git'
pull: true
reset: false
)!
// Update the path to the actual cloned repo
self.repo_path = repo.path()
set(self)!
console.print_debug('Repository path: ${self.repo_path}')
// Build the supervisor binary from the horus workspace
console.print_header('Building supervisor binary (this may take several minutes)...')
console.print_debug('Running: cargo build -p hero-supervisor --release')
console.print_debug('Build output:')
cmd := 'cd ${self.repo_path} && . ~/.cargo/env && RUSTFLAGS="-A warnings" cargo build -p hero-supervisor --release'
osal.execute_stdout(cmd)!
console.print_debug('Build completed successfully')
// Ensure binary directory exists and copy the binary
console.print_debug('Preparing binary directory: ${self.binary_path}')
mut binary_path_obj := pathlib.get(self.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
// Copy the built binary to the configured location
source_binary := '${self.repo_path}/target/release/supervisor'
console.print_debug('Copying binary from: ${source_binary}')
console.print_debug('Copying binary to: ${self.binary_path}')
mut source_file := pathlib.get_file(path: source_binary)!
source_file.copy(dest: self.binary_path, rsync: false)!
console.print_header('supervisor built successfully at ${self.binary_path}')
}
fn (mut self Supervisor) destroy() ! {
self.stop()!
osal.process_kill_recursive(name: 'supervisor')!
// Remove the built binary
osal.rm(self.binary_path)!
}

View File

@@ -0,0 +1,330 @@
module supervisor
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import time
__global (
supervisor_global map[string]&Supervisor
supervisor_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
binary_path string
redis_addr string
http_port int
ws_port int
log_level string
repo_path string
fromdb bool // will load from filesystem
create bool // default will not create if not exist
}
pub fn new(args ArgsGet) !&Supervisor {
mut obj := Supervisor{
name: args.name
binary_path: args.binary_path
redis_addr: args.redis_addr
http_port: args.http_port
ws_port: args.ws_port
log_level: args.log_level
repo_path: args.repo_path
}
set(obj)!
return get(name: args.name)!
}
pub fn get(args ArgsGet) !&Supervisor {
mut context := base.context()!
mut name := if args.name == 'default' && supervisor_default.len > 0 {
supervisor_default
} else {
args.name
}
supervisor_default = name
if args.fromdb || name !in supervisor_global {
mut r := context.redis()!
if r.hexists('context:supervisor', name)! {
data := r.hget('context:supervisor', name)!
if data.len == 0 {
print_backtrace()
return error('Supervisor with name: ${name} does not exist, probably a bug.')
}
mut obj := json.decode(Supervisor, data)!
set_in_mem(obj)!
} else {
if args.create {
new(args)!
} else {
print_backtrace()
return error("Supervisor with name '${name}' does not exist")
}
}
return get(name: name)! // no longer from db nor create
}
return supervisor_global[name] or {
print_backtrace()
return error('could not get config for supervisor with name:${name}')
}
}
// register the config for the future
pub fn set(o Supervisor) ! {
mut o2 := set_in_mem(o)!
supervisor_default = o2.name
mut context := base.context()!
mut r := context.redis()!
r.hset('context:supervisor', o2.name, json.encode(o2))!
}
// does the config exists?
pub fn exists(args ArgsGet) !bool {
mut context := base.context()!
mut r := context.redis()!
return r.hexists('context:supervisor', args.name)!
}
pub fn delete(args ArgsGet) ! {
mut context := base.context()!
mut r := context.redis()!
r.hdel('context:supervisor', args.name)!
}
@[params]
pub struct ArgsList {
pub mut:
fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&Supervisor {
mut res := []&Supervisor{}
mut context := base.context()!
if args.fromdb {
// reset what is in mem
supervisor_global = map[string]&Supervisor{}
supervisor_default = ''
}
if args.fromdb {
mut r := context.redis()!
mut l := r.hkeys('context:supervisor')!
for name in l {
res << get(name: name, fromdb: true)!
}
return res
} else {
// load from memory
for _, client in supervisor_global {
res << client
}
}
return res
}
// only sets in mem, does not set as config
fn set_in_mem(o Supervisor) !Supervisor {
mut o2 := obj_init(o)!
supervisor_global[o2.name] = &o2
supervisor_default = o2.name
return o2
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'supervisor.') {
return
}
mut install_actions := plbook.find(filter: 'supervisor.configure')!
if install_actions.len > 0 {
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'supervisor.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut supervisor_obj := get(name: name, create: true)!
console.print_debug('action object:\n${supervisor_obj}')
if other_action.name == 'destroy' || reset {
console.print_debug('install action supervisor.destroy')
supervisor_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action supervisor.install')
supervisor_obj.install(reset: reset)!
}
if other_action.name == 'build' {
console.print_debug('install action supervisor.build')
supervisor_obj.build()!
}
}
if other_action.name in ['start', 'stop', 'restart', 'start_pre', 'start_post', 'stop_pre',
'stop_post'] {
mut p := other_action.params
name := p.get('name')!
mut supervisor_obj := get(name: name, create: true)!
console.print_debug('action object:\n${supervisor_obj}')
if other_action.name == 'start' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.restart()!
}
if other_action.name == 'start_pre' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.start_pre()!
}
if other_action.name == 'start_post' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.start_post()!
}
if other_action.name == 'stop_pre' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.stop_pre()!
}
if other_action.name == 'stop_post' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.stop_post()!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.screen {
console.print_debug("installer: supervisor' startupmanager get screen")
return startupmanager.get(.screen)!
}
.zinit {
console.print_debug("installer: supervisor' startupmanager get zinit")
return startupmanager.get(.zinit)!
}
.systemd {
console.print_debug("installer: supervisor' startupmanager get systemd")
return startupmanager.get(.systemd)!
}
else {
console.print_debug("installer: supervisor' startupmanager get auto")
return startupmanager.get(.auto)!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self Supervisor) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self Supervisor) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('installer: supervisor start')
if !self.installed()! {
self.install()!
}
self.configure()!
self.start_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: supervisor starting with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
self.start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
return error('supervisor did not start properly.')
}
pub fn (mut self Supervisor) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self Supervisor) stop() ! {
switch(self.name)
self.stop_pre()!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
self.stop_post()!
}
pub fn (mut self Supervisor) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self Supervisor) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in self.startupcmd()! {
if zprocess.startuptype != .screen {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
}
return self.running_check()!
}
// switch instance to be used for supervisor
pub fn switch(name string) {
supervisor_default = name
}

View File

@@ -0,0 +1,69 @@
module supervisor
import os
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import incubaid.herolib.core.pathlib
const version = '0.1.0'
const singleton = true
const default = true
// THIS IS THE SOURCE OF INFORMATION FOR THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct Supervisor {
pub mut:
name string = 'default'
binary_path string = os.join_path(os.home_dir(), 'hero/bin/supervisor')
redis_addr string = '127.0.0.1:6379'
http_port int = 8082
ws_port int = 9654
log_level string = 'info'
repo_path string = '/root/code/git.ourworld.tf/herocode/horus'
}
// your checking & initialization code if needed
fn obj_init(mycfg_ Supervisor) !Supervisor {
mut mycfg := mycfg_
if mycfg.name == '' {
mycfg.name = 'default'
}
if mycfg.binary_path == '' {
mycfg.binary_path = os.join_path(os.home_dir(), 'hero/bin/supervisor')
}
if mycfg.redis_addr == '' {
mycfg.redis_addr = '127.0.0.1:6379'
}
if mycfg.http_port == 0 {
mycfg.http_port = 8082
}
if mycfg.ws_port == 0 {
mycfg.ws_port = 9654
}
if mycfg.log_level == '' {
mycfg.log_level = 'info'
}
if mycfg.repo_path == '' {
mycfg.repo_path = os.join_path(os.home_dir(), 'code/git.ourworld.tf/herocode/horus')
}
return mycfg
}
// called before start if needed
fn (self &Supervisor) configure() ! {
// Ensure the binary directory exists
mut binary_path_obj := pathlib.get(self.binary_path)
osal.dir_ensure(binary_path_obj.path_dir())!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_dumps(obj Supervisor) !string {
return encoderhero.encode[Supervisor](obj)!
}
pub fn heroscript_loads(heroscript string) !Supervisor {
mut obj := encoderhero.decode[Supervisor](heroscript)!
return obj
}

View File

@@ -0,0 +1,5 @@
name: ${cfg.configpath}

View File

@@ -29,7 +29,7 @@ fn startupcmd() ![]startupmanager.ZProcessNewArgs {
start: true
}
}
osal.dir_ensure(os.home_dir() + '/hero/cfg/zinit')!
osal.dir_ensure(os.join_path(os.home_dir(), 'hero/cfg/zinit'))!
return res
}

View File

@@ -1,13 +1,13 @@
!!hero_code.generate_installer
name:'herorunner'
classname:'HeroRunner'
name:'crun_installer'
classname:'CrunInstaller'
singleton:0
templates:0
default:1
title:''
title:'crun container runtime installer'
supported_platforms:''
reset:0
startupmanager:0
hasconfig:0
build:0
hasconfig:1
build:1

View File

@@ -0,0 +1,77 @@
module crun_installer
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core
import incubaid.herolib.installers.ulist
import os
//////////////////// following actions are not specific to instance of the object
// checks if crun is installed
pub fn (self &CrunInstaller) installed() !bool {
res := os.execute('${osal.profile_path_source_and()!} crun --version')
if res.exit_code != 0 {
return false
}
return true
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self CrunInstaller) install(args InstallArgs) ! {
console.print_header('install crun')
// Check platform support
pl := core.platform()!
if pl == .ubuntu || pl == .arch {
console.print_debug('installing crun via package manager')
osal.package_install('crun')!
console.print_header('crun is installed')
return
}
if pl == .osx {
return error('crun is not available on macOS - it is a Linux-only container runtime. On macOS, use Docker Desktop or Podman Desktop instead.')
}
return error('unsupported platform for crun installation')
}
pub fn (mut self CrunInstaller) destroy() ! {
console.print_header('destroy crun')
if !self.installed()! {
console.print_debug('crun is not installed')
return
}
pl := core.platform()!
if pl == .ubuntu || pl == .arch {
console.print_debug('removing crun via package manager')
osal.package_remove('crun')!
console.print_header('crun has been removed')
return
}
if pl == .osx {
return error('crun is not available on macOS')
}
return error('unsupported platform for crun removal')
}

View File

@@ -0,0 +1,170 @@
module crun_installer
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
__global (
crun_installer_global map[string]&CrunInstaller
crun_installer_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
fromdb bool // will load from filesystem
create bool // default will not create if not exist
}
pub fn new(args ArgsGet) !&CrunInstaller {
mut obj := CrunInstaller{
name: args.name
}
set(obj)!
return get(name: args.name)!
}
pub fn get(args ArgsGet) !&CrunInstaller {
mut context := base.context()!
crun_installer_default = args.name
if args.fromdb || args.name !in crun_installer_global {
mut r := context.redis()!
if r.hexists('context:crun_installer', args.name)! {
data := r.hget('context:crun_installer', args.name)!
if data.len == 0 {
print_backtrace()
return error('CrunInstaller with name: ${args.name} does not exist, probably a bug.')
}
mut obj := json.decode(CrunInstaller, data)!
set_in_mem(obj)!
} else {
if args.create {
new(args)!
} else {
print_backtrace()
return error("CrunInstaller with name '${args.name}' does not exist")
}
}
return get(name: args.name)! // no longer from db nor create
}
return crun_installer_global[args.name] or {
print_backtrace()
return error('could not get config for crun_installer with name:${args.name}')
}
}
// register the config for the future
pub fn set(o CrunInstaller) ! {
mut o2 := set_in_mem(o)!
crun_installer_default = o2.name
mut context := base.context()!
mut r := context.redis()!
r.hset('context:crun_installer', o2.name, json.encode(o2))!
}
// does the config exists?
pub fn exists(args ArgsGet) !bool {
mut context := base.context()!
mut r := context.redis()!
return r.hexists('context:crun_installer', args.name)!
}
pub fn delete(args ArgsGet) ! {
mut context := base.context()!
mut r := context.redis()!
r.hdel('context:crun_installer', args.name)!
}
@[params]
pub struct ArgsList {
pub mut:
fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&CrunInstaller {
mut res := []&CrunInstaller{}
mut context := base.context()!
if args.fromdb {
// reset what is in mem
crun_installer_global = map[string]&CrunInstaller{}
crun_installer_default = ''
}
if args.fromdb {
mut r := context.redis()!
mut l := r.hkeys('context:crun_installer')!
for name in l {
res << get(name: name, fromdb: true)!
}
return res
} else {
// load from memory
for _, client in crun_installer_global {
res << client
}
}
return res
}
// only sets in mem, does not set as config
fn set_in_mem(o CrunInstaller) !CrunInstaller {
mut o2 := obj_init(o)!
crun_installer_global[o2.name] = &o2
crun_installer_default = o2.name
return o2
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'crun_installer.') {
return
}
mut install_actions := plbook.find(filter: 'crun_installer.configure')!
if install_actions.len > 0 {
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'crun_installer.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut crun_installer_obj := get(name: name)!
console.print_debug('action object:\n${crun_installer_obj}')
if other_action.name == 'destroy' || reset {
console.print_debug('install action crun_installer.destroy')
crun_installer_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action crun_installer.install')
crun_installer_obj.install(reset: reset)!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// load from disk and make sure it is properly initialized
pub fn (mut self CrunInstaller) reload() ! {
switch(self.name)
self = obj_init(self)!
}
// switch instance to be used for crun_installer
pub fn switch(name string) {
crun_installer_default = name
}

View File

@@ -0,0 +1,32 @@
module crun_installer
import incubaid.herolib.data.encoderhero
pub const version = '0.0.0'
const singleton = false
const default = true
// CrunInstaller manages the installation of the crun container runtime
@[heap]
pub struct CrunInstaller {
pub mut:
name string = 'default'
}
// Initialize the installer object
fn obj_init(mycfg_ CrunInstaller) !CrunInstaller {
mut mycfg := mycfg_
return mycfg
}
// Configure is called before installation if needed
fn configure() ! {
// No configuration needed for crun installer
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_loads(heroscript string) !CrunInstaller {
mut obj := encoderhero.decode[CrunInstaller](heroscript)!
return obj
}

View File

@@ -0,0 +1,53 @@
# crun_installer
Installer for the crun container runtime - a fast and lightweight OCI runtime written in C.
## Features
- **Simple Package Installation**: Installs crun via system package manager
- **Platform Awareness**: Installs on Ubuntu and Arch Linux; macOS is detected and rejected with a clear error, since crun is Linux-only
- **Clean Uninstall**: Removes crun cleanly from the system
## Quick Start
### Using V Code
```v
import incubaid.herolib.installers.virt.crun_installer
mut crun := crun_installer.get()!
// Install crun
crun.install()!
// Check if installed
if crun.installed()! {
println('crun is installed')
}
// Uninstall crun
crun.destroy()!
```
### Using Heroscript
```hero
!!crun_installer.install
!!crun_installer.destroy
```
## Platform Support
- **Ubuntu/Debian**: Installs via `apt`
- **Arch Linux**: Installs via `pacman`
- **macOS**: ⚠️ Not supported - crun is Linux-only. Use Docker Desktop or Podman Desktop on macOS instead.
## What is crun?
crun is a fast and low-memory footprint OCI Container Runtime fully written in C. It is designed to be a drop-in replacement for runc and is used by container engines like Podman.
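Within herolib, the heropods module relies on this installer to make sure crun is present before it creates containers; the snippet below mirrors the heropods change included in this commit.
```v
import incubaid.herolib.osal.core as osal
import incubaid.herolib.installers.virt.crun_installer

// install crun on demand, only when the binary is not already on the host
if !osal.cmd_exists('crun') {
	mut crun_inst := crun_installer.get()!
	crun_inst.install(reset: false)!
}
```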
## See Also
- **crun client**: `lib/virt/crun` - V client for interacting with crun
- **podman installer**: `lib/installers/virt/podman` - Podman installer (includes crun)

View File

@@ -1,67 +0,0 @@
module herorunner
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core.pathlib
import incubaid.herolib.installers.ulist
import os
//////////////////// following actions are not specific to instance of the object
fn installed() !bool {
return false
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
return ulist.UList{}
}
fn upload() ! {
}
fn install() ! {
console.print_header('install herorunner')
osal.package_install('crun')!
// osal.exec(
// cmd: '
// '
// stdout: true
// name: 'herorunner_install'
// )!
}
fn destroy() ! {
// mut systemdfactory := systemd.new()!
// systemdfactory.destroy("zinit")!
// osal.process_kill_recursive(name:'zinit')!
// osal.cmd_delete('zinit')!
// osal.package_remove('
// podman
// conmon
// buildah
// skopeo
// runc
// ')!
// //will remove all paths where go/bin is found
// osal.profile_path_add_remove(paths2delete:"go/bin")!
// osal.rm("
// podman
// conmon
// buildah
// skopeo
// runc
// /var/lib/containers
// /var/lib/podman
// /var/lib/buildah
// /tmp/podman
// /tmp/conmon
// ")!
}

View File

@@ -1,80 +0,0 @@
module herorunner
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
__global (
herorunner_global map[string]&HeroRunner
herorunner_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
}
pub fn new(args ArgsGet) !&HeroRunner {
return &HeroRunner{}
}
pub fn get(args ArgsGet) !&HeroRunner {
return new(args)!
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'herorunner.') {
return
}
mut install_actions := plbook.find(filter: 'herorunner.configure')!
if install_actions.len > 0 {
return error("can't configure herorunner, because no configuration allowed for this installer.")
}
mut other_actions := plbook.find(filter: 'herorunner.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build'] {
mut p := other_action.params
reset := p.get_default_false('reset')
if other_action.name == 'destroy' || reset {
console.print_debug('install action herorunner.destroy')
destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action herorunner.install')
install()!
}
}
other_action.done = true
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self HeroRunner) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn (mut self HeroRunner) destroy() ! {
switch(self.name)
destroy()!
}
// switch instance to be used for herorunner
pub fn switch(name string) {
herorunner_default = name
}

View File

@@ -1,34 +0,0 @@
module herorunner
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import os
pub const version = '0.0.0'
const singleton = false
const default = true
// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct HeroRunner {
pub mut:
name string = 'default'
}
// your checking & initialization code if needed
fn obj_init(mycfg_ HeroRunner) !HeroRunner {
mut mycfg := mycfg_
return mycfg
}
// called before start if done
fn configure() ! {
// mut installer := get()!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_loads(heroscript string) !HeroRunner {
mut obj := encoderhero.decode[HeroRunner](heroscript)!
return obj
}

View File

@@ -1,40 +0,0 @@
# herorunner
To get started
```vlang
import incubaid.herolib.installers.something.herorunner as herorunner_installer
heroscript:="
!!herorunner.configure name:'test'
password: '1234'
port: 7701
!!herorunner.start name:'test' reset:1
"
herorunner_installer.play(heroscript=heroscript)!
//or we can call the default and do a start with reset
//mut installer:= herorunner_installer.get()!
//installer.start(reset:true)!
```
## example heroscript
```hero
!!herorunner.configure
homedir: '/home/user/herorunner'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```

View File

@@ -16,9 +16,20 @@ pub fn get(cat StartupManagerType) !StartupManager {
mut sm := StartupManager{
cat: cat
}
if sm.cat == .auto {
match sm.cat {
.zinit {
mut zinit_client_test := zinit.get(create: true)! // 'create:true' ensures a client object is initiated even if the socket isn't active.
if _ := zinit_client_test.rpc_discover() {
sm.cat = .zinit
} else {
return error('zinit not found ${err}')
}
}
.auto {
// Try to get a ZinitRPC client and check if it can discover RPC methods.
// This implies the zinit daemon is running and accessible via its socket.
// Since the mode is auto, do not insist and fail if zinit cannot be found; fall back to screen instead.
mut zinit_client_test := zinit.get(create: true)! // 'create:true' ensures a client object is initiated even if the socket isn't active.
if _ := zinit_client_test.rpc_discover() {
sm.cat = .zinit
@@ -26,10 +37,14 @@ pub fn get(cat StartupManagerType) !StartupManager {
sm.cat = .screen
}
}
if sm.cat == .unknown {
.unknown {
print_backtrace()
return error("can't determine startup manager type, need to be a known one.")
}
else {
return sm
}
}
return sm
}
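A hedged sketch of how a caller exercises the detection above; the import path is an assumption, while the get() signature and the .auto/.zinit values come from this hunk:
```vlang
import incubaid.herolib.osal.startupmanager // import path is an assumption

// .auto probes zinit over its socket and falls back to screen when it isn't reachable
mut sm := startupmanager.get(.auto)!

// asking for .zinit explicitly returns an error when rpc_discover() fails
mut sm_zinit := startupmanager.get(.zinit)!
```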
@@ -87,19 +102,43 @@ pub fn (mut sm StartupManager) new(args ZProcessNewArgs) ! {
shutdown_timeout: 0 // Default, or add to ZProcessNewArgs if needed
}
// Check if service already exists
existing_service := zinit_client.service_get(args.name) or { zinit.ServiceConfig{} }
// If service exists, stop monitoring, stop, and delete it first
if existing_service.exec.len > 0 {
console.print_debug('startupmanager: service ${args.name} already exists, cleaning up...')
// Stop the service first
zinit_client.service_stop(args.name) or {
console.print_debug('startupmanager: failed to stop service ${args.name}: ${err}')
}
// Forget (stop monitoring) the service
zinit_client.service_forget(args.name) or {
console.print_debug('startupmanager: failed to forget service ${args.name}: ${err}')
}
// Delete the service configuration
zinit_client.service_delete(args.name) or {
console.print_debug('startupmanager: failed to delete service ${args.name}: ${err}')
}
}
// Create the service configuration file in zinit
zinit_client.service_create(args.name, service_config) or {
return error('startupmanager: failed to create zinit service ${args.name}: ${err}')
}
// If 'start' is true, monitor and start the service immediately after creation
if args.start {
// Monitor loads the config and starts monitoring the service
zinit_client.service_monitor(args.name) or {
return error('startupmanager: failed to monitor zinit service ${args.name}: ${err}')
}
}
}
else {
panic('to implement, startup manager only support screen & systemd for now: ${mycat}')
}
}
// If 'start' is true, also monitor and start the service
if args.start {
sm.start(args.name)!
}
}
pub fn (mut sm StartupManager) start(name string) ! {
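And a hedged sketch of driving the create → clean-up → monitor → start flow implemented above; the service name is illustrative and the `cmd` field of ZProcessNewArgs is an assumption, while `name` and `start` appear in this hunk:
```vlang
mut sm := startupmanager.get(.auto)!
sm.new(
	name:  'myservice'                // reused as the zinit service name above
	cmd:   '/usr/local/bin/myservice' // assumed field of ZProcessNewArgs
	start: true                       // create the config, then monitor and start it
)!
```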
@@ -124,11 +163,6 @@ pub fn (mut sm StartupManager) start(name string) ! {
zinit_client.service_start(name) or {
return error('startupmanager: Failed to start zinit service ${name}: ${err}')
}
// Monitor loads the config, if it's new it starts it.
// If the service is already managed, this will bring it back up.
zinit_client.service_monitor(name) or {
return error('startupmanager: Failed to monitor zinit service ${name}: ${err}')
}
}
else {
panic('to implement: startup manager only supports screen, systemd and zinit for now')

View File

@@ -33,8 +33,6 @@ pub fn (mut t UnixSocketTransport) send(request string, params SendParams) !stri
// Close the socket explicitly
unix.shutdown(socket.sock.handle)
socket.close() or {}
print_backtrace()
console.print_debug('The server did not close the socket; we timed out or another error occurred.')
}
// Set timeout if specified

View File

@@ -2,7 +2,7 @@ module heropods
import incubaid.herolib.osal.core as osal
import incubaid.herolib.virt.crun
import incubaid.herolib.installers.virt.herorunner as herorunner_installer
import incubaid.herolib.installers.virt.crun_installer
import os
// ContainerImageType defines the available container base images
@@ -92,8 +92,8 @@ pub fn (mut self HeroPods) container_new(args ContainerNewArgs) !&Container {
// Ensure crun is installed on host
if !osal.cmd_exists('crun') {
mut herorunner := herorunner_installer.new()!
herorunner.install()!
mut crun_inst := crun_installer.get()!
crun_inst.install(reset: false)!
}
// Create container struct but don't create the actual container in crun yet
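The host-side check above can be exercised on its own; a hedged, self-contained sketch using only the calls and import paths that appear in this hunk:
```vlang
import incubaid.herolib.osal.core as osal
import incubaid.herolib.installers.virt.crun_installer

// install crun through the installer only when the binary is not already on PATH
if !osal.cmd_exists('crun') {
	mut crun_inst := crun_installer.get()!
	crun_inst.install(reset: false)!
}
```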

View File

@@ -3,6 +3,7 @@ module herorun2
import incubaid.herolib.osal.tmux
import incubaid.herolib.osal.sshagent
import incubaid.herolib.osal.core as osal
import incubaid.herolib.core.texttools
import time
import os
@@ -49,9 +50,6 @@ pub fn new_executor(args ExecutorArgs) !Executor {
// Initialize tmux properly
mut t := tmux.new(sessionid: args.container_id)!
// Initialize Hetzner manager properly
mut hetzner := hetznermanager.get() or { hetznermanager.new()! }
return Executor{
node: node
container_id: args.container_id
@@ -61,7 +59,6 @@ pub fn new_executor(args ExecutorArgs) !Executor {
session_name: args.container_id
window_name: 'main'
agent: agent
hetzner: hetzner
}
}