Merge branch 'development' into development_nile_installers

This commit is contained in:
Mahmoud-Emad
2025-11-23 11:06:51 +02:00
19 changed files with 3765 additions and 119 deletions

examples/builder/zosbuilder.vsh (executable file)

@@ -0,0 +1,391 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.builder
import incubaid.herolib.core.pathlib
import os
// Configuration for the remote builder
// Update these values for your remote machine
const remote_host = 'root@65.109.31.171' // Change to your remote host
const remote_port = 22 // SSH port
// Build configuration
const build_dir = '/root/zosbuilder'
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'
// Optional: Set to true to upload kernel to S3
const upload_kernel = false
fn main() {
println('=== Zero OS Builder - Remote Build System ===\n')
// Initialize builder
mut b := builder.new() or {
eprintln('Failed to initialize builder: ${err}')
exit(1)
}
// Connect to remote node
println('Connecting to remote builder: ${remote_host}:${remote_port}')
mut node := b.node_new(
ipaddr: '${remote_host}:${remote_port}'
name: 'zosbuilder'
) or {
eprintln('Failed to connect to remote node: ${err}')
exit(1)
}
// Run the build process
build_zos(mut node) or {
eprintln('Build failed: ${err}')
exit(1)
}
println('\n=== Build completed successfully! ===')
}
fn build_zos(mut node builder.Node) ! {
println('\n--- Step 1: Installing prerequisites ---')
install_prerequisites(mut node)!
println('\n--- Step 2: Cloning zosbuilder repository ---')
clone_repository(mut node)!
println('\n--- Step 3: Creating RFS configuration ---')
create_rfs_config(mut node)!
println('\n--- Step 4: Running build ---')
run_build(mut node)!
println('\n--- Step 5: Checking build artifacts ---')
check_artifacts(mut node)!
}
fn install_prerequisites(mut node builder.Node) ! {
println('Detecting platform...')
// Check platform type
if node.platform == .ubuntu {
println('Installing Ubuntu/Debian prerequisites...')
// Update package list and install all required packages
node.exec_cmd(
cmd: '
apt-get update
apt-get install -y \\
build-essential \\
upx-ucl \\
binutils \\
git \\
wget \\
curl \\
qemu-system-x86 \\
podman \\
musl-tools \\
cpio \\
xz-utils \\
bc \\
flex \\
bison \\
libelf-dev \\
libssl-dev
# Install rustup and Rust toolchain
if ! command -v rustup &> /dev/null; then
echo "Installing rustup..."
curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
source "\$HOME/.cargo/env"
fi
# Add Rust musl target
source "\$HOME/.cargo/env"
rustup target add x86_64-unknown-linux-musl
'
name: 'install_ubuntu_packages'
reset: true
)!
} else if node.platform == .alpine {
println('Installing Alpine prerequisites...')
node.exec_cmd(
cmd: '
apk add --no-cache \\
build-base \\
rust \\
cargo \\
upx \\
git \\
wget \\
qemu-system-x86 \\
podman
# Add Rust musl target
rustup target add x86_64-unknown-linux-musl || echo "rustup not available"
'
name: 'install_alpine_packages'
reset: true
)!
} else {
return error('Unsupported platform: ${node.platform}. Only Ubuntu/Debian and Alpine are supported.')
}
println('Prerequisites installed successfully')
}
fn clone_repository(mut node builder.Node) ! {
// Clean up disk space first
println('Cleaning up disk space...')
node.exec_cmd(
cmd: '
# Remove old build directories if they exist
rm -rf ${build_dir} || true
# Clean up podman/docker cache to free space
podman system prune -af || true
# Clean up package manager cache
if command -v apt-get &> /dev/null; then
apt-get clean || true
fi
# Show disk space
df -h /
'
name: 'cleanup_disk_space'
stdout: true
)!
// Clone the repository
println('Cloning from ${repo_url}...')
node.exec_cmd(
cmd: '
git clone ${repo_url} ${build_dir}
cd ${build_dir}
git log -1 --oneline
'
name: 'clone_zosbuilder'
stdout: true
)!
println('Repository cloned successfully')
}
fn create_rfs_config(mut node builder.Node) ! {
println('Creating config/rfs.conf...')
rfs_config := 'S3_ENDPOINT="http://wizenoze.grid.tf:3900"
S3_REGION="garage"
S3_BUCKET="zos"
S3_PREFIX="store"
S3_ACCESS_KEY="<put key here>"
S3_SECRET_KEY="<put key here>"
WEB_ENDPOINT=""
MANIFESTS_SUBPATH="flists"
READ_ACCESS_KEY="<put key here>"
READ_SECRET_KEY="<put key here>"
ROUTE_ENDPOINT="http://wizenoze.grid.tf:3900"
ROUTE_PATH="/zos/store"
ROUTE_REGION="garage"
KEEP_S3_FALLBACK="false"
UPLOAD_MANIFESTS="true"
'
// Create config directory if it doesn't exist
node.exec_cmd(
cmd: 'mkdir -p ${build_dir}/config'
name: 'create_config_dir'
stdout: false
)!
// Write the RFS configuration file
node.file_write('${build_dir}/config/rfs.conf', rfs_config)!
// Verify the file was created
result := node.exec(
cmd: 'cat ${build_dir}/config/rfs.conf'
stdout: false
)!
println('RFS configuration created successfully')
println('Config preview:')
println(result)
// Skip youki component by removing it from sources.conf
println('\nRemoving youki from sources.conf (requires SSH keys)...')
node.exec_cmd(
cmd: '
# Remove any line containing youki from sources.conf
grep -v "youki" ${build_dir}/config/sources.conf > ${build_dir}/config/sources.conf.tmp
mv ${build_dir}/config/sources.conf.tmp ${build_dir}/config/sources.conf
# Verify it was removed
echo "Updated sources.conf:"
cat ${build_dir}/config/sources.conf
'
name: 'remove_youki'
stdout: true
)!
println('youki component skipped')
}
fn run_build(mut node builder.Node) ! {
println('Starting build process...')
println('This may take 15-30 minutes depending on your system...')
println('Status updates will be printed every 2 minutes...\n')
// Check disk space before building
println('Checking disk space...')
disk_info := node.exec(
cmd: 'df -h ${build_dir}'
stdout: false
)!
println(disk_info)
// Clean up any previous build artifacts and corrupted databases
println('Cleaning up previous build artifacts...')
node.exec_cmd(
cmd: '
cd ${build_dir}
# Remove dist directory to clean up any corrupted databases
rm -rf dist/
# Clean up any temporary files
rm -rf /tmp/rfs-* || true
# Show available disk space after cleanup
df -h ${build_dir}
'
name: 'cleanup_before_build'
stdout: true
)!
// Make scripts executable and run build with periodic status messages
mut build_cmd := '
cd ${build_dir}
# Source Rust environment
source "\$HOME/.cargo/env"
# Make scripts executable
chmod +x scripts/build.sh scripts/clean.sh
# Set environment variables
export UPLOAD_KERNEL=${upload_kernel}
export UPLOAD_MANIFESTS=false
# Create a wrapper script that prints status every 2 minutes
cat > /tmp/build_with_status.sh << "EOF"
#!/bin/bash
set -e
# Source Rust environment
source "\$HOME/.cargo/env"
# Start the build in background
./scripts/build.sh &
BUILD_PID=\$!
# Print status every 2 minutes while build is running
COUNTER=0
while kill -0 \$BUILD_PID 2>/dev/null; do
sleep 120
COUNTER=\$((COUNTER + 2))
echo ""
echo "=== Build still in progress... (\${COUNTER} minutes elapsed) ==="
echo ""
done
# Wait for the build to finish and capture its exit code
# ('|| EXIT_CODE=\$?' keeps 'set -e' from aborting before we can report failure)
EXIT_CODE=0
wait \$BUILD_PID || EXIT_CODE=\$?
if [ \$EXIT_CODE -eq 0 ]; then
echo ""
echo "=== Build completed successfully after \${COUNTER} minutes ==="
else
echo ""
echo "=== Build failed after \${COUNTER} minutes with exit code \$EXIT_CODE ==="
fi
exit \$EXIT_CODE
EOF
chmod +x /tmp/build_with_status.sh
/tmp/build_with_status.sh
'
// Execute build with output
result := node.exec_cmd(
cmd: build_cmd
name: 'zos_build'
stdout: true
reset: true
period: 0 // Don't cache, always rebuild
)!
println('\nBuild completed!')
println(result)
}
fn check_artifacts(mut node builder.Node) ! {
println('Checking build artifacts in ${build_dir}/dist/...')
// List the dist directory
result := node.exec(
cmd: 'ls -lh ${build_dir}/dist/'
stdout: true
)!
println('\nBuild artifacts:')
println(result)
// Check for expected files
vmlinuz_exists := node.file_exists('${build_dir}/dist/vmlinuz.efi')
initramfs_exists := node.file_exists('${build_dir}/dist/initramfs.cpio.xz')
if vmlinuz_exists && initramfs_exists {
println('\nBuild artifacts created successfully:')
println(' - vmlinuz.efi (Kernel with embedded initramfs)')
println(' - initramfs.cpio.xz (Standalone initramfs archive)')
// Get file sizes
size_info := node.exec(
cmd: 'du -h ${build_dir}/dist/vmlinuz.efi ${build_dir}/dist/initramfs.cpio.xz'
stdout: false
)!
println('\nFile sizes:')
println(size_info)
} else {
return error('Build artifacts not found. Build may have failed.')
}
}
// Download artifacts to local machine
fn download_artifacts(mut node builder.Node, local_dest string) ! {
println('Downloading artifacts to local machine...')
mut dest_path := pathlib.get_dir(path: local_dest, create: true)!
println('Downloading to ${dest_path.path}...')
// Download the entire dist directory
node.download(
source: '${build_dir}/dist/'
dest: dest_path.path
)!
println('\nArtifacts downloaded successfully to ${dest_path.path}')
// List the downloaded files locally (they now live on this machine, not the remote)
println('\nDownloaded files:')
result := os.execute('ls -lh ${dest_path.path}')
if result.exit_code != 0 {
println('Could not list local files')
return
}
println(result.output)
}


@@ -0,0 +1,224 @@
# Zero OS Builder - Remote Build System
This example demonstrates how to build [Zero OS (zosbuilder)](https://git.ourworld.tf/tfgrid/zosbuilder) on a remote machine using the herolib builder module.
## Overview
The zosbuilder creates a Zero OS Alpine Initramfs with:
- Alpine Linux 3.22 base
- Custom kernel with embedded initramfs
- ThreeFold components (zinit, rfs, mycelium, zosstorage)
- Optimized size with UPX compression
- Two-stage module loading
## Prerequisites
### Local Machine
- V compiler installed
- SSH access to a remote build machine
- herolib installed
### Remote Build Machine
The script will automatically install these on the remote machine:
- **Ubuntu/Debian**: build-essential, upx-ucl, binutils, git, wget, curl, qemu-system-x86, podman, musl-tools, cpio, xz-utils, bc, flex, bison, libelf-dev, libssl-dev, plus rustup with the stable toolchain
- **Alpine Linux**: build-base, rust, cargo, upx, git, wget, qemu-system-x86, podman
- Rust musl target (x86_64-unknown-linux-musl)
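Before running the script, you can check which of these tools are already present on the remote machine. A quick shell sketch (adjust the tool list as needed):

```bash
# Check which required build tools already exist on the remote machine
for t in git wget curl podman upx qemu-system-x86_64 rustup cargo; do
  command -v "$t" >/dev/null 2>&1 && echo "$t: ok" || echo "$t: missing"
done
```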
## Configuration
Edit the constants in `zosbuilder.vsh`:
```v
// Remote machine connection
const remote_host = 'root@195.192.213.2' // Your remote host
const remote_port = 22 // SSH port

// Build configuration
const build_dir = '/root/zosbuilder' // Build directory on remote
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'

// Optional: Upload kernel to S3
const upload_kernel = false
```
## Usage
### Basic Build
```bash
# Make the script executable
chmod +x zosbuilder.vsh
# Run the build
./zosbuilder.vsh
```
### What the Script Does
1. **Connects to Remote Machine**: Establishes SSH connection to the build server
2. **Installs Prerequisites**: Automatically installs all required build tools
3. **Clones Repository**: Fetches the latest zosbuilder code
4. **Runs Build**: Executes the build process (takes 15-30 minutes)
5. **Verifies Artifacts**: Checks that build outputs were created successfully
### Build Output
The build creates two main artifacts in `${build_dir}/dist/`:
- `vmlinuz.efi` - Kernel with embedded initramfs (bootable)
- `initramfs.cpio.xz` - Standalone initramfs archive
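If you want to verify by hand, both artifacts sit at fixed paths on the builder (assuming the default `build_dir` above):

```bash
# On the remote build machine: confirm both artifacts exist and check their sizes
ls -lh /root/zosbuilder/dist/vmlinuz.efi /root/zosbuilder/dist/initramfs.cpio.xz
```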
## Build Process Details
The zosbuilder follows these phases:
### Phase 1: Environment Setup
- Creates build directories
- Installs build dependencies
- Sets up Rust musl target
### Phase 2: Alpine Base
- Downloads Alpine 3.22 miniroot
- Extracts to initramfs directory
- Installs packages from config/packages.list
### Phase 3: Component Building
- Builds zinit (init system)
- Builds rfs (remote filesystem)
- Builds mycelium (networking)
- Builds zosstorage (storage orchestration)
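Conceptually, each Rust component is compiled as a static musl binary, roughly like this (a sketch, not the exact zosbuilder invocation):

```bash
# Build a component (e.g. zinit) as a statically linked musl binary
cargo build --release --target x86_64-unknown-linux-musl
strip target/x86_64-unknown-linux-musl/release/zinit
```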
### Phase 4: System Configuration
- Replaces /sbin/init with zinit
- Copies zinit configuration
- Sets up 2-stage module loading
- Configures system services
### Phase 5: Optimization
- Removes docs, man pages, locales
- Strips executables and libraries
- UPX compresses all binaries
- Aggressive cleanup
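The optimization pass boils down to commands of this shape (illustrative only; the real commands and flags live in the zosbuilder scripts):

```bash
# Drop documentation, strip symbols, then UPX-compress executables
rm -rf initramfs/usr/share/doc initramfs/usr/share/man initramfs/usr/share/locale
find initramfs -type f -name '*.so*' -exec strip --strip-unneeded {} + 2>/dev/null
find initramfs/bin initramfs/sbin -type f -executable -exec upx --best --lzma {} + 2>/dev/null
```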
### Phase 6: Packaging
- Creates initramfs.cpio.xz with XZ compression
- Builds kernel with embedded initramfs
- Generates vmlinuz.efi
- Optionally uploads to S3
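The packing step is essentially a CPIO archive compressed with XZ, sketched below (the kernel's XZ decompressor requires the crc32 check):

```bash
# Pack the initramfs tree into dist/initramfs.cpio.xz
cd initramfs
find . -print0 | cpio --null -o -H newc | xz -9 --check=crc32 > ../dist/initramfs.cpio.xz
```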
## Advanced Usage
### Download Artifacts to Local Machine
Add this to your script after the build completes:
```v
// Download artifacts to local machine
download_artifacts(mut node, '/tmp/zos-artifacts') or {
eprintln('Failed to download artifacts: ${err}')
}
```
### Custom Build Configuration
You can modify the build by editing files on the remote machine before building:
```v
// After cloning, before building
node.file_write('${build_dir}/config/packages.list', 'your custom packages')!
```
### Rebuild Without Re-cloning
To rebuild without re-cloning the repository, modify the script to skip the clone step:
```v
// Comment out the clone_repository call
// clone_repository(mut node)!
// Or just run the build directly
node.exec_cmd(
cmd: 'cd ${build_dir} && ./scripts/build.sh'
name: 'zos_rebuild'
)!
```
## Testing the Build
After building, you can test the kernel with QEMU:
```bash
# On the remote machine
cd /root/zosbuilder
./scripts/test-qemu.sh
```
## Troubleshooting
### Build Fails
1. Check the build output for specific errors
2. Verify all prerequisites are installed
3. Ensure sufficient disk space (at least 5GB)
4. Check internet connectivity for downloading components
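The disk-space and connectivity checks can be run directly on the remote machine (paths from this example's configuration):

```bash
# Pre-flight checks before retrying a failed build
df -h /root/zosbuilder                      # at least ~5GB free recommended
curl -sI https://git.ourworld.tf | head -1  # is the repository host reachable?
```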
### SSH Connection Issues
1. Verify SSH access: `ssh root@195.192.213.2`
2. Check SSH key authentication is set up
3. Verify the remote host and port are correct
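A quick non-interactive test of all three at once (substitute your own `remote_host`/`remote_port`):

```bash
# BatchMode makes ssh fail fast instead of prompting when key auth is not set up
ssh -p 22 -o BatchMode=yes root@195.192.213.2 'echo ssh-ok'
```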
### Missing Dependencies
The script automatically installs dependencies, but if manual installation is needed:
**Ubuntu/Debian:**
```bash
sudo apt-get update
sudo apt-get install -y build-essential upx-ucl binutils git wget curl qemu-system-x86 podman musl-tools cpio xz-utils bc flex bison libelf-dev libssl-dev
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
rustup target add x86_64-unknown-linux-musl
```
**Alpine Linux:**
```bash
apk add --no-cache build-base rust cargo upx git wget qemu-system-x86 podman
# rustup is usually absent when Rust comes from apk; the apk toolchain already targets musl
rustup target add x86_64-unknown-linux-musl || echo "rustup not available"
```
## Integration with CI/CD
This builder can be integrated into CI/CD pipelines:
```v
// Example: Build and upload to artifact storage
fn ci_build() ! {
mut b := builder.new()!
mut node := b.node_new(ipaddr: '${ci_builder_host}')!
build_zos(mut node)!
// Upload to artifact storage
node.exec_cmd(
cmd: 's3cmd put ${build_dir}/dist/* s3://artifacts/zos/'
name: 'upload_artifacts'
)!
}
```
## Related Examples
- `simple.vsh` - Basic builder usage
- `remote_executor/` - Remote code execution
- `simple_ip4.vsh` - IPv4 connection example
- `simple_ip6.vsh` - IPv6 connection example
## References
- [zosbuilder Repository](https://git.ourworld.tf/tfgrid/zosbuilder)
- [herolib Builder Documentation](../../lib/builder/readme.md)
- [Zero OS Documentation](https://manual.grid.tf/)
## License
This example follows the same license as herolib.


@@ -5,7 +5,7 @@ import incubaid.herolib.schemas.openrpc
import os
// 1. Create a new server instance
mut server := heroserver.new(port: 8080, auth_enabled: false)!
mut server := heroserver.new(port: 8081, auth_enabled: false)!
// 2. Create and register your OpenRPC handlers
// These handlers must conform to the `openrpc.OpenRPCHandler` interface.

File diff suppressed because it is too large.


@@ -0,0 +1,69 @@
{
"openrpc": "1.2.6",
"info": {
"title": "Comment Service",
"description": "A simple service for managing comments.",
"version": "1.0.0"
},
"methods": [
{
"name": "add_comment",
"summary": "Add a new comment",
"params": [
{
"name": "text",
"description": "The content of the comment.",
"required": true,
"schema": {
"type": "string"
}
}
],
"result": {
"name": "comment_id",
"description": "The ID of the newly created comment.",
"schema": {
"type": "string"
}
}
},
{
"name": "get_comment",
"summary": "Get a comment by ID",
"description": "Retrieves a specific comment using its unique identifier.",
"params": [
{
"name": "id",
"description": "The unique identifier of the comment to retrieve.",
"required": true,
"schema": {
"type": "number",
"example": "1"
}
},
{
"name": "include_metadata",
"description": "Whether to include metadata in the response.",
"required": false,
"schema": {
"type": "boolean",
"example": true
}
}
],
"result": {
"name": "comment",
"description": "The requested comment object.",
"schema": {
"type": "object",
"example": {
"id": 1,
"text": "This is a sample comment",
"created_at": "2024-01-15T10:30:00Z"
}
}
}
}
],
"components": {}
}


@@ -14,7 +14,7 @@ println('🚀 Starting All Horus Services')
// Step 1: Start Coordinator
println('\n Step 1/5: Starting Coordinator...')
mut coordinator_installer := coordinator.get()!
mut coordinator_installer := coordinator.get(name: 'ayman', create: true)!
coordinator_installer.start()!
if coordinator_installer.running()! {
println(' Coordinator is running on HTTP:${coordinator_installer.http_port} WS:${coordinator_installer.ws_port}')
@@ -24,7 +24,7 @@ if coordinator_installer.running()! {
// Step 2: Start Supervisor
println('\n Step 2/5: Starting Supervisor...')
mut supervisor_inst := supervisor.get()!
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.start()!
if supervisor_inst.running()! {
println(' Supervisor is running on HTTP:${supervisor_inst.http_port} WS:${supervisor_inst.ws_port}')
@@ -34,7 +34,7 @@ if supervisor_inst.running()! {
// Step 3: Start Hero Runner
println('\n Step 3/5: Starting Hero Runner...')
mut hero_runner := herorunner.get()!
mut hero_runner := herorunner.get(create: true)!
hero_runner.start()!
if hero_runner.running()! {
println(' Hero Runner is running')
@@ -44,7 +44,7 @@ if hero_runner.running()! {
// Step 4: Start Osiris Runner
println('\n Step 4/5: Starting Osiris Runner...')
mut osiris_runner := osirisrunner.get()!
mut osiris_runner := osirisrunner.get(create: true)!
osiris_runner.start()!
if osiris_runner.running()! {
println(' Osiris Runner is running')
@@ -54,7 +54,7 @@ if osiris_runner.running()! {
// Step 5: Start SAL Runner
println('\n Step 5/5: Starting SAL Runner...')
mut sal_runner := salrunner.get()!
mut sal_runner := salrunner.get(create: true)!
sal_runner.start()!
if sal_runner.running()! {
println(' SAL Runner is running')
@@ -65,11 +65,20 @@ if sal_runner.running()! {
println('🎉 All Horus services started!')
println('\n📊 Service Status:')
println(' Coordinator: ${if coordinator_installer.running()! { " Running" } else { " Stopped" }} (http://127.0.0.1:${coordinator_installer.http_port})')
println(' Supervisor: ${if supervisor_inst.running()! { " Running" } else { " Stopped" }} (http://127.0.0.1:${supervisor_inst.http_port})')
println(' Hero Runner: ${if hero_runner.running()! { " Running" } else { " Stopped" }}')
println(' Osiris Runner: ${if osiris_runner.running()! { " Running" } else { " Stopped" }}')
println(' SAL Runner: ${if sal_runner.running()! { " Running" } else { " Stopped" }}')
coordinator_status := if coordinator_installer.running()! { ' Running' } else { ' Stopped' }
println(' Coordinator: ${coordinator_status} (http://127.0.0.1:${coordinator_installer.http_port})')
supervisor_status := if supervisor_inst.running()! { ' Running' } else { ' Stopped' }
println(' Supervisor: ${supervisor_status} (http://127.0.0.1:${supervisor_inst.http_port})')
hero_runner_status := if hero_runner.running()! { ' Running' } else { ' Stopped' }
println(' Hero Runner: ${hero_runner_status}')
osiris_runner_status := if osiris_runner.running()! { ' Running' } else { ' Stopped' }
println(' Osiris Runner: ${osiris_runner_status}')
sal_runner_status := if sal_runner.running()! { ' Running' } else { ' Stopped' }
println(' SAL Runner: ${sal_runner_status}')
println('\n💡 Next Steps:')
println(' To stop services, run: ./horus_stop_all.vsh')


@@ -192,6 +192,11 @@ fn test_prd_list() ! {
mut db_prd := DBPrd{
db: &mydb
}
// Clear any existing PRDs before running the test
existing_prds := db_prd.list()!
for prd_id in existing_prds {
db_prd.delete[ProductRequirementsDoc](u32(prd_id))!
}
// Create multiple PRDs
for i in 0 .. 3 {


@@ -2,7 +2,6 @@ module coordinator
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core.pathlib
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
@@ -28,12 +27,14 @@ fn (self &Coordinator) startupcmd() ![]startupmanager.ZProcessNewArgs {
fn (self &Coordinator) running_check() !bool {
// Check if the process is running by checking the HTTP port
// The coordinator returns 405 for GET requests (requires POST), so we check if we get any response
res := osal.exec(
cmd: 'curl -fsSL http://127.0.0.1:${self.http_port} || exit 1'
cmd: 'curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:${self.http_port}'
stdout: false
raise_error: false
)!
return res.exit_code == 0
// Any HTTP response code (including 405) means the server is running
return res.output.len > 0 && res.output.int() > 0
}
fn (self &Coordinator) start_pre() ! {


@@ -154,11 +154,12 @@ pub fn play(mut plbook PlayBook) ! {
}
mut other_actions := plbook.find(filter: 'coordinator.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart', 'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut coordinator_obj := get(name: name)!
mut coordinator_obj := get(name: name, create: true)!
console.print_debug('action object:\n${coordinator_obj}')
if other_action.name == 'destroy' || reset {


@@ -12,9 +12,16 @@ import os
fn (self &Herorunner) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'herorunner'
cmd: '${self.binary_path} --redis-addr ${self.redis_addr}'
cmd: '${self.binary_path} --redis-url ${redis_url} 12001'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level


@@ -142,11 +142,12 @@ pub fn play(mut plbook PlayBook) ! {
}
mut other_actions := plbook.find(filter: 'herorunner.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart', 'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut herorunner_obj := get(name: name)!
mut herorunner_obj := get(name: name, create: true)!
console.print_debug('action object:\n${herorunner_obj}')
if other_action.name == 'destroy' || reset {


@@ -12,9 +12,16 @@ import os
fn (self &Osirisrunner) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'runner_osiris'
cmd: '${self.binary_path} --redis-addr ${self.redis_addr}'
cmd: '${self.binary_path} --redis-url ${redis_url} 12002'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level


@@ -144,11 +144,12 @@ pub fn play(mut plbook PlayBook) ! {
}
mut other_actions := plbook.find(filter: 'osirisrunner.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart', 'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut osirisrunner_obj := get(name: name)!
mut osirisrunner_obj := get(name: name, create: true)!
console.print_debug('action object:\n${osirisrunner_obj}')
if other_action.name == 'destroy' || reset {


@@ -12,9 +12,16 @@ import os
fn (self &Salrunner) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'runner_sal'
cmd: '${self.binary_path} --redis-addr ${self.redis_addr}'
cmd: '${self.binary_path} --redis-url ${redis_url} 12003'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level


@@ -144,11 +144,12 @@ pub fn play(mut plbook PlayBook) ! {
}
mut other_actions := plbook.find(filter: 'salrunner.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart', 'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut salrunner_obj := get(name: name)!
mut salrunner_obj := get(name: name, create: true)!
console.print_debug('action object:\n${salrunner_obj}')
if other_action.name == 'destroy' || reset {


@@ -13,9 +13,16 @@ import os
fn (self &Supervisor) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Ensure redis_addr has the redis:// prefix
redis_url := if self.redis_addr.starts_with('redis://') {
self.redis_addr
} else {
'redis://${self.redis_addr}'
}
res << startupmanager.ZProcessNewArgs{
name: 'supervisor'
cmd: '${self.binary_path} --redis-addr ${self.redis_addr} --api-http-port ${self.http_port} --api-ws-port ${self.ws_port}'
cmd: '${self.binary_path} --redis-url ${redis_url} --port ${self.http_port} --admin-secret mysecret'
env: {
'HOME': os.home_dir()
'RUST_LOG': self.log_level
@@ -28,12 +35,14 @@ fn (self &Supervisor) startupcmd() ![]startupmanager.ZProcessNewArgs {
fn (self &Supervisor) running_check() !bool {
// Check if the process is running by checking the HTTP port
// The supervisor returns 405 for GET requests (requires POST), so we check if we get any response
res := osal.exec(
cmd: 'curl -fsSL http://127.0.0.1:${self.http_port} || exit 1'
cmd: 'curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:${self.http_port}'
stdout: false
raise_error: false
)!
return res.exit_code == 0
// Any HTTP response code (including 405) means the server is running
return res.output.len > 0 && res.output.int() > 0
}
fn (self &Supervisor) start_pre() ! {


@@ -44,14 +44,19 @@ pub fn new(args ArgsGet) !&Supervisor {
pub fn get(args ArgsGet) !&Supervisor {
mut context := base.context()!
supervisor_default = args.name
if args.fromdb || args.name !in supervisor_global {
mut name := if args.name == 'default' && supervisor_default.len > 0 {
supervisor_default
} else {
args.name
}
supervisor_default = name
if args.fromdb || name !in supervisor_global {
mut r := context.redis()!
if r.hexists('context:supervisor', args.name)! {
data := r.hget('context:supervisor', args.name)!
if r.hexists('context:supervisor', name)! {
data := r.hget('context:supervisor', name)!
if data.len == 0 {
print_backtrace()
return error('Supervisor with name: ${args.name} does not exist, prob bug.')
return error('Supervisor with name: ${name} does not exist, prob bug.')
}
mut obj := json.decode(Supervisor, data)!
set_in_mem(obj)!
@@ -60,14 +65,14 @@ pub fn get(args ArgsGet) !&Supervisor {
new(args)!
} else {
print_backtrace()
return error("Supervisor with name '${args.name}' does not exist")
return error("Supervisor with name '${name}' does not exist")
}
}
return get(name: args.name)! // no longer from db nor create
return get(name: name)! // no longer from db nor create
}
return supervisor_global[args.name] or {
return supervisor_global[name] or {
print_backtrace()
return error('could not get config for supervisor with name:${args.name}')
return error('could not get config for supervisor with name:${name}')
}
}
@@ -148,11 +153,12 @@ pub fn play(mut plbook PlayBook) ! {
}
mut other_actions := plbook.find(filter: 'supervisor.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart', 'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
if other_action.name in ['destroy', 'install', 'build', 'start', 'stop', 'restart',
'start_pre', 'start_post', 'stop_pre', 'stop_post'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut supervisor_obj := get(name: name)!
mut supervisor_obj := get(name: name, create: true)!
console.print_debug('action object:\n${supervisor_obj}')
if other_action.name == 'destroy' || reset {
@@ -167,6 +173,13 @@ pub fn play(mut plbook PlayBook) ! {
console.print_debug('install action supervisor.build')
supervisor_obj.build()!
}
}
if other_action.name in ['start', 'stop', 'restart', 'start_pre', 'start_post', 'stop_pre',
'stop_post'] {
mut p := other_action.params
name := p.get('name')!
mut supervisor_obj := get(name: name, create: true)!
console.print_debug('action object:\n${supervisor_obj}')
if other_action.name == 'start' {
console.print_debug('install action supervisor.${other_action.name}')
supervisor_obj.start()!
@@ -255,8 +268,6 @@ pub fn (mut self Supervisor) start() ! {
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
println('debugzo ${sm}')
console.print_debug('installer: supervisor starting with ${zprocess.startuptype}...')
sm.new(zprocess)!


@@ -25,7 +25,6 @@ pub fn get(cat StartupManagerType) !StartupManager {
} else {
return error('zinit not found ${err}')
}
}
.auto {
// Try to get a ZinitRPC client and check if it can discover RPC methods.
@@ -103,19 +102,43 @@ pub fn (mut sm StartupManager) new(args ZProcessNewArgs) ! {
shutdown_timeout: 0 // Default, or add to ZProcessNewArgs if needed
}
// Check if service already exists
existing_service := zinit_client.service_get(args.name) or { zinit.ServiceConfig{} }
// If service exists, stop monitoring, stop, and delete it first
if existing_service.exec.len > 0 {
console.print_debug('startupmanager: service ${args.name} already exists, cleaning up...')
// Stop the service first
zinit_client.service_stop(args.name) or {
console.print_debug('startupmanager: failed to stop service ${args.name}: ${err}')
}
// Forget (stop monitoring) the service
zinit_client.service_forget(args.name) or {
console.print_debug('startupmanager: failed to forget service ${args.name}: ${err}')
}
// Delete the service configuration
zinit_client.service_delete(args.name) or {
console.print_debug('startupmanager: failed to delete service ${args.name}: ${err}')
}
}
// Create the service configuration file in zinit
zinit_client.service_create(args.name, service_config) or {
return error('startupmanager: failed to create zinit service ${args.name}: ${err}')
}
// If 'start' is true, monitor and start the service immediately after creation
if args.start {
// Monitor loads the config and starts monitoring the service
zinit_client.service_monitor(args.name) or {
return error('startupmanager: failed to monitor zinit service ${args.name}: ${err}')
}
}
}
else {
panic('to implement, startup manager only support screen & systemd for now: ${mycat}')
}
}
// If 'start' is true, also monitor and start the service
if args.start {
sm.start(args.name)!
}
}
pub fn (mut sm StartupManager) start(name string) ! {
@@ -140,11 +163,6 @@ pub fn (mut sm StartupManager) start(name string) ! {
zinit_client.service_start(name) or {
return error('startupmanager: Failed to start zinit service ${name}: ${err}')
}
// Monitor loads the config, if it's new it starts it.
// If the service is already managed, this will bring it back up.
zinit_client.service_monitor(name) or {
return error('startupmanager: Failed to monitor zinit service ${name}: ${err}')
}
}
else {
panic('to implement, startup manager only support screen, systemd and zinit for now')


@@ -33,8 +33,6 @@ pub fn (mut t UnixSocketTransport) send(request string, params SendParams) !stri
// Close the socket explicitly
unix.shutdown(socket.sock.handle)
socket.close() or {}
// print_backtrace()
console.print_debug('The server did not close the socket, we did timeout or there was other error.')
}
// Set timeout if specified