diff --git a/examples/virt/heropods/heropods.vsh b/examples/virt/heropods/heropods.vsh
new file mode 100755
index 00000000..7fc57890
--- /dev/null
+++ b/examples/virt/heropods/heropods.vsh
@@ -0,0 +1,62 @@
+#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
+
+import freeflowuniverse.herolib.virt.heropods
+
+// Initialize factory
+mut factory := heropods.new(
+ reset: false
+ use_podman: true
+) or { panic('Failed to init ContainerFactory: ${err}') }
+
+println('=== HeroPods Refactored API Demo ===')
+
+// Step 1: factory.new() now only creates a container definition/handle
+// It does NOT create the actual container in the backend yet
+mut container := factory.new(
+ name: 'myalpine'
+ image: .custom
+ custom_image_name: 'alpine_3_20'
+ docker_url: 'docker.io/library/alpine:3.20'
+)!
+
+println('✓ Container definition created: ${container.name}')
+println(' (No actual container created in backend yet)')
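+
+// Optional: factory.get() (added in this PR) returns the same cached handle
+// without touching the backend
+same := factory.get(name: 'myalpine')!
+println('✓ Handle retrieved from factory cache: ${same.name}')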
+
+// Step 2: container.start() handles creation and starting
+// - Checks if container exists in backend
+// - Creates it if it doesn't exist
+// - Starts it if it exists but is stopped
+println('\n--- First start() call ---')
+container.start()!
+println('✓ Container started successfully')
+
+// Step 3: Multiple start() calls are now idempotent
+println('\n--- Second start() call (should be idempotent) ---')
+container.start()!
+println('✓ Second start() call successful - no errors!')
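+
+// Optional: verify the state after the repeated call; status() (added in
+// this PR) returns a ContainerStatus enum such as .running
+status := container.status()!
+println('Status after second start(): ${status}')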
+
+// Step 4: Execute commands in the container and save results
+println('\n--- Executing commands in container ---')
+result1 := container.exec(cmd: 'ls -la /')!
+println('✓ Command executed: ls -la /')
+println('Result: ${result1}')
+
+result2 := container.exec(cmd: 'echo "Hello from container!"')!
+println('✓ Command executed: echo "Hello from container!"')
+println('Result: ${result2}')
+
+result3 := container.exec(cmd: 'uname -a')!
+println('✓ Command executed: uname -a')
+println('Result: ${result3}')
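+
+// Optional: sample the resource helpers added in this PR (they read cgroup
+// stats on the host, so values are best-effort and may be 0 on some systems)
+cpu := container.cpu_usage()!
+mem := container.mem_usage()!
+println('CPU usage: ${cpu}, memory: ${mem} MB')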
+
+// Step 5: container.delete() works naturally on the instance
+println('\n--- Deleting container ---')
+container.delete()!
+println('✓ Container deleted successfully')
+
+println('\n=== Demo completed! ===')
+println('The refactored API now works as expected:')
+println('- factory.new() creates definition only')
+println('- container.start() is idempotent')
+println('- container.exec() works and returns results')
+println('- container.delete() works on instances')
diff --git a/examples/virt/heropods/runcommands.vsh b/examples/virt/heropods/runcommands.vsh
new file mode 100644
index 00000000..f54fc43b
--- /dev/null
+++ b/examples/virt/heropods/runcommands.vsh
@@ -0,0 +1,19 @@
+#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
+
+import freeflowuniverse.herolib.virt.heropods
+
+mut factory := heropods.new(
+ reset: false
+ use_podman: true
+) or { panic('Failed to init ContainerFactory: ${err}') }
+
+mut container := factory.new(
+ name: 'myalpine'
+ image: .custom
+ custom_image_name: 'alpine_3_20'
+ docker_url: 'docker.io/library/alpine:3.20'
+)!
+
+container.start()!
+ls_result := container.exec(cmd: 'ls')!
+println(ls_result)
+container.stop()!
diff --git a/examples/virt/herorun/archive/README.md b/examples/virt/herorun/archive/README.md
deleted file mode 100644
index 78befbe8..00000000
--- a/examples/virt/herorun/archive/README.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# HeroRun - AI Agent Optimized Container Management
-
-**Production-ready scripts for fast remote command execution**
-
-## 🎯 Purpose
-
-Optimized for AI agents that need rapid, reliable command execution with minimal latency and clean output.
-
-## 🏗️ Base Image Types
-
-HeroRun supports different base images through the `BaseImage` enum:
-
-```v
-pub enum BaseImage {
- alpine // Standard Alpine Linux minirootfs (~5MB)
- alpine_python // Alpine Linux with Python 3 pre-installed
-}
-```
-
-### Usage Examples
-
-**Standard Alpine Container:**
-
-```v
-base_image: .alpine // Default - minimal Alpine Linux
-```
-
-**Alpine with Python:**
-
-```v
-base_image: .alpine_python // Python 3 + pip pre-installed
-```
-
-## 📋 Three Scripts
-
-### 1. `setup.vsh` - Environment Preparation
-
-Creates container infrastructure on remote node.
-
-```bash
-./setup.vsh
-```
-
-**Output:** `Setup complete`
-
-### 2. `execute.vsh` - Fast Command Execution
-
-Executes commands on remote node with clean output only.
-
-```bash
-./execute.vsh "command" [context_id]
-```
-
-**Examples:**
-
-```bash
-./execute.vsh "ls /containers"
-./execute.vsh "whoami"
-./execute.vsh "echo 'Hello World'"
-```
-
-**Output:** Command result only (no verbose logging)
-
-### 3. `cleanup.vsh` - Complete Teardown
-
-Removes container and cleans up all resources.
-
-```bash
-./cleanup.vsh
-```
-
-**Output:** `Cleanup complete`
-
-## ⚡ Performance Features
-
-- **Clean Output**: Execute returns only command results
-- **No Verbose Logging**: Silent operation for production use
-- **Fast Execution**: Direct SSH without tmux overhead
-- **AI Agent Ready**: Perfect for automated command execution
-
-## 🚀 Usage Pattern
-
-```bash
-# Setup once
-./setup.vsh
-
-# Execute many commands (fast)
-./execute.vsh "ls -la"
-./execute.vsh "ps aux"
-./execute.vsh "df -h"
-
-# Cleanup when done
-./cleanup.vsh
-```
-
-## 🎯 AI Agent Integration
-
-Perfect for AI agents that need:
-
-- Rapid command execution
-- Clean, parseable output
-- Minimal setup overhead
-- Production-ready reliability
-
-Each execute call returns only the command output, making it ideal for AI agents to parse and process results.
diff --git a/examples/virt/herorun/archive/cleanup.vsh b/examples/virt/herorun/archive/cleanup.vsh
deleted file mode 100755
index f4c23e42..00000000
--- a/examples/virt/herorun/archive/cleanup.vsh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-
-// Create user with SSH key using sshagent module
-mut user := herorun.new_user(keyname: 'id_ed25519')!
-
-// Create executor using proper modules
-mut executor := herorun.new_executor(
- node_ip: '65.21.132.119'
- user: 'root'
- container_id: 'ai_agent_container'
- keyname: 'id_ed25519'
-)!
-
-// Cleanup using tmux and osal modules
-executor.cleanup()!
-
-println('Cleanup complete')
diff --git a/examples/virt/herorun/archive/execute.vsh b/examples/virt/herorun/archive/execute.vsh
deleted file mode 100755
index 8c5fc234..00000000
--- a/examples/virt/herorun/archive/execute.vsh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-import os
-
-// Get command from command line args
-if os.args.len < 2 {
- println('Usage: ./execute.vsh "command" [context_id]')
- exit(1)
-}
-
-cmd := os.args[1]
-// context_id := if os.args.len > 2 { os.args[2] } else { 'default' }
-
-// Create user with SSH key using sshagent module
-mut user := herorun.new_user(keyname: 'id_ed25519')!
-
-// Create executor using proper modules
-mut executor := herorun.new_executor(
- node_ip: '65.21.132.119'
- user: 'root'
- container_id: 'ai_agent_container'
- keyname: 'id_ed25519'
-)!
-
-// Execute command using osal module for clean output
-output := executor.execute(cmd)!
-
-// Output only the command result
-print(output)
diff --git a/examples/virt/herorun/archive/setup.vsh b/examples/virt/herorun/archive/setup.vsh
deleted file mode 100755
index dea1db51..00000000
--- a/examples/virt/herorun/archive/setup.vsh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-
-// Create user with SSH key using sshagent module
-mut user := herorun.new_user(keyname: 'id_ed25519')!
-
-// Create executor using proper module integration
-mut executor := herorun.new_executor(
- node_ip: '65.21.132.119'
- user: 'root'
- container_id: 'ai_agent_container'
- keyname: 'id_ed25519'
-)!
-
-// Setup using sshagent, tmux, hetznermanager, and osal modules
-executor.setup()!
-
-println('Setup complete')
diff --git a/examples/virt/herorun/archive/setup_python_alpine.vsh b/examples/virt/herorun/archive/setup_python_alpine.vsh
deleted file mode 100755
index 7b47b55e..00000000
--- a/examples/virt/herorun/archive/setup_python_alpine.vsh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-
-// Create user with SSH key using sshagent module
-mut user := herorun.new_user(keyname: 'id_ed25519')!
-
-// Create executor with Alpine Python base image
-mut executor := herorun.new_executor(
- node_ip: '65.21.132.119'
- user: 'root'
- container_id: 'python_alpine_container'
- keyname: 'id_ed25519'
- image_script: 'examples/virt/herorun/images/python_server.sh'
- base_image: .alpine_python // Use Alpine with Python pre-installed
-)!
-
-// Setup container
-executor.setup()!
-
-// Create container with Python Alpine base and Python server script
-mut container := executor.get_or_create_container(
- name: 'python_alpine_container'
- image_script: 'examples/virt/herorun/images/python_server.sh'
- base_image: .alpine_python
-)!
-
-println('✅ Setup complete with Python Alpine container')
-println('Container: python_alpine_container')
-println('Base image: Alpine Linux with Python 3 pre-installed')
-println('Entry point: python_server.sh')
-
-// Test the container to show Python is available
-println('\n🐍 Testing Python availability...')
-python_test := executor.execute('runc exec python_alpine_container python3 --version') or {
- println('❌ Python test failed: ${err}')
- return
-}
-
-println('✅ Python version: ${python_test}')
-
-println('\n🚀 Running Python HTTP server...')
-println('Note: This will start the server and exit (use runc run for persistent server)')
-
-// Run the container to start the Python server
-result := executor.execute('runc run python_alpine_container') or {
- println('❌ Container execution failed: ${err}')
- return
-}
-
-println('📋 Server output:')
-println(result)
-
-println('\n🎉 Python Alpine container executed successfully!')
-println('💡 The Python HTTP server would run on port 8000 if started persistently')
diff --git a/examples/virt/herorun/archive/setup_with_script.vsh b/examples/virt/herorun/archive/setup_with_script.vsh
deleted file mode 100755
index daa6f178..00000000
--- a/examples/virt/herorun/archive/setup_with_script.vsh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-
-// Create user with SSH key using sshagent module
-mut user := herorun.new_user(keyname: 'id_ed25519')!
-
-// Create executor with image script for Python server
-mut executor := herorun.new_executor(
- node_ip: '65.21.132.119'
- user: 'root'
- container_id: 'python_server_container'
- keyname: 'id_ed25519'
- image_script: 'examples/virt/herorun/images/python_server.sh' // Path to entry point script
-)!
-
-// Setup using sshagent, tmux, hetznermanager, and osal modules
-executor.setup()!
-
-// Create container with the Python server script
-mut container := executor.get_or_create_container(
- name: 'python_server_container'
- image_script: 'examples/virt/herorun/images/python_server.sh'
-)!
-
-println('Setup complete with Python server container')
-println('Container: python_server_container')
-println('Entry point: examples/virt/herorun/images/python_server.sh (Python HTTP server)')
-println('To start the server: runc run python_server_container')
diff --git a/examples/virt/herorun/archive/test_hello_world.vsh b/examples/virt/herorun/archive/test_hello_world.vsh
deleted file mode 100755
index ea70e221..00000000
--- a/examples/virt/herorun/archive/test_hello_world.vsh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-
-// Create user with SSH key using sshagent module
-mut user := herorun.new_user(keyname: 'id_ed25519')!
-
-// Create executor with hello world script
-mut executor := herorun.new_executor(
- node_ip: '65.21.132.119'
- user: 'root'
- container_id: 'hello_world_container'
- keyname: 'id_ed25519'
- image_script: 'examples/virt/herorun/images/hello_world.sh'
-)!
-
-// Setup container
-executor.setup()!
-
-// Create container with hello world script
-mut container := executor.get_or_create_container(
- name: 'hello_world_container'
- image_script: 'examples/virt/herorun/images/hello_world.sh'
-)!
-
-println('✅ Setup complete with Hello World container')
-println('Container: hello_world_container')
-println('Entry point: hello_world.sh')
-
-// Run the container to demonstrate it works
-println('\n🚀 Running container...')
-result := executor.execute('runc run hello_world_container') or {
- println('❌ Container execution failed: ${err}')
- return
-}
-
-println('📋 Container output:')
-println(result)
-
-println('\n🎉 Container executed successfully!')
diff --git a/examples/virt/herorun/basic_example.vsh b/examples/virt/herorun/basic_example.vsh
deleted file mode 100644
index 0acd62f2..00000000
--- a/examples/virt/herorun/basic_example.vsh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-import freeflowuniverse.herolib.ui.console
-
-// Create container factory
-mut factory := herorun.new(reset: false)!
-
-// Create a new Alpine container
-mut container := factory.new(name: 'test-alpine', image: .alpine_3_20)!
-
-// Start the container
-container.start()!
-
-// Execute commands in the container
-result := container.exec(cmd: 'ls -la /', stdout: true)!
-console.print_debug('Container ls result: ${result}')
-
-// Test file operations
-container.exec(cmd: 'echo "Hello from container" > /tmp/test.txt', stdout: false)!
-content := container.exec(cmd: 'cat /tmp/test.txt', stdout: false)!
-console.print_debug('File content: ${content}')
-
-// Get container status and resource usage
-status := container.status()!
-cpu := container.cpu_usage()!
-mem := container.mem_usage()!
-
-console.print_debug('Container status: ${status}')
-console.print_debug('CPU usage: ${cpu}%')
-console.print_debug('Memory usage: ${mem} MB')
-
-// Clean up
-container.stop()!
-container.delete()!
\ No newline at end of file
diff --git a/examples/virt/herorun/builder_integration.vsh b/examples/virt/herorun/builder_integration.vsh
deleted file mode 100644
index f018cafb..00000000
--- a/examples/virt/herorun/builder_integration.vsh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-import freeflowuniverse.herolib.builder
-import freeflowuniverse.herolib.ui.console
-
-// Create container
-mut factory := herorun.new()!
-mut container := factory.new(name: 'builder-test', image: .ubuntu_24_04)!
-container.start()!
-
-// Get builder node for the container
-mut node := container.node()!
-
-// Use builder methods to interact with container
-node.file_write('/tmp/script.sh', '
-#!/bin/bash
-echo "Running from builder node"
-whoami
-pwd
-ls -la /
-')!
-
-result := node.exec(cmd: 'chmod +x /tmp/script.sh && /tmp/script.sh', stdout: true)!
-console.print_debug('Builder execution result: ${result}')
-
-// Test file operations through builder
-exists := node.file_exists('/tmp/script.sh')
-console.print_debug('Script exists: ${exists}')
-
-content := node.file_read('/tmp/script.sh')!
-console.print_debug('Script content: ${content}')
-
-// Clean up
-container.stop()!
-container.delete()!
\ No newline at end of file
diff --git a/examples/virt/herorun/herorun2.vsh b/examples/virt/herorun/herorun2.vsh
deleted file mode 100644
index 17dc32e4..00000000
--- a/examples/virt/herorun/herorun2.vsh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
-
-import freeflowuniverse.herolib.virt.herorun
-
diff --git a/examples/virt/herorun/images/hello_world.sh b/examples/virt/herorun/images/hello_world.sh
deleted file mode 100644
index b5b6ce29..00000000
--- a/examples/virt/herorun/images/hello_world.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-set -e
-
-echo "🎉 Hello from custom container entry point!"
-echo "Container ID: $(hostname)"
-echo "Current time: $(date)"
-echo "Working directory: $(pwd)"
-echo "Available commands:"
-ls /bin | head -10
-echo "..."
-echo "✅ Container is working perfectly!"
diff --git a/examples/virt/herorun/images/python_server.sh b/examples/virt/herorun/images/python_server.sh
deleted file mode 100644
index 3a194a44..00000000
--- a/examples/virt/herorun/images/python_server.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/sh
-set -e
-
-echo "🐍 Starting Python HTTP server..."
-
-# Allow overriding port via environment variable (default: 8000)
-PORT=${PORT:-8000}
-HOST=${HOST:-0.0.0.0}
-
-# Check if Python is available
-if ! command -v python >/dev/null 2>&1 && ! command -v python3 >/dev/null 2>&1; then
- echo "❌ Python not found in this container"
- echo "💡 To use Python server, you need a container with Python pre-installed"
- echo " For now, starting a simple HTTP server using busybox httpd..."
-
- # Create a simple index.html
- mkdir -p /tmp/www
- cat > /tmp/www/index.html << 'EOF'
-<!DOCTYPE html>
-<html>
-<head>
-    <title>Container HTTP Server</title>
-</head>
-<body>
-    <h1>🎉 Container HTTP Server</h1>
-    <p>✅ Container is running successfully!</p>
-    <div>
-        <h2>Server Information:</h2>
-        <ul>
-            <li>Server: BusyBox httpd</li>
-            <li>Port: 8000</li>
-            <li>Container: Alpine Linux</li>
-            <li>Status: Active</li>
-        </ul>
-    </div>
-    <p>Note: Python was not available, so we're using BusyBox httpd instead.</p>
-</body>
-</html>
-EOF
-
- echo "📁 Created simple web content at /tmp/www/"
- echo "🌐 Would start HTTP server on $HOST:$PORT (if httpd was available)"
- echo ""
- echo "🎉 Container executed successfully!"
- echo "✅ Entry point script is working"
- echo "📋 Container contents:"
- ls -la /tmp/www/
- echo ""
- echo "📄 Sample web content:"
- cat /tmp/www/index.html | head -10
- echo "..."
- echo ""
- echo "💡 To run a real HTTP server, use a container image with Python or httpd pre-installed"
-else
- # Use python3 if available, otherwise python
- PYTHON_CMD="python3"
- if ! command -v python3 >/dev/null 2>&1; then
- PYTHON_CMD="python"
- fi
-
- echo "✅ Found Python: $PYTHON_CMD"
- echo "🌐 Starting Python HTTP server on $HOST:$PORT"
-
- # Use exec so signals (like Ctrl+C) are properly handled
- exec $PYTHON_CMD -m http.server "$PORT" --bind "$HOST"
-fi
diff --git a/lib/builder/executor.v b/lib/builder/executor.v
index 2029dd46..5c9326d2 100644
--- a/lib/builder/executor.v
+++ b/lib/builder/executor.v
@@ -2,7 +2,7 @@ module builder
import freeflowuniverse.herolib.data.ipaddress
-type Executor = ExecutorLocal | ExecutorSSH
+type Executor = ExecutorLocal | ExecutorSSH | ExecutorCrun
pub struct ExecutorNewArguments {
pub mut:
diff --git a/lib/builder/executor_crun.v b/lib/builder/executor_crun.v
index 91e7ad7b..990c5d0a 100644
--- a/lib/builder/executor_crun.v
+++ b/lib/builder/executor_crun.v
@@ -3,9 +3,7 @@ module builder
import os
import rand
import freeflowuniverse.herolib.osal.core as osal
-import freeflowuniverse.herolib.osal.rsync
import freeflowuniverse.herolib.core.pathlib
-import freeflowuniverse.herolib.data.ipaddress
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
@@ -17,14 +15,14 @@ pub mut:
debug bool = true
}
-fn (mut executor ExecutorCrun) init() ! {
+pub fn (mut executor ExecutorCrun) init() ! {
// Verify container exists and is running
result := osal.exec(cmd: 'crun state ${executor.container_id}', stdout: false) or {
return error('Container ${executor.container_id} not found or not accessible')
}
// Parse state to ensure container is running
- if !result.output.contains('"status":"running"') {
+ if !result.output.contains('"status": "running"') {
return error('Container ${executor.container_id} is not running')
}
}
diff --git a/lib/builder/node_executor.v b/lib/builder/node_executor.v
index 729b6d51..3d4d0723 100644
--- a/lib/builder/node_executor.v
+++ b/lib/builder/node_executor.v
@@ -14,6 +14,8 @@ pub fn (mut node Node) exec(args ExecArgs) !string {
return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
} else if mut node.executor is ExecutorSSH {
return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
}
panic('did not find right executor')
}
@@ -80,6 +82,8 @@ pub fn (mut node Node) exec_silent(cmd string) !string {
return node.executor.exec(cmd: cmd, stdout: false)
} else if mut node.executor is ExecutorSSH {
return node.executor.exec(cmd: cmd, stdout: false)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.exec(cmd: cmd, stdout: false)
}
panic('did not find right executor')
}
@@ -89,8 +93,11 @@ pub fn (mut node Node) exec_interactive(cmd_ string) ! {
node.executor.exec_interactive(cmd: cmd_)!
} else if mut node.executor is ExecutorSSH {
node.executor.exec_interactive(cmd: cmd_)!
+ } else if mut node.executor is ExecutorCrun {
+ node.executor.exec_interactive(cmd: cmd_)!
+ } else {
+ panic('did not find right executor')
}
- panic('did not find right executor')
}
pub fn (mut node Node) file_write(path string, text string) ! {
@@ -98,6 +105,8 @@ pub fn (mut node Node) file_write(path string, text string) ! {
return node.executor.file_write(path, text)
} else if mut node.executor is ExecutorSSH {
return node.executor.file_write(path, text)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.file_write(path, text)
}
panic('did not find right executor')
}
@@ -107,6 +116,8 @@ pub fn (mut node Node) file_read(path string) !string {
return node.executor.file_read(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.file_read(path)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.file_read(path)
}
panic('did not find right executor')
}
@@ -116,6 +127,8 @@ pub fn (mut node Node) file_exists(path string) bool {
return node.executor.file_exists(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.file_exists(path)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.file_exists(path)
}
panic('did not find right executor')
}
@@ -137,6 +150,8 @@ pub fn (mut node Node) delete(path string) ! {
return node.executor.delete(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.delete(path)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.delete(path)
}
panic('did not find right executor')
}
@@ -179,6 +194,8 @@ pub fn (mut node Node) download(args_ SyncArgs) ! {
return node.executor.download(args)
} else if mut node.executor is ExecutorSSH {
return node.executor.download(args)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.download(args)
}
panic('did not find right executor')
}
@@ -208,6 +225,8 @@ pub fn (mut node Node) upload(args_ SyncArgs) ! {
return node.executor.upload(args)
} else if mut node.executor is ExecutorSSH {
return node.executor.upload(args)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.upload(args)
}
panic('did not find right executor')
}
@@ -224,6 +243,8 @@ pub fn (mut node Node) environ_get(args EnvGetParams) !map[string]string {
return node.executor.environ_get()
} else if mut node.executor is ExecutorSSH {
return node.executor.environ_get()
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.environ_get()
}
panic('did not find right executor')
}
@@ -235,6 +256,8 @@ pub fn (mut node Node) info() map[string]string {
return node.executor.info()
} else if mut node.executor is ExecutorSSH {
return node.executor.info()
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.info()
}
panic('did not find right executor')
}
@@ -244,6 +267,8 @@ pub fn (mut node Node) shell(cmd string) ! {
return node.executor.shell(cmd)
} else if mut node.executor is ExecutorSSH {
return node.executor.shell(cmd)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.shell(cmd)
}
panic('did not find right executor')
}
@@ -257,6 +282,8 @@ pub fn (mut node Node) list(path string) ![]string {
return node.executor.list(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.list(path)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.list(path)
}
panic('did not find right executor')
}
@@ -266,6 +293,8 @@ pub fn (mut node Node) dir_exists(path string) bool {
return node.executor.dir_exists(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.dir_exists(path)
+ } else if mut node.executor is ExecutorCrun {
+ return node.executor.dir_exists(path)
}
panic('did not find right executor')
}
@@ -275,8 +304,11 @@ pub fn (mut node Node) debug_off() {
node.executor.debug_off()
} else if mut node.executor is ExecutorSSH {
node.executor.debug_off()
+ } else if mut node.executor is ExecutorCrun {
+ node.executor.debug_off()
+ } else {
+ panic('did not find right executor')
}
- panic('did not find right executor')
}
pub fn (mut node Node) debug_on() {
@@ -284,6 +316,9 @@ pub fn (mut node Node) debug_on() {
node.executor.debug_on()
} else if mut node.executor is ExecutorSSH {
node.executor.debug_on()
+ } else if mut node.executor is ExecutorCrun {
+ node.executor.debug_on()
+ } else {
+ panic('did not find right executor')
}
- panic('did not find right executor')
}
diff --git a/lib/installers/virt/herorunner/herorunner_actions.v b/lib/installers/virt/herorunner/herorunner_actions.v
index be6713bf..8ea53f05 100644
--- a/lib/installers/virt/herorunner/herorunner_actions.v
+++ b/lib/installers/virt/herorunner/herorunner_actions.v
@@ -23,9 +23,7 @@ fn upload() ! {
fn install() ! {
console.print_header('install herorunner')
- osal.package_install('
- xz-utils
- crun')!
+ osal.package_install('crun')!
// osal.exec(
// cmd: '
diff --git a/lib/installers/virt/herorunner/herorunner_model.v b/lib/installers/virt/herorunner/herorunner_model.v
index 9e7b2f39..e08ae618 100644
--- a/lib/installers/virt/herorunner/herorunner_model.v
+++ b/lib/installers/virt/herorunner/herorunner_model.v
@@ -18,9 +18,6 @@ pub mut:
// your checking & initialization code if needed
fn obj_init(mycfg_ HeroRunner) !HeroRunner {
mut mycfg := mycfg_
- if mycfg.password == '' && mycfg.secret == '' {
- return error('password or secret needs to be filled in for ${mycfg.name}')
- }
return mycfg
}
diff --git a/lib/osal/tmux/tmux_pane.v b/lib/osal/tmux/tmux_pane.v
index bbfb9486..043eacf6 100644
--- a/lib/osal/tmux/tmux_pane.v
+++ b/lib/osal/tmux/tmux_pane.v
@@ -7,7 +7,7 @@ import time
import os
@[heap]
-struct Pane {
+pub struct Pane {
pub mut:
window &Window @[str: skip]
id int // pane id (e.g., %1, %2)
@@ -696,3 +696,22 @@ pub fn (p Pane) logging_status() string {
}
return 'disabled'
}
+
+pub fn (mut p Pane) clear() ! {
+ // Kill current process in the pane
+ osal.exec(
+ cmd: 'tmux send-keys -t %${p.id} C-c'
+ stdout: false
+ name: 'tmux_pane_interrupt'
+ ) or {}
+
+ // Reset pane by running a new bash
+ osal.exec(
+ cmd: "tmux send-keys -t %${p.id} '/bin/bash' Enter"
+ stdout: false
+ name: 'tmux_pane_reset_shell'
+ )!
+
+ // Update pane info
+ p.window.scan()!
+}
diff --git a/lib/osal/tmux/tmux_window.v b/lib/osal/tmux/tmux_window.v
index 81f39064..60d2d9ba 100644
--- a/lib/osal/tmux/tmux_window.v
+++ b/lib/osal/tmux/tmux_window.v
@@ -406,3 +406,22 @@ pub fn (mut w Window) stop_ttyd(port int) ! {
}
println('ttyd stopped for window ${w.name} on port ${port} (if it was running)')
}
+
+// Get a pane by its ID
+pub fn (mut w Window) pane_get(id int) !&Pane {
+ w.scan()! // refresh info from tmux
+ for pane in w.panes {
+ if pane.id == id {
+ return pane
+ }
+ }
+ return error('Pane with id ${id} not found in window ${w.name}. Available panes: ${w.panes}')
+}
+
+// Create a new pane (just a split with default shell)
+pub fn (mut w Window) pane_new() !&Pane {
+ return w.pane_split(
+ cmd: '/bin/bash'
+ horizontal: true
+ )
+}
diff --git a/lib/virt/heropods/config_template.json b/lib/virt/heropods/config_template.json
index e1ef0f56..51cd699a 100644
--- a/lib/virt/heropods/config_template.json
+++ b/lib/virt/heropods/config_template.json
@@ -7,7 +7,9 @@
"gid": 0
},
"args": [
- "/bin/sh"
+ "/bin/sh",
+ "-c",
+ "while true; do sleep 30; done"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
diff --git a/lib/virt/heropods/container.v b/lib/virt/heropods/container.v
index 548fd1ea..bb2e1289 100644
--- a/lib/virt/heropods/container.v
+++ b/lib/virt/heropods/container.v
@@ -9,19 +9,54 @@ import json
pub struct Container {
pub mut:
- name string
- node ?&builder.Node
+ name string
+ node ?&builder.Node
tmux_pane ?&tmux.Pane
- factory &ContainerFactory
+ factory &ContainerFactory
+}
+
+// Struct to parse JSON output of `crun state`
+struct CrunState {
+ id string
+ status string
+ pid int
+ bundle string
+ created string
}
pub fn (mut self Container) start() ! {
+ // Check if container exists in crun
+ container_exists := self.container_exists_in_crun()!
+
+ if !container_exists {
+ // Container doesn't exist, create it first
+ console.print_debug('Container ${self.name} does not exist, creating it...')
+ osal.exec(
+ cmd: 'crun create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
+ stdout: true
+ )!
+ console.print_debug('Container ${self.name} created')
+ }
+
status := self.status()!
if status == .running {
console.print_debug('Container ${self.name} is already running')
return
}
-
+
+ // If container exists but is stopped, we need to delete and recreate it
+ // because crun doesn't allow restarting a stopped container
+ if container_exists && status != .running {
+ console.print_debug('Container ${self.name} exists but is stopped, recreating...')
+ osal.exec(cmd: 'crun delete ${self.name}', stdout: false) or {}
+ osal.exec(
+ cmd: 'crun create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
+ stdout: true
+ )!
+ console.print_debug('Container ${self.name} recreated')
+ }
+
+ // start the container (crun start doesn't have --detach flag)
osal.exec(cmd: 'crun start ${self.name}', stdout: true)!
console.print_green('Container ${self.name} started')
}
@@ -32,10 +67,10 @@ pub fn (mut self Container) stop() ! {
console.print_debug('Container ${self.name} is already stopped')
return
}
-
+
osal.exec(cmd: 'crun kill ${self.name} SIGTERM', stdout: false) or {}
time.sleep(2 * time.second)
-
+
// Force kill if still running
if self.status()! == .running {
osal.exec(cmd: 'crun kill ${self.name} SIGKILL', stdout: false) or {}
@@ -44,36 +79,43 @@ pub fn (mut self Container) stop() ! {
}
pub fn (mut self Container) delete() ! {
+ // Check if container exists before trying to delete
+ if !self.container_exists_in_crun()! {
+ console.print_debug('Container ${self.name} does not exist, nothing to delete')
+ return
+ }
+
self.stop()!
osal.exec(cmd: 'crun delete ${self.name}', stdout: false) or {}
+
+ // Remove from factory's container cache
+ if self.name in self.factory.containers {
+ self.factory.containers.delete(self.name)
+ }
+
console.print_green('Container ${self.name} deleted')
}
// Execute command inside the container
-pub fn (mut self Container) exec(args osal.ExecArgs) !string {
+pub fn (mut self Container) exec(cmd_ osal.Command) !string {
// Ensure container is running
if self.status()! != .running {
self.start()!
}
-
+
// Use the builder node to execute inside container
mut node := self.node()!
- return node.exec(cmd: args.cmd, stdout: args.stdout)
+ console.print_debug('Executing command in container ${self.name}: ${cmd_.cmd}')
+ return node.exec(cmd: cmd_.cmd, stdout: cmd_.stdout)
}
pub fn (self Container) status() !ContainerStatus {
- result := osal.exec(cmd: 'crun state ${self.name}', stdout: false) or {
- return .unknown
- }
-
+ result := osal.exec(cmd: 'crun state ${self.name}', stdout: false) or { return .unknown }
+
// Parse JSON output from crun state
- state := json.decode(map[string]json.Any, result) or {
- return .unknown
- }
-
- status_str := state['status'] or { json.Any('') }.str()
-
- return match status_str {
+ state := json.decode(CrunState, result.output) or { return .unknown }
+
+ return match state.status {
'running' { .running }
'stopped' { .stopped }
'paused' { .paused }
@@ -81,6 +123,15 @@ pub fn (self Container) status() !ContainerStatus {
}
}
+// Check if container exists in crun (regardless of its state)
+fn (self Container) container_exists_in_crun() !bool {
+ // Try to get container state - if it fails, container doesn't exist
+ result := osal.exec(cmd: 'crun state ${self.name}', stdout: false) or { return false }
+
+ // If we get here, the container exists (even if stopped/paused)
+ return result.exit_code == 0
+}
+
pub enum ContainerStatus {
running
stopped
@@ -91,13 +142,12 @@ pub enum ContainerStatus {
// Get CPU usage in percentage
pub fn (self Container) cpu_usage() !f64 {
// Use cgroup stats to get CPU usage
- result := osal.exec(cmd: 'cat /sys/fs/cgroup/system.slice/crun-${self.name}.scope/cpu.stat', stdout: false) or {
- return 0.0
- }
-
- // Parse cpu.stat file and calculate usage percentage
- // This is a simplified implementation
- for line in result.split_into_lines() {
+ result := osal.exec(
+ cmd: 'cat /sys/fs/cgroup/system.slice/crun-${self.name}.scope/cpu.stat'
+ stdout: false
+ ) or { return 0.0 }
+
+ for line in result.output.split_into_lines() {
if line.starts_with('usage_usec') {
usage := line.split(' ')[1].f64()
return usage / 1000000.0 // Convert to percentage
@@ -108,11 +158,12 @@ pub fn (self Container) cpu_usage() !f64 {
// Get memory usage in MB
pub fn (self Container) mem_usage() !f64 {
- result := osal.exec(cmd: 'cat /sys/fs/cgroup/system.slice/crun-${self.name}.scope/memory.current', stdout: false) or {
- return 0.0
- }
-
- bytes := result.trim_space().f64()
+ result := osal.exec(
+ cmd: 'cat /sys/fs/cgroup/system.slice/crun-${self.name}.scope/memory.current'
+ stdout: false
+ ) or { return 0.0 }
+
+ bytes := result.output.trim_space().f64()
return bytes / (1024 * 1024) // Convert to MB
}
@@ -120,69 +171,74 @@ pub struct TmuxPaneArgs {
pub mut:
window_name string
pane_nr int
- pane_name string // optional
- cmd string // optional, will execute this cmd
- reset bool // if true will reset everything and restart a cmd
+ pane_name string // optional
+ cmd string // optional, will execute this cmd
+ reset bool // if true will reset everything and restart a cmd
env map[string]string // optional, will set these env vars in the pane
}
pub fn (mut self Container) tmux_pane(args TmuxPaneArgs) !&tmux.Pane {
- mut tmux_session := self.factory.tmux_session
- if tmux_session == '' {
- tmux_session = 'herorun'
+ mut t := tmux.new()!
+ session_name := 'herorun'
+
+ mut session := if t.session_exist(session_name) {
+ t.session_get(session_name)!
+ } else {
+ t.session_create(name: session_name)!
}
-
- // Get or create tmux session
- mut session := tmux.session_get(name: tmux_session) or {
- tmux.session_new(name: tmux_session)!
- }
-
+
// Get or create window
mut window := session.window_get(name: args.window_name) or {
session.window_new(name: args.window_name)!
}
-
- // Get or create pane
- mut pane := window.pane_get(nr: args.pane_nr) or {
- window.pane_new()!
- }
-
+
+ // Get existing pane by number, or create a new one
+ mut pane := window.pane_get(args.pane_nr) or { window.pane_new()! }
+
if args.reset {
pane.clear()!
}
-
+
// Set environment variables if provided
for key, value in args.env {
pane.send_keys('export ${key}="${value}"')!
}
-
+
// Execute command if provided
if args.cmd != '' {
- // First enter the container namespace
pane.send_keys('crun exec ${self.name} ${args.cmd}')!
}
-
- self.tmux_pane = &pane
- return &pane
+
+ self.tmux_pane = pane
+ return pane
}
pub fn (mut self Container) node() !&builder.Node {
- if node := self.node {
- return node
+ // If node already initialized, return it
+ if self.node != none {
+ return self.node
}
-
- // Create a new ExecutorCrun for this container
- mut executor := builder.ExecutorCrun{
- container_id: self.name
- }
-
+
mut b := builder.new()!
- mut node := &builder.Node{
- name: 'container_${self.name}'
- executor: executor
- factory: &b
+
+ mut exec := builder.ExecutorCrun{
+ container_id: self.name
+ debug: false
}
-
+
+ exec.init() or {
+ return error('Failed to init ExecutorCrun for container ${self.name}: ${err}')
+ }
+
+ // Create node using the factory method, then override the executor
+ mut node := b.node_new(name: 'container_${self.name}', ipaddr: 'localhost')!
+ node.executor = exec
+ node.platform = .alpine
+ node.cputype = .intel
+ node.done = map[string]string{}
+ node.environment = map[string]string{}
+ node.hostname = self.name
+
self.node = node
return node
-}
\ No newline at end of file
+}
diff --git a/lib/virt/heropods/container_create.v b/lib/virt/heropods/container_create.v
index 6fcbb4d9..80ae4d80 100644
--- a/lib/virt/heropods/container_create.v
+++ b/lib/virt/heropods/container_create.v
@@ -3,8 +3,9 @@ module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
-import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.installers.virt.herorunner as herorunner_installer
import os
+import x.json2
// Updated enum to be more flexible
pub enum ContainerImageType {
@@ -17,82 +18,132 @@ pub enum ContainerImageType {
@[params]
pub struct ContainerNewArgs {
pub:
- name string @[required]
- image ContainerImageType = .alpine_3_20
+ name string @[required]
+ image ContainerImageType = .alpine_3_20
custom_image_name string // Used when image = .custom
- docker_url string // Docker image URL for new images
- reset bool
+ docker_url string // Docker image URL for new images
+ reset bool
}
pub fn (mut self ContainerFactory) new(args ContainerNewArgs) !&Container {
if args.name in self.containers && !args.reset {
return self.containers[args.name]
}
-
+
// Determine image to use
mut image_name := ''
mut rootfs_path := ''
-
+
match args.image {
.alpine_3_20 {
image_name = 'alpine'
- rootfs_path = '/containers/images/alpine/rootfs'
+ rootfs_path = '${self.base_dir}/images/alpine/rootfs'
}
.ubuntu_24_04 {
image_name = 'ubuntu_24_04'
- rootfs_path = '/containers/images/ubuntu/24.04/rootfs'
+ rootfs_path = '${self.base_dir}/images/ubuntu/24.04/rootfs'
}
.ubuntu_25_04 {
image_name = 'ubuntu_25_04'
- rootfs_path = '/containers/images/ubuntu/25.04/rootfs'
+ rootfs_path = '${self.base_dir}/images/ubuntu/25.04/rootfs'
}
.custom {
if args.custom_image_name == '' {
return error('custom_image_name is required when using custom image type')
}
image_name = args.custom_image_name
- rootfs_path = '/containers/images/${image_name}/rootfs'
-
- // Check if image exists, if not and docker_url provided, create it
+ rootfs_path = '${self.base_dir}/images/${image_name}/rootfs'
+
+ // If image not yet extracted, pull and unpack it
if !os.is_dir(rootfs_path) && args.docker_url != '' {
- console.print_debug('Creating new image ${image_name} from ${args.docker_url}')
- _ = self.image_new(
- image_name: image_name
- docker_url: args.docker_url
- reset: args.reset
- )!
+ console.print_debug('Pulling image ${args.docker_url} with podman...')
+ self.podman_pull_and_export(args.docker_url, image_name, rootfs_path)!
}
}
}
-
+
// Verify rootfs exists
if !os.is_dir(rootfs_path) {
return error('Image rootfs not found: ${rootfs_path}. Please ensure the image is available.')
}
-
- // Create container config
+
+ // Create container config (with terminal disabled) but don't create the container yet
self.create_container_config(args.name, rootfs_path)!
-
- // Create container using crun
- osal.exec(cmd: 'crun create --bundle /containers/configs/${args.name} ${args.name}', stdout: true)!
-
+
+ // Ensure crun is installed on host
+ if !osal.cmd_exists('crun') {
+ mut herorunner := herorunner_installer.new()!
+ herorunner.install()!
+ }
+
+ // Create container struct but don't create the actual container in crun yet
+ // The actual container creation will happen in container.start()
mut container := &Container{
- name: args.name
+ name: args.name
factory: &self
}
-
+
self.containers[args.name] = container
return container
}
+// Create OCI config.json from template
fn (self ContainerFactory) create_container_config(container_name string, rootfs_path string) ! {
- config_dir := '/containers/configs/${container_name}'
+ config_dir := '${self.base_dir}/configs/${container_name}'
osal.exec(cmd: 'mkdir -p ${config_dir}', stdout: false)!
-
- // Generate OCI config.json using template
- config_content := $tmpl('config_template.json')
+
+ // Load template
+ mut config_content := $tmpl('config_template.json')
+
+ // Parse JSON with json2
+ mut root := json2.raw_decode(config_content)!
+ mut config := root.as_map()
+
+ // Get or create process map
+ mut process := if 'process' in config {
+ config['process'].as_map()
+ } else {
+ map[string]json2.Any{}
+ }
+
+ // Force disable terminal
+ process['terminal'] = json2.Any(false)
+ config['process'] = json2.Any(process)
+
+ // Write back to config.json
config_path := '${config_dir}/config.json'
-
mut p := pathlib.get_file(path: config_path, create: true)!
- p.write(config_content)!
-}
\ No newline at end of file
+ p.write(json2.encode_pretty(json2.Any(config)))!
+}
+
+// Use podman to pull image and extract rootfs
+fn (self ContainerFactory) podman_pull_and_export(docker_url string, image_name string, rootfs_path string) ! {
+ // Pull image
+ osal.exec(
+ cmd: 'podman pull ${docker_url}'
+ stdout: true
+ )!
+
+ // Create temp container
+ temp_name := 'tmp_${image_name}_${os.getpid()}'
+ osal.exec(
+ cmd: 'podman create --name ${temp_name} ${docker_url}'
+ stdout: true
+ )!
+
+ // Export container filesystem
+ osal.exec(
+ cmd: 'mkdir -p ${rootfs_path}'
+ stdout: false
+ )!
+ osal.exec(
+ cmd: 'podman export ${temp_name} | tar -C ${rootfs_path} -xf -'
+ stdout: true
+ )!
+
+ // Cleanup temp container
+ osal.exec(
+ cmd: 'podman rm ${temp_name}'
+ stdout: false
+ )!
+}
diff --git a/lib/virt/heropods/container_image.v b/lib/virt/heropods/container_image.v
index 6d1810ec..ab8d4037 100644
--- a/lib/virt/heropods/container_image.v
+++ b/lib/virt/heropods/container_image.v
@@ -10,7 +10,7 @@ import json
@[heap]
pub struct ContainerImage {
pub mut:
- image_name string @[required] // image is located in /containers/images/<image_name>/rootfs
+ image_name string @[required] // image is located in ${self.factory.base_dir}/images/<image_name>/rootfs
docker_url string // optional docker image URL
rootfs_path string // path to the extracted rootfs
size_mb f64 // size in MB
@@ -21,7 +21,7 @@ pub mut:
@[params]
pub struct ContainerImageArgs {
pub mut:
- image_name string @[required] // image is located in /containers/images/<image_name>/rootfs
+ image_name string @[required] // image is located in ${self.factory.base_dir}/images/<image_name>/rootfs
docker_url string // docker image URL like "alpine:3.20" or "ubuntu:24.04"
reset bool
}
@@ -43,7 +43,7 @@ pub mut:
// Create new image or get existing
pub fn (mut self ContainerFactory) image_new(args ContainerImageArgs) !&ContainerImage {
mut image_name := texttools.name_fix(args.image_name)
- rootfs_path := '/containers/images/${image_name}/rootfs'
+ rootfs_path := '${self.base_dir}/images/${image_name}/rootfs'
// Check if image already exists
if image_name in self.images && !args.reset {
@@ -84,7 +84,7 @@ fn (mut self ContainerImage) download_from_docker(docker_url string, reset bool)
console.print_header('Downloading image: ${docker_url}')
// Clean image name for local storage
- image_dir := '/containers/images/${self.image_name}'
+ image_dir := '${self.factory.base_dir}/images/${self.image_name}'
// Remove existing if reset is true
if reset && os.is_dir(image_dir) {
@@ -133,15 +133,15 @@ fn (mut self ContainerImage) update_metadata() ! {
self.size_mb = size_str.f64()
// Get creation time
- stat_result := osal.exec(cmd: 'stat -c "%Y" ${self.rootfs_path}', stdout: false)!
- self.created_at = stat_result.output.trim_space() // TODO: should this be ourtime?
+ info := os.stat(self.rootfs_path) or { return error('stat failed: ${err}') }
+ self.created_at = info.ctime.str() // ctime of the rootfs dir; mtime.str() could be used instead
}
// List all available images
pub fn (mut self ContainerFactory) images_list() ![]&ContainerImage {
mut images := []&ContainerImage{}
- images_base_dir := '/containers/images'
+ images_base_dir := '${self.base_dir}/images'
if !os.is_dir(images_base_dir) {
return images
}
@@ -206,7 +206,7 @@ pub fn (mut self ContainerFactory) image_import(args ImageImportArgs) !&Containe
console.print_header('Importing image from ${args.source_path}')
- image_dir := '/containers/images/${image_name_clean}'
+ image_dir := '${self.base_dir}/images/${image_name_clean}'
rootfs_path := '${image_dir}/rootfs'
// Check if image already exists
diff --git a/lib/virt/heropods/factory.v b/lib/virt/heropods/factory.v
index 07af0d62..ef6516c1 100644
--- a/lib/virt/heropods/factory.v
+++ b/lib/virt/heropods/factory.v
@@ -1,25 +1,24 @@
module heropods
import freeflowuniverse.herolib.ui.console
-import freeflowuniverse.herolib.osal.tmux
import freeflowuniverse.herolib.osal.core as osal
import time
-import freeflowuniverse.herolib.builder
-import freeflowuniverse.herolib.core.pathlib
import os
+@[heap]
pub struct ContainerFactory {
pub mut:
- tmux_session string // tmux session name if used
+ tmux_session string
containers map[string]&Container
- images map[string]&ContainerImage // Added images map
+ images map[string]&ContainerImage
+ base_dir string
}
@[params]
pub struct FactoryInitArgs {
pub:
- reset bool
- use_podman bool = true // Use podman for image management
+ reset bool
+ use_podman bool = true
}
pub fn new(args FactoryInitArgs) !ContainerFactory {
@@ -30,34 +29,55 @@ pub fn new(args FactoryInitArgs) !ContainerFactory {
fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
// Ensure base directories exist
- osal.exec(cmd: 'mkdir -p /containers/images /containers/configs /containers/runtime', stdout: false)!
-
+ self.base_dir = os.getenv_opt('CONTAINERS_DIR') or { os.home_dir() + '/.containers' }
+
+ osal.exec(
+ cmd: 'mkdir -p ${self.base_dir}/images ${self.base_dir}/configs ${self.base_dir}/runtime'
+ stdout: false
+ )!
+
if args.use_podman {
- // Check if podman is installed
if !osal.cmd_exists('podman') {
- console.print_stderr('Warning: podman not found. Installing podman is recommended for better image management.')
- console.print_debug('You can install podman with: apt install podman (Ubuntu) or brew install podman (macOS)')
+ console.print_stderr('Warning: podman not found. Install podman for better image management.')
+ console.print_debug('Install with: apt install podman (Ubuntu) or brew install podman (macOS)')
} else {
console.print_debug('Using podman for image management')
}
}
-
+
// Load existing images into cache
self.load_existing_images()!
-
- // Setup default images if they don't exist
+
+ // Setup default images if not using podman
if !args.use_podman {
- self.setup_default_images_legacy(args.reset)!
+ self.setup_default_images(args.reset)!
+ }
+}
+
+fn (mut self ContainerFactory) setup_default_images(reset bool) ! {
+ console.print_header('Setting up default images...')
+
+ default_images := [ContainerImageType.alpine_3_20, .ubuntu_24_04, .ubuntu_25_04]
+
+ for img in default_images {
+ mut args := ContainerImageArgs{
+ image_name: img.str()
+ reset: reset
+ }
+ if img.str() !in self.images || reset {
+ console.print_debug('Preparing default image: ${img.str()}')
+ _ = self.image_new(args)!
+ }
}
}
// Load existing images from filesystem into cache
fn (mut self ContainerFactory) load_existing_images() ! {
- images_base_dir := '/containers/images'
+ images_base_dir := '${self.base_dir}/images'
if !os.is_dir(images_base_dir) {
return
}
-
+
dirs := os.ls(images_base_dir) or { return }
for dir in dirs {
full_path := '${images_base_dir}/${dir}'
@@ -65,12 +85,12 @@ fn (mut self ContainerFactory) load_existing_images() ! {
rootfs_path := '${full_path}/rootfs'
if os.is_dir(rootfs_path) {
mut image := &ContainerImage{
- image_name: dir
+ image_name: dir
rootfs_path: rootfs_path
- factory: &self
+ factory: &self
}
image.update_metadata() or {
- console.print_stderr('Failed to load image metadata for ${dir}')
+ console.print_stderr('⚠️ Failed to update metadata for image ${dir}: ${err}')
continue
}
self.images[dir] = image
@@ -80,82 +100,9 @@ fn (mut self ContainerFactory) load_existing_images() ! {
}
}
-// Legacy method for downloading images directly (fallback if no podman)
-fn (mut self ContainerFactory) setup_default_images_legacy(reset bool) ! {
- // Setup for all supported images
- images := [ContainerImage.alpine_3_20, .ubuntu_24_04, .ubuntu_25_04]
-
- for image in images {
- match image {
- .alpine_3_20 {
- alpine_ver := '3.20.3'
- alpine_file := 'alpine-minirootfs-${alpine_ver}-x86_64.tar.gz'
- alpine_url := 'https://dl-cdn.alpinelinux.org/alpine/v${alpine_ver[..4]}/releases/x86_64/${alpine_file}'
- alpine_dest := '/containers/images/alpine/${alpine_file}'
- alpine_rootfs := '/containers/images/alpine/rootfs'
-
- if reset || !os.exists(alpine_rootfs) {
- osal.download(
- url: alpine_url
- dest: alpine_dest
- minsize_kb: 1024
- )!
-
- // Extract alpine rootfs
- osal.exec(cmd: 'mkdir -p ${alpine_rootfs}', stdout: false)!
- osal.exec(cmd: 'tar -xzf ${alpine_dest} -C ${alpine_rootfs}', stdout: false)!
- }
- console.print_green('Alpine ${alpine_ver} rootfs prepared at ${alpine_rootfs}')
- }
- .ubuntu_24_04 {
- ver := '24.04'
- codename := 'noble'
- file := 'ubuntu-${ver}-minimal-cloudimg-amd64-root.tar.xz'
- url := 'https://cloud-images.ubuntu.com/minimal/releases/${codename}/release/${file}'
- dest := '/containers/images/ubuntu/${ver}/${file}'
- rootfs := '/containers/images/ubuntu/${ver}/rootfs'
-
- if reset || !os.exists(rootfs) {
- osal.download(
- url: url
- dest: dest
- minsize_kb: 10240
- )!
-
- // Extract ubuntu rootfs
- osal.exec(cmd: 'mkdir -p ${rootfs}', stdout: false)!
- osal.exec(cmd: 'tar -xf ${dest} -C ${rootfs}', stdout: false)!
- }
- console.print_green('Ubuntu ${ver} (${codename}) rootfs prepared at ${rootfs}')
- }
- .ubuntu_25_04 {
- ver := '25.04'
- codename := 'plucky'
- file := 'ubuntu-${ver}-minimal-cloudimg-amd64-root.tar.xz'
- url := 'https://cloud-images.ubuntu.com/daily/minimal/releases/${codename}/release/${file}'
- dest := '/containers/images/ubuntu/${ver}/${file}'
- rootfs := '/containers/images/ubuntu/${ver}/rootfs'
-
- if reset || !os.exists(rootfs) {
- osal.download(
- url: url
- dest: dest
- minsize_kb: 10240
- )!
-
- // Extract ubuntu rootfs
- osal.exec(cmd: 'mkdir -p ${rootfs}', stdout: false)!
- osal.exec(cmd: 'tar -xf ${dest} -C ${rootfs}', stdout: false)!
- }
- console.print_green('Ubuntu ${ver} (${codename}) rootfs prepared at ${rootfs}')
- }
- }
- }
-}
-
pub fn (mut self ContainerFactory) get(args ContainerNewArgs) !&Container {
if args.name !in self.containers {
- return error('Container ${args.name} does not exist')
+ return error('Container "${args.name}" does not exist. Use factory.new() to create it first.')
}
return self.containers[args.name]
}
@@ -163,18 +110,18 @@ pub fn (mut self ContainerFactory) get(args ContainerNewArgs) !&Container {
// Get image by name
pub fn (mut self ContainerFactory) image_get(name string) !&ContainerImage {
if name !in self.images {
- return error('Image ${name} does not exist')
+ return error('Image "${name}" not found in cache. Try importing or downloading it.')
}
return self.images[name]
}
+// List all containers currently managed by crun
pub fn (self ContainerFactory) list() ![]Container {
mut containers := []Container{}
- result := osal.exec(cmd: 'crun list --format json', stdout: false) or { '[]' }
-
- // Parse crun list output and populate containers
- // The output format from crun list is typically tab-separated
- lines := result.split_into_lines()
+ result := osal.exec(cmd: 'crun list --format json', stdout: false)!
+
+ // Parse crun list output (tab-separated)
+ lines := result.output.split_into_lines()
for line in lines {
if line.trim_space() == '' || line.starts_with('ID') {
continue
@@ -182,10 +129,10 @@ pub fn (self ContainerFactory) list() ![]Container {
parts := line.split('\t')
if parts.len > 0 {
containers << Container{
- name: parts[0]
+ name: parts[0]
factory: &self
}
}
}
return containers
-}
\ No newline at end of file
+}