Merge branch 'development' of github.com:freeflowuniverse/herolib into development

* 'development' of github.com:freeflowuniverse/herolib:
  ...
  add example heromodels call
  add example and heromodels openrpc server
  remove server from gitignore
  clean up and fix openrpc server implementation
  Test the workflow
  feat: Add basic `heropods` container example
  refactor: enhance container lifecycle and Crun executor
  refactor: streamline container setup and dependencies
  refactor: externalize container and image base directories
  feat: Add ExecutorCrun and enable container node creation
  refactor: Migrate container management to heropods module
  refactor: simplify console management and apply fixes
  ...
  ...
  ...
  ...
  ...
  ...
2025-09-09 06:30:52 +04:00
95 changed files with 4657 additions and 1493 deletions

1
.gitignore vendored
View File

@@ -48,7 +48,6 @@ compile_summary.log
.summary_lock
.aider*
*.dylib
server
HTTP_REST_MCP_DEMO.md
MCP_HTTP_REST_IMPLEMENTATION_PLAN.md
.roo

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import json
import freeflowuniverse.herolib.hero.heromodels.openrpc
import freeflowuniverse.herolib.schemas.jsonrpc
import freeflowuniverse.herolib.hero.heromodels
fn main() {
mut handler := openrpc.new_heromodels_handler()!
my_calendar := heromodels.calendar_new(
name: "My Calendar"
description: "My Calendar"
securitypolicy: 1
tags: ["tag1", "tag2"]
group_id: 1,
events: []u32{},
color: "#000000",
timezone: "UTC",
is_public: true,
)!
response := handler.handle(jsonrpc.new_request('calendar_set', json.encode(my_calendar)))!
println(response)
}
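A minimal sketch of reading the record back through the same handler; it assumes a fresh database, where calendar_set stores the first calendar under id 0 (set() allocates ids from the current entry count, and calendar_get decodes its params as a plain u32 id, see the handler file further down):

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.hero.heromodels.openrpc
import freeflowuniverse.herolib.schemas.jsonrpc

mut handler := openrpc.new_heromodels_handler()!
// id 0 is an assumption: it is what set() hands out on an empty database
response := handler.handle(jsonrpc.new_request('calendar_get', '0'))!
println(response)
```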

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.heropods
// Initialize factory
mut factory := heropods.new(
reset: false
use_podman: true
) or { panic('Failed to init ContainerFactory: ${err}') }
println('=== HeroPods Refactored API Demo ===')
// Step 1: factory.new() now only creates a container definition/handle
// It does NOT create the actual container in the backend yet
mut container := factory.new(
name: 'myalpine'
image: .custom
custom_image_name: 'alpine_3_20'
docker_url: 'docker.io/library/alpine:3.20'
)!
println(' Container definition created: ${container.name}')
println(' (No actual container created in backend yet)')
// Step 2: container.start() handles creation and starting
// - Checks if container exists in backend
// - Creates it if it doesn't exist
// - Starts it if it exists but is stopped
println('\n--- First start() call ---')
container.start()!
println(' Container started successfully')
// Step 3: Multiple start() calls are now idempotent
println('\n--- Second start() call (should be idempotent) ---')
container.start()!
println(' Second start() call successful - no errors!')
// Step 4: Execute commands in the container and save results
println('\n--- Executing commands in container ---')
result1 := container.exec(cmd: 'ls -la /')!
println(' Command executed: ls -la /')
println('Result: ${result1}')
result2 := container.exec(cmd: 'echo "Hello from container!"')!
println(' Command executed: echo "Hello from container!"')
println('Result: ${result2}')
result3 := container.exec(cmd: 'uname -a')!
println(' Command executed: uname -a')
println('Result: ${result3}')
// Step 5: container.delete() works naturally on the instance
println('\n--- Deleting container ---')
container.delete()!
println(' Container deleted successfully')
println('\n=== Demo completed! ===')
println('The refactored API now works as expected:')
println('- factory.new() creates definition only')
println('- container.start() is idempotent')
println('- container.exec() works and returns results')
println('- container.delete() works on instances')

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.heropods
mut factory := heropods.new(
reset: false
use_podman: true
) or { panic('Failed to init ContainerFactory: ${err}') }
mut container := factory.new(
name: 'myalpine'
image: .custom
custom_image_name: 'alpine_3_20'
docker_url: 'docker.io/library/alpine:3.20'
)!
container.start()!
container.exec(cmd: 'ls')!
container.stop()!

View File

@@ -1,105 +0,0 @@
# HeroRun - AI Agent Optimized Container Management
**Production-ready scripts for fast remote command execution**
## 🎯 Purpose
Optimized for AI agents that need rapid, reliable command execution with minimal latency and clean output.
## 🏗️ Base Image Types
HeroRun supports different base images through the `BaseImage` enum:
```v
pub enum BaseImage {
alpine // Standard Alpine Linux minirootfs (~5MB)
alpine_python // Alpine Linux with Python 3 pre-installed
}
```
### Usage Examples
**Standard Alpine Container:**
```v
base_image: .alpine // Default - minimal Alpine Linux
```
**Alpine with Python:**
```v
base_image: .alpine_python // Python 3 + pip pre-installed
```
## 📋 Three Scripts
### 1. `setup.vsh` - Environment Preparation
Creates container infrastructure on remote node.
```bash
./setup.vsh
```
**Output:** `Setup complete`
### 2. `execute.vsh` - Fast Command Execution
Executes commands on remote node with clean output only.
```bash
./execute.vsh "command" [context_id]
```
**Examples:**
```bash
./execute.vsh "ls /containers"
./execute.vsh "whoami"
./execute.vsh "echo 'Hello World'"
```
**Output:** Command result only (no verbose logging)
### 3. `cleanup.vsh` - Complete Teardown
Removes container and cleans up all resources.
```bash
./cleanup.vsh
```
**Output:** `Cleanup complete`
## ⚡ Performance Features
- **Clean Output**: Execute returns only command results
- **No Verbose Logging**: Silent operation for production use
- **Fast Execution**: Direct SSH without tmux overhead
- **AI Agent Ready**: Perfect for automated command execution
## 🚀 Usage Pattern
```bash
# Setup once
./setup.vsh
# Execute many commands (fast)
./execute.vsh "ls -la"
./execute.vsh "ps aux"
./execute.vsh "df -h"
# Cleanup when done
./cleanup.vsh
```
## 🎯 AI Agent Integration
Perfect for AI agents that need:
- Rapid command execution
- Clean, parseable output
- Minimal setup overhead
- Production-ready reliability
Each execute call returns only the command output, making it ideal for AI agents to parse and process results.
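Since stdout carries only the command result, the output can be captured directly into shell variables, for example:
```bash
# capture the clean output straight into a variable
DISK_USAGE=$(./execute.vsh "df -h /")
echo "$DISK_USAGE"
```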

View File

@@ -1,19 +0,0 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.herorun
// Create user with SSH key using sshagent module
mut user := herorun.new_user(keyname: 'id_ed25519')!
// Create executor using proper modules
mut executor := herorun.new_executor(
node_ip: '65.21.132.119'
user: 'root'
container_id: 'ai_agent_container'
keyname: 'id_ed25519'
)!
// Cleanup using tmux and osal modules
executor.cleanup()!
println('Cleanup complete')

View File

@@ -1,30 +0,0 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.herorun
import os
// Get command from command line args
if os.args.len < 2 {
println('Usage: ./execute.vsh "command" [context_id]')
exit(1)
}
cmd := os.args[1]
// context_id := if os.args.len > 2 { os.args[2] } else { 'default' }
// Create user with SSH key using sshagent module
mut user := herorun.new_user(keyname: 'id_ed25519')!
// Create executor using proper modules
mut executor := herorun.new_executor(
node_ip: '65.21.132.119'
user: 'root'
container_id: 'ai_agent_container'
keyname: 'id_ed25519'
)!
// Execute command using osal module for clean output
output := executor.execute(cmd)!
// Output only the command result
print(output)

View File

@@ -1,11 +0,0 @@
#!/bin/sh
set -e
echo "🎉 Hello from custom container entry point!"
echo "Container ID: $(hostname)"
echo "Current time: $(date)"
echo "Working directory: $(pwd)"
echo "Available commands:"
ls /bin | head -10
echo "..."
echo "✅ Container is working perfectly!"

View File

@@ -1,74 +0,0 @@
#!/bin/sh
set -e
echo "🐍 Starting Python HTTP server..."
# Allow overriding port via environment variable (default: 8000)
PORT=${PORT:-8000}
HOST=${HOST:-0.0.0.0}
# Check if Python is available
if ! command -v python >/dev/null 2>&1 && ! command -v python3 >/dev/null 2>&1; then
echo "❌ Python not found in this container"
echo "💡 To use Python server, you need a container with Python pre-installed"
echo " For now, starting a simple HTTP server using busybox httpd..."
# Create a simple index.html
mkdir -p /tmp/www
cat > /tmp/www/index.html << 'EOF'
<!DOCTYPE html>
<html>
<head>
<title>Container HTTP Server</title>
<style>
body { font-family: Arial, sans-serif; margin: 40px; }
.container { max-width: 600px; margin: 0 auto; }
.status { color: #28a745; }
.info { background: #f8f9fa; padding: 20px; border-radius: 5px; }
</style>
</head>
<body>
<div class="container">
<h1>🎉 Container HTTP Server</h1>
<p class="status">✅ Container is running successfully!</p>
<div class="info">
<h3>Server Information:</h3>
<ul>
<li><strong>Server:</strong> BusyBox httpd</li>
<li><strong>Port:</strong> 8000</li>
<li><strong>Container:</strong> Alpine Linux</li>
<li><strong>Status:</strong> Active</li>
</ul>
</div>
<p><em>Note: Python was not available, so we're using BusyBox httpd instead.</em></p>
</div>
</body>
</html>
EOF
echo "📁 Created simple web content at /tmp/www/"
echo "🌐 Would start HTTP server on $HOST:$PORT (if httpd was available)"
echo ""
echo "🎉 Container executed successfully!"
echo "✅ Entry point script is working"
echo "📋 Container contents:"
ls -la /tmp/www/
echo ""
echo "📄 Sample web content:"
cat /tmp/www/index.html | head -10
echo "..."
echo ""
echo "💡 To run a real HTTP server, use a container image with Python or httpd pre-installed"
else
# Use python3 if available, otherwise python
PYTHON_CMD="python3"
if ! command -v python3 >/dev/null 2>&1; then
PYTHON_CMD="python"
fi
echo "✅ Found Python: $PYTHON_CMD"
echo "🌐 Starting Python HTTP server on $HOST:$PORT"
# Use exec so signals (like Ctrl+C) are properly handled
exec $PYTHON_CMD -m http.server "$PORT" --bind "$HOST"
fi

View File

@@ -1,19 +0,0 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.herorun
// Create user with SSH key using sshagent module
mut user := herorun.new_user(keyname: 'id_ed25519')!
// Create executor using proper module integration
mut executor := herorun.new_executor(
node_ip: '65.21.132.119'
user: 'root'
container_id: 'ai_agent_container'
keyname: 'id_ed25519'
)!
// Setup using sshagent, tmux, hetznermanager, and osal modules
executor.setup()!
println('Setup complete')

View File

@@ -1,55 +0,0 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.herorun
// Create user with SSH key using sshagent module
mut user := herorun.new_user(keyname: 'id_ed25519')!
// Create executor with Alpine Python base image
mut executor := herorun.new_executor(
node_ip: '65.21.132.119'
user: 'root'
container_id: 'python_alpine_container'
keyname: 'id_ed25519'
image_script: 'examples/virt/herorun/images/python_server.sh'
base_image: .alpine_python // Use Alpine with Python pre-installed
)!
// Setup container
executor.setup()!
// Create container with Python Alpine base and Python server script
mut container := executor.get_or_create_container(
name: 'python_alpine_container'
image_script: 'examples/virt/herorun/images/python_server.sh'
base_image: .alpine_python
)!
println(' Setup complete with Python Alpine container')
println('Container: python_alpine_container')
println('Base image: Alpine Linux with Python 3 pre-installed')
println('Entry point: python_server.sh')
// Test the container to show Python is available
println('\n🐍 Testing Python availability...')
python_test := executor.execute('runc exec python_alpine_container python3 --version') or {
println(' Python test failed: ${err}')
return
}
println(' Python version: ${python_test}')
println('\n🚀 Running Python HTTP server...')
println('Note: This will start the server and exit (use runc run for persistent server)')
// Run the container to start the Python server
result := executor.execute('runc run python_alpine_container') or {
println(' Container execution failed: ${err}')
return
}
println('📋 Server output:')
println(result)
println('\n🎉 Python Alpine container executed successfully!')
println('💡 The Python HTTP server would run on port 8000 if started persistently')

View File

@@ -1,29 +0,0 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.herorun
// Create user with SSH key using sshagent module
mut user := herorun.new_user(keyname: 'id_ed25519')!
// Create executor with image script for Python server
mut executor := herorun.new_executor(
node_ip: '65.21.132.119'
user: 'root'
container_id: 'python_server_container'
keyname: 'id_ed25519'
image_script: 'examples/virt/herorun/images/python_server.sh' // Path to entry point script
)!
// Setup using sshagent, tmux, hetznermanager, and osal modules
executor.setup()!
// Create container with the Python server script
mut container := executor.get_or_create_container(
name: 'python_server_container'
image_script: 'examples/virt/herorun/images/python_server.sh'
)!
println('Setup complete with Python server container')
println('Container: python_server_container')
println('Entry point: examples/virt/herorun/images/python_server.sh (Python HTTP server)')
println('To start the server: runc run python_server_container')

View File

@@ -1,40 +0,0 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.herorun
// Create user with SSH key using sshagent module
mut user := herorun.new_user(keyname: 'id_ed25519')!
// Create executor with hello world script
mut executor := herorun.new_executor(
node_ip: '65.21.132.119'
user: 'root'
container_id: 'hello_world_container'
keyname: 'id_ed25519'
image_script: 'examples/virt/herorun/images/hello_world.sh'
)!
// Setup container
executor.setup()!
// Create container with hello world script
mut container := executor.get_or_create_container(
name: 'hello_world_container'
image_script: 'examples/virt/herorun/images/hello_world.sh'
)!
println(' Setup complete with Hello World container')
println('Container: hello_world_container')
println('Entry point: hello_world.sh')
// Run the container to demonstrate it works
println('\n🚀 Running container...')
result := executor.execute('runc run hello_world_container') or {
println(' Container execution failed: ${err}')
return
}
println('📋 Container output:')
println(result)
println('\n🎉 Container executed successfully!')

View File

@@ -1,23 +1,8 @@
{
"folders": [
{
"path": "lib"
"path": "."
},
{
"path": "aiprompts"
},
{
"path": "research"
},
{
"path": "examples"
},
{
"path": "cli"
},
{
"path": "manual"
}
],
"settings": {
"extensions.ignoreRecommendations": false
@@ -40,8 +25,7 @@
"simonsiefke.svg-preview",
"gruntfuggly.todo-tree",
"vosca.vscode-v-analyzer",
"tomoki1207.pdf",
"kilocode.kilo-code"
"tomoki1207.pdf"
]
}
}

Binary file not shown.

View File

@@ -99,6 +99,12 @@ check_release() {
}
ubuntu_sources_fix() {
# Check if we're on Ubuntu
if [[ "${OSNAME}" != "ubuntu" ]]; then
echo " Not running on Ubuntu. Skipping mirror fix."
return 1
fi
if check_release; then
local CODENAME
CODENAME=$(lsb_release -sc)

View File

@@ -2,7 +2,7 @@ module builder
import freeflowuniverse.herolib.data.ipaddress
type Executor = ExecutorLocal | ExecutorSSH
type Executor = ExecutorLocal | ExecutorSSH | ExecutorCrun
pub struct ExecutorNewArguments {
pub mut:

217
lib/builder/executor_crun.v Normal file
View File

@@ -0,0 +1,217 @@
module builder
import os
import rand
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
@[heap]
pub struct ExecutorCrun {
pub mut:
container_id string // container ID for crun
retry int = 1
debug bool = true
}
pub fn (mut executor ExecutorCrun) init() ! {
// Verify container exists and is running
result := osal.exec(cmd: 'crun state ${executor.container_id}', stdout: false) or {
return error('Container ${executor.container_id} not found or not accessible')
}
// Parse state to ensure container is running
if !result.output.contains('"status": "running"') {
return error('Container ${executor.container_id} is not running')
}
}
pub fn (mut executor ExecutorCrun) debug_on() {
executor.debug = true
}
pub fn (mut executor ExecutorCrun) debug_off() {
executor.debug = false
}
pub fn (mut executor ExecutorCrun) exec(args_ ExecArgs) !string {
mut args := args_
if executor.debug {
console.print_debug('execute in container ${executor.container_id}: ${args.cmd}')
}
mut cmd := 'crun exec ${executor.container_id} ${args.cmd}'
if args.cmd.contains('\n') {
// For multiline commands, write a script into the container and execute it there
script_content := texttools.dedent(args.cmd)
executor.file_write('/tmp/exec_script.sh', script_content)!
cmd = 'crun exec ${executor.container_id} bash /tmp/exec_script.sh'
}
res := osal.exec(cmd: cmd, stdout: args.stdout, debug: executor.debug)!
return res.output
}
pub fn (mut executor ExecutorCrun) exec_interactive(args_ ExecArgs) ! {
mut args := args_
if args.cmd.contains('\n') {
args.cmd = texttools.dedent(args.cmd)
executor.file_write('/tmp/interactive_script.sh', args.cmd)!
args.cmd = 'bash /tmp/interactive_script.sh'
}
cmd := 'crun exec -t ${executor.container_id} ${args.cmd}'
console.print_debug(cmd)
osal.execute_interactive(cmd)!
}
pub fn (mut executor ExecutorCrun) file_write(path string, text string) ! {
if executor.debug {
console.print_debug('Container ${executor.container_id} file write: ${path}')
}
// Write to temp file first, then copy into container
temp_file := '/tmp/crun_file_${rand.uuid_v4()}'
os.write_file(temp_file, text)!
defer { os.rm(temp_file) or {} }
// Use crun exec to copy file content
cmd := 'cat ${temp_file} | crun exec -i ${executor.container_id} tee ${path} > /dev/null'
osal.exec(cmd: cmd, stdout: false)!
}
pub fn (mut executor ExecutorCrun) file_read(path string) !string {
if executor.debug {
console.print_debug('Container ${executor.container_id} file read: ${path}')
}
return executor.exec(cmd: 'cat ${path}', stdout: false)
}
pub fn (mut executor ExecutorCrun) file_exists(path string) bool {
if executor.debug {
console.print_debug('Container ${executor.container_id} file exists: ${path}')
}
output := executor.exec(cmd: 'test -f ${path} && echo found || echo not found', stdout: false) or {
return false
}
return output.trim_space() == 'found'
}
pub fn (mut executor ExecutorCrun) delete(path string) ! {
if executor.debug {
console.print_debug('Container ${executor.container_id} delete: ${path}')
}
executor.exec(cmd: 'rm -rf ${path}', stdout: false)!
}
pub fn (mut executor ExecutorCrun) upload(args SyncArgs) ! {
// For container uploads, we need to copy files from host to container
// Use crun exec with tar for efficient transfer
mut src_path := pathlib.get(args.source)
if !src_path.exists() {
return error('Source path ${args.source} does not exist')
}
if src_path.is_dir() {
// For directories, use tar to transfer
temp_tar := '/tmp/crun_upload_${rand.uuid_v4()}.tar'
osal.exec(
cmd: 'tar -cf ${temp_tar} -C ${src_path.path_dir()} ${src_path.name()}'
stdout: false
)!
defer { os.rm(temp_tar) or {} }
// Extract in container
cmd := 'cat ${temp_tar} | crun exec -i ${executor.container_id} tar -xf - -C ${args.dest}'
osal.exec(cmd: cmd, stdout: args.stdout)!
} else {
// For single files
executor.file_write(args.dest, src_path.read()!)!
}
}
pub fn (mut executor ExecutorCrun) download(args SyncArgs) ! {
// Download from container to host
if executor.dir_exists(args.source) {
// For directories
temp_tar := '/tmp/crun_download_${rand.uuid_v4()}.tar'
cmd := 'crun exec ${executor.container_id} tar -cf - -C ${args.source} . > ${temp_tar}'
osal.exec(cmd: cmd, stdout: false)!
defer { os.rm(temp_tar) or {} }
// Extract on host
osal.exec(
cmd: 'mkdir -p ${args.dest} && tar -xf ${temp_tar} -C ${args.dest}'
stdout: args.stdout
)!
} else {
// For single files
content := executor.file_read(args.source)!
os.write_file(args.dest, content)!
}
}
pub fn (mut executor ExecutorCrun) environ_get() !map[string]string {
env := executor.exec(cmd: 'env', stdout: false) or {
return error('Cannot get environment from container ${executor.container_id}')
}
mut res := map[string]string{}
for line in env.split('\n') {
if line.contains('=') {
mut key, mut val := line.split_once('=') or { continue }
key = key.trim(' ')
val = val.trim(' ')
res[key] = val
}
}
return res
}
pub fn (mut executor ExecutorCrun) info() map[string]string {
return {
'category': 'crun'
'container_id': executor.container_id
'runtime': 'crun'
}
}
pub fn (mut executor ExecutorCrun) shell(cmd string) ! {
if cmd.len > 0 {
osal.execute_interactive('crun exec -t ${executor.container_id} ${cmd}')!
} else {
osal.execute_interactive('crun exec -t ${executor.container_id} /bin/sh')!
}
}
pub fn (mut executor ExecutorCrun) list(path string) ![]string {
if !executor.dir_exists(path) {
return error('Directory ${path} does not exist in container')
}
output := executor.exec(cmd: 'ls ${path}', stdout: false)!
mut res := []string{}
for line in output.split('\n') {
line_trimmed := line.trim_space()
if line_trimmed != '' {
res << line_trimmed
}
}
return res
}
pub fn (mut executor ExecutorCrun) dir_exists(path string) bool {
output := executor.exec(cmd: 'test -d ${path} && echo found || echo not found', stdout: false) or {
return false
}
return output.trim_space() == 'found'
}
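A short usage sketch for the new executor; it assumes a crun container named 'myalpine' is already running on the host, since init() only attaches to and verifies an existing container:

```v
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.builder

mut ex := builder.ExecutorCrun{
    container_id: 'myalpine'
}
ex.init()! // errors if the container is missing or not running
println(ex.exec(cmd: 'uname -a')!)
```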

View File

@@ -14,6 +14,8 @@ pub fn (mut node Node) exec(args ExecArgs) !string {
return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
} else if mut node.executor is ExecutorSSH {
return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
} else if mut node.executor is ExecutorCrun {
return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
}
panic('did not find right executor')
}
@@ -80,6 +82,8 @@ pub fn (mut node Node) exec_silent(cmd string) !string {
return node.executor.exec(cmd: cmd, stdout: false)
} else if mut node.executor is ExecutorSSH {
return node.executor.exec(cmd: cmd, stdout: false)
} else if mut node.executor is ExecutorCrun {
return node.executor.exec(cmd: cmd, stdout: false)
}
panic('did not find right executor')
}
@@ -89,8 +93,11 @@ pub fn (mut node Node) exec_interactive(cmd_ string) ! {
node.executor.exec_interactive(cmd: cmd_)!
} else if mut node.executor is ExecutorSSH {
node.executor.exec_interactive(cmd: cmd_)!
} else if mut node.executor is ExecutorCrun {
node.executor.exec_interactive(cmd: cmd_)!
} else {
panic('did not find right executor')
}
panic('did not find right executor')
}
pub fn (mut node Node) file_write(path string, text string) ! {
@@ -98,6 +105,8 @@ pub fn (mut node Node) file_write(path string, text string) ! {
return node.executor.file_write(path, text)
} else if mut node.executor is ExecutorSSH {
return node.executor.file_write(path, text)
} else if mut node.executor is ExecutorCrun {
return node.executor.file_write(path, text)
}
panic('did not find right executor')
}
@@ -107,6 +116,8 @@ pub fn (mut node Node) file_read(path string) !string {
return node.executor.file_read(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.file_read(path)
} else if mut node.executor is ExecutorCrun {
return node.executor.file_read(path)
}
panic('did not find right executor')
}
@@ -116,6 +127,8 @@ pub fn (mut node Node) file_exists(path string) bool {
return node.executor.file_exists(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.file_exists(path)
} else if mut node.executor is ExecutorCrun {
return node.executor.file_exists(path)
}
panic('did not find right executor')
}
@@ -137,6 +150,8 @@ pub fn (mut node Node) delete(path string) ! {
return node.executor.delete(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.delete(path)
} else if mut node.executor is ExecutorCrun {
return node.executor.delete(path)
}
panic('did not find right executor')
}
@@ -179,6 +194,8 @@ pub fn (mut node Node) download(args_ SyncArgs) ! {
return node.executor.download(args)
} else if mut node.executor is ExecutorSSH {
return node.executor.download(args)
} else if mut node.executor is ExecutorCrun {
return node.executor.download(args)
}
panic('did not find right executor')
}
@@ -208,6 +225,8 @@ pub fn (mut node Node) upload(args_ SyncArgs) ! {
return node.executor.upload(args)
} else if mut node.executor is ExecutorSSH {
return node.executor.upload(args)
} else if mut node.executor is ExecutorCrun {
return node.executor.upload(args)
}
panic('did not find right executor')
}
@@ -224,6 +243,8 @@ pub fn (mut node Node) environ_get(args EnvGetParams) !map[string]string {
return node.executor.environ_get()
} else if mut node.executor is ExecutorSSH {
return node.executor.environ_get()
} else if mut node.executor is ExecutorCrun {
return node.executor.environ_get()
}
panic('did not find right executor')
}
@@ -235,6 +256,8 @@ pub fn (mut node Node) info() map[string]string {
return node.executor.info()
} else if mut node.executor is ExecutorSSH {
return node.executor.info()
} else if mut node.executor is ExecutorCrun {
return node.executor.info()
}
panic('did not find right executor')
}
@@ -244,6 +267,8 @@ pub fn (mut node Node) shell(cmd string) ! {
return node.executor.shell(cmd)
} else if mut node.executor is ExecutorSSH {
return node.executor.shell(cmd)
} else if mut node.executor is ExecutorCrun {
return node.executor.shell(cmd)
}
panic('did not find right executor')
}
@@ -257,6 +282,8 @@ pub fn (mut node Node) list(path string) ![]string {
return node.executor.list(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.list(path)
} else if mut node.executor is ExecutorCrun {
return node.executor.list(path)
}
panic('did not find right executor')
}
@@ -266,6 +293,8 @@ pub fn (mut node Node) dir_exists(path string) bool {
return node.executor.dir_exists(path)
} else if mut node.executor is ExecutorSSH {
return node.executor.dir_exists(path)
} else if mut node.executor is ExecutorCrun {
return node.executor.dir_exists(path)
}
panic('did not find right executor')
}
@@ -275,8 +304,11 @@ pub fn (mut node Node) debug_off() {
node.executor.debug_off()
} else if mut node.executor is ExecutorSSH {
node.executor.debug_off()
} else if mut node.executor is ExecutorCrun {
node.executor.debug_off()
} else {
panic('did not find right executor')
}
panic('did not find right executor')
}
pub fn (mut node Node) debug_on() {
@@ -284,6 +316,9 @@ pub fn (mut node Node) debug_on() {
node.executor.debug_on()
} else if mut node.executor is ExecutorSSH {
node.executor.debug_on()
} else if mut node.executor is ExecutorCrun {
node.executor.debug_on()
} else {
panic('did not find right executor')
}
panic('did not find right executor')
}

View File

@@ -25,6 +25,8 @@ pub fn encode[T](obj T) ![]u8 {
d.add_u32(u32(obj.$(field.name)))
} $else $if field.typ is u64 {
d.add_u64(u64(obj.$(field.name)))
} $else $if field.typ is i64 {
d.add_i64(i64(obj.$(field.name)))
} $else $if field.typ is time.Time {
d.add_time(time.new(obj.$(field.name)))
// Arrays of primitive types
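With this branch in place, structs carrying i64 fields (such as Comment.updated_at below) round-trip through the generic encoder, assuming the matching decode branch exists as well. A small sketch with a hypothetical struct:

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -enable-globals run
import freeflowuniverse.herolib.data.encoder

// hypothetical struct; the i64 field is the case this hunk enables
struct Stamp {
pub mut:
    id         u32
    updated_at i64
}

s := Stamp{
    id:         1
    updated_at: 1757390000
}
data := encoder.encode(s)!
loaded := encoder.decode[Stamp](data)!
assert loaded.updated_at == s.updated_at
```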

View File

@@ -8,7 +8,6 @@ import time
pub struct Calendar {
Base
pub mut:
group_id u32 // Associated group for permissions
events []u32 // IDs of calendar events (changed to u32 to match CalendarEvent)
color string // Hex color code
timezone string
@@ -19,7 +18,6 @@ pub mut:
pub struct CalendarArgs {
BaseArgs
pub mut:
group_id u32
events []u32
color string
timezone string
@@ -27,32 +25,23 @@ pub mut:
}
pub fn calendar_new(args CalendarArgs) !Calendar {
mut commentids := []u32{}
for comment in args.comments {
// Convert CommentArg to CommentArgExtended
extended_comment := CommentArgExtended{
comment: comment.comment
parent: 0
author: 0
}
commentids << comment_set(extended_comment)!
}
mut obj := Calendar{
id: args.id or { 0 } // Will be set by DB?
name: args.name
description: args.description
created_at: ourtime.now().unix()
updated_at: ourtime.now().unix()
securitypolicy: args.securitypolicy or { 0 }
tags: tags2id(args.tags)!
comments: commentids
group_id: args.group_id
events: args.events
color: args.color
timezone: args.timezone
is_public: args.is_public
}
return obj
mut obj := Calendar{
id: args.id or {0} // Will be set by DB?
name: args.name
description: args.description
created_at: ourtime.now().unix()
updated_at: ourtime.now().unix()
securitypolicy: args.securitypolicy or {0}
tags: tags2id(args.tags)!
comments: comments2ids(args.comments)!
group_id: args.group_id
events: args.events
color: args.color
timezone: args.timezone
is_public: args.is_public
}
return obj
}
pub fn (mut c Calendar) add_event(event_id u32) { // Changed event_id to u32

View File

@@ -0,0 +1,117 @@
module heromodels
import freeflowuniverse.herolib.data.encoder
import crypto.md5
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.ourtime
@[heap]
pub struct Comment {
Base
pub mut:
// id u32
comment string
parent u32 //id of parent comment if any, 0 means none
updated_at i64
author u32 //links to user
}
pub fn (self Comment) type_name() string {
return 'comments'
}
pub fn (self Comment) load(data []u8) !Comment {
return comment_load(data)!
}
pub fn (self Comment) dump() ![]u8 {
// Create a new encoder
mut e := encoder.new()
e.add_u8(1)
e.add_u32(self.id)
e.add_string(self.comment)
e.add_u32(self.parent)
e.add_i64(self.updated_at)
e.add_u32(self.author)
return e.data
}
pub fn comment_load(data []u8) !Comment {
// Create a new decoder
mut e := encoder.decoder_new(data)
version := e.get_u8()!
if version != 1 {
return error('wrong version in comment load')
}
mut comment := Comment{}
comment.id = e.get_u32()!
comment.comment = e.get_string()!
comment.parent = e.get_u32()!
comment.updated_at = e.get_i64()!
comment.author = e.get_u32()!
return comment
}
pub struct CommentArg {
pub mut:
comment string
parent u32
author u32
}
pub fn comment_multiset(args []CommentArg) ![]u32 {
return comments2ids(args)!
}
pub fn comments2ids(args []CommentArg) ![]u32 {
return args.map(comment2id(it.comment)!)
}
pub fn comment2id(comment string) !u32 {
comment_fixed := comment.to_lower_ascii().trim_space()
mut redis := redisclient.core_get()!
return if comment_fixed.len > 0 {
hash := md5.hexhash(comment_fixed)
comment_found := redis.hget("db:comments", hash)!
if comment_found == "" {
id := u32(redis.incr("db:comments:id")!)
redis.hset("db:comments", hash, id.str())!
redis.hset("db:comments", id.str(), comment_fixed)!
id
} else {
comment_found.u32()
}
} else { 0 }
}
// get a new comment, not stored in the DB yet
pub fn comment_new(args CommentArg) !Comment {
mut o := Comment {
comment: args.comment
parent: args.parent
updated_at: ourtime.now().unix()
author: args.author
}
return o
}
pub fn comment_set(args CommentArg) !u32 {
mut o := comment_new(args)!
// use the generic set function, which now returns the ID
return set[Comment](mut o)!
}
pub fn comment_delete(id u32) ! {
delete[Comment](id)!
}
pub fn comment_exist(id u32) !bool {
return exists[Comment](id)!
}
pub fn comment_get(id u32) !Comment {
return get[Comment](id)!
}
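A sketch of the deduplication behaviour (it assumes a local redis reachable through redisclient.core_get, as the module requires): comment2id lowercases and trims before hashing, so equivalent texts map to the same id.

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -enable-globals run
import freeflowuniverse.herolib.hero.heromodels

id1 := heromodels.comment2id('Hello World ')!
id2 := heromodels.comment2id('hello world')!
assert id1 == id2
println(id1)
```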

View File

@@ -3,45 +3,49 @@ module heromodels
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.encoder
pub fn set[T](obj T) ! {
mut redis := redisclient.core_get()!
id := obj.id
data := encoder.encode(obj)!
redis.hset('db:${T.name}', id.str(), data.bytestr())!
pub fn set[T](mut obj_ T) !u32 {
mut redis := redisclient.core_get()!
// allocate the next id from the current entry count (the db is a hash, so count its keys)
id := u32((redis.hkeys(db_name[T]()) or { []string{} }).len)
obj_.id = id
data := encoder.encode(obj_) or { return err }
redis.hset(db_name[T](), id.str(), data.bytestr())!
return id
}
pub fn get[T](id u32) !T {
mut redis := redisclient.core_get()!
data := redis.hget('db:${T.name}', id.str())!
t := T{}
return encoder.decode[T](data.bytes())!
mut redis := redisclient.core_get()!
data := redis.hget(db_name[T](), id.str())!
return encoder.decode[T](data.bytes())!
}
pub fn exists[T](id u32) !bool {
name := T{}.type_name()
mut redis := redisclient.core_get()!
return redis.hexists('db:${name}', id.str())!
mut redis := redisclient.core_get()!
return redis.hexists(db_name[T](), id.str())!
}
pub fn delete[T](id u32) ! {
name := T{}.type_name()
mut redis := redisclient.core_get()!
redis.hdel('db:${name}', id.str())!
mut redis := redisclient.core_get()!
redis.hdel(db_name[T](), id.str())!
}
pub fn list[T]() ![]T {
mut redis := redisclient.core_get()!
ids := redis.hkeys('db:${name}')!
mut result := []T{}
for id in ids {
result << get[T](id.u32())!
}
return result
mut redis := redisclient.core_get()!
ids := redis.hkeys(db_name[T]())!
mut result := []T{}
for id in ids {
result << get[T](id.u32())!
}
return result
}
// make it easy to get a base object
pub fn new_from_base[T](args BaseArgs) !Base {
return T{
Base: new_base(args)!
}
return T { Base: new_base(args)! }
}
fn db_name[T]() string {
return "db:${T.name}"
}
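A usage sketch for the reworked generics (again assuming a running local redis): set() now assigns the id itself and returns it, which is why the object is passed mutably.

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -enable-globals run
import freeflowuniverse.herolib.hero.heromodels

mut c := heromodels.comment_new(comment: 'hello', parent: 0, author: 1)!
id := heromodels.set[heromodels.Comment](mut c)!
assert heromodels.exists[heromodels.Comment](id)!
println(heromodels.get[heromodels.Comment](id)!)
```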

View File

@@ -0,0 +1,93 @@
module heromodels
import crypto.md5
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.ourtime
// Group represents a collection of users with roles and permissions
@[heap]
pub struct Base {
pub mut:
id u32
name string
description string
created_at i64
updated_at i64
securitypolicy u32
tags u32 // when we set/get we always use []string; the list is sorted and md5ed, which gives the unique id of the tags
comments []u32
}
@[heap]
pub struct SecurityPolicy {
pub mut:
id u32
read []u32 //links to users & groups
write []u32 //links to users & groups
delete []u32 //links to users & groups
public bool
md5 string // md5 over the sorted read/write/delete ids and the public flag; maps any permission config to a unique hash
}
@[heap]
pub struct Tags {
pub mut:
id u32
names []string //unique per id
md5 string // md5 of the sorted names (each lowercased, ascii), making the unique id easy to look up
}
/////////////////
@[params]
pub struct BaseArgs {
pub mut:
id ?u32
name string
description string
securitypolicy ?u32
tags []string
comments []CommentArg
}
//make it easy to get a base object
pub fn new_base(args BaseArgs) !Base {
commentids := comment_multiset(args.comments)!
tags := tags2id(args.tags)!
return Base {
id: args.id or { 0 }
name: args.name
description: args.description
created_at: ourtime.now().unix()
updated_at: ourtime.now().unix()
securitypolicy: args.securitypolicy or { 0 }
tags: tags
comments: commentids
}
}
pub fn tags2id(tags []string) !u32 {
mut redis := redisclient.core_get()!
return if tags.len > 0 {
mut tags_fixed := tags.map(it.to_lower_ascii().trim_space()).filter(it != "")
tags_fixed.sort_ignore_case()
hash := md5.hexhash(tags_fixed.join(","))
tags_found := redis.hget("db:tags", hash)!
if tags_found == "" {
id := u32(redis.incr("db:tags:id")!)
redis.hset("db:tags", hash, id.str())!
redis.hset("db:tags", id.str(), tags_fixed.join(","))!
id
} else {
tags_found.u32()
}
} else {
0
}
}
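Tags get the same treatment: the list is lowercased, sorted, joined and hashed, so neither order nor case affects the resulting id. A sketch, again assuming a local redis:

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -enable-globals run
import freeflowuniverse.herolib.hero.heromodels

id1 := heromodels.tags2id(['B', 'a'])!
id2 := heromodels.tags2id(['a ', 'b'])!
assert id1 == id2
```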

View File

@@ -1,112 +0,0 @@
module openrpc
import json
import freeflowuniverse.herolib.hero.heromodels
// Comment-specific argument structures
@[params]
pub struct CommentGetArgs {
pub mut:
id ?u32
author ?u32
parent ?u32
}
@[params]
pub struct CommentDeleteArgs {
pub mut:
id u32
}
// comment_get retrieves comments based on the provided arguments
pub fn comment_get(params string) !string {
// Handle empty params
if params == 'null' || params == '{}' {
return error('No valid search criteria provided. Please specify id, author, or parent.')
}
args := json.decode(CommentGetArgs, params)!
// If ID is provided, get specific comment
if id := args.id {
comment := heromodels.comment_get(id)!
return json.encode(comment)
}
// If author is provided, find comments by author
if author := args.author {
return get_comments_by_author(author)!
}
// If parent is provided, find child comments
if parent := args.parent {
return get_comments_by_parent(parent)!
}
return error('No valid search criteria provided. Please specify id, author, or parent.')
}
// comment_set creates or updates a comment
pub fn comment_set(params string) !string {
comment_arg := json.decode(heromodels.CommentArgExtended, params)!
id := heromodels.comment_set(comment_arg)!
return json.encode({
'id': id
})
}
// comment_delete removes a comment by ID
pub fn comment_delete(params string) !string {
args := json.decode(CommentDeleteArgs, params)!
// Check if comment exists
if !heromodels.exists[heromodels.Comment](args.id)! {
return error('Comment with id ${args.id} does not exist')
}
// Delete using core method
heromodels.delete[heromodels.Comment](args.id)!
result_json := '{"success": true, "id": ${args.id}}'
return result_json
}
// comment_list returns all comment IDs
pub fn comment_list() !string {
comments := heromodels.list[heromodels.Comment]()!
mut ids := []u32{}
for comment in comments {
ids << comment.id
}
return json.encode(ids)
}
// Helper function to get comments by author
fn get_comments_by_author(author u32) !string {
all_comments := heromodels.list[heromodels.Comment]()!
mut matching_comments := []heromodels.Comment{}
for comment in all_comments {
if comment.author == author {
matching_comments << comment
}
}
return json.encode(matching_comments)
}
// Helper function to get comments by parent
fn get_comments_by_parent(parent u32) !string {
all_comments := heromodels.list[heromodels.Comment]()!
mut matching_comments := []heromodels.Comment{}
for comment in all_comments {
if comment.parent == parent {
matching_comments << comment
}
}
return json.encode(matching_comments)
}

View File

@@ -0,0 +1,75 @@
module openrpc
import json
import freeflowuniverse.herolib.schemas.openrpc
import freeflowuniverse.herolib.hero.heromodels
import freeflowuniverse.herolib.schemas.jsonrpc
import os
const openrpc_path = os.join_path(os.dir(@FILE), 'openrpc.json')
pub fn new_heromodels_handler() !openrpc.Handler {
mut openrpc_handler := openrpc.Handler {
specification: openrpc.new(path: openrpc_path)!
}
openrpc_handler.register_procedure_handle('comment_get', comment_get)
openrpc_handler.register_procedure_handle('comment_set', comment_set)
openrpc_handler.register_procedure_handle('comment_delete', comment_delete)
openrpc_handler.register_procedure_handle('comment_list', comment_list)
openrpc_handler.register_procedure_handle('calendar_get', calendar_get)
openrpc_handler.register_procedure_handle('calendar_set', calendar_set)
openrpc_handler.register_procedure_handle('calendar_delete', calendar_delete)
openrpc_handler.register_procedure_handle('calendar_list', calendar_list)
return openrpc_handler
}
pub fn comment_get(request jsonrpc.Request) !jsonrpc.Response {
payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
result := heromodels.comment_get(payload) or { return jsonrpc.internal_error }
return jsonrpc.new_response(request.id, json.encode(result))
}
pub fn comment_set(request jsonrpc.Request) !jsonrpc.Response {
payload := jsonrpc.decode_payload[heromodels.CommentArg](request.params) or { return jsonrpc.invalid_params }
return jsonrpc.new_response(request.id, heromodels.comment_set(payload)!.str())
}
pub fn comment_delete(request jsonrpc.Request) !jsonrpc.Response {
payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
heromodels.comment_delete(payload) or { return jsonrpc.internal_error }
return jsonrpc.new_response(request.id, '')
}
pub fn comment_list(request jsonrpc.Request) !jsonrpc.Response {
result := heromodels.list[heromodels.Comment]() or { return jsonrpc.internal_error }
return jsonrpc.new_response(request.id, json.encode(result))
}
pub fn calendar_get(request jsonrpc.Request) !jsonrpc.Response {
payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
result := heromodels.get[heromodels.Calendar](payload) or { return jsonrpc.internal_error }
return jsonrpc.new_response(request.id, json.encode(result))
}
pub fn calendar_set(request jsonrpc.Request) !jsonrpc.Response {
mut payload := json.decode(heromodels.Calendar, request.params) or { return jsonrpc.invalid_params }
id := heromodels.set[heromodels.Calendar](mut payload) or {
println('error setting calendar: ${err}')
return jsonrpc.internal_error
}
return jsonrpc.new_response(request.id, id.str())
}
pub fn calendar_delete(request jsonrpc.Request) !jsonrpc.Response {
payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
heromodels.delete[heromodels.Calendar](payload) or { return jsonrpc.internal_error }
return jsonrpc.new_response(request.id, '')
}
pub fn calendar_list(request jsonrpc.Request) !jsonrpc.Response {
result := heromodels.list[heromodels.Calendar]() or { return jsonrpc.internal_error }
return jsonrpc.new_response(request.id, json.encode(result))
}

View File

@@ -0,0 +1,110 @@
module openrpc
import json
import freeflowuniverse.herolib.hero.heromodels
// Comment-specific argument structures
@[params]
pub struct CommentGetArgs {
pub mut:
id ?u32
author ?u32
parent ?u32
}
@[params]
pub struct CommentDeleteArgs {
pub mut:
id u32
}
// // comment_get retrieves comments based on the provided arguments
// pub fn comment_get(params string) !string {
// // Handle empty params
// if params == 'null' || params == '{}' {
// return error('No valid search criteria provided. Please specify id, author, or parent.')
// }
// args := json.decode(CommentGetArgs, params)!
// // If ID is provided, get specific comment
// if id := args.id {
// comment := heromodels.comment_get(id)!
// return json.encode(comment)
// }
// // If author is provided, find comments by author
// if author := args.author {
// return get_comments_by_author(author)!
// }
// // If parent is provided, find child comments
// if parent := args.parent {
// return get_comments_by_parent(parent)!
// }
// return error('No valid search criteria provided. Please specify id, author, or parent.')
// }
// // comment_set creates or updates a comment
// pub fn comment_set(params string) !string {
// comment_arg := json.decode(heromodels.CommentArgExtended, params)!
// id := heromodels.comment_set(comment_arg)!
// return json.encode({'id': id})
// }
// // comment_delete removes a comment by ID
// pub fn comment_delete(params string) !string {
// args := json.decode(CommentDeleteArgs, params)!
// // Check if comment exists
// if !heromodels.exists[heromodels.Comment](args.id)! {
// return error('Comment with id ${args.id} does not exist')
// }
// // Delete using core method
// heromodels.delete[heromodels.Comment](args.id)!
// result_json := '{"success": true, "id": ${args.id}}'
// return result_json
// }
// // comment_list returns all comment IDs
// pub fn comment_list() !string {
// comments := heromodels.list[heromodels.Comment]()!
// mut ids := []u32{}
// for comment in comments {
// ids << comment.id
// }
// return json.encode(ids)
// }
// // Helper function to get comments by author
// fn get_comments_by_author(author u32) !string {
// all_comments := heromodels.list[heromodels.Comment]()!
// mut matching_comments := []heromodels.Comment{}
// for comment in all_comments {
// if comment.author == author {
// matching_comments << comment
// }
// }
// return json.encode(matching_comments)
// }
// // Helper function to get comments by parent
// fn get_comments_by_parent(parent u32) !string {
// all_comments := heromodels.list[heromodels.Comment]()!
// mut matching_comments := []heromodels.Comment{}
// for comment in all_comments {
// if comment.parent == parent {
// matching_comments << comment
// }
// }
// return json.encode(matching_comments)
// }

View File

@@ -0,0 +1,9 @@
module openrpc
// verify the heromodels OpenRPC handler can be constructed from its specification
pub fn test_new_heromodels_handler() ! {
_ := new_heromodels_handler()!
}

View File

@@ -1,52 +0,0 @@
module openrpc
import freeflowuniverse.herolib.schemas.openrpcserver
// HeroModelsServer extends the base openrpcserver.RPCServer with heromodels-specific functionality
pub struct HeroModelsServer {
openrpcserver.RPCServer
}
@[params]
pub struct HeroModelsServerArgs {
pub mut:
socket_path string = '/tmp/heromodels'
}
// new_heromodels_server creates a new HeroModels RPC server
pub fn new_heromodels_server(args HeroModelsServerArgs) !&HeroModelsServer {
base_server := openrpcserver.new_rpc_server(
socket_path: args.socket_path
)!
return &HeroModelsServer{
RPCServer: *base_server
}
}
// process extends the base process method with heromodels-specific methods
pub fn (mut server HeroModelsServer) process(method string, params_str string) !string {
// Route to heromodels-specific methods first
result := match method {
'comment_get' {
comment_get(params_str)!
}
'comment_set' {
comment_set(params_str)!
}
'comment_delete' {
comment_delete(params_str)!
}
'comment_list' {
comment_list()!
}
'rpc.discover' {
server.discover()!
}
else {
return server.create_error_response(-32601, 'Method not found', method)
}
}
return result
}

View File

@@ -0,0 +1,26 @@
module openrpc
import freeflowuniverse.herolib.schemas.openrpc
// HeroModelsServer extends the base openrpc.UNIXServer with heromodels-specific functionality
pub struct HeroModelsServer {
openrpc.UNIXServer
}
@[params]
pub struct HeroModelsServerArgs {
pub mut:
socket_path string = '/tmp/heromodels'
}
// new_heromodels_server creates a new HeroModels RPC server
pub fn new_heromodels_server(args HeroModelsServerArgs) !&HeroModelsServer {
mut base_server := openrpc.new_unix_server(
new_heromodels_handler()!,
socket_path: args.socket_path
)!
return &HeroModelsServer{
UNIXServer: *base_server
}
}

View File

@@ -1,72 +1,47 @@
module heromodels
import freeflowuniverse.herolib.schemas.openrpcserver
import freeflowuniverse.herolib.schemas.openrpc
// Re-export types from openrpcserver for convenience
pub type Base = openrpcserver.Base
pub type BaseArgs = openrpcserver.BaseArgs
pub type CommentArg = openrpcserver.CommentArg
// HeroModelsServer extends the base openrpcserver.RPCServer with heromodels-specific functionality
pub struct HeroModelsServer {
openrpcserver.RPCServer
}
@[params]
pub struct HeroModelsServerArgs {
pub mut:
socket_path string = '/tmp/heromodels'
}
// // Re-export core methods from openrpcserver for convenience
// pub fn set[T](mut obj T) !u32 {
// return openrpcserver.set[T](mut obj)!
// }
// new_heromodels_server creates a new HeroModels RPC server
pub fn new_heromodels_server(args HeroModelsServerArgs) !&HeroModelsServer {
base_server := openrpcserver.new_rpc_server(
socket_path: args.socket_path
)!
// pub fn get[T](id u32) !T {
// return openrpcserver.get[T](id)!
// }
return &HeroModelsServer{
RPCServer: *base_server
}
}
// pub fn exists[T](id u32) !bool {
// return openrpcserver.exists[T](id)!
// }
// Re-export core methods from openrpcserver for convenience
pub fn set[T](mut obj T) !u32 {
return openrpcserver.set[T](mut obj)!
}
// pub fn delete[T](id u32) ! {
// openrpcserver.delete[T](id)!
// }
pub fn get[T](id u32) !T {
return openrpcserver.get[T](id)!
}
// pub fn list[T]() ![]T {
// return openrpcserver.list[T]()!
// }
pub fn exists[T](id u32) !bool {
return openrpcserver.exists[T](id)!
}
// // Re-export utility functions
// pub fn tags2id(tags []string) !u32 {
// return openrpcserver.tags2id(tags)!
// }
pub fn delete[T](id u32) ! {
openrpcserver.delete[T](id)!
}
// pub fn comment_multiset(args []CommentArg) ![]u32 {
// return openrpcserver.comment_multiset(args)!
// }
pub fn list[T]() ![]T {
return openrpcserver.list[T]()!
}
// pub fn comments2ids(args []CommentArg) ![]u32 {
// return openrpcserver.comments2ids(args)!
// }
// Re-export utility functions
pub fn tags2id(tags []string) !u32 {
return openrpcserver.tags2id(tags)!
}
// pub fn comment2id(comment string) !u32 {
// return openrpcserver.comment2id(comment)!
// }
pub fn comment_multiset(args []CommentArg) ![]u32 {
return openrpcserver.comment_multiset(args)!
}
pub fn comments2ids(args []CommentArg) ![]u32 {
return openrpcserver.comments2ids(args)!
}
pub fn comment2id(comment string) !u32 {
return openrpcserver.comment2id(comment)!
}
pub fn new_base(args BaseArgs) !Base {
return openrpcserver.new_base(args)!
}
// pub fn new_base(args BaseArgs) !Base {
// return openrpcserver.new_base(args)!
// }

View File

@@ -84,36 +84,27 @@ struct ProjectContent {
tags []string
}
pub fn new_project(name string, description string, group_id string) Project {
mut project := Project{
name: name
description: description
group_id: group_id
status: .planning
created_at: time.now().unix()
updated_at: time.now().unix()
swimlanes: [
Swimlane{
id: 'todo'
name: 'To Do'
order: 1
color: '#f1c40f'
},
Swimlane{
id: 'in_progress'
name: 'In Progress'
order: 2
color: '#3498db'
},
Swimlane{
id: 'done'
name: 'Done'
order: 3
color: '#2ecc71'
is_done: true
},
]
}
project.calculate_id()
return project
pub struct NewProject {
pub mut:
name string
description string
group_id string
}
pub fn new_project(params NewProject) !Project {
mut project := Project{
name: params.name
description: params.description
group_id: params.group_id
status: .planning
created_at: time.now().unix()
updated_at: time.now().unix()
swimlanes: [
Swimlane{id: 'todo', name: 'To Do', order: 1, color: '#f1c40f'},
Swimlane{id: 'in_progress', name: 'In Progress', order: 2, color: '#3498db'},
Swimlane{id: 'done', name: 'Done', order: 3, color: '#2ecc71', is_done: true}
]
}
project.calculate_id()
return project
}
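A call-site sketch from within the same module (the values are hypothetical); note that new_project now takes a params struct and returns a Result:

```v
mut project := new_project(NewProject{
    name:        'website'
    description: 'marketing site'
    group_id:    'grp1'
})!
println(project.swimlanes.map(it.name)) // ['To Do', 'In Progress', 'Done']
```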

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'herorunner'
classname:'HeroRunner'
singleton:0
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:0
hasconfig:0
build:0

View File

@@ -0,0 +1,67 @@
module herorunner
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.installers.ulist
import os
//////////////////// following actions are not specific to instance of the object
fn installed() !bool {
return false
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
return ulist.UList{}
}
fn upload() ! {
}
fn install() ! {
console.print_header('install herorunner')
osal.package_install('crun')!
// osal.exec(
// cmd: '
// '
// stdout: true
// name: 'herorunner_install'
// )!
}
fn destroy() ! {
// mut systemdfactory := systemd.new()!
// systemdfactory.destroy("zinit")!
// osal.process_kill_recursive(name:'zinit')!
// osal.cmd_delete('zinit')!
// osal.package_remove('
// podman
// conmon
// buildah
// skopeo
// runc
// ')!
// //will remove all paths where go/bin is found
// osal.profile_path_add_remove(paths2delete:"go/bin")!
// osal.rm("
// podman
// conmon
// buildah
// skopeo
// runc
// /var/lib/containers
// /var/lib/podman
// /var/lib/buildah
// /tmp/podman
// /tmp/conmon
// ")!
}

View File

@@ -0,0 +1,79 @@
module herorunner
import freeflowuniverse.herolib.core.playbook { PlayBook }
import freeflowuniverse.herolib.ui.console
import json
import freeflowuniverse.herolib.osal.startupmanager
__global (
herorunner_global map[string]&HeroRunner
herorunner_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
}
pub fn new(args ArgsGet) !&HeroRunner {
return &HeroRunner{}
}
pub fn get(args ArgsGet) !&HeroRunner {
return new(args)!
}
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'herorunner.') {
return
}
mut install_actions := plbook.find(filter: 'herorunner.configure')!
if install_actions.len > 0 {
return error("can't configure herorunner, because no configuration allowed for this installer.")
}
mut other_actions := plbook.find(filter: 'herorunner.')!
for other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build'] {
mut p := other_action.params
reset := p.get_default_false('reset')
if other_action.name == 'destroy' || reset {
console.print_debug('install action herorunner.destroy')
destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action herorunner.install')
install()!
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self HeroRunner) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn (mut self HeroRunner) destroy() ! {
switch(self.name)
destroy()!
}
// switch instance to be used for herorunner
pub fn switch(name string) {
herorunner_default = name
}
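A minimal usage sketch; the `something` path segment mirrors the placeholder in the README below, as this diff does not show the installer's real module location:

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -enable-globals run
import freeflowuniverse.herolib.installers.something.herorunner

mut runner := herorunner.get()!
runner.install(reset: false)! // install() currently just pulls in crun via the package manager
```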

View File

@@ -0,0 +1,34 @@
module herorunner
import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.data.encoderhero
import os
pub const version = '0.0.0'
const singleton = false
const default = true
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct HeroRunner {
pub mut:
name string = 'default'
}
// your checking & initialization code if needed
fn obj_init(mycfg_ HeroRunner) !HeroRunner {
mut mycfg := mycfg_
return mycfg
}
// called before start if done
fn configure() ! {
// mut installer := get()!
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_loads(heroscript string) !HeroRunner {
mut obj := encoderhero.decode[HeroRunner](heroscript)!
return obj
}

View File

@@ -0,0 +1,44 @@
# herorunner
To get started
```v
import freeflowuniverse.herolib.installers.something.herorunner as herorunner_installer
heroscript := "
!!herorunner.configure name:'test'
password: '1234'
port: 7701
!!herorunner.start name:'test' reset:1
"
herorunner_installer.play(heroscript: heroscript)!
// or we can call the default and do a start with reset
// mut installer := herorunner_installer.get()!
// installer.start(reset:true)!
```
## example heroscript
```hero
!!herorunner.configure
homedir: '/home/user/herorunner'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```

View File

@@ -3,33 +3,33 @@ module core
import freeflowuniverse.herolib.core
fn test_package_management() {
platform_ := core.platform()!
// platform_ := core.platform()!
if platform_ == .osx {
// Check if brew is installed
if !cmd_exists('brew') {
eprintln('WARNING: Homebrew is not installed. Please install it to run package management tests on OSX.')
return
}
}
// if platform_ == .osx {
// // Check if brew is installed
// if !cmd_exists('brew') {
// eprintln('WARNING: Homebrew is not installed. Please install it to run package management tests on OSX.')
// return
// }
// }
is_wget_installed := cmd_exists('wget')
// is_wget_installed := cmd_exists('wget')
if is_wget_installed {
// Clean up - remove wget
package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' }
assert !cmd_exists('wget')
// Reinstalling wget as it was previously installed
package_install('wget') or { assert false, 'Failed to install wget: ${err}' }
assert cmd_exists('wget')
return
}
// if is_wget_installed {
// // Clean up - remove wget
// package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' }
// assert !cmd_exists('wget')
// // Reinstalling wget as it was previously installed
// package_install('wget') or { assert false, 'Failed to install wget: ${err}' }
// assert cmd_exists('wget')
// return
// }
// Install wget and verify it is installed
package_install('wget') or { assert false, 'Failed to install wget: ${err}' }
assert cmd_exists('wget')
// // Install wget and verify it is installed
// package_install('wget') or { assert false, 'Failed to install wget: ${err}' }
// assert cmd_exists('wget')
// Clean up - remove wget
package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' }
assert !cmd_exists('wget')
// // Clean up - remove wget
// package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' }
// assert !cmd_exists('wget')
}

View File

@@ -7,7 +7,7 @@ import time
import os
@[heap]
struct Pane {
pub struct Pane {
pub mut:
window &Window @[str: skip]
id int // pane id (e.g., %1, %2)
@@ -696,3 +696,22 @@ pub fn (p Pane) logging_status() string {
}
return 'disabled'
}
pub fn (mut p Pane) clear() ! {
// Kill current process in the pane
osal.exec(
cmd: 'tmux send-keys -t %${p.id} C-c'
stdout: false
name: 'tmux_pane_interrupt'
) or {}
// Reset pane by running a new bash
osal.exec(
cmd: "tmux send-keys -t %${p.id} '/bin/bash' Enter"
stdout: false
name: 'tmux_pane_reset_shell'
)!
// Update pane info
p.window.scan()!
}
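Below is a minimal usage sketch for the new `clear()` helper; it assumes the `Pane` was already obtained from a running tmux session (the setup around it is hypothetical):

```v
// sketch only: `p` is assumed to come from the tmux module, e.g. via window.pane_get(1)!
fn reset_pane(mut p Pane) ! {
	// interrupts whatever runs in the pane, respawns /bin/bash and rescans the window
	p.clear()!
}
```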

View File

@@ -406,3 +406,22 @@ pub fn (mut w Window) stop_ttyd(port int) ! {
}
println('ttyd stopped for window ${w.name} on port ${port} (if it was running)')
}
// Get a pane by its ID
pub fn (mut w Window) pane_get(id int) !&Pane {
w.scan()! // refresh info from tmux
for pane in w.panes {
if pane.id == id {
return pane
}
}
return error('Pane with id ${id} not found in window ${w.name}. Available panes: ${w.panes}')
}
// Create a new pane (just a split with default shell)
pub fn (mut w Window) pane_new() !&Pane {
return w.pane_split(
cmd: '/bin/bash'
horizontal: true
)
}
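A short hedged sketch combining `pane_new()` and `pane_get()`; the `Window` value is assumed to come from an existing tmux session:

```v
// sketch: `w` is assumed to be a &Window belonging to a live tmux session
fn demo_panes(mut w Window) ! {
	mut extra := w.pane_new()! // horizontal split running /bin/bash
	println('created pane %${extra.id}')
	mut same := w.pane_get(extra.id)! // look the same pane up again by id
	same.clear()!
}
```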

View File

@@ -1,5 +1,6 @@
module jsonrpc
import x.json2 as json
import net.websocket
// This file implements a JSON-RPC 2.0 handler for WebSocket servers.
@@ -19,7 +20,7 @@ pub mut:
// 2. Execute the procedure with the extracted parameters
// 3. Return the result as a JSON-encoded string
// If an error occurs during any of these steps, it should be returned.
pub type ProcedureHandler = fn (payload string) !string
pub type ProcedureHandler = fn (request Request) !Response
// new_handler creates a new JSON-RPC handler with the specified procedure handlers.
//
@@ -39,10 +40,79 @@ pub fn new_handler(handler Handler) !&Handler {
// Parameters:
// - method: The name of the method to register
// - procedure: The procedure handler function to register
pub fn (mut handler Handler) register_procedure(method string, procedure ProcedureHandler) {
pub fn (mut handler Handler) register_procedure[T, U](method string, function fn (T) !U) {
procedure := Procedure[T, U]{
function: function
method: method
}
handler.procedures[procedure.method] = procedure.handle
}
// register_procedure_void registers a procedure handler for a method whose function returns no result.
//
// Parameters:
// - method: The name of the method to register
// - procedure: The procedure handler function to register
pub fn (mut handler Handler) register_procedure_void[T](method string, function fn (T) !) {
procedure := ProcedureVoid[T]{
function: function
method: method
}
handler.procedures[procedure.method] = procedure.handle
}
// register_procedure_handle registers a raw ProcedureHandler for the specified method.
//
// Parameters:
// - method: The name of the method to register
// - procedure: The procedure handler function to register
pub fn (mut handler Handler) register_procedure_handle(method string, procedure ProcedureHandler) {
handler.procedures[method] = procedure
}
pub struct Procedure[T, U] {
pub mut:
method string
function fn (T) !U
}
pub struct ProcedureVoid[T] {
pub mut:
method string
function fn (T) !
}
pub fn (pw Procedure[T, U]) handle(request Request) !Response {
payload := decode_payload[T](request.params) or { return invalid_params }
result := pw.function(payload) or { return internal_error }
return new_response(request.id, json.encode(result))
}
pub fn (pw ProcedureVoid[T]) handle(request Request) !Response {
payload := decode_payload[T](request.params) or { return invalid_params }
pw.function(payload) or { return internal_error }
return new_response(request.id, 'null')
}
pub fn decode_payload[T](payload string) !T {
$if T is string {
return payload
} $else $if T is int {
return payload.int()
} $else $if T is u32 {
return payload.u32()
} $else $if T is bool {
return payload.bool()
} $else {
return json.decode[T](payload) or { return error('Failed to decode payload: ${err}') }
}
panic('Unsupported type: ${T.name}')
}
fn error_to_jsonrpc(err IError) !RPCError {
return error('Internal error: ${err.msg()}')
}
// handler is a callback function compatible with the WebSocket server's message handler interface.
// It processes an incoming WebSocket message as a JSON-RPC request and returns the response.
//
@@ -53,9 +123,12 @@ pub fn (mut handler Handler) register_procedure(method string, procedure Procedu
// Returns:
// - The JSON-RPC response as a string
// Note: This method panics if an error occurs during handling
pub fn (handler Handler) handler(client &websocket.Client, message string) string {
return handler.handle(message) or { panic(err) }
}
// pub fn (handler Handler) handle_message(client &websocket.Client, message string) string {
// req := decode_request(message) or {
// return invalid_request }
// resp := handler.handle(req) or { panic(err) }
// return resp.encode()
// }
// handle processes a JSON-RPC request message and invokes the appropriate procedure handler.
// If the requested method is not found, it returns a method_not_found error response.
@@ -65,17 +138,13 @@ pub fn (handler Handler) handler(client &websocket.Client, message string) strin
//
// Returns:
// - The JSON-RPC response as a string, or an error if processing fails
pub fn (handler Handler) handle(message string) !string {
// Extract the method name from the request
method := decode_request_method(message)!
// log.info('Handling remote procedure call to method: ${method}')
// Look up the procedure handler for the requested method
procedure_func := handler.procedures[method] or {
// log.error('No procedure handler for method ${method} found')
return method_not_found
pub fn (handler Handler) handle(request Request) !Response {
procedure_func := handler.procedures[request.method] or {
return new_error(request.id, method_not_found)
}
// Execute the procedure handler with the request payload
response := procedure_func(message) or { panic(err) }
return response
}
return procedure_func(request) or {
panic(err)
}
}
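To illustrate the refactored API, here is a hedged sketch that registers a typed procedure via the new generic `register_procedure` and dispatches a request through `handle`; the parameter struct and method name are illustrative:

```v
import freeflowuniverse.herolib.schemas.jsonrpc

struct GreetArgs {
	name string
}

fn greet(args GreetArgs) !string {
	return 'hello ${args.name}'
}

fn demo_jsonrpc() ! {
	mut h := jsonrpc.new_handler(jsonrpc.Handler{})!
	h.register_procedure[GreetArgs, string]('greet', greet)
	resp := h.handle(jsonrpc.new_request('greet', '{"name":"Alice"}'))!
	println(resp.encode())
}
```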

View File

@@ -65,7 +65,7 @@ pub fn new_error_response(id int, error RPCError) Response {
// Returns:
// - A Response object or an error if parsing fails or the response is invalid
pub fn decode_response(data string) !Response {
raw := json2.raw_decode(data)!
raw := json2.raw_decode(data) or { return error('Failed to decode JSONRPC response ${data}\n${err}') }
raw_map := raw.as_map()
// Validate that the response contains either result or error, but not both or neither
@@ -105,7 +105,7 @@ pub fn decode_response(data string) !Response {
pub fn (resp Response) encode() string {
// Payload is already json string
if resp.error_ != none {
return '{"jsonrpc":"2.0","id":${resp.id},"error":${resp.error_.str()}}'
return '{"jsonrpc":"2.0","id":${resp.id},"error":${json2.encode(resp.error_)}}'
} else if resp.result != none {
return '{"jsonrpc":"2.0","id":${resp.id},"result":${resp.result}}'
}

View File

@@ -0,0 +1,21 @@
module reflection
import freeflowuniverse.herolib.schemas.jsonrpc
pub struct Handler[T] {
pub mut:
receiver T
}
pub fn new_handler[T](receiver T) Handler[T] {
return Handler[T]{
receiver: receiver
}
}
pub fn (mut h Handler[T]) handle(request jsonrpc.Request) !jsonrpc.Response {
	receiver := h.receiver
	$for method in receiver.methods {
		println('method ${method.name}')
	}
	// reflection-based dispatch is not implemented yet; report method_not_found
	return jsonrpc.new_error_response(request.id, jsonrpc.method_not_found)
}

View File

@@ -0,0 +1,122 @@
module openrpc
import x.json2 as json
import net.unix
import time
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.schemas.jsonrpc
pub struct UNIXClient {
pub mut:
socket_path string
timeout int = 30 // Default timeout in seconds
}
@[params]
pub struct UNIXClientParams {
pub mut:
socket_path string = '/tmp/heromodels'
timeout int = 30
}
// new_unix_client creates a new OpenRPC Unix client
pub fn new_unix_client(params UNIXClientParams) &UNIXClient {
return &UNIXClient{
socket_path: params.socket_path
timeout: params.timeout
}
}
// call makes a JSON-RPC call to the server with typed parameters and result
pub fn (mut client UNIXClient) call_generic[T, D](method string, params T) !D {
// Create a generic request with typed parameters
response := client.call(method, json.encode(params))!
return json.decode[D](response)!
}
// call_str makes a JSON-RPC call with string parameters and returns string result
pub fn (mut client UNIXClient) call(method string, params string) !string {
// Create a standard request with string parameters
request := jsonrpc.new_request(method, params)
// Send the request and get response
response_json := client.send_request(request.encode())!
// Decode response
response := jsonrpc.decode_response(response_json) or {
return error('Failed to decode response: ${err}')
}
// Validate response
response.validate() or {
return error('Invalid response: ${err}')
}
// Check ID matches
if response.id != request.id {
return error('Response ID ${response.id} does not match request ID ${request.id}')
}
// Return result or error
return response.result()
}
// discover calls the rpc.discover method to get the OpenRPC specification
pub fn (mut client UNIXClient) discover() !OpenRPC {
spec_json := client.call('rpc.discover', '')!
return decode(spec_json)!
}
// send_request_str sends a string request and returns string result
fn (mut client UNIXClient) send_request(request string) !string {
// Connect to Unix socket
mut conn := unix.connect_stream(client.socket_path) or {
return error('Failed to connect to Unix socket at ${client.socket_path}: ${err}')
}
defer {
conn.close() or { console.print_stderr('Error closing connection: ${err}') }
}
// Set timeout
if client.timeout > 0 {
conn.set_read_timeout(client.timeout * time.second)
conn.set_write_timeout(client.timeout * time.second)
}
// Send request
console.print_debug('Sending request: ${request}')
conn.write_string(request) or {
return error('Failed to send request: ${err}')
}
// Read response
mut buffer := []u8{len: 4096}
bytes_read := conn.read(mut buffer) or {
return error('Failed to read response: ${err}')
}
if bytes_read == 0 {
return error('No response received from server')
}
response := buffer[..bytes_read].bytestr()
console.print_debug('Received response: ${response}')
return response
}
// ping sends a simple ping to test connectivity
pub fn (mut client UNIXClient) ping() !bool {
// Try to discover the specification as a connectivity test
client.discover() or {
return error('Ping failed: ${err}')
}
return true
}
// close closes any persistent connections (currently no-op for Unix sockets)
pub fn (mut client UNIXClient) close() ! {
// Unix socket connections are closed per request, so nothing to do here
}
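A minimal client-side sketch against this Unix-socket transport; the module path and method name are assumptions, and the socket path matches the default above:

```v
import freeflowuniverse.herolib.schemas.openrpc // module path assumed from the repo layout

fn demo_client() ! {
	mut client := openrpc.new_unix_client(socket_path: '/tmp/heromodels')
	spec := client.discover()! // fetches the OpenRPC document via rpc.discover
	println('server exposes ${spec.methods.len} methods')
	raw := client.call('calendar_get', '{"id":1}')! // illustrative method and params
	println(raw)
}
```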

View File

@@ -0,0 +1,175 @@
module openrpc
import freeflowuniverse.herolib.schemas.jsonrpc
import freeflowuniverse.herolib.schemas.jsonschema
// Test struct for typed parameters
struct TestParams {
name string
value int
}
// Test struct for typed result
struct TestResult {
success bool
message string
}
// Example custom handler for testing
struct TestHandler {
}
fn (mut h TestHandler) handle(req jsonrpc.Request) !jsonrpc.Response {
match req.method {
'test.echo' {
return jsonrpc.new_response(req.id, req.params)
}
'test.add' {
// Simple addition test - expect params like '{"a": 5, "b": 3}'
return jsonrpc.new_response(req.id, '{"result": 8}')
}
'test.greet' {
// Greeting test - expect params like '{"name": "Alice"}'
return jsonrpc.new_response(req.id, '{"message": "Hello, World!"}')
}
else {
return jsonrpc.new_error_response(req.id, jsonrpc.method_not_found)
}
}
}
fn test_unix_client_basic() {
// This test requires a running server, so it's more of an integration test
// In practice, you would start a server in a separate goroutine or process
mut client := new_unix_client(
socket_path: '/tmp/test_heromodels'
timeout: 5
)
// Test string-based call
result := client.call('test.echo', '{"message": "hello"}') or {
println('Expected error since no server is running: ${err}')
return
}
println('Echo result: ${result}')
}
fn test_unix_client_typed() {
mut client := new_unix_client(
socket_path: '/tmp/test_heromodels'
timeout: 5
)
// Test typed call
params := TestParams{
name: 'test'
value: 42
}
result := client.call_generic[TestParams, TestResult]('test.process', params) or {
println('Expected error since no server is running: ${err}')
return
}
println('Typed result: ${result}')
}
fn test_unix_client_discover() {
mut client := new_unix_client(
socket_path: '/tmp/test_heromodels'
timeout: 5
)
// Test discovery
spec := client.discover() or {
println('Expected error since no server is running: ${err}')
return
}
println('OpenRPC spec version: ${spec.openrpc}')
println('Info title: ${spec.info.title}')
}
fn test_unix_client_ping() {
mut client := new_unix_client(
socket_path: '/tmp/test_heromodels'
timeout: 5
)
// Test ping
is_alive := client.ping() or {
println('Expected error since no server is running: ${err}')
return
}
println('Server is alive: ${is_alive}')
}
// Integration test that demonstrates full client-server interaction
fn test_full_integration() {
socket_path := '/tmp/test_heromodels_integration'
// Create a test OpenRPC specification
mut spec := OpenRPC{
openrpc: '1.3.0'
info: Info{
title: 'Test API'
version: '1.0.0'
}
methods: [
Method{
name: 'test.echo'
params: []
result: ContentDescriptor{
name: 'result'
schema: jsonschema.Schema{}
}
}
]
}
// Create handler
mut test_handler := TestHandler{}
handler := Handler{
specification: spec
handler: test_handler
}
// Start server in background
mut server := new_unix_server(handler, socket_path: socket_path) or {
println('Failed to create server: ${err}')
return
}
// Start server in a separate thread
spawn fn [mut server] () {
server.start() or {
println('Server error: ${err}')
}
}()
// Give server time to start
// time.sleep(100 * time.millisecond)
// Create client and test
mut client := new_unix_client(
socket_path: socket_path
timeout: 5
)
// Test the connection
result := client.call('test.echo', '{"test": "data"}') or {
println('Client call failed: ${err}')
server.close() or {}
return
}
println('Integration test result: ${result}')
// Clean up
server.close() or {
println('Failed to close server: ${err}')
}
}

View File

@@ -1,6 +1,7 @@
module codegen
import os
import json
import freeflowuniverse.herolib.core.code { Alias, Struct }
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.schemas.openrpc
@@ -10,14 +11,14 @@ const doc_path = '${os.dir(@FILE)}/testdata/openrpc.json'
fn test_generate_model() ! {
mut doc_file := pathlib.get_file(path: doc_path)!
content := doc_file.read()!
object := openrpc.decode(content)!
object := json.decode(openrpc.OpenRPC, content)!
model := generate_model(object)!
assert model.len == 3
assert model[0] is Alias
pet_id := model[0] as Alias
assert pet_id.name == 'PetId'
assert pet_id.typ.symbol == 'int'
assert pet_id.typ.symbol() == 'int'
assert model[1] is Struct
pet_struct := model[1] as Struct
@@ -26,23 +27,23 @@ fn test_generate_model() ! {
// test field is `id PetId @[required]`
assert pet_struct.fields[0].name == 'id'
assert pet_struct.fields[0].typ.symbol == 'PetId'
assert pet_struct.fields[0].typ.symbol() == 'PetId'
assert pet_struct.fields[0].attrs.len == 1
assert pet_struct.fields[0].attrs[0].name == 'required'
// test field is `name string @[required]`
assert pet_struct.fields[1].name == 'name'
assert pet_struct.fields[1].typ.symbol == 'string'
assert pet_struct.fields[1].typ.symbol() == 'string'
assert pet_struct.fields[1].attrs.len == 1
assert pet_struct.fields[1].attrs[0].name == 'required'
// test field is `tag string`
assert pet_struct.fields[2].name == 'tag'
assert pet_struct.fields[2].typ.symbol == 'string'
assert pet_struct.fields[2].typ.symbol() == 'string'
assert pet_struct.fields[2].attrs.len == 0
assert model[2] is Alias
pets_alias := model[2] as Alias
assert pets_alias.name == 'Pets'
assert pets_alias.typ.symbol == '[]Pet'
assert pets_alias.typ.symbol() == '[]Pet'
}

View File

@@ -43,7 +43,7 @@ pub fn (mut c HTTPController) index(mut ctx Context) veb.Result {
}
// Process the JSONRPC request with the OpenRPC handler
response := c.handler.handle(request) or {
response := c.handle(request) or {
return ctx.server_error('Handler error: ${err.msg()}')
}

View File

@@ -11,7 +11,7 @@ pub fn decode_json_any(data string) !Any {
pub fn decode_json_string(data string) !string {
mut o := decode(data)!
return json.encode(o)
return json.encode(o)
}
pub fn decode(data string) !OpenRPC {

View File

@@ -1,6 +1,7 @@
module openrpc
import x.json2
import json
import os
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.schemas.jsonschema
@@ -10,7 +11,7 @@ const doc_path = '${os.dir(@FILE)}/testdata/openrpc.json'
fn test_decode() ! {
mut doc_file := pathlib.get_file(path: doc_path)!
content := doc_file.read()!
object := decode(content)!
object := json.decode(OpenRPC, content)!
assert object.openrpc == '1.0.0-rc1'
assert object.methods.map(it.name) == ['list_pets', 'create_pet', 'get_pet']
assert object.methods.map(it.name) == ['list_pets', 'create_pet', 'get_pet', 'update_pet', 'delete_pet']
}

View File

@@ -7,50 +7,62 @@ import x.json2 { Any }
// pruning of undefined values is currently disabled, so this returns the plain json encoding.
pub fn (doc OpenRPC) encode() !string {
encoded := json.encode(doc)
raw_decode := json2.raw_decode(encoded)!
mut doc_map := raw_decode.as_map()
pruned_map := prune(doc_map)
return json2.encode_pretty[Any](pruned_map)
// raw_decode := json2.raw_decode(encoded)!
// mut doc_map := raw_decode.as_map()
// pruned_map := prune(doc_map)
// return json2.encode_pretty[Any](pruned_map)
return encoded
}
// prune recursively prunes a map of Any type, pruning map keys where the value is the default value of the variable.
// this treats undefined values as null, which is ok for openrpc document encoding.
pub fn prune(obj Any) Any {
if obj is map[string]Any {
mut pruned_map := map[string]Any{}
for key, val in obj as map[string]Any {
if key == '_type' {
continue
}
pruned_val := prune(val)
if pruned_val.str() != '' {
pruned_map[key] = pruned_val
} else if key == 'methods' || key == 'params' {
pruned_map[key] = []Any{}
}
}
if pruned_map.keys().len != 0 {
return pruned_map
}
} else if obj is []Any {
mut pruned_arr := []Any{}
for val in obj as []Any {
pruned_val := prune(val)
if pruned_val.str() != '' {
pruned_arr << pruned_val
}
}
if pruned_arr.len != 0 {
return pruned_arr
}
} else if obj is string {
if obj != '' {
return obj
}
}
return ''
// encode_pretty encodes an OpenRPC document struct into an indented json string.
// pruning of undefined values is currently disabled.
pub fn (doc OpenRPC) encode_pretty() !string {
encoded := json.encode_pretty(doc)
// raw_decode := json2.raw_decode(encoded)!
// mut doc_map := raw_decode.as_map()
// pruned_map := prune(doc_map)
// return json2.encode_pretty[Any](pruned_map)
return encoded
}
// // prune recursively prunes a map of Any type, pruning map keys where the value is the default value of the variable.
// // this treats undefined values as null, which is ok for openrpc document encoding.
// pub fn prune(obj Any) Any {
// if obj is map[string]Any {
// mut pruned_map := map[string]Any{}
// for key, val in obj as map[string]Any {
// if key == '_type' {
// continue
// }
// pruned_val := prune(val)
// if pruned_val.str() != '' {
// pruned_map[key] = pruned_val
// } else if key == 'methods' || key == 'params' {
// pruned_map[key] = []Any{}
// }
// }
// if pruned_map.keys().len != 0 {
// return pruned_map
// }
// } else if obj is []Any {
// mut pruned_arr := []Any{}
// for val in obj as []Any {
// pruned_val := prune(val)
// if pruned_val.str() != '' {
// pruned_arr << pruned_val
// }
// }
// if pruned_arr.len != 0 {
// return pruned_arr
// }
// } else if obj is string {
// if obj != '' {
// return obj
// }
// }
// return ''
// }
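With pruning disabled, both encoders are thin wrappers around the standard json encoder; a small in-module sketch (field values illustrative):

```v
fn demo_encode() ! {
	doc := OpenRPC{
		info: Info{
			title:   'Demo API'
			version: '1.0.0'
		}
	}
	println(doc.encode()!)        // compact single-line json
	println(doc.encode_pretty()!) // indented variant via json.encode_pretty
}
```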

View File

@@ -1,15 +1,9 @@
module openrpc
import x.json2 as json
// import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema { Schema, SchemaRef }
const blank_openrpc = '{
"openrpc": "1.0.0",
"info": {
"version": "1.0.0"
},
"methods": []
}'
const blank_openrpc = '{"openrpc": "1.0.0","info": {"version": "1.0.0"},"methods": []}'
// test if encode can correctly encode a blank OpenRPC
fn test_encode_blank() ! {
@@ -18,10 +12,9 @@ fn test_encode_blank() ! {
title: ''
version: '1.0.0'
}
methods: []Method{}
}
encoded := doc.encode()!
assert encoded.trim_space().split_into_lines().map(it.trim_space()) == blank_openrpc.split_into_lines().map(it.trim_space())
assert encoded == blank_openrpc
}
// test if can correctly encode an OpenRPC doc with a method
@@ -48,7 +41,7 @@ fn test_encode_with_method() ! {
},
]
}
encoded := doc.encode()!
encoded := doc.encode_pretty()!
assert encoded == '{
"openrpc": "1.0.0",
"info": {
@@ -96,6 +89,6 @@ fn test_encode() ! {
},
]
}
encoded := json.encode(doc)
encoded := doc.encode()!
assert encoded == '{"openrpc":"1.0.0","info":{"title":"","version":"1.0.0"},"methods":[{"name":"method_name","summary":"summary","description":"description for this method","params":[{"name":"sample descriptor","schema":{"\$schema":"","\$id":"","title":"","description":"","type":"string","properties":{},"additionalProperties":{},"required":[],"ref":"","items":{},"defs":{},"oneOf":[],"_type":"Schema"},"_type":"ContentDescriptor"}],"result":{},"deprecated":true}]}'
}

View File

@@ -1,6 +1,7 @@
module openrpc
import os
import json
@[params]
pub struct Params {
@@ -24,5 +25,5 @@ pub fn new(params Params) !OpenRPC {
params.text
}
return decode(text)!
return json.decode(OpenRPC, text)!
}

View File

@@ -2,26 +2,20 @@ module openrpc
import freeflowuniverse.herolib.schemas.jsonrpc
// The openrpc handler is a wrapper around a jsonrpc handler
pub struct Handler {
jsonrpc.Handler
pub:
specification OpenRPC @[required] // The OpenRPC specification
pub mut:
handler IHandler
}
pub interface IHandler {
mut:
handle(jsonrpc.Request) !jsonrpc.Response // Custom handler for other methods
}
@[params]
pub struct HandleParams {
timeout int = 60 // Timeout in seconds
retry int // Number of retries
}
// pub interface IHandler {
// mut:
// handle(jsonrpc.Request) !jsonrpc.Response // Custom handler for other methods
// }
// Handle a JSON-RPC request and return a response
pub fn (mut h Handler) handle(req jsonrpc.Request, params HandleParams) !jsonrpc.Response {
pub fn (mut h Handler) handle(req jsonrpc.Request) !jsonrpc.Response {
// Validate the incoming request
req.validate() or { return jsonrpc.new_error_response(req.id, jsonrpc.invalid_request) }
@@ -33,15 +27,12 @@ pub fn (mut h Handler) handle(req jsonrpc.Request, params HandleParams) !jsonrpc
}
// Validate the method exists in the specification
if req.method !in h.specification.methods.map(it.name) {
return jsonrpc.new_error_response(req.id, jsonrpc.method_not_found)
}
// Enforce timeout and retries (dummy implementation)
if params.timeout < 0 || params.retry < 0 {
return jsonrpc.new_error_response(req.id, jsonrpc.invalid_params)
}
// TODO: re-enable once registered methods are auto-added to the spec
// if req.method !in h.specification.methods.map(it.name) {
// println("Method not found: " + req.method)
// return jsonrpc.new_error_response(req.id, jsonrpc.method_not_found)
// }
// Forward the request to the custom handler
return h.handler.handle(req)
return h.Handler.handle(req) or { panic(err) }
}
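A hedged in-module sketch of wiring the reworked handler, which now embeds `jsonrpc.Handler` in place of the removed `IHandler` interface; the spec file path is illustrative and direct struct construction is assumed to be available here:

```v
import freeflowuniverse.herolib.schemas.jsonrpc

fn demo_openrpc_handler() ! {
	mut inner := jsonrpc.Handler{}
	inner.register_procedure[string, string]('echo', fn (s string) !string {
		return s
	})
	mut h := Handler{
		Handler:       inner
		specification: new(path: 'openrpc.json')! // illustrative spec path
	}
	resp := h.handle(jsonrpc.new_request('echo', 'hi'))!
	println(resp.encode())
}
```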

View File

@@ -9,20 +9,20 @@ import freeflowuniverse.herolib.schemas.jsonschema { Reference, SchemaRef }
pub struct OpenRPC {
pub mut:
openrpc string = '1.0.0' // This string MUST be the semantic version number of the OpenRPC Specification version that the OpenRPC document uses.
info Info // Provides metadata about the API.
servers []Server // An array of Server Objects, which provide connectivity information to a target server.
methods []Method // The available methods for the API.
components Components // An element to hold various schemas for the specification.
external_docs []ExternalDocs @[json: externalDocs] // Additional external documentation.
info Info @[omitempty] // Provides metadata about the API.
servers []Server @[omitempty] // An array of Server Objects, which provide connectivity information to a target server.
methods []Method @[omitempty] // The available methods for the API.
components Components @[omitempty] // An element to hold various schemas for the specification.
external_docs []ExternalDocs @[json: externalDocs; omitempty] // Additional external documentation.
}
// The object provides metadata about the API.
// The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.
pub struct Info {
pub:
title string // The title of the application.
description string // A verbose description of the application.
terms_of_service string @[json: termsOfService] // A URL to the Terms of Service for the API. MUST be in the format of a URL.
title string @[omitempty] // The title of the application.
description string @[omitempty] // A verbose description of the application.
terms_of_service string @[json: termsOfService; omitempty] // A URL to the Terms of Service for the API. MUST be in the format of a URL.
contact Contact @[omitempty] // The contact information for the exposed API.
license License @[omitempty] // The license information for the exposed API.
version string @[omitempty] // The version of the OpenRPC document (which is distinct from the OpenRPC Specification version or the API implementation version).
@@ -167,12 +167,12 @@ pub:
// All the fixed fields declared above are objects that MUST use keys that match the regular expression: ^[a-zA-Z0-9\.\-_]+$
pub struct Components {
pub mut:
content_descriptors map[string]ContentDescriptorRef @[json: contentDescriptors] // An object to hold reusable Content Descriptor Objects.
schemas map[string]SchemaRef // An object to hold reusable Schema Objects.
examples map[string]Example // An object to hold reusable Example Objects.
links map[string]Link // An object to hold reusable Link Objects.
error map[string]Error // An object to hold reusable Error Objects.
example_pairing_objects map[string]ExamplePairing @[json: examplePairingObjects] // An object to hold reusable Example Pairing Objects.
content_descriptors map[string]ContentDescriptorRef @[json: contentDescriptors; omitempty] // An object to hold reusable Content Descriptor Objects.
schemas map[string]SchemaRef @[omitempty] // An object to hold reusable Schema Objects.
examples map[string]Example @[omitempty] // An object to hold reusable Example Objects.
links map[string]Link @[omitempty] // An object to hold reusable Link Objects.
error map[string]Error @[omitempty] // An object to hold reusable Error Objects.
example_pairing_objects map[string]ExamplePairing @[json: examplePairingObjects; omitempty] // An object to hold reusable Example Pairing Objects.
tags map[string]Tag // An object to hold reusable Tag Objects.
}

View File

@@ -32,7 +32,7 @@ fn test_parse_example_pairing() ! {
params := example.params
assert params.len == 1
param0 := (params[0] as Example)
assert param0.value == "'input_string'"
assert param0.value.str() == "'input_string'"
}
const test_struct = Struct{
@@ -40,9 +40,7 @@ const test_struct = Struct{
fields: [
StructField{
name: 'TestField'
typ: Type{
symbol: 'int'
}
typ: code.type_i32
attrs: [Attribute{
name: 'example'
arg: '21'

View File

@@ -0,0 +1,107 @@
module openrpc
import json
import x.json2
import net.unix
import os
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.schemas.jsonrpc
pub struct UNIXServer {
pub mut:
listener &unix.StreamListener
socket_path string
handler Handler @[required]
}
@[params]
pub struct UNIXServerParams {
pub mut:
socket_path string = '/tmp/heromodels'
}
pub fn new_unix_server(handler Handler, params UNIXServerParams) !&UNIXServer {
// Remove existing socket file if it exists
if os.exists(params.socket_path) {
os.rm(params.socket_path)!
}
listener := unix.listen_stream(params.socket_path, unix.ListenOptions{})!
return &UNIXServer{
listener: listener
handler: handler
socket_path: params.socket_path
}
}
pub fn (mut server UNIXServer) start() ! {
console.print_header('Starting HeroModels OpenRPC Server on ${server.socket_path}')
for {
mut conn := server.listener.accept()!
spawn server.handle_connection(mut conn)
}
}
pub fn (mut server UNIXServer) close() ! {
server.listener.close()!
if os.exists(server.socket_path) {
os.rm(server.socket_path)!
}
}
fn (mut server UNIXServer) handle_connection(mut conn unix.StreamConn) {
defer {
conn.close() or { console.print_stderr('Error closing connection: ${err}') }
}
for {
// Read JSON-RPC request
mut buffer := []u8{len: 4096}
bytes_read := conn.read(mut buffer) or {
console.print_debug('Connection closed or error reading: ${err}')
break
}
if bytes_read == 0 {
break
}
request_data := buffer[..bytes_read].bytestr()
console.print_debug('Received request: ${request_data}')
// Process the JSON-RPC request
if response := server.process_request(request_data) {
// Send response only if we have a valid response
conn.write_string(response) or {
console.print_stderr('Error writing response: ${err}')
break
}
} else {
// Log the error but don't break the connection
// According to JSON-RPC 2.0 spec, if we can't decode the request ID,
// we should not send any response but keep the connection alive
console.print_debug('Invalid request received, no response sent: ${err}')
}
}
}
fn (mut server UNIXServer) process_request(request_data string) ?string {
// Parse JSON-RPC request using json2 to handle Any types
request := jsonrpc.decode_request(request_data) or {
// try decoding id to give error response
if id := jsonrpc.decode_request_id(request_data) {
// We can extract ID, so return proper JSON-RPC error response
return jsonrpc.new_error(id, jsonrpc.invalid_request).encode()
} else {
// Cannot extract ID from invalid JSON - return none (no response)
// This follows JSON-RPC 2.0 spec: no response when ID cannot be determined
return none
}
}
response := server.handler.handle(request) or {
return jsonrpc.new_error(request.id, jsonrpc.internal_error).encode()
}
return response.encode()
}
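And the matching server-side sketch, mirroring the tests below; the spec path and socket path are illustrative:

```v
fn demo_server() ! {
	handler := Handler{
		specification: new(path: 'openrpc.json')! // illustrative spec file
	}
	mut server := new_unix_server(handler, socket_path: '/tmp/heromodels')!
	defer {
		server.close() or {}
	}
	server.start()! // blocking accept loop; serves rpc.discover plus registered methods
}
```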

View File

@@ -0,0 +1,123 @@
module openrpc
import time
import json
import x.json2
import net.unix
import os
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.schemas.jsonrpc
const testdata_dir = os.join_path(os.dir(@FILE), 'testdata')
const openrpc_path = os.join_path(testdata_dir, 'openrpc.json')
pub fn test_new_unix_server() ! {
handler := Handler{
specification: new(path: openrpc_path)!
}
mut server := new_unix_server(handler)!
defer {
server.close() or {panic(err)}
}
spawn server.start()
// client()
}
pub fn test_unix_server_start() ! {
handler := Handler{
specification: new(path: openrpc_path)!
}
mut server := new_unix_server(handler)!
defer {
server.close() or {panic(err)}
}
spawn server.start()
// client()
}
pub fn test_unix_server_handle_connection() ! {
handler := Handler{
specification: new(path: openrpc_path)!
}
mut server := new_unix_server(handler)!
// Start server in background
spawn server.start()
// Give server time to start
// time.sleep(50 * time.millisecond)
// Connect to the server
mut conn := unix.connect_stream(server.socket_path)!
defer {
conn.close() or {panic(err)}
server.close() or {panic(err)}
}
// Test 1: Send rpc.discover request
discover_request := jsonrpc.new_request('rpc.discover', '')
request_json := discover_request.encode()
// Send the request
conn.write_string(request_json)!
// Read the response
mut buffer := []u8{len: 4096}
bytes_read := conn.read(mut buffer)!
response_data := buffer[..bytes_read].bytestr()
// Parse and validate response
response := jsonrpc.decode_response(response_data)!
assert response.id == discover_request.id
assert response.is_result()
assert !response.is_error()
// Validate that the result contains OpenRPC specification
result := response.result()!
assert result.len > 0
// Test 2: Send invalid JSON request
invalid_request := '{"invalid": "json"}'
conn.write_string(invalid_request)!
// Set a short read timeout to test no response behavior
conn.set_read_timeout(10 * time.millisecond)
// Try to read response - should timeout since server sends no response for invalid JSON
conn.wait_for_read() or {
// This is expected behavior - server should not respond to invalid JSON without extractable ID
console.print_debug('Expected timeout for invalid JSON request: ${err}')
assert err.msg().contains('timeout') || err.msg().contains('timed out')
// Reset timeout for next test
conn.set_read_timeout(30 * time.second)
}
// Test 3: Send request with non-existent method
nonexistent_request := jsonrpc.new_request('nonexistent.method', '{}')
nonexistent_json := nonexistent_request.encode()
conn.write_string(nonexistent_json)!
// Read method not found response
bytes_read3 := conn.read(mut buffer)!
method_error_data := buffer[..bytes_read3].bytestr()
method_error_response := jsonrpc.decode_response(method_error_data)!
assert method_error_response.is_error()
assert method_error_response.id == nonexistent_request.id
if error_obj := method_error_response.error() {
assert error_obj.code == jsonrpc.method_not_found.code
}
}

View File

@@ -1,171 +0,0 @@
module openrpcserver
import json
import x.json2
import net.unix
import os
import freeflowuniverse.herolib.ui.console
// THIS IS DEFAULT NEEDED FOR EACH OPENRPC SERVER WE MAKE
pub struct JsonRpcRequest {
pub:
jsonrpc string = '2.0'
method string
params string
id string
}
// JSON-RPC 2.0 response structure
pub struct JsonRpcResponse {
pub:
jsonrpc string = '2.0'
result string
error ?JsonRpcError
id string
}
// JSON-RPC 2.0 error structure
pub struct JsonRpcError {
pub:
code int
message string
data string
}
pub struct RPCServer {
pub mut:
listener &unix.StreamListener
socket_path string
}
@[params]
pub struct RPCServerArgs {
pub mut:
socket_path string = '/tmp/heromodels'
}
// Temporary struct for parsing incoming JSON-RPC requests using json2
struct JsonRpcRequestRaw {
jsonrpc string
method string
params json2.Any
id json2.Any
}
pub fn new_rpc_server(args RPCServerArgs) !&RPCServer {
// Remove existing socket file if it exists
if os.exists(args.socket_path) {
os.rm(args.socket_path)!
}
listener := unix.listen_stream(args.socket_path, unix.ListenOptions{})!
return &RPCServer{
listener: listener
socket_path: args.socket_path
}
}
pub fn (mut server RPCServer) start() ! {
console.print_header('Starting HeroModels OpenRPC Server on ${server.socket_path}')
for {
mut conn := server.listener.accept()!
spawn server.handle_connection(mut conn)
}
}
pub fn (mut server RPCServer) close() ! {
server.listener.close()!
if os.exists(server.socket_path) {
os.rm(server.socket_path)!
}
}
fn (mut server RPCServer) handle_connection(mut conn unix.StreamConn) {
defer {
conn.close() or { console.print_stderr('Error closing connection: ${err}') }
}
for {
// Read JSON-RPC request
mut buffer := []u8{len: 4096}
bytes_read := conn.read(mut buffer) or {
console.print_debug('Connection closed or error reading: ${err}')
break
}
if bytes_read == 0 {
break
}
request_data := buffer[..bytes_read].bytestr()
console.print_debug('Received request: ${request_data}')
// Process the JSON-RPC request
response := server.process_request(request_data) or {
server.create_error_response(-32603, 'Internal error: ${err}', 'null')
}
// Send response
conn.write_string(response) or {
console.print_stderr('Error writing response: ${err}')
break
}
}
}
fn (mut server RPCServer) process_request(request_data string) !string {
// Parse JSON-RPC request using json2 to handle Any types
request := json2.decode[JsonRpcRequestRaw](request_data)!
// Convert params to string representation
params_str := request.params.json_str()
// Convert id to string
id_str := request.id.json_str()
r := request.method.trim_space().to_lower()
// Route to appropriate method
result := server.process(r, params_str)!
return server.create_success_response(result, id_str)
}
// Default process method - should be overridden by implementations
pub fn (mut server RPCServer) process(method string, params_str string) !string {
return match method {
'rpc.discover' {
server.discover()!
}
else {
server.create_error_response(-32601, 'Method not found', method)
}
}
}
fn (mut server RPCServer) create_success_response(result string, id string) string {
response := JsonRpcResponse{
jsonrpc: '2.0'
result: result
id: id
}
return json.encode(response)
}
fn (mut server RPCServer) create_error_response(code int, message string, id string) string {
error := JsonRpcError{
code: code
message: message
data: 'null'
}
response := JsonRpcResponse{
jsonrpc: '2.0'
error: error
id: id
}
return json.encode(response)
}
// discover returns the OpenRPC specification for the service
pub fn (mut server RPCServer) discover() !string {
// Return a basic OpenRPC spec - should be overridden by implementations
return '{"openrpc": "1.2.6", "info": {"title": "OpenRPC Server", "version": "1.0.0"}, "methods": []}'
}

View File

@@ -70,7 +70,7 @@ pub fn decode_file_metadata(data []u8) !File {
// blocksize is max 2 bytes, so max 4gb entry size
blocksize := d.get_u16()!
for i in 0 .. blocksize {
chunk_ids << d.get_u32() or { return error('Failed to get block id ${err}') }
chunk_ids << d.get_u32()!
}
}

74
lib/virt/crun/crun_test.v Normal file
View File

@@ -0,0 +1,74 @@
module crun
import x.json2 as json // json2 provides the Any sum type used below
fn test_factory_creation() {
mut configs := map[string]&CrunConfig{}
config := new(mut configs, name: 'test')!
assert config.name == 'test'
assert config.spec.oci_version == '1.0.2'
}
fn test_json_generation() {
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'test')!
json_str := config.to_json()!
// Parse back to verify structure
parsed := json.raw_decode(json_str)!.as_map()
assert parsed['ociVersion']! as string == '1.0.2'
process := parsed['process']! as map[string]json.Any
assert process['terminal']! as bool == true
}
fn test_configuration_methods() {
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'test')!
config.set_command(['/bin/echo', 'hello'])
.set_working_dir('/tmp')
.set_hostname('test-host')
assert config.spec.process.args == ['/bin/echo', 'hello']
assert config.spec.process.cwd == '/tmp'
assert config.spec.hostname == 'test-host'
}
fn test_validation() {
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'test')!
// Should validate successfully with defaults
config.validate()!
// Should fail with empty args
config.spec.process.args = []
config.validate() or {
	// expected to fail with empty args
	return
}
assert false, 'validation should have failed'
}
fn test_heropods_compatibility() {
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'heropods')!
// The default config should match heropods template structure
json_str := config.to_json()!
parsed := json.raw_decode(json_str)!.as_map()
// Check key fields match template
assert parsed['ociVersion']! as string == '1.0.2'
process := parsed['process']! as map[string]json.Any
assert process['noNewPrivileges']! as bool == true
capabilities := process['capabilities']! as map[string]json.Any
bounding := capabilities['bounding']! as []json.Any
assert 'CAP_AUDIT_WRITE' in bounding.map(it as string)
assert 'CAP_KILL' in bounding.map(it as string)
assert 'CAP_NET_BIND_SERVICE' in bounding.map(it as string)
}

67
lib/virt/crun/example.v Normal file
View File

@@ -0,0 +1,67 @@
module crun
pub fn example_heropods_compatible() ! {
mut configs := map[string]&CrunConfig{}
// Create a container configuration compatible with heropods template
mut config := new(mut configs, name: 'heropods-example')!
// Configure to match the template
config.set_command(['/bin/sh'])
.set_working_dir('/')
.set_user(0, 0, [])
.add_env('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin')
.add_env('TERM', 'xterm')
.set_rootfs(r'${rootfs_path}', false) // raw string: literal placeholder, replaced later by the actual rootfs path
.set_hostname('container')
.set_no_new_privileges(true)
// Add the specific rlimit from template
config.add_rlimit(.rlimit_nofile, 1024, 1024)
// Validate the configuration
config.validate()!
// Generate and print JSON
json_output := config.to_json()!
println(json_output)
// Save to file
config.save_to_file('/tmp/heropods_config.json')!
println('Heropods-compatible configuration saved to /tmp/heropods_config.json')
}
pub fn example_custom() ! {
mut configs := map[string]&CrunConfig{}
// Create a more complex container configuration
mut config := new(mut configs, name: 'custom-container')!
config.set_command(['/usr/bin/my-app', '--config', '/etc/myapp/config.yaml'])
.set_working_dir('/app')
.set_user(1000, 1000, [1001, 1002])
.add_env('MY_VAR', 'my_value')
.add_env('ANOTHER_VAR', 'another_value')
.set_rootfs('/path/to/rootfs', false)
.set_hostname('my-custom-container')
.set_memory_limit(1024 * 1024 * 1024) // 1GB
.set_cpu_limits(100000, 50000, 1024) // period, quota, shares
.set_pids_limit(500)
.add_mount('/host/path', '/container/path', .bind, [.rw])
.add_mount('/tmp/cache', '/app/cache', .tmpfs, [.rw, .noexec])
.add_capability(.cap_sys_admin)
.remove_capability(.cap_net_raw)
.add_rlimit(.rlimit_nproc, 100, 50)
.set_no_new_privileges(true)
// Add some additional security hardening
config.add_masked_path('/proc/kcore')
.add_readonly_path('/proc/sys')
// Validate before use
config.validate()!
// Get the JSON
json_str := config.to_json()!
println('Custom container config:')
println(json_str)
}

344
lib/virt/crun/factory.v Normal file
View File

@@ -0,0 +1,344 @@
module crun
import freeflowuniverse.herolib.core.texttools
@[params]
pub struct FactoryArgs {
pub mut:
name string = 'default'
}
pub struct CrunConfig {
pub mut:
name string
spec Spec
}
// Convert enum values to their string representations
pub fn (mount_type MountType) to_string() string {
return match mount_type {
.bind { 'bind' }
.tmpfs { 'tmpfs' }
.proc { 'proc' }
.sysfs { 'sysfs' }
.devpts { 'devpts' }
.nfs { 'nfs' }
.overlay { 'overlay' }
}
}
pub fn (option MountOption) to_string() string {
return match option {
.rw { 'rw' }
.ro { 'ro' }
.noexec { 'noexec' }
.nosuid { 'nosuid' }
.nodev { 'nodev' }
.rbind { 'rbind' }
.relatime { 'relatime' }
.strictatime { 'strictatime' }
.mode { 'mode=755' } // Default mode, can be customized
.size { 'size=65536k' } // Default size, can be customized
}
}
pub fn (cap Capability) to_string() string {
return match cap {
.cap_chown { 'CAP_CHOWN' }
.cap_dac_override { 'CAP_DAC_OVERRIDE' }
.cap_dac_read_search { 'CAP_DAC_READ_SEARCH' }
.cap_fowner { 'CAP_FOWNER' }
.cap_fsetid { 'CAP_FSETID' }
.cap_kill { 'CAP_KILL' }
.cap_setgid { 'CAP_SETGID' }
.cap_setuid { 'CAP_SETUID' }
.cap_setpcap { 'CAP_SETPCAP' }
.cap_linux_immutable { 'CAP_LINUX_IMMUTABLE' }
.cap_net_bind_service { 'CAP_NET_BIND_SERVICE' }
.cap_net_broadcast { 'CAP_NET_BROADCAST' }
.cap_net_admin { 'CAP_NET_ADMIN' }
.cap_net_raw { 'CAP_NET_RAW' }
.cap_ipc_lock { 'CAP_IPC_LOCK' }
.cap_ipc_owner { 'CAP_IPC_OWNER' }
.cap_sys_module { 'CAP_SYS_MODULE' }
.cap_sys_rawio { 'CAP_SYS_RAWIO' }
.cap_sys_chroot { 'CAP_SYS_CHROOT' }
.cap_sys_ptrace { 'CAP_SYS_PTRACE' }
.cap_sys_pacct { 'CAP_SYS_PACCT' }
.cap_sys_admin { 'CAP_SYS_ADMIN' }
.cap_sys_boot { 'CAP_SYS_BOOT' }
.cap_sys_nice { 'CAP_SYS_NICE' }
.cap_sys_resource { 'CAP_SYS_RESOURCE' }
.cap_sys_time { 'CAP_SYS_TIME' }
.cap_sys_tty_config { 'CAP_SYS_TTY_CONFIG' }
.cap_mknod { 'CAP_MKNOD' }
.cap_lease { 'CAP_LEASE' }
.cap_audit_write { 'CAP_AUDIT_WRITE' }
.cap_audit_control { 'CAP_AUDIT_CONTROL' }
.cap_setfcap { 'CAP_SETFCAP' }
.cap_mac_override { 'CAP_MAC_OVERRIDE' }
.cap_mac_admin { 'CAP_MAC_ADMIN' }
.cap_syslog { 'CAP_SYSLOG' }
.cap_wake_alarm { 'CAP_WAKE_ALARM' }
.cap_block_suspend { 'CAP_BLOCK_SUSPEND' }
.cap_audit_read { 'CAP_AUDIT_READ' }
}
}
pub fn (rlimit RlimitType) to_string() string {
return match rlimit {
.rlimit_cpu { 'RLIMIT_CPU' }
.rlimit_fsize { 'RLIMIT_FSIZE' }
.rlimit_data { 'RLIMIT_DATA' }
.rlimit_stack { 'RLIMIT_STACK' }
.rlimit_core { 'RLIMIT_CORE' }
.rlimit_rss { 'RLIMIT_RSS' }
.rlimit_nproc { 'RLIMIT_NPROC' }
.rlimit_nofile { 'RLIMIT_NOFILE' }
.rlimit_memlock { 'RLIMIT_MEMLOCK' }
.rlimit_as { 'RLIMIT_AS' }
.rlimit_lock { 'RLIMIT_LOCK' }
.rlimit_sigpending { 'RLIMIT_SIGPENDING' }
.rlimit_msgqueue { 'RLIMIT_MSGQUEUE' }
.rlimit_nice { 'RLIMIT_NICE' }
.rlimit_rtprio { 'RLIMIT_RTPRIO' }
.rlimit_rttime { 'RLIMIT_RTTIME' }
}
}
// Configuration methods with builder pattern
pub fn (mut config CrunConfig) set_command(args []string) &CrunConfig {
config.spec.process.args = args.clone()
return config
}
pub fn (mut config CrunConfig) set_working_dir(cwd string) &CrunConfig {
config.spec.process.cwd = cwd
return config
}
pub fn (mut config CrunConfig) set_user(uid u32, gid u32, additional_gids []u32) &CrunConfig {
config.spec.process.user = User{
uid: uid
gid: gid
additional_gids: additional_gids.clone()
}
return config
}
pub fn (mut config CrunConfig) add_env(key string, value string) &CrunConfig {
config.spec.process.env << '${key}=${value}'
return config
}
pub fn (mut config CrunConfig) set_rootfs(path string, readonly bool) &CrunConfig {
config.spec.root = Root{
path: path
readonly: readonly
}
return config
}
pub fn (mut config CrunConfig) set_hostname(hostname string) &CrunConfig {
config.spec.hostname = hostname
return config
}
pub fn (mut config CrunConfig) set_memory_limit(limit_bytes u64) &CrunConfig {
config.spec.linux.resources.memory.limit = limit_bytes
return config
}
pub fn (mut config CrunConfig) set_cpu_limits(period u64, quota i64, shares u64) &CrunConfig {
config.spec.linux.resources.cpu.period = period
config.spec.linux.resources.cpu.quota = quota
config.spec.linux.resources.cpu.shares = shares
return config
}
pub fn (mut config CrunConfig) set_pids_limit(limit i64) &CrunConfig {
config.spec.linux.resources.pids.limit = limit
return config
}
pub fn (mut config CrunConfig) add_mount(destination string, source string, typ MountType, options []MountOption) &CrunConfig {
config.spec.mounts << Mount{
destination: destination
typ: typ.to_string()
source: source
options: options.map(it.to_string())
}
return config
}
pub fn (mut config CrunConfig) add_capability(cap Capability) &CrunConfig {
cap_str := cap.to_string()
if cap_str !in config.spec.process.capabilities.bounding {
config.spec.process.capabilities.bounding << cap_str
}
if cap_str !in config.spec.process.capabilities.effective {
config.spec.process.capabilities.effective << cap_str
}
if cap_str !in config.spec.process.capabilities.permitted {
config.spec.process.capabilities.permitted << cap_str
}
return config
}
pub fn (mut config CrunConfig) remove_capability(cap Capability) &CrunConfig {
cap_str := cap.to_string()
config.spec.process.capabilities.bounding = config.spec.process.capabilities.bounding.filter(it != cap_str)
config.spec.process.capabilities.effective = config.spec.process.capabilities.effective.filter(it != cap_str)
config.spec.process.capabilities.permitted = config.spec.process.capabilities.permitted.filter(it != cap_str)
return config
}
pub fn (mut config CrunConfig) add_rlimit(typ RlimitType, hard u64, soft u64) &CrunConfig {
config.spec.process.rlimits << Rlimit{
typ: typ.to_string()
hard: hard
soft: soft
}
return config
}
pub fn (mut config CrunConfig) set_no_new_privileges(value bool) &CrunConfig {
config.spec.process.no_new_privileges = value
return config
}
pub fn (mut config CrunConfig) add_masked_path(path string) &CrunConfig {
if path !in config.spec.linux.masked_paths {
config.spec.linux.masked_paths << path
}
return config
}
pub fn (mut config CrunConfig) add_readonly_path(path string) &CrunConfig {
if path !in config.spec.linux.readonly_paths {
config.spec.linux.readonly_paths << path
}
return config
}
pub fn new(mut configs map[string]&CrunConfig, args FactoryArgs) !&CrunConfig {
name := texttools.name_fix(args.name)
mut config := &CrunConfig{
name: name
spec: create_default_spec()
}
configs[name] = config
return config
}
pub fn get(configs map[string]&CrunConfig, args FactoryArgs) !&CrunConfig {
name := texttools.name_fix(args.name)
return configs[name] or {
return error('crun config with name "${name}" does not exist')
}
}
fn create_default_spec() Spec {
// Create default spec that matches the heropods template
mut spec := Spec{
oci_version: '1.0.2' // Set default here
platform: Platform{
os: 'linux'
arch: 'amd64'
}
process: Process{
terminal: true
user: User{
uid: 0
gid: 0
}
args: ['/bin/sh']
env: [
'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
'TERM=xterm'
]
cwd: '/'
capabilities: Capabilities{
bounding: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
effective: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
inheritable: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
permitted: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
}
rlimits: [
Rlimit{
typ: 'RLIMIT_NOFILE'
hard: 1024
soft: 1024
}
]
no_new_privileges: true // No JSON annotation needed here
}
root: Root{
path: 'rootfs'
readonly: false
}
hostname: 'container'
mounts: create_default_mounts()
linux: Linux{
namespaces: create_default_namespaces()
masked_paths: [
'/proc/acpi',
'/proc/kcore',
'/proc/keys',
'/proc/latency_stats',
'/proc/timer_list',
'/proc/timer_stats',
'/proc/sched_debug',
'/proc/scsi',
'/sys/firmware'
]
readonly_paths: [
'/proc/asound',
'/proc/bus',
'/proc/fs',
'/proc/irq',
'/proc/sys',
'/proc/sysrq-trigger'
]
}
}
return spec
}
fn create_default_namespaces() []LinuxNamespace {
return [
LinuxNamespace{typ: 'pid'},
LinuxNamespace{typ: 'network'},
LinuxNamespace{typ: 'ipc'},
LinuxNamespace{typ: 'uts'},
LinuxNamespace{typ: 'mount'},
]
}
fn create_default_mounts() []Mount {
return [
Mount{
destination: '/proc'
typ: 'proc'
source: 'proc'
},
Mount{
destination: '/dev'
typ: 'tmpfs'
source: 'tmpfs'
options: ['nosuid', 'strictatime', 'mode=755', 'size=65536k']
},
Mount{
destination: '/sys'
typ: 'sysfs'
source: 'sysfs'
options: ['nosuid', 'noexec', 'nodev', 'ro']
},
]
}

View File

@@ -0,0 +1,867 @@
crun(1) General Commands Manual crun(1)
NAME
crun - a fast and lightweight OCI runtime
SYNOPSIS
crun [global options] command [command options] [arguments...]
DESCRIPTION
crun is a command line program for running Linux containers that follow
the Open Container Initiative (OCI) format.
COMMANDS
create Create a container. The runtime detaches from the container
process once the container environment is created. It is necessary to
successively use start for starting the container.
delete Remove definition for a container.
exec Exec a command in a running container.
list List known containers.
mounts add Add mounts while the container is running. It requires two
arguments: the container ID and a JSON file containing the mounts
section of the OCI config file. Each mount listed there is added to
the running container. The command is experimental and can be changed
without notice.
mounts remove Remove mounts while the container is running. It
requires two arguments: the container ID and a JSON file containing the
mounts section of the OCI config file. Only the destination attribute
for each mount is used. The command is experimental and can be changed
without notice.
kill Send the specified signal to the container init process. If no
signal is specified, SIGTERM is used.
ps Show the processes running in a container.
run Create and immediately start a container.
spec Generate a configuration file.
start Start a container that was previously created. A container
cannot be started multiple times.
state Output the state of a container.
pause Pause all the processes in the container.
resume Resume the processes in the container.
update Update container resource constraints.
checkpoint Checkpoint a running container using CRIU.
restore Restore a container from a checkpoint.
STATE
By default, when running as root user, crun saves its state under the
/run/crun directory. As unprivileged user, instead the XDG_RUNTIME_DIR
environment variable is honored, and the directory
$XDG_RUNTIME_DIR/crun is used. The global option --root overrides this
setting.
GLOBAL OPTIONS
--debug Produce verbose output.
--log=LOG-DESTINATION Define the destination for the error and warning
messages generated by crun. If the error happens late in the container
init process, when crun already stopped watching it, then it will be
printed to the container stderr.
It is specified in the form BACKEND:SPECIFIER.
These following backends are supported:
o file:PATH
o journald:IDENTIFIER
o syslog:IDENTIFIER
If no backend is specified, then file: is used by default.
--log-format=FORMAT Define the format of the log messages. It can
either be text, or json. The default is text.
--log-level=LEVEL Define the log level. It can either be debug,
warning or error. The default is error.
--no-pivot Use chroot(2) instead of pivot_root(2) when creating the
container. This option is not safe, and should be avoided.
--root=DIR Defines where to store the state for crun containers.
--systemd-cgroup Use systemd for configuring cgroups. If not
specified, the cgroup is created directly using the cgroupfs backend.
--cgroup-manager=MANAGER Specify what cgroup manager must be used.
Permitted values are cgroupfs, systemd and disabled.
-?, --help Print a help list.
--usage Print a short usage message.
-V, --version Print program version
CREATE OPTIONS
crun [global options] create [options] CONTAINER
--bundle=PATH Path to the OCI bundle, by default it is the current
directory.
--config=FILE Override the configuration file to use. The default
value is config.json.
--console-socket=SOCKET Path to a UNIX socket that will receive the
ptmx end of the tty for the container.
--no-new-keyring Keep the same session key
--preserve-fds=N Additional number of FDs to pass into the container.
--pid-file=PATH Path to the file that will contain the container
process PID.
RUN OPTIONS
crun [global options] run [options] CONTAINER
--bundle=BUNDLE Path to the OCI bundle, by default it is the current
directory.
--config=FILE Override the configuration file to use. The default
value is config.json.
--console-socket=SOCKET Path to a UNIX socket that will receive the
ptmx end of the tty for the container.
--no-new-keyring Keep the same session key.
--preserve-fds=N Additional number of FDs to pass into the container.
--pid-file=PATH Path to the file that will contain the container
process PID.
--detach Detach the container process from the current session.
DELETE OPTIONS
crun [global options] delete [options] CONTAINER
--force Delete the container even if it is still running.
--regex=REGEX Delete all the containers that satisfy the specified
regex.
EXEC OPTIONS
crun [global options] exec [options] CONTAINER CMD
--apparmor=PROFILE Set the apparmor profile for the process.
--console-socket=SOCKET Path to a UNIX socket that will receive the
ptmx end of the tty for the container.
--cwd=PATH Set the working directory for the process to PATH.
--cap=CAP Specify an additional capability to add to the process.
--detach Detach the container process from the current session.
--cgroup=PATH Specify a sub-cgroup path inside the container cgroup.
The path must already exist in the container cgroup.
--env=ENV Specify an environment variable.
--no-new-privs Set the no new privileges value for the process.
--preserve-fds=N Additional number of FDs to pass into the container.
--process=FILE Path to a file containing the process JSON
configuration.
--process-label=VALUE Set the asm process label for the process
commonly used with selinux.
--pid-file=PATH Path to the file that will contain the new process PID.
-t --tty Allocate a pseudo TTY.
-u USERSPEC --user=USERSPEC Specify the user in the form UID[:GID].
LIST OPTIONS
crun [global options] list [options]
-q --quiet Show only the container ID.
KILL OPTIONS
crun [global options] kill [options] CONTAINER SIGNAL
--all Kill all the processes in the container.
--regex=REGEX Kill all the containers that satisfy the specified regex.
PS OPTIONS
crun [global options] ps [options]
--format=FORMAT Specify the output format. It must be either table or
json. By default table is used.
SPEC OPTIONS
crun [global options] spec [options]
-b DIR --bundle=DIR Path to the root of the bundle dir (default ".").
--rootless Generate a config.json file that is usable by an
unprivileged user.
UPDATE OPTIONS
crun [global options] update [options] CONTAINER
--blkio-weight=VALUE Specifies per cgroup weight.
--cpu-period=VALUE CPU CFS period to be used for hardcapping.
--cpu-quota=VALUE CPU CFS hardcap limit.
--cpu-rt-period=VALUE CPU realtime period to be used for hardcapping.
--cpu-rt-runtime=VALUE CPU realtime hardcap limit.
--cpu-share=VALUE CPU shares.
--cpuset-cpus=VALUE CPU(s) to use.
--cpuset-mems=VALUE Memory node(s) to use.
--kernel-memory=VALUE Kernel memory limit.
--kernel-memory-tcp=VALUE Kernel memory limit for TCP buffer.
--memory=VALUE Memory limit.
--memory-reservation=VALUE Memory reservation or soft_limit.
--memory-swap=VALUE Total memory usage.
--pids-limit=VALUE Maximum number of pids allowed in the container.
-r, --resources=FILE Path to the file containing the resources to
update.
CHECKPOINT OPTIONS
crun [global options] checkpoint [options] CONTAINER
--image-path=DIR Path for saving CRIU image files
--work-path=DIR Path for saving work files and logs
--leave-running Leave the process running after checkpointing
--tcp-established Allow open TCP connections
--ext-unix-sk Allow external UNIX sockets
--shell-job Allow shell jobs
--pre-dump Only checkpoint the container's memory without stopping the
container. It is not possible to restore a container from a pre-dump.
A pre-dump always needs a final checkpoint (without --pre-dump). It is
possible to make as many pre-dumps as necessary. For a second pre-dump
or for a final checkpoint it is necessary to use --parent-path to point
crun (and thus CRIU) to the pre-dump.
--parent-path=DIR Doing multiple pre-dumps or the final checkpoint
after one or multiple pre-dumps requires that crun (and thus CRIU)
knows the location of the pre-dump. It is important to use a relative
path from the actual checkpoint directory specified via --image-path.
It will fail if an absolute path is used.
--manage-cgroups-mode=MODE Specify which CRIU manage cgroup mode should
be used. Permitted values are soft, ignore, full or strict. Default is
soft.
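To make the pre-dump flow above concrete, here is a minimal V sketch in the style this repository uses to shell out (the container name myctr and the /tmp/cp paths are hypothetical):
import freeflowuniverse.herolib.osal.core as osal
fn checkpoint_chain() ! {
	// Two pre-dumps, then the final checkpoint; each --parent-path is
	// relative to the --image-path of the checkpoint being taken.
	osal.exec(cmd: 'crun checkpoint --pre-dump --image-path=/tmp/cp/pre1 myctr')!
	osal.exec(cmd: 'crun checkpoint --pre-dump --image-path=/tmp/cp/pre2 --parent-path=../pre1 myctr')!
	osal.exec(cmd: 'crun checkpoint --image-path=/tmp/cp/final --parent-path=../pre2 myctr')!
}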
RESTORE OPTIONS
crun [global options] restore [options] CONTAINER
-b DIR --bundle=DIR Container bundle directory (default ".")
--image-path=DIR Path for saving CRIU image files
--work-path=DIR Path for saving work files and logs
--tcp-established Allow open TCP connections
--ext-unix-sk Allow external UNIX sockets
--shell-job Allow shell jobs
--detach Detach from the container's process
--pid-file=FILE Where to write the PID of the container
--manage-cgroups-mode=MODE Specify which CRIU manage cgroup mode should
be used. Permitted values are soft, ignore, full or strict. Default is
soft.
--lsm-profile=TYPE:NAME Specify an LSM profile to be used during
restore. TYPE can be either apparmor or selinux.
--lsm-mount-context=VALUE Specify a new LSM mount context to be used
during restore. This option replaces an existing mount context
information with the specified value. This is useful when restoring a
container into an existing Pod and selinux labels need to be changed
during restore.
Extensions to OCI
run.oci.mount_context_type=context
Set the mount context type on volumes mounted with SELinux labels.
Valid context types are:
context (default)
fscontext
defcontext
rootcontext
For more information on how the context mount flags work, see the
mount(8) man page.
run.oci.seccomp.receiver=PATH
If the annotation run.oci.seccomp.receiver=PATH is specified, the
seccomp listener is sent to the UNIX socket listening on the specified
path. It can also be set with the RUN_OCI_SECCOMP_RECEIVER environment
variable. It is an experimental feature, and the annotation will be
removed once it is supported in the OCI runtime specs. It must be an
absolute path.
run.oci.seccomp.plugins=PATH
If the annotation run.oci.seccomp.plugins=PLUGIN1[:PLUGIN2]... is
specified, the seccomp listener fd is handled through the specified
plugins. Each plugin must either be an absolute path or a file name
that is looked up by dlopen(3). More information on how the lookup is
performed is available in the ld.so(8) man page.
run.oci.seccomp_fail_unknown_syscall=1
If the annotation run.oci.seccomp_fail_unknown_syscall is present, then
crun will fail when an unknown syscall is encountered in the seccomp
configuration.
run.oci.seccomp_bpf_data=PATH
If the annotation run.oci.seccomp_bpf_data is present, then crun
ignores the seccomp section in the OCI configuration file and uses the
specified data as the raw argument to the
seccomp(SECCOMP_SET_MODE_FILTER) syscall. The data must be encoded in
base64.
It is an experimental feature, and the annotation will be removed once
it is supported in the OCI runtime specs.
run.oci.keep_original_groups=1
If the annotation run.oci.keep_original_groups is present, then crun
will skip the setgroups syscall that is used to either set the
additional groups specified in the OCI configuration, or to reset the
list of additional groups if none is specified.
run.oci.pidfd_receiver=PATH
It is an experimental feature and will be removed once the feature is
in the OCI runtime specs.
If present, specify the path to the UNIX socket that will receive the
pidfd for the container process.
run.oci.systemd.force_cgroup_v1=/PATH
If the annotation run.oci.systemd.force_cgroup_v1=/PATH is present,
then crun will override the specified mount point /PATH with a cgroup
v1 mount made of a single hierarchy none,name=systemd. It is useful on
a cgroup v2 system for running containers that use older versions of
systemd which lack support for cgroup v2.
Note: Your container host has to have the cgroup v1 mount already
present, otherwise this will not work. If you want to run the container
rootless, the user it runs under has to have permissions to this
mountpoint.
For example, as root:
mkdir /sys/fs/cgroup/systemd
mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
chown -R the_user.the_user /sys/fs/cgroup/systemd
run.oci.systemd.subgroup=SUBGROUP
Override the name for the systemd sub cgroup created under the systemd
scope, so the final cgroup will be like:
/sys/fs/cgroup/$PATH/$SUBGROUP
When it is set to the empty string, a sub cgroup is not created.
If not specified, it defaults to container on cgroup v2, and to "" on
cgroup v1.
e.g.
/sys/fs/cgroup//system.slice/foo-352700.scope/container
run.oci.delegate-cgroup=DELEGATED-CGROUP
If the run.oci.systemd.subgroup annotation is specified, yet another
sub-cgroup is created and the container process is moved here.
If a cgroup namespace is used, the cgroup namespace is created before
moving the container to the delegated cgroup.
/sys/fs/cgroup/$PATH/$SUBGROUP/$DELEGATED-CGROUP
The runtime doesn't apply any limit to the $DELEGATED-CGROUP sub-
cgroup; the runtime uses only $PATH/$SUBGROUP.
The container payload fully manages $DELEGATED-CGROUP, but the limits
applied to $PATH/$SUBGROUP still apply to $DELEGATED-CGROUP.
Since cgroup delegation is not safe on cgroup v1, this option is
supported only on cgroup v2.
run.oci.hooks.stdout=FILE
If the annotation run.oci.hooks.stdout is present, then crun will open
the specified file and use it as the stdout for the hook processes.
The file is opened in append mode and it is created if it doesn't
already exist.
run.oci.hooks.stderr=FILE
If the annotation run.oci.hooks.stderr is present, then crun will open
the specified file and use it as the stderr for the hook processes.
The file is opened in append mode and it is created if it doesn't
already exist.
run.oci.handler=HANDLER
It is an experimental feature.
If specified, run the specified handler for execing the container. The
only supported values are krun and wasm.
o krun: When krun is specified, the libkrun.so shared object is loaded
and it is used to launch the container using libkrun.
o wasm: If specified, run the wasm handler for the container. This
allows running wasm workloads natively. It accepts a .wasm binary as
input; if a .wat file is provided, it is automatically compiled into
a wasm module. Stdout of the wasm module is relayed back via crun.
tmpcopyup mount options
If the tmpcopyup option is specified for a tmpfs, then the path that is
shadowed by the tmpfs mount is recursively copied up to the tmpfs
itself.
copy-symlink mount options
If the copy-symlink option is specified, if the source of a bind mount
is a symlink, the symlink is recreated at the specified destination
instead of attempting a mount that would resolve the symlink itself.
If the destination already exists and it is not a symlink with the
expected content, crun will return an error.
dest-nofollow
When this option is specified for a bind mount, and the destination of
the bind mount is a symbolic link, crun will mount the symbolic link
itself at the target destination.
src-nofollow
When this option is specified for a bind mount, and the source of the
bind mount is a symbolic link, crun will use the symlink itself rather
than the file or directory the symbolic link points to.
r$FLAG mount options
If a r$FLAG mount option is specified then the flag $FLAG is set
recursively for each child mount.
These flags are supported:
o "rro"
o "rrw"
o "rsuid"
o "rnosuid"
o "rdev"
o "rnodev"
o "rexec"
o "rnoexec"
o "rsync"
o "rasync"
o "rdirsync"
o "rmand"
o "rnomand"
o "ratime"
o "rnoatime"
o "rdiratime"
o "rnodiratime"
o "rrelatime"
o "rnorelatime"
o "rstrictatime"
o "rnostrictatime"
idmap mount options
If the idmap option is specified then the mount is ID mapped using the
container target user namespace. This is an experimental feature and
can change at any time without notice.
The idmap option supports a custom mapping that can be different than
the user namespace used by the container.
The mapping can be specified after the idmap option like:
idmap=uids=0-1-10#10-11-10;gids=0-100-10.
For each triplet, the first value is the start of the backing file
system IDs that are mapped to the second value on the host. The length
of this mapping is given in the third value.
Multiple ranges are separated with #.
These values are written to the /proc/$PID/uid_map and
/proc/$PID/gid_map files to create the user namespace for the idmapped
mount.
The only two options that are currently supported after idmap are uids
and gids.
When a custom mapping is specified, a new user namespace is created for
the idmapped mount.
If no option is specified, then the container user namespace is used.
If the specified mapping is prepended with a '@' then the mapping is
considered relative to the container user namespace. The host ID for
the mapping is changed to account for the relative position of the
container user in the container user namespace.
For example, the mapping: uids=@1-3-10, given a configuration like
"uidMappings": [
{
"containerID": 0,
"hostID": 0,
"size": 1
},
{
"containerID": 1,
"hostID": 2,
"size": 1000
}
]
will be converted to the absolute value uids=1-4-10, where 4 is
calculated by adding 3 (container ID in the uids= mapping) and 1
(hostID - containerID for the user namespace mapping where containerID
= 1 is found).
The current implementation doesn't take into account multiple user
namespace ranges, so it is the caller's responsibility to split a
mapping if it overlaps multiple ranges in the user namespace. In such
a case, there won't be any error reported.
Automatically create user namespace
When running as a user other than root, a user namespace is
automatically created even if it is not specified in the config file.
The current user is mapped to ID 0 in the container, and any
additional IDs specified in the files /etc/subuid and /etc/subgid are
automatically added starting with ID 1.
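For example, given an /etc/subuid entry like user:100000:65536,
container IDs 1-65536 would map to host IDs 100000-165535.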
CGROUP v1
Support for cgroup v1 is deprecated and will be removed in a future
release.
CGROUP v2
Note: cgroup v2 does not yet support control of realtime processes and
the cpu controller can only be enabled when all RT processes are in the
root cgroup. This will make crun fail while running alongside RT
processes.
If the cgroup configuration found is for cgroup v1, crun attempts a
conversion when running on a cgroup v2 system.
These are the OCI resources currently supported with cgroup v2 and how
they are converted when needed from the cgroup v1 configuration.
Memory controller
+------------+--------------------+----------------------+------------------+
|OCI (x) | cgroup 2 value (y) | conversion | comment |
+------------+--------------------+----------------------+------------------+
|limit | memory.max | y = x | |
+------------+--------------------+----------------------+------------------+
|swap | memory.swap.max | y = x - memory_limit | the swap limit |
| | | | on cgroup v1 |
| | | | includes the |
| | | | memory usage too |
+------------+--------------------+----------------------+------------------+
|reservation | memory.low | y = x | |
+------------+--------------------+----------------------+------------------+
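For example, a cgroup v1 configuration with limit = 1Gi and swap = 2Gi
(where the v1 swap value includes memory usage) becomes memory.max = 1Gi
and memory.swap.max = 1Gi on cgroup v2.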
PIDs controller
+--------+--------------------+------------+---------+
|OCI (x) | cgroup 2 value (y) | conversion | comment |
+--------+--------------------+------------+---------+
|limit | pids.max | y = x | |
+--------+--------------------+------------+---------+
CPU controller
+--------+--------------------+------------------+------------------+
|OCI (x) | cgroup 2 value (y) | conversion       | comment          |
+--------+--------------------+------------------+------------------+
|shares  | cpu.weight         | y=10^((log2(x)^2 | convert from     |
|        |                    | + 125 * log2(x)) | [2-262144] to    |
|        |                    | / 612.0 - 7.0 /  | [1-10000]        |
|        |                    | 34.0)            |                  |
+--------+--------------------+------------------+------------------+
|period  | cpu.max            | y = x            | period and quota |
|        |                    |                  | are written      |
|        |                    |                  | together         |
+--------+--------------------+------------------+------------------+
|quota   | cpu.max            | y = x            | period and quota |
|        |                    |                  | are written      |
|        |                    |                  | together         |
+--------+--------------------+------------------+------------------+
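To make the shares formula concrete, a minimal V sketch (the function name is ours, not crun's; it assumes the conversion in the table above):
import math
// Convert cgroup v1 cpu.shares to cgroup v2 cpu.weight with the formula
// above. For the default shares value 1024, log2(1024) = 10, so the
// exponent is (100 + 1250) / 612.0 - 7.0 / 34.0 = 2.0 and the result is
// the cgroup v2 default weight, 100.
fn shares_to_weight(shares u64) u64 {
	l := math.log2(f64(shares))
	return u64(math.round(math.pow(10, (l * l + 125.0 * l) / 612.0 - 7.0 / 34.0)))
}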
blkio controller
+--------------+----------------------+-------------------------+------------------+
|OCI (x) | cgroup 2 value (y) | conversion | comment |
+--------------+----------------------+-------------------------+------------------+
|weight | io.bfq.weight | y = x | |
+--------------+----------------------+-------------------------+------------------+
|weight_device | io.bfq.weight | y = x | |
+--------------+----------------------+-------------------------+------------------+
|weight | io.weight (fallback) | y = 1 + (x-10)*9999/990 | convert linearly |
| | | | from [10-1000] |
| | | | to [1-10000] |
+--------------+----------------------+-------------------------+------------------+
|weight_device | io.weight (fallback) | y = 1 + (x-10)*9999/990 | convert linearly |
| | | | from [10-1000] |
| | | | to [1-10000] |
+--------------+----------------------+-------------------------+------------------+
|rbps | io.max | y=x | |
+--------------+----------------------+-------------------------+------------------+
|wbps | io.max | y=x | |
+--------------+----------------------+-------------------------+------------------+
|riops | io.max | y=x | |
+--------------+----------------------+-------------------------+------------------+
|wiops | io.max | y=x | |
+--------------+----------------------+-------------------------+------------------+
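The fallback mapping is a straight line through (10, 1) and (1000, 10000); a hypothetical helper in V:
// Linearly map a v1 blkio weight in [10-1000] to io.weight in [1-10000],
// as in the fallback rows above: 10 -> 1 and 1000 -> 10000.
fn blkio_to_io_weight(x int) int {
	return 1 + (x - 10) * 9999 / 990
}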
cpuset controller
+--------+--------------------+------------+---------+
|OCI (x) | cgroup 2 value (y) | conversion | comment |
+--------+--------------------+------------+---------+
|cpus | cpuset.cpus | y = x | |
+--------+--------------------+------------+---------+
|mems | cpuset.mems | y = x | |
+--------+--------------------+------------+---------+
hugetlb controller
+---------------------------+------------------------+------------+---------+
|OCI (x)                    | cgroup 2 value (y)     | conversion | comment |
+---------------------------+------------------------+------------+---------+
|<pagesize>.limit_in_bytes  | hugetlb.<pagesize>.max | y = x      |         |
+---------------------------+------------------------+------------+---------+
lib/virt/crun/model.v Normal file
@@ -0,0 +1,238 @@
module crun
// OCI Runtime Spec structures that can be directly encoded to JSON
pub struct Spec {
pub mut:
oci_version string
platform Platform
process Process
root Root
hostname string
mounts []Mount
linux Linux
hooks Hooks
}
pub struct Platform {
pub mut:
os string = 'linux'
arch string = 'amd64'
}
pub struct Process {
pub mut:
terminal bool = true
user User
args []string
env []string
cwd string = '/'
capabilities Capabilities
rlimits []Rlimit
no_new_privileges bool
}
pub struct User {
pub mut:
uid u32
gid u32
additional_gids []u32
}
pub struct Capabilities {
pub mut:
bounding []string
effective []string
inheritable []string
permitted []string
ambient []string
}
pub struct Rlimit {
pub mut:
typ string
hard u64
soft u64
}
pub struct Root {
pub mut:
path string
readonly bool
}
pub struct Mount {
pub mut:
destination string
typ string
source string
options []string
}
pub struct Linux {
pub mut:
namespaces []LinuxNamespace
resources LinuxResources
devices []LinuxDevice
masked_paths []string
readonly_paths []string
uid_mappings []LinuxIDMapping
gid_mappings []LinuxIDMapping
}
pub struct LinuxNamespace {
pub mut:
typ string
path string
}
pub struct LinuxResources {
pub mut:
memory Memory
cpu CPU
pids Pids
blkio BlockIO
}
pub struct Memory {
pub mut:
limit u64
reservation u64
swap u64
kernel u64
swappiness i64
}
pub struct CPU {
pub mut:
shares u64
quota i64
period u64
cpus string
mems string
}
pub struct Pids {
pub mut:
limit i64
}
pub struct BlockIO {
pub mut:
weight u16
}
pub struct LinuxDevice {
pub mut:
path string
typ string
major i64
minor i64
file_mode u32
uid u32
gid u32
}
pub struct LinuxIDMapping {
pub mut:
container_id u32
host_id u32
size u32
}
pub struct Hooks {
pub mut:
prestart []Hook
poststart []Hook
poststop []Hook
}
pub struct Hook {
pub mut:
path string
args []string
env []string
}
// Enums for type safety; converted to strings when encoding to JSON
pub enum MountType {
bind
tmpfs
proc
sysfs
devpts
nfs
overlay
}
pub enum MountOption {
rw
ro
noexec
nosuid
nodev
rbind
relatime
strictatime
mode
size
}
pub enum Capability {
cap_chown
cap_dac_override
cap_dac_read_search
cap_fowner
cap_fsetid
cap_kill
cap_setgid
cap_setuid
cap_setpcap
cap_linux_immutable
cap_net_bind_service
cap_net_broadcast
cap_net_admin
cap_net_raw
cap_ipc_lock
cap_ipc_owner
cap_sys_module
cap_sys_rawio
cap_sys_chroot
cap_sys_ptrace
cap_sys_pacct
cap_sys_admin
cap_sys_boot
cap_sys_nice
cap_sys_resource
cap_sys_time
cap_sys_tty_config
cap_mknod
cap_lease
cap_audit_write
cap_audit_control
cap_setfcap
cap_mac_override
cap_mac_admin
cap_syslog
cap_wake_alarm
cap_block_suspend
cap_audit_read
}
pub enum RlimitType {
rlimit_cpu
rlimit_fsize
rlimit_data
rlimit_stack
rlimit_core
rlimit_rss
rlimit_nproc
rlimit_nofile
rlimit_memlock
rlimit_as
rlimit_lock
rlimit_sigpending
rlimit_msgqueue
rlimit_nice
rlimit_rtprio
rlimit_rttime
}
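As a hedged illustration of how these structs compose (all values below are invented for the sketch; unset fields keep the declared defaults such as os 'linux', arch 'amd64', and cwd '/'):
// Build a minimal Spec; everything not set here keeps its zero value
// or the default declared in the structs above.
fn example_spec() Spec {
	return Spec{
		oci_version: '1.0.2'
		process: Process{
			args: ['/bin/sh']
			env: ['PATH=/usr/sbin:/usr/bin:/sbin:/bin']
		}
		root: Root{
			path: '/var/lib/containers/myctr/rootfs'
		}
		hostname: 'myctr'
	}
}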
lib/virt/crun/readme.md Normal file
@@ -0,0 +1,4 @@
Specs: https://github.com/opencontainers/runtime-spec
lib/virt/crun/tojson.v Normal file
@@ -0,0 +1,40 @@
module crun
import json
import freeflowuniverse.herolib.core.pathlib
// Simple JSON generation using V's built-in json module
pub fn (config CrunConfig) to_json() !string {
return json.encode_pretty(config.spec)
}
// Convenience method to save JSON to file
pub fn (config CrunConfig) save_to_file(path string) ! {
json_content := config.to_json()!
mut file := pathlib.get_file(path: path, create: true)!
file.write(json_content)!
}
// Validate the configuration
pub fn (config CrunConfig) validate() ! {
if config.spec.oci_version == '' {
return error('ociVersion cannot be empty')
}
if config.spec.process.args.len == 0 {
return error('process.args cannot be empty')
}
if config.spec.root.path == '' {
return error('root.path cannot be empty')
}
// Validate that required capabilities are present
required_caps := ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
for cap in required_caps {
if cap !in config.spec.process.capabilities.bounding {
return error('missing required capability: ${cap}')
}
}
}
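A hypothetical usage sketch, assuming CrunConfig wraps a Spec in a `spec` field (the wrapper itself is not shown in this diff):
fn example_save() ! {
	mut config := CrunConfig{}
	config.spec.oci_version = '1.0.2'
	config.spec.process.args = ['/bin/sh']
	config.spec.root.path = '/var/lib/containers/myctr/rootfs'
	// validate() insists on these three capabilities in the bounding set
	config.spec.process.capabilities.bounding = ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
	config.validate()!
	config.save_to_file('/var/lib/containers/myctr/config.json')!
}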
@@ -0,0 +1,121 @@
{
"ociVersion": "1.0.2",
"process": {
"terminal": true,
"user": {
"uid": 0,
"gid": 0
},
"args": [
"/bin/sh",
"-c",
"while true; do sleep 30; done"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"effective": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"inheritable": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"permitted": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "${rootfs_path}",
"readonly": false
},
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
}
],
"linux": {
"namespaces": [
{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"maskedPaths": [
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}
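Note: ${rootfs_path} above is not literal JSON; it is a template placeholder, presumably substituted via V's $tmpl with the image rootfs path when the per-container config.json is generated (as create_container_config does later in this diff).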
@@ -0,0 +1,244 @@
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.tmux
import freeflowuniverse.herolib.osal.core as osal
import time
import freeflowuniverse.herolib.builder
import json
pub struct Container {
pub mut:
name string
node ?&builder.Node
tmux_pane ?&tmux.Pane
factory &ContainerFactory
}
// Struct to parse JSON output of `crun state`
struct CrunState {
id string
status string
pid int
bundle string
created string
}
pub fn (mut self Container) start() ! {
// Check if container exists in crun
container_exists := self.container_exists_in_crun()!
if !container_exists {
// Container doesn't exist, create it first
console.print_debug('Container ${self.name} does not exist, creating it...')
osal.exec(
cmd: 'crun create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
stdout: true
)!
console.print_debug('Container ${self.name} created')
}
status := self.status()!
if status == .running {
console.print_debug('Container ${self.name} is already running')
return
}
// If container exists but is stopped, we need to delete and recreate it
// because crun doesn't allow restarting a stopped container
if container_exists && status != .running {
console.print_debug('Container ${self.name} exists but is stopped, recreating...')
osal.exec(cmd: 'crun delete ${self.name}', stdout: false) or {}
osal.exec(
cmd: 'crun create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
stdout: true
)!
console.print_debug('Container ${self.name} recreated')
}
// start the container (crun start doesn't have --detach flag)
osal.exec(cmd: 'crun start ${self.name}', stdout: true)!
console.print_green('Container ${self.name} started')
}
pub fn (mut self Container) stop() ! {
status := self.status()!
if status == .stopped {
console.print_debug('Container ${self.name} is already stopped')
return
}
osal.exec(cmd: 'crun kill ${self.name} SIGTERM', stdout: false) or {}
time.sleep(2 * time.second)
// Force kill if still running
if self.status()! == .running {
osal.exec(cmd: 'crun kill ${self.name} SIGKILL', stdout: false) or {}
}
console.print_green('Container ${self.name} stopped')
}
pub fn (mut self Container) delete() ! {
// Check if container exists before trying to delete
if !self.container_exists_in_crun()! {
console.print_debug('Container ${self.name} does not exist, nothing to delete')
return
}
self.stop()!
osal.exec(cmd: 'crun delete ${self.name}', stdout: false) or {}
// Remove from factory's container cache
if self.name in self.factory.containers {
self.factory.containers.delete(self.name)
}
console.print_green('Container ${self.name} deleted')
}
// Execute command inside the container
pub fn (mut self Container) exec(cmd_ osal.Command) !string {
// Ensure container is running
if self.status()! != .running {
self.start()!
}
// Use the builder node to execute inside container
mut node := self.node()!
console.print_debug('Executing command in container ${self.name}: ${cmd_.cmd}')
return node.exec(cmd: cmd_.cmd, stdout: cmd_.stdout)
}
pub fn (self Container) status() !ContainerStatus {
result := osal.exec(cmd: 'crun state ${self.name}', stdout: false) or { return .unknown }
// Parse JSON output from crun state
state := json.decode(CrunState, result.output) or { return .unknown }
return match state.status {
'running' { .running }
'stopped' { .stopped }
'paused' { .paused }
else { .unknown }
}
}
// Check if container exists in crun (regardless of its state)
fn (self Container) container_exists_in_crun() !bool {
// Try to get container state - if it fails, container doesn't exist
result := osal.exec(cmd: 'crun state ${self.name}', stdout: false) or { return false }
// If we get here, the container exists (even if stopped/paused)
return result.exit_code == 0
}
pub enum ContainerStatus {
running
stopped
paused
unknown
}
// Get CPU usage in percentage
pub fn (self Container) cpu_usage() !f64 {
// Use cgroup stats to get CPU usage
result := osal.exec(
cmd: 'cat /sys/fs/cgroup/system.slice/crun-${self.name}.scope/cpu.stat'
stdout: false
) or { return 0.0 }
for line in result.output.split_into_lines() {
if line.starts_with('usage_usec') {
usage := line.split(' ')[1].f64()
return usage / 1000000.0 // Convert to percentage
}
}
return 0.0
}
// Get memory usage in MB
pub fn (self Container) mem_usage() !f64 {
result := osal.exec(
cmd: 'cat /sys/fs/cgroup/system.slice/crun-${self.name}.scope/memory.current'
stdout: false
) or { return 0.0 }
bytes := result.output.trim_space().f64()
return bytes / (1024 * 1024) // Convert to MB
}
pub struct TmuxPaneArgs {
pub mut:
window_name string
pane_nr int
pane_name string // optional
cmd string // optional, will execute this cmd
reset bool // if true will reset everything and restart a cmd
env map[string]string // optional, will set these env vars in the pane
}
pub fn (mut self Container) tmux_pane(args TmuxPaneArgs) !&tmux.Pane {
mut t := tmux.new()!
session_name := 'herorun'
mut session := if t.session_exist(session_name) {
t.session_get(session_name)!
} else {
t.session_create(name: session_name)!
}
// Get or create window
mut window := session.window_get(name: args.window_name) or {
session.window_new(name: args.window_name)!
}
// Get existing pane by number, or create a new one
mut pane := window.pane_get(args.pane_nr) or { window.pane_new()! }
if args.reset {
pane.clear()!
}
// Set environment variables if provided
for key, value in args.env {
pane.send_keys('export ${key}="${value}"')!
}
// Execute command if provided
if args.cmd != '' {
pane.send_keys('crun exec ${self.name} ${args.cmd}')!
}
self.tmux_pane = pane
return pane
}
pub fn (mut self Container) node() !&builder.Node {
// If node already initialized, return it
if self.node != none {
return self.node
}
mut b := builder.new()!
mut exec := builder.ExecutorCrun{
container_id: self.name
debug: false
}
exec.init() or {
return error('Failed to init ExecutorCrun for container ${self.name}: ${err}')
}
// Create node using the factory method, then override the executor
mut node := b.node_new(name: 'container_${self.name}', ipaddr: 'localhost')!
node.executor = exec
node.platform = .alpine
node.cputype = .intel
node.done = map[string]string{}
node.environment = map[string]string{}
node.hostname = self.name
self.node = node
return node
}
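A hypothetical sketch of the monitoring and tmux helpers above (container and window names are illustrative, and the container is assumed to have been created already with factory.new()):
import freeflowuniverse.herolib.virt.heropods
fn monitor_example() ! {
	mut factory := heropods.new()!
	mut container := factory.get(name: 'myalpine')!
	println('status: ${container.status()!}')
	println('cpu: ${container.cpu_usage()!} mem: ${container.mem_usage()!} MB')
	// Run a long-lived command in a tmux pane of the shared 'herorun' session
	container.tmux_pane(heropods.TmuxPaneArgs{
		window_name: 'myalpine'
		cmd: 'tail -f /var/log/messages'
	})!
}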
@@ -0,0 +1,149 @@
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.installers.virt.herorunner as herorunner_installer
import os
import x.json2
// Updated enum to be more flexible
pub enum ContainerImageType {
alpine_3_20
ubuntu_24_04
ubuntu_25_04
custom // For custom images downloaded via podman
}
@[params]
pub struct ContainerNewArgs {
pub:
name string @[required]
image ContainerImageType = .alpine_3_20
custom_image_name string // Used when image = .custom
docker_url string // Docker image URL for new images
reset bool
}
pub fn (mut self ContainerFactory) new(args ContainerNewArgs) !&Container {
if args.name in self.containers && !args.reset {
return self.containers[args.name]
}
// Determine image to use
mut image_name := ''
mut rootfs_path := ''
match args.image {
.alpine_3_20 {
image_name = 'alpine'
rootfs_path = '${self.base_dir}/images/alpine/rootfs'
}
.ubuntu_24_04 {
image_name = 'ubuntu_24_04'
rootfs_path = '${self.base_dir}/images/ubuntu/24.04/rootfs'
}
.ubuntu_25_04 {
image_name = 'ubuntu_25_04'
rootfs_path = '${self.base_dir}/images/ubuntu/25.04/rootfs'
}
.custom {
if args.custom_image_name == '' {
return error('custom_image_name is required when using custom image type')
}
image_name = args.custom_image_name
rootfs_path = '${self.base_dir}/images/${image_name}/rootfs'
// If image not yet extracted, pull and unpack it
if !os.is_dir(rootfs_path) && args.docker_url != '' {
console.print_debug('Pulling image ${args.docker_url} with podman...')
self.podman_pull_and_export(args.docker_url, image_name, rootfs_path)!
}
}
}
// Verify rootfs exists
if !os.is_dir(rootfs_path) {
return error('Image rootfs not found: ${rootfs_path}. Please ensure the image is available.')
}
// Create container config (with terminal disabled) but don't create the container yet
self.create_container_config(args.name, rootfs_path)!
// Ensure crun is installed on host
if !osal.cmd_exists('crun') {
mut herorunner := herorunner_installer.new()!
herorunner.install()!
}
// Create container struct but don't create the actual container in crun yet
// The actual container creation will happen in container.start()
mut container := &Container{
name: args.name
factory: &self
}
self.containers[args.name] = container
return container
}
// Create OCI config.json from template
fn (self ContainerFactory) create_container_config(container_name string, rootfs_path string) ! {
config_dir := '${self.base_dir}/configs/${container_name}'
osal.exec(cmd: 'mkdir -p ${config_dir}', stdout: false)!
// Load template
mut config_content := $tmpl('config_template.json')
// Parse JSON with json2
mut root := json2.raw_decode(config_content)!
mut config := root.as_map()
// Get or create process map
mut process := if 'process' in config {
config['process'].as_map()
} else {
map[string]json2.Any{}
}
// Force disable terminal
process['terminal'] = json2.Any(false)
config['process'] = json2.Any(process)
// Write back to config.json
config_path := '${config_dir}/config.json'
mut p := pathlib.get_file(path: config_path, create: true)!
p.write(json2.encode_pretty(json2.Any(config)))!
}
// Use podman to pull image and extract rootfs
fn (self ContainerFactory) podman_pull_and_export(docker_url string, image_name string, rootfs_path string) ! {
// Pull image
osal.exec(
cmd: 'podman pull ${docker_url}'
stdout: true
)!
// Create temp container
temp_name := 'tmp_${image_name}_${os.getpid()}'
osal.exec(
cmd: 'podman create --name ${temp_name} ${docker_url}'
stdout: true
)!
// Export container filesystem
osal.exec(
cmd: 'mkdir -p ${rootfs_path}'
stdout: false
)!
osal.exec(
cmd: 'podman export ${temp_name} | tar -C ${rootfs_path} -xf -'
stdout: true
)!
// Cleanup temp container
osal.exec(
cmd: 'podman rm ${temp_name}'
stdout: false
)!
}
@@ -0,0 +1,295 @@
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.core.texttools
import os
import json
@[heap]
pub struct ContainerImage {
pub mut:
image_name string @[required] // image is located in ${self.factory.base_dir}/images/<image_name>/rootfs
docker_url string // optional docker image URL
rootfs_path string // path to the extracted rootfs
size_mb f64 // size in MB
created_at string // creation timestamp
factory &ContainerFactory @[skip; str: skip]
}
@[params]
pub struct ContainerImageArgs {
pub mut:
image_name string @[required] // image is located in ${self.factory.base_dir}/images/<image_name>/rootfs
docker_url string // docker image URL like "alpine:3.20" or "ubuntu:24.04"
reset bool
}
@[params]
pub struct ImageExportArgs {
pub mut:
dest_path string @[required] // destination .tgz file path
compress_level int = 6 // compression level 1-9
}
@[params]
pub struct ImageImportArgs {
pub mut:
source_path string @[required] // source .tgz file path
reset bool // overwrite if exists
}
// Create new image or get existing
pub fn (mut self ContainerFactory) image_new(args ContainerImageArgs) !&ContainerImage {
mut image_name := texttools.name_fix(args.image_name)
rootfs_path := '${self.base_dir}/images/${image_name}/rootfs'
// Check if image already exists
if image_name in self.images && !args.reset {
return self.images[image_name] or { panic('bug') }
}
// Ensure podman is installed
if !osal.cmd_exists('podman') {
return error('Podman is required for image management. Please install podman first.')
}
mut image := &ContainerImage{
image_name: image_name
docker_url: args.docker_url
rootfs_path: rootfs_path
factory: &self
}
// If docker_url is provided, download and extract the image
if args.docker_url != '' {
image.download_from_docker(args.docker_url, args.reset)!
} else {
// Check if rootfs directory exists
if !os.is_dir(rootfs_path) {
return error('Image rootfs not found at ${rootfs_path} and no docker_url provided')
}
}
// Update image metadata
image.update_metadata()!
self.images[image_name] = image
return image
}
// Download image from docker registry using podman
fn (mut self ContainerImage) download_from_docker(docker_url string, reset bool) ! {
console.print_header('Downloading image: ${docker_url}')
// Clean image name for local storage
image_dir := '${self.factory.base_dir}/images/${self.image_name}'
// Remove existing if reset is true
if reset && os.is_dir(image_dir) {
osal.exec(cmd: 'rm -rf ${image_dir}', stdout: false)!
}
// Create image directory
osal.exec(cmd: 'mkdir -p ${image_dir}', stdout: false)!
// Pull image using podman
console.print_debug('Pulling image: ${docker_url}')
osal.exec(cmd: 'podman pull ${docker_url}', stdout: true)!
// Create container from image (without running it)
temp_container := 'temp_${self.image_name}_extract'
osal.exec(cmd: 'podman create --name ${temp_container} ${docker_url}', stdout: false)!
// Export container filesystem
tar_file := '${image_dir}/rootfs.tar'
osal.exec(cmd: 'podman export ${temp_container} -o ${tar_file}', stdout: true)!
// Extract to rootfs directory
osal.exec(cmd: 'mkdir -p ${self.rootfs_path}', stdout: false)!
osal.exec(cmd: 'tar -xf ${tar_file} -C ${self.rootfs_path}', stdout: true)!
// Clean up temporary container and tar file
osal.exec(cmd: 'podman rm ${temp_container}', stdout: false) or {}
osal.exec(cmd: 'rm -f ${tar_file}', stdout: false) or {}
// Remove the pulled image from podman to save space (optional)
osal.exec(cmd: 'podman rmi ${docker_url}', stdout: false) or {}
console.print_green('Image ${docker_url} extracted to ${self.rootfs_path}')
}
// Update image metadata (size, creation time, etc.)
fn (mut self ContainerImage) update_metadata() ! {
if !os.is_dir(self.rootfs_path) {
return error('Rootfs path does not exist: ${self.rootfs_path}')
}
// Calculate size
result := osal.exec(cmd: 'du -sm ${self.rootfs_path}', stdout: false)!
result_parts := result.output.split_by_space()[0] or { panic('bug') }
size_str := result_parts.trim_space()
self.size_mb = size_str.f64()
// Get creation time
info := os.stat(self.rootfs_path) or { return error('stat failed: ${err}') }
self.created_at = info.ctime.str() // or mtime.str(), depending on what you want
}
// List all available images
pub fn (mut self ContainerFactory) images_list() ![]&ContainerImage {
mut images := []&ContainerImage{}
images_base_dir := '${self.base_dir}/images'
if !os.is_dir(images_base_dir) {
return images
}
// Scan for image directories
dirs := os.ls(images_base_dir)!
for dir in dirs {
full_path := '${images_base_dir}/${dir}'
if os.is_dir(full_path) {
rootfs_path := '${full_path}/rootfs'
if os.is_dir(rootfs_path) {
// Create image object if not in cache
if dir !in self.images {
mut image := &ContainerImage{
image_name: dir
rootfs_path: rootfs_path
factory: &self
}
image.update_metadata() or {
console.print_stderr('Failed to update metadata for image ${dir}: ${err}')
continue
}
self.images[dir] = image
}
images << self.images[dir] or { panic('bug') }
}
}
}
return images
}
// Export image to .tgz file
pub fn (mut self ContainerImage) export(args ImageExportArgs) ! {
if !os.is_dir(self.rootfs_path) {
return error('Image rootfs not found: ${self.rootfs_path}')
}
console.print_header('Exporting image ${self.image_name} to ${args.dest_path}')
// Ensure destination directory exists
dest_dir := os.dir(args.dest_path)
osal.exec(cmd: 'mkdir -p ${dest_dir}', stdout: false)!
// Create compressed archive
cmd := 'tar -czf ${args.dest_path} -C ${os.dir(self.rootfs_path)} ${os.base(self.rootfs_path)}'
osal.exec(cmd: cmd, stdout: true)!
console.print_green('Image exported successfully to ${args.dest_path}')
}
// Import image from .tgz file
pub fn (mut self ContainerFactory) image_import(args ImageImportArgs) !&ContainerImage {
if !os.exists(args.source_path) {
return error('Source file not found: ${args.source_path}')
}
// Extract image name from filename
filename := os.base(args.source_path)
image_name := filename.replace('.tgz', '').replace('.tar.gz', '')
image_name_clean := texttools.name_fix(image_name)
console.print_header('Importing image from ${args.source_path}')
image_dir := '${self.base_dir}/images/${image_name_clean}'
rootfs_path := '${image_dir}/rootfs'
// Check if image already exists
if os.is_dir(rootfs_path) && !args.reset {
return error('Image ${image_name_clean} already exists. Use reset=true to overwrite.')
}
// Remove existing if reset
if args.reset && os.is_dir(image_dir) {
osal.exec(cmd: 'rm -rf ${image_dir}', stdout: false)!
}
// Create directories
osal.exec(cmd: 'mkdir -p ${image_dir}', stdout: false)!
// Extract archive
osal.exec(cmd: 'tar -xzf ${args.source_path} -C ${image_dir}', stdout: true)!
// Create image object
mut image := &ContainerImage{
image_name: image_name_clean
rootfs_path: rootfs_path
factory: &self
}
image.update_metadata()!
self.images[image_name_clean] = image
console.print_green('Image imported successfully: ${image_name_clean}')
return image
}
// Delete image
pub fn (mut self ContainerImage) delete() ! {
console.print_header('Deleting image: ${self.image_name}')
image_dir := os.dir(self.rootfs_path)
if os.is_dir(image_dir) {
osal.exec(cmd: 'rm -rf ${image_dir}', stdout: true)!
}
// Remove from factory cache
if self.image_name in self.factory.images {
self.factory.images.delete(self.image_name)
}
console.print_green('Image ${self.image_name} deleted successfully')
}
// Get image info as map
pub fn (self ContainerImage) info() map[string]string {
return {
'name': self.image_name
'docker_url': self.docker_url
'rootfs_path': self.rootfs_path
'size_mb': self.size_mb.str()
'created_at': self.created_at
}
}
// List available docker images that can be downloaded
pub fn list_available_docker_images() []string {
return [
'alpine:3.20',
'alpine:3.19',
'alpine:latest',
'ubuntu:24.04',
'ubuntu:22.04',
'ubuntu:20.04',
'ubuntu:latest',
'debian:12',
'debian:11',
'debian:latest',
'fedora:39',
'fedora:38',
'fedora:latest',
'archlinux:latest',
'centos:stream9',
'rockylinux:9',
'nginx:alpine',
'redis:alpine',
'postgres:15-alpine',
'node:20-alpine',
'python:3.12-alpine',
]
}
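A hypothetical round trip with the image API above (the image name and paths are illustrative):
import freeflowuniverse.herolib.virt.heropods
fn image_roundtrip() ! {
	mut factory := heropods.new()!
	mut img := factory.image_new(image_name: 'alpine_3_20', docker_url: 'alpine:3.20')!
	img.export(dest_path: '/tmp/alpine_3_20.tgz')!
	imported := factory.image_import(source_path: '/tmp/alpine_3_20.tgz', reset: true)!
	println(imported.info())
}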
lib/virt/heropods/factory.v Normal file
@@ -0,0 +1,138 @@
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import time
import os
@[heap]
pub struct ContainerFactory {
pub mut:
tmux_session string
containers map[string]&Container
images map[string]&ContainerImage
base_dir string
}
@[params]
pub struct FactoryInitArgs {
pub:
reset bool
use_podman bool = true
}
pub fn new(args FactoryInitArgs) !ContainerFactory {
mut f := ContainerFactory{}
f.init(args)!
return f
}
fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
// Ensure base directories exist
self.base_dir = os.getenv_opt('CONTAINERS_DIR') or { os.home_dir() + '/.containers' }
osal.exec(
cmd: 'mkdir -p ${self.base_dir}/images ${self.base_dir}/configs ${self.base_dir}/runtime'
stdout: false
)!
if args.use_podman {
if !osal.cmd_exists('podman') {
console.print_stderr('Warning: podman not found. Install podman for better image management.')
console.print_debug('Install with: apt install podman (Ubuntu) or brew install podman (macOS)')
} else {
console.print_debug('Using podman for image management')
}
}
// Load existing images into cache
self.load_existing_images()!
// Setup default images if not using podman
if !args.use_podman {
self.setup_default_images(args.reset)!
}
}
fn (mut self ContainerFactory) setup_default_images(reset bool) ! {
console.print_header('Setting up default images...')
default_images := [ContainerImageType.alpine_3_20, .ubuntu_24_04, .ubuntu_25_04]
for img in default_images {
mut args := ContainerImageArgs{
image_name: img.str()
reset: reset
}
if img.str() !in self.images || reset {
console.print_debug('Preparing default image: ${img.str()}')
_ = self.image_new(args)!
}
}
}
// Load existing images from filesystem into cache
fn (mut self ContainerFactory) load_existing_images() ! {
images_base_dir := '${self.base_dir}/images'
if !os.is_dir(images_base_dir) {
return
}
dirs := os.ls(images_base_dir) or { return }
for dir in dirs {
full_path := '${images_base_dir}/${dir}'
if os.is_dir(full_path) {
rootfs_path := '${full_path}/rootfs'
if os.is_dir(rootfs_path) {
mut image := &ContainerImage{
image_name: dir
rootfs_path: rootfs_path
factory: &self
}
image.update_metadata() or {
console.print_stderr(' Failed to update metadata for image ${dir}: ${err}')
continue
}
self.images[dir] = image
console.print_debug('Loaded existing image: ${dir}')
}
}
}
}
pub fn (mut self ContainerFactory) get(args ContainerNewArgs) !&Container {
if args.name !in self.containers {
return error('Container "${args.name}" does not exist. Use factory.new() to create it first.')
}
return self.containers[args.name]
}
// Get image by name
pub fn (mut self ContainerFactory) image_get(name string) !&ContainerImage {
if name !in self.images {
return error('Image "${name}" not found in cache. Try importing or downloading it.')
}
return self.images[name]
}
// List all containers currently managed by crun
pub fn (self ContainerFactory) list() ![]Container {
mut containers := []Container{}
result := osal.exec(cmd: 'crun list', stdout: false)!
// Parse the tab-separated table output of `crun list` (the header line is skipped below)
lines := result.output.split_into_lines()
for line in lines {
if line.trim_space() == '' || line.starts_with('ID') {
continue
}
parts := line.split('\t')
if parts.len > 0 {
containers << Container{
name: parts[0]
factory: &self
}
}
}
return containers
}
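And a small, purely illustrative sketch of the factory-level listing calls:
import freeflowuniverse.herolib.virt.heropods
fn inventory() ! {
	mut factory := heropods.new()!
	for c in factory.list()! {
		println('container: ${c.name}')
	}
	for img in factory.images_list()! {
		println('image: ${img.info()}')
	}
}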
@@ -0,0 +1,5 @@
- use builder... for remote execution inside the container
- make an executor like the one we have for SSH, but for the container, so we can use it to execute commands inside the container
-
@@ -0,0 +1,119 @@
{
"ociVersion": "1.0.2",
"process": {
"terminal": true,
"user": {
"uid": 0,
"gid": 0
},
"args": [
"/bin/sh"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"effective": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"inheritable": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"permitted": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "${rootfs_path}",
"readonly": false
},
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
}
],
"linux": {
"namespaces": [
{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"maskedPaths": [
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}
@@ -1,4 +1,4 @@
module herorun
module herorun2
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.tmux
@@ -1,8 +1,7 @@
module herorun
module herorun2
import freeflowuniverse.herolib.osal.tmux
import freeflowuniverse.herolib.osal.sshagent
import freeflowuniverse.herolib.virt.hetznermanager
import freeflowuniverse.herolib.osal.core as osal
import time
import os
@@ -18,7 +17,6 @@ pub mut:
session_name string
window_name string
agent sshagent.SSHAgent
hetzner &hetznermanager.HetznerManager
}
@[params]
@@ -218,7 +216,7 @@ fn (mut e Executor) create_container() ! {
}
}
setup_cmd=texttools.dedent(setup_cmd)
setup_cmd = texttools.dedent(setup_cmd)
remote_cmd := 'ssh ${e.node.settings.user}@${e.node.settings.node_ip} "${setup_cmd}"'
osal.exec(cmd: remote_cmd, stdout: false, name: 'container_create')!
@@ -1,4 +1,4 @@
module herorun
module herorun2
import freeflowuniverse.herolib.ui.console
@@ -1,4 +1,4 @@
module herorun
module herorun2
import os

@@ -1,4 +1,4 @@
module herorun
module herorun2
import freeflowuniverse.herolib.osal.core as osal
@@ -1,4 +1,4 @@
module herorun
module herorun2
// Base image types for containers
pub enum BaseImage {
@@ -1,4 +1,4 @@
module herorun
module herorun2
import freeflowuniverse.herolib.osal.sshagent
@@ -1,68 +0,0 @@
module runc
fn example() {
root := Root{
path: '/rootfs'
readonly: true
}
process := Process{
terminal: true
user: User{
uid: 0
gid: 0
additional_gids: [u32(0)]
}
args: ['/bin/bash']
env: ['PATH=/usr/bin']
cwd: '/'
capabilities: Capabilities{
bounding: [Capability.cap_chown, Capability.cap_dac_override]
effective: [Capability.cap_chown]
inheritable: []
permitted: [Capability.cap_chown]
ambient: []
}
rlimits: [
Rlimit{
typ: .rlimit_nofile
hard: 1024
soft: 1024
},
]
}
linux := Linux{
namespaces: [
LinuxNamespace{
typ: 'pid'
path: ''
},
]
resources: LinuxResource{
blkio_weight: 1000
cpu_period: 100000
cpu_quota: 50000
cpu_shares: 1024
devices: []
memory_limit: 1024 * 1024 * 1024 // 1GB
}
devices: []
}
spec := Spec{
version: '1.0.0'
platform: Platform{
os: .linux
arch: .amd64
}
process: process
root: root
hostname: 'my-container'
mounts: []
linux: linux
hooks: Hooks{}
}
println(spec)
}
@@ -1,221 +0,0 @@
module runc
struct LinuxNamespace {
typ string
path string
}
struct LinuxIDMapping {
container_id u32
host_id u32
size u32
}
struct LinuxResource {
blkio_weight u16
blkio_weight_device []string
blkio_throttle_read_bps_device []string
blkio_throttle_write_bps_device []string
blkio_throttle_read_iops_device []string
blkio_throttle_write_iops_device []string
cpu_period u64
cpu_quota i64
cpu_shares u64
cpuset_cpus string
cpuset_mems string
devices []string
memory_limit u64
memory_reservation u64
memory_swap_limit u64
memory_kernel_limit u64
memory_swappiness i64
pids_limit i64
}
struct LinuxDevice {
typ string
major int
minor int
permissions string
file_mode u32
uid u32
gid u32
}
struct Hooks {
prestart []string
poststart []string
poststop []string
}
// see https://github.com/opencontainers/runtime-spec/blob/main/config.md#process
struct Process {
terminal bool
user User
args []string
env []string // do as dict
cwd string
capabilities Capabilities
rlimits []Rlimit
}
// Enum for Rlimit types
enum RlimitType {
rlimit_cpu
rlimit_fsize
rlimit_data
rlimit_stack
rlimit_core
rlimit_rss
rlimit_nproc
rlimit_nofile
rlimit_memlock
rlimit_as
rlimit_lock
rlimit_sigpending
rlimit_msgqueue
rlimit_nice
rlimit_rtprio
rlimit_rttime
}
// Struct for Rlimit using enumerator
struct Rlimit {
typ RlimitType
hard u64
soft u64
}
struct User {
uid u32
gid u32
additional_gids []u32
}
struct Root {
path string
readonly bool
}
struct Linux {
namespaces []LinuxNamespace
resources LinuxResource
devices []LinuxDevice
}
struct Spec {
version string
platform Platform
process Process
root Root
hostname string
mounts []Mount
linux Linux
hooks Hooks
}
// Enum for supported operating systems
enum OSType {
linux
windows
darwin
solaris
// Add other OS types as needed
}
// Enum for supported architectures
enum ArchType {
amd64
arm64
arm
ppc64
s390x
// Add other architectures as needed
}
// Struct for Platform using enums
struct Platform {
os OSType
arch ArchType
}
// Enum for mount types
enum MountType {
bind
tmpfs
nfs
overlay
devpts
proc
sysfs
// Add other mount types as needed
}
// Enum for mount options
enum MountOption {
rw
ro
noexec
nosuid
nodev
rbind
relatime
// Add other options as needed
}
// Struct for Mount using enums
struct Mount {
destination string
typ MountType
source string
options []MountOption
}
enum Capability {
cap_chown
cap_dac_override
cap_dac_read_search
cap_fowner
cap_fsetid
cap_kill
cap_setgid
cap_setuid
cap_setpcap
cap_linux_immutable
cap_net_bind_service
cap_net_broadcast
cap_net_admin
cap_net_raw
cap_ipc_lock
cap_ipc_owner
cap_sys_module
cap_sys_rawio
cap_sys_chroot
cap_sys_ptrace
cap_sys_pacct
cap_sys_admin
cap_sys_boot
cap_sys_nice
cap_sys_resource
cap_sys_time
cap_sys_tty_config
cap_mknod
cap_lease
cap_audit_write
cap_audit_control
cap_setfcap
cap_mac_override
cap_mac_admin
cap_syslog
cap_wake_alarm
cap_block_suspend
cap_audit_read
}
struct Capabilities {
bounding []Capability
effective []Capability
inheritable []Capability
permitted []Capability
ambient []Capability
}
@@ -1,7 +0,0 @@
specs on https://github.com/opencontainers/runtime-spec
use https://github.com/containers/youki to test the implementation, wrap it as part of runc module,
make installer for it
@@ -1,153 +0,0 @@
module runc
import json
// Helper functions to convert enums to strings
fn (cap Capability) str() string {
return match cap {
.cap_chown { 'CAP_CHOWN' }
.cap_dac_override { 'CAP_DAC_OVERRIDE' }
.cap_dac_read_search { 'CAP_DAC_READ_SEARCH' }
.cap_fowner { 'CAP_FOWNER' }
.cap_fsetid { 'CAP_FSETID' }
.cap_kill { 'CAP_KILL' }
.cap_setgid { 'CAP_SETGID' }
.cap_setuid { 'CAP_SETUID' }
.cap_setpcap { 'CAP_SETPCAP' }
.cap_linux_immutable { 'CAP_LINUX_IMMUTABLE' }
.cap_net_bind_service { 'CAP_NET_BIND_SERVICE' }
.cap_net_broadcast { 'CAP_NET_BROADCAST' }
.cap_net_admin { 'CAP_NET_ADMIN' }
.cap_net_raw { 'CAP_NET_RAW' }
.cap_ipc_lock { 'CAP_IPC_LOCK' }
.cap_ipc_owner { 'CAP_IPC_OWNER' }
.cap_sys_module { 'CAP_SYS_MODULE' }
.cap_sys_rawio { 'CAP_SYS_RAWIO' }
.cap_sys_chroot { 'CAP_SYS_CHROOT' }
.cap_sys_ptrace { 'CAP_SYS_PTRACE' }
.cap_sys_pacct { 'CAP_SYS_PACCT' }
.cap_sys_admin { 'CAP_SYS_ADMIN' }
.cap_sys_boot { 'CAP_SYS_BOOT' }
.cap_sys_nice { 'CAP_SYS_NICE' }
.cap_sys_resource { 'CAP_SYS_RESOURCE' }
.cap_sys_time { 'CAP_SYS_TIME' }
.cap_sys_tty_config { 'CAP_SYS_TTY_CONFIG' }
.cap_mknod { 'CAP_MKNOD' }
.cap_lease { 'CAP_LEASE' }
.cap_audit_write { 'CAP_AUDIT_WRITE' }
.cap_audit_control { 'CAP_AUDIT_CONTROL' }
.cap_setfcap { 'CAP_SETFCAP' }
.cap_mac_override { 'CAP_MAC_OVERRIDE' }
.cap_mac_admin { 'CAP_MAC_ADMIN' }
.cap_syslog { 'CAP_SYSLOG' }
.cap_wake_alarm { 'CAP_WAKE_ALARM' }
.cap_block_suspend { 'CAP_BLOCK_SUSPEND' }
.cap_audit_read { 'CAP_AUDIT_READ' }
}
}
fn (rlimit RlimitType) str() string {
return match rlimit {
.rlimit_cpu { 'RLIMIT_CPU' }
.rlimit_fsize { 'RLIMIT_FSIZE' }
.rlimit_data { 'RLIMIT_DATA' }
.rlimit_stack { 'RLIMIT_STACK' }
.rlimit_core { 'RLIMIT_CORE' }
.rlimit_rss { 'RLIMIT_RSS' }
.rlimit_nproc { 'RLIMIT_NPROC' }
.rlimit_nofile { 'RLIMIT_NOFILE' }
.rlimit_memlock { 'RLIMIT_MEMLOCK' }
.rlimit_as { 'RLIMIT_AS' }
.rlimit_lock { 'RLIMIT_LOCK' }
.rlimit_sigpending { 'RLIMIT_SIGPENDING' }
.rlimit_msgqueue { 'RLIMIT_MSGQUEUE' }
.rlimit_nice { 'RLIMIT_NICE' }
.rlimit_rtprio { 'RLIMIT_RTPRIO' }
.rlimit_rttime { 'RLIMIT_RTTIME' }
}
}
// Function to convert Capabilities struct to JSON
fn (cap Capabilities) to_json() map[string][]string {
return {
'bounding': cap.bounding.map(it.str())
'effective': cap.effective.map(it.str())
'inheritable': cap.inheritable.map(it.str())
'permitted': cap.permitted.map(it.str())
'ambient': cap.ambient.map(it.str())
}
}
// Function to convert Rlimit struct to JSON
fn (rlimit Rlimit) to_json() map[string]json.Any {
return {
'type': rlimit.typ.str()
'hard': rlimit.hard
'soft': rlimit.soft
}
}
// Example function to generate the Process JSON
fn generate_process_json(proc Process) string {
// Convert the Process object to JSON
process_json := {
'terminal': proc.terminal
'user': {
'uid': proc.user.uid
'gid': proc.user.gid
'additionalGids': proc.user.additional_gids
}
'args': proc.args
'env': proc.env
'cwd': proc.cwd
'capabilities': proc.capabilities.to_json()
'rlimits': proc.rlimits.map(it.to_json())
}
// Convert the entire process map to JSON string
return json.encode_pretty(process_json)
}
pub fn example_json() {
// Example instantiation using enums and Process structure
user := User{
uid: 1000
gid: 1000
additional_gids: [1001, 1002]
}
capabilities := Capabilities{
bounding: [Capability.cap_chown, Capability.cap_dac_override]
effective: [Capability.cap_chown]
inheritable: []
permitted: [Capability.cap_chown]
ambient: []
}
rlimits := [
Rlimit{
typ: RlimitType.rlimit_nofile
hard: 1024
soft: 1024
},
Rlimit{
typ: RlimitType.rlimit_cpu
hard: 1000
soft: 500
},
]
process := Process{
terminal: true
user: user
args: ['/bin/bash']
env: ['PATH=/usr/bin']
cwd: '/'
capabilities: capabilities
rlimits: rlimits
}
// Generate the JSON for Process object
json_output := generate_process_json(process)
println(json_output)
}