merge and fix encoding

Timur Gordon
2025-09-08 19:43:48 +02:00
154 changed files with 7913 additions and 2557 deletions

View File

@@ -34,6 +34,11 @@ jobs:
- run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!" - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
- run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}." - run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}."
- uses: maxim-lobanov/setup-xcode@v1
if: runner.os == 'macOS'
with:
xcode-version: latest-stable
- name: Check out repository code - name: Check out repository code
uses: actions/checkout@v4 uses: actions/checkout@v4

4
.gitignore vendored
View File

@@ -52,4 +52,6 @@ HTTP_REST_MCP_DEMO.md
MCP_HTTP_REST_IMPLEMENTATION_PLAN.md
.roo
.kilocode
.continue
+tmux_logger
+release

View File

@@ -39,7 +39,7 @@ bash /tmp/install_v.sh --analyzer --herolib
#do not forget to do the following this makes sure vtest and vrun exists
cd ~/code/github/freeflowuniverse/herolib
-bash install_herolib.vsh
+v install_herolib.vsh
# IMPORTANT: Start a new shell after installation for paths to be set correctly

View File

@@ -48,7 +48,7 @@ fn do() ! {
mut cmd := Command{
	name: 'hero'
	description: 'Your HERO toolset.'
-	version: '1.0.29'
+	version: '1.0.33'
}
// herocmds.cmd_run_add_flags(mut cmd)
@@ -103,4 +103,4 @@ fn main() {
// fn pre_func(cmd Command) ! {
//     herocmds.plbook_run(cmd)!
// }

View File

@@ -52,7 +52,6 @@ println(' - API title: ${spec.info.title}')
println(' - API version: ${spec.info.version}')
println(' - Methods available: ${spec.methods.len}')
// 2. List all services
println('\n2. Listing all services...')
services := client.service_list() or {

BIN
examples/osal/sshagent/sshagent Executable file

Binary file not shown.

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.osal.sshagent
import freeflowuniverse.herolib.builder

View File

@@ -141,28 +141,26 @@ fn test_user_mgmt() ! {
*/
}

-fn main() {
-	console.print_header('🔑 SSH Agent Example - HeroLib')
+console.print_header('🔑 SSH Agent Example - HeroLib')

demo_sshagent_basic() or {
	console.print_stderr(' Basic demo failed: ${err}')
	return
}

demo_sshagent_key_management() or {
	console.print_stderr(' Key management demo failed: ${err}')
	return
}

demo_sshagent_with_existing_keys() or {
	console.print_stderr(' Existing keys demo failed: ${err}')
	return
}

test_user_mgmt() or {
	console.print_stderr(' User management test failed: ${err}')
	return
}

console.print_header('🎉 All SSH Agent demos completed successfully!')
-}

View File

@@ -0,0 +1,114 @@
#!/usr/bin/env hero
// Enhanced Declarative Tmux Test with Redis State Tracking
// This demonstrates the new intelligent command management features
// Ensure a test session exists
!!tmux.session_ensure
name:"enhanced_test"
// Ensure a 4-pane window exists
!!tmux.window_ensure
name:"enhanced_test|demo"
cat:"4pane"
// Configure panes with intelligent state management
// The system will now:
// 1. Check if commands have changed using MD5 hashing
// 2. Verify if previous commands are still running
// 3. Kill and restart only when necessary
// 4. Ensure bash is the parent process
// 5. Reset panes when needed
// 6. Track all state in Redis
!!tmux.pane_ensure
name:"enhanced_test|demo|1"
label:"web_server"
cmd:"echo \"Starting web server...\" && python3 -m http.server 8000"
log:true
logpath:"/tmp/enhanced_logs"
logreset:true
!!tmux.pane_ensure
name:"enhanced_test|demo|2"
label:"monitor"
cmd:"echo \"Starting system monitor...\" && htop"
log:true
logpath:"/tmp/enhanced_logs"
!!tmux.pane_ensure
name:"enhanced_test|demo|3"
label:"logs"
cmd:"echo \"Monitoring logs...\" && tail -f /var/log/system.log"
log:true
logpath:"/tmp/enhanced_logs"
!!tmux.pane_ensure
name:"enhanced_test|demo|4"
label:"development"
cmd:"
echo \"Setting up development environment...\"
mkdir -p /tmp/dev_workspace
cd /tmp/dev_workspace
echo \"Development environment ready!\"
echo \"Current directory:\" && pwd
echo \"Available commands: ls, vim, git, etc.\"
"
log:true
logpath:"/tmp/enhanced_logs"
// Test the intelligent state management by running the same commands again
// The system should detect that commands haven't changed and skip re-execution
// for commands that are still running
!!tmux.pane_ensure
name:"enhanced_test|demo|1"
label:"web_server"
cmd:"echo \"Starting web server...\" && python3 -m http.server 8000"
log:true
logpath:"/tmp/enhanced_logs"
// Test command change detection by modifying a command slightly
!!tmux.pane_ensure
name:"enhanced_test|demo|2"
label:"monitor"
cmd:"echo \"Starting UPDATED system monitor...\" && htop"
log:true
logpath:"/tmp/enhanced_logs"
// This should kill the previous htop and start a new one because the command changed
// Test with a completely different command
!!tmux.pane_ensure
name:"enhanced_test|demo|3"
label:"network"
cmd:"echo \"Switching to network monitoring...\" && netstat -tuln"
log:true
logpath:"/tmp/enhanced_logs"
// This should kill the tail command and start netstat
// Test multi-line command with state tracking
!!tmux.pane_ensure
name:"enhanced_test|demo|4"
label:"advanced_dev"
cmd:"
echo \"Advanced development setup...\"
cd /tmp/dev_workspace
echo \"Creating project structure...\"
mkdir -p src tests docs
echo \"Project structure created:\"
ls -la
echo \"Ready for development!\"
"
log:true
logpath:"/tmp/enhanced_logs"
// The system will:
// - Compare MD5 hash of this multi-line command with the previous one
// - Detect that it's different
// - Kill the previous command
// - Execute this new command
// - Store the new state in Redis
// - Ensure bash is the parent process
// - Enable logging with the tmux_logger binary
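The change detection the comments above describe can be sketched in a few lines of V. This is a minimal illustration only; the Redis key layout and the function name are assumptions, not the committed pane logic:

module main

import crypto.md5
import freeflowuniverse.herolib.core.redisclient

// command_changed reports whether a pane's command differs from the one
// recorded in Redis, and records the new hash when it does.
fn command_changed(mut redis redisclient.Redis, pane_key string, cmd string) !bool {
	new_hash := md5.hexhash(cmd)
	old_hash := redis.get('tmux:pane:${pane_key}:cmd_md5') or { '' }
	if new_hash == old_hash {
		return false // unchanged: leave the running process alone
	}
	redis.set('tmux:pane:${pane_key}:cmd_md5', new_hash)!
	return true // changed: caller should kill and restart the pane command
}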

View File

@@ -0,0 +1,74 @@
#!/usr/bin/env hero
// Demonstration of multi-line command support in tmux heroscripts
// This example shows how to use multi-line commands in pane configurations
// Create a development session
!!tmux.session_create
name:"dev_multiline"
reset:true
// Create a 4-pane development workspace
!!tmux.window_ensure
name:"dev_multiline|workspace"
cat:"4pane"
// Pane 1: Development environment setup
!!tmux.pane_ensure
name:"dev_multiline|workspace|1"
label:"dev_setup"
cmd:'
echo "=== Development Environment Setup ==="
echo "Current directory: $(pwd)"
echo "Git status:"
git status --porcelain || echo "Not a git repository"
echo "Available disk space:"
df -h .
echo "Development setup complete"
'
// Pane 2: System monitoring
!!tmux.pane_ensure
name:"dev_multiline|workspace|2"
label:"monitoring"
cmd:'
echo "=== System Monitoring ==="
echo "System uptime:"
uptime
echo "Memory usage:"
free -h 2>/dev/null || vm_stat | head -5
echo "CPU info:"
sysctl -n machdep.cpu.brand_string 2>/dev/null || cat /proc/cpuinfo | grep "model name" | head -1
echo "Monitoring setup complete"
'
// Pane 3: Network diagnostics
!!tmux.pane_ensure
name:"dev_multiline|workspace|3"
label:"network"
cmd:'
echo "=== Network Diagnostics ==="
echo "Network interfaces:"
ifconfig | grep -E "^[a-z]|inet " | head -10
echo "DNS configuration:"
cat /etc/resolv.conf 2>/dev/null || scutil --dns | head -10
echo "Network diagnostics complete"
'
// Pane 4: File operations and cleanup
!!tmux.pane_ensure
name:"dev_multiline|workspace|4"
label:"file_ops"
cmd:'
echo "=== File Operations ==="
echo "Creating temporary workspace..."
mkdir -p /tmp/dev_workspace
cd /tmp/dev_workspace
echo "Current location: $(pwd)"
echo "Creating sample files..."
echo "Sample content" > sample.txt
echo "Another file" > another.txt
echo "Files created:"
ls -la
echo "File operations complete"
'

1
examples/osal/ubuntu/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
ubuntu_do

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.osal.ubuntu
ubuntu.fix_mirrors()!
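ubuntu.fix_mirrors() presumably applies the same change as the ubuntu_sources_fix shell function added to install_v.sh later in this commit. A rough standalone sketch of that idea in V (all names and paths here are illustrative assumptions, not the module's code):

import os

// point apt at the rotating Ubuntu mirror list (deb822 format)
fn fix_mirrors_sketch() ! {
	codename_res := os.execute('lsb_release -sc')
	if codename_res.exit_code != 0 {
		return error('lsb_release not available')
	}
	codename := codename_res.output.trim_space()
	content := 'Types: deb
URIs: mirror://mirrors.ubuntu.com/mirrors.txt
Suites: ${codename} ${codename}-updates ${codename}-backports ${codename}-security
Components: main restricted universe multiverse
'
	os.write_file('/etc/apt/sources.list.d/ubuntu.sources', content)!
	os.execute('apt update -qq')
}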

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.heropods
// Initialize factory
mut factory := heropods.new(
reset: false
use_podman: true
) or { panic('Failed to init ContainerFactory: ${err}') }
println('=== HeroPods Refactored API Demo ===')
// Step 1: factory.new() now only creates a container definition/handle
// It does NOT create the actual container in the backend yet
mut container := factory.new(
name: 'myalpine'
image: .custom
custom_image_name: 'alpine_3_20'
docker_url: 'docker.io/library/alpine:3.20'
)!
println(' Container definition created: ${container.name}')
println(' (No actual container created in backend yet)')
// Step 2: container.start() handles creation and starting
// - Checks if container exists in backend
// - Creates it if it doesn't exist
// - Starts it if it exists but is stopped
println('\n--- First start() call ---')
container.start()!
println(' Container started successfully')
// Step 3: Multiple start() calls are now idempotent
println('\n--- Second start() call (should be idempotent) ---')
container.start()!
println(' Second start() call successful - no errors!')
// Step 4: Execute commands in the container and save results
println('\n--- Executing commands in container ---')
result1 := container.exec(cmd: 'ls -la /')!
println(' Command executed: ls -la /')
println('Result: ${result1}')
result2 := container.exec(cmd: 'echo "Hello from container!"')!
println(' Command executed: echo "Hello from container!"')
println('Result: ${result2}')
result3 := container.exec(cmd: 'uname -a')!
println(' Command executed: uname -a')
println('Result: ${result3}')
// Step 5: container.delete() works naturally on the instance
println('\n--- Deleting container ---')
container.delete()!
println(' Container deleted successfully')
println('\n=== Demo completed! ===')
println('The refactored API now works as expected:')
println('- factory.new() creates definition only')
println('- container.start() is idempotent')
println('- container.exec() works and returns results')
println('- container.delete() works on instances')

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.virt.heropods
mut factory := heropods.new(
reset: false
use_podman: true
) or { panic('Failed to init ContainerFactory: ${err}') }
mut container := factory.new(
name: 'myalpine'
image: .custom
custom_image_name: 'alpine_3_20'
docker_url: 'docker.io/library/alpine:3.20'
)!
container.start()!
container.exec(cmd: 'ls')!
container.stop()!

View File

@@ -1,4 +0,0 @@
mkdir -p cd /tmp/busyb
cd /tmp/busyb
podman export $(podman create busybox) | tar -C /tmp/busyb -xvf -

View File

@@ -1 +0,0 @@
apt install

View File

@@ -1,6 +0,0 @@
## busybox
- use docker, expand it into a directory

View File

@@ -1,23 +1,8 @@
{
	"folders": [
		{
-			"path": "lib"
+			"path": "."
		},
-		{
-			"path": "aiprompts"
-		},
-		{
-			"path": "research"
-		},
-		{
-			"path": "examples"
-		},
-		{
-			"path": "cli"
-		},
-		{
-			"path": "manual"
-		}
	],
	"settings": {
		"extensions.ignoreRecommendations": false
@@ -43,4 +28,4 @@
		"tomoki1207.pdf"
	]
}
}

View File

@@ -4,7 +4,7 @@ set -e
os_name="$(uname -s)" os_name="$(uname -s)"
arch_name="$(uname -m)" arch_name="$(uname -m)"
version='1.0.29' version='1.0.33'
# Base URL for GitHub releases # Base URL for GitHub releases
@@ -121,7 +121,9 @@ echo "Download URL for your platform: $url"
# Download the file # Download the file
curl -o /tmp/downloaded_file -L "$url" curl -o /tmp/downloaded_file -L "$url"
# Check if file size is greater than 10 MB set -e
# Check if file size is greater than 2 MB
file_size=$(du -m /tmp/downloaded_file | cut -f1) file_size=$(du -m /tmp/downloaded_file | cut -f1)
if [ "$file_size" -ge 2 ]; then if [ "$file_size" -ge 2 ]; then
# Create the target directory if it doesn't exist # Create the target directory if it doesn't exist
@@ -139,6 +141,6 @@ if [ "$file_size" -ge 2 ]; then
export PATH=$PATH:$hero_bin_path export PATH=$PATH:$hero_bin_path
hero -version hero -version
else else
echo "Downloaded file is less than 10 MB. Process aborted." echo "Downloaded file is less than 2 MB. Process aborted."
exit 1 exit 1
fi fi

View File

@@ -5,11 +5,11 @@ import flag
fn addtoscript(tofind string, toadd string) ! {
	home_dir := os.home_dir()
-	mut rc_file := '${home_dir}/.zshrc'
+	mut rc_file := '${home_dir}/.zprofile'
	if !os.exists(rc_file) {
		rc_file = '${home_dir}/.bashrc'
		if !os.exists(rc_file) {
-			return error('No .zshrc or .bashrc found in home directory')
+			return error('No .zprofile or .bashrc found in home directory')
		}
	}
@@ -65,15 +65,18 @@ println('Herolib installation completed successfully!')

// Add vtest alias
addtoscript('alias vtest=', "alias vtest='v -stats -enable-globals -show-c-output -n -w -cg -gc none -cc tcc test' ") or {
	eprintln('Failed to add vtest alias: ${err}')
+	exit(1)
}

// Add vrun alias
addtoscript('alias vrun=', "alias vrun='v -stats -enable-globals -show-c-output -n -w -cg -gc none -cc tcc run' ") or {
	eprintln('Failed to add vrun alias: ${err}')
+	exit(1)
}

addtoscript('HOME/hero/bin', 'export PATH="\$PATH:\$HOME/hero/bin"') or {
	eprintln('Failed to add path to hero, ${err}')
+	exit(1)
}

// ulimit -n 32000

View File

@@ -34,24 +34,24 @@ for arg in "$@"; do
	-h|--help)
		print_help
		exit 0
		;;
	--reset)
		RESET=true
		;;
	--remove)
		REMOVE=true
		;;
	--herolib)
		HEROLIB=true
		;;
	--analyzer)
		INSTALL_ANALYZER=true
		;;
	*)
		echo "Unknown option: $arg"
		echo "Use -h or --help to see available options"
		exit 1
		;;
	esac
done
@@ -66,8 +66,8 @@ function run_sudo() {
if [ "$(id -u)" -eq 0 ]; then if [ "$(id -u)" -eq 0 ]; then
# We are root, run the command directly # We are root, run the command directly
"$@" "$@"
# Check if sudo is installed # Check if sudo is installed
elif command_exists sudo; then elif command_exists sudo; then
# Use sudo to run the command # Use sudo to run the command
sudo "$@" sudo "$@"
else else
@@ -81,6 +81,65 @@ export DIR_BUILD="/tmp"
export DIR_CODE="$DIR_BASE/code"
export DIR_CODE_V="$DIR_BASE/_code"

+check_release() {
+	if ! command -v lsb_release >/dev/null 2>&1; then
+		echo "❌ lsb_release command not found. Install 'lsb-release' package first."
+		exit 1
+	fi
+	CODENAME=$(lsb_release -sc)
+	RELEASE=$(lsb_release -rs)
+	if dpkg --compare-versions "$RELEASE" lt "24.04"; then
+		echo " Detected Ubuntu $RELEASE ($CODENAME). Skipping mirror fix (requires 24.04+)."
+		return 1
+	fi
+	return 0
+}
+
+ubuntu_sources_fix() {
+	# Check if we're on Ubuntu
+	if [[ "${OSNAME}" != "ubuntu" ]]; then
+		echo " Not running on Ubuntu. Skipping mirror fix."
+		return 1
+	fi
+	if check_release; then
+		local CODENAME
+		CODENAME=$(lsb_release -sc)
+		local TIMESTAMP
+		TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+		echo "🔎 Fixing apt mirror setup for Ubuntu $(lsb_release -rs) ($CODENAME)..."
+		if [ -f /etc/apt/sources.list ]; then
+			echo "📦 Backing up /etc/apt/sources.list -> /etc/apt/sources.list.backup.$TIMESTAMP"
+			sudo mv /etc/apt/sources.list /etc/apt/sources.list.backup.$TIMESTAMP
+		fi
+		if [ -f /etc/apt/sources.list.d/ubuntu.sources ]; then
+			echo "📦 Backing up /etc/apt/sources.list.d/ubuntu.sources -> /etc/apt/sources.list.d/ubuntu.sources.backup.$TIMESTAMP"
+			sudo mv /etc/apt/sources.list.d/ubuntu.sources /etc/apt/sources.list.d/ubuntu.sources.backup.$TIMESTAMP
+		fi
+		echo "📝 Writing new /etc/apt/sources.list.d/ubuntu.sources"
+		sudo tee /etc/apt/sources.list.d/ubuntu.sources >/dev/null <<EOF
+Types: deb
+URIs: mirror://mirrors.ubuntu.com/mirrors.txt
+Suites: $CODENAME $CODENAME-updates $CODENAME-backports $CODENAME-security
+Components: main restricted universe multiverse
+EOF
+		echo "🔄 Running apt update..."
+		sudo apt update -qq
+		echo "✅ Done! Your system now uses the rotating Ubuntu mirror list."
+	fi
+}
+
function sshknownkeysadd {
	mkdir -p ~/.ssh
	touch ~/.ssh/known_hosts
@@ -91,16 +150,16 @@ function sshknownkeysadd {
	if ! grep git.threefold.info ~/.ssh/known_hosts > /dev/null
	then
		ssh-keyscan git.threefold.info >> ~/.ssh/known_hosts
	fi
	git config --global pull.rebase false
}

function package_check_install {
	local command_name="$1"
	if command -v "$command_name" >/dev/null 2>&1; then
		echo "command '$command_name' is already installed."
	else
		package_install '$command_name'
	fi
}
@@ -109,16 +168,16 @@ function package_install {
local command_name="$1" local command_name="$1"
if [[ "${OSNAME}" == "ubuntu" ]]; then if [[ "${OSNAME}" == "ubuntu" ]]; then
if is_github_actions; then if is_github_actions; then
run_sudo apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential run_sudo apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential
else else
apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential
fi fi
elif [[ "${OSNAME}" == "darwin"* ]]; then elif [[ "${OSNAME}" == "darwin"* ]]; then
brew install $command_name brew install $command_name
elif [[ "${OSNAME}" == "alpine"* ]]; then elif [[ "${OSNAME}" == "alpine"* ]]; then
apk add $command_name apk add $command_name
elif [[ "${OSNAME}" == "arch"* ]]; then elif [[ "${OSNAME}" == "arch"* ]]; then
pacman --noconfirm -Su $command_name pacman --noconfirm -Su $command_name
else else
echo "platform : ${OSNAME} not supported" echo "platform : ${OSNAME} not supported"
@@ -142,36 +201,39 @@ is_github_actions() {
function myplatform {
	if [[ "${OSTYPE}" == "darwin"* ]]; then
		export OSNAME='darwin'
	elif [ -e /etc/os-release ]; then
		# Read the ID field from the /etc/os-release file
		export OSNAME=$(grep '^ID=' /etc/os-release | cut -d= -f2)
		if [ "${os_id,,}" == "ubuntu" ]; then
			export OSNAME="ubuntu"
		fi
		if [ "${OSNAME}" == "archarm" ]; then
			export OSNAME="arch"
		fi
		if [ "${OSNAME}" == "debian" ]; then
			export OSNAME="ubuntu"
		fi
	else
		echo "Unable to determine the operating system."
		exit 1
	fi
	# if [ "$(uname -m)" == "x86_64" ]; then
	# echo "This system is running a 64-bit processor."
	# else
	# echo "This system is not running a 64-bit processor."
	# exit 1
	# fi
}

myplatform

function os_update {
+	if [[ "${OSNAME}" == "ubuntu" ]]; then
+		ubuntu_sources_fix
+	fi
	echo ' - os update'
	if [[ "${OSNAME}" == "ubuntu" ]]; then
		if is_github_actions; then
@@ -179,53 +241,53 @@ function os_update {
		else
			rm -f /var/lib/apt/lists/lock
			rm -f /var/cache/apt/archives/lock
			rm -f /var/lib/dpkg/lock*
		fi
		export TERM=xterm
		export DEBIAN_FRONTEND=noninteractive
		run_sudo dpkg --configure -a
		run_sudo apt update -y
		if is_github_actions; then
			echo "** IN GITHUB ACTIONS, DON'T DO UPDATE"
		else
			set +e
			echo "** UPDATE"
			apt-mark hold grub-efi-amd64-signed
			set -e
			apt upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes
			apt autoremove -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes
		fi
		#apt install apt-transport-https ca-certificates curl software-properties-common -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes
-		package_install "apt-transport-https ca-certificates curl wget software-properties-common tmux make tcc gcc"
+		package_install "apt-transport-https ca-certificates curl wget software-properties-common tmux make gcc"
		package_install "rclone rsync mc redis-server screen net-tools git dnsutils htop ca-certificates screen lsb-release binutils pkg-config libssl-dev iproute2"
	elif [[ "${OSNAME}" == "darwin"* ]]; then
		if command -v brew >/dev/null 2>&1; then
			echo ' - homebrew installed'
		else
			export NONINTERACTIVE=1
			/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
			unset NONINTERACTIVE
		fi
		set +e
		brew install mc redis curl tmux screen htop wget rclone tcc
		set -e
	elif [[ "${OSNAME}" == "alpine"* ]]; then
		apk update screen git htop tmux
		apk add mc curl rsync htop redis bash bash-completion screen git rclone
		sed -i 's#/bin/ash#/bin/bash#g' /etc/passwd
	elif [[ "${OSNAME}" == "arch"* ]]; then
		pacman -Syy --noconfirm
		pacman -Syu --noconfirm
		pacman -Su --noconfirm arch-install-scripts gcc mc git tmux curl htop redis wget screen net-tools git sudo htop ca-certificates lsb-release screen rclone
		# Check if builduser exists, create if not
		if ! id -u builduser > /dev/null 2>&1; then
			useradd -m builduser
			echo "builduser:$(openssl rand -base64 32 | sha256sum | base64 | head -c 32)" | chpasswd
			echo 'builduser ALL=(ALL) NOPASSWD: ALL' | tee /etc/sudoers.d/builduser
		fi
		# if [[ -n "${DEBUG}" ]]; then
		# execute_with_marker "paru_install" paru_install
		# fi
@@ -235,7 +297,7 @@ function os_update {
function hero_lib_pull {
	pushd $DIR_CODE/github/freeflowuniverse/herolib 2>&1 >> /dev/null
	if [[ $(git status -s) ]]; then
		echo "There are uncommitted changes in the Git repository herolib."
		return 1
@@ -254,7 +316,7 @@ function hero_lib_get {
		pushd $DIR_CODE/github/freeflowuniverse 2>&1 >> /dev/null
		git clone --depth 1 --no-single-branch https://github.com/freeflowuniverse/herolib.git
		popd 2>&1 >> /dev/null
	fi
}

# function install_secp256k1 {
@@ -283,7 +345,7 @@ function hero_lib_get {
#     else
#         make install
#     fi
#     # Cleanup
#     cd ..
#     rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
@@ -311,7 +373,7 @@ remove_all() {
echo "Removing v-analyzer from system..." echo "Removing v-analyzer from system..."
run_sudo rm -f $(which v-analyzer) run_sudo rm -f $(which v-analyzer)
fi fi
# Remove v-analyzer path from rc files # Remove v-analyzer path from rc files
for RC_FILE in ~/.zshrc ~/.bashrc; do for RC_FILE in ~/.zshrc ~/.bashrc; do
if [ -f "$RC_FILE" ]; then if [ -f "$RC_FILE" ]; then
@@ -327,7 +389,7 @@ remove_all() {
echo "Cleaned up $RC_FILE" echo "Cleaned up $RC_FILE"
fi fi
done done
echo "V removal complete" echo "V removal complete"
} }
@@ -335,31 +397,31 @@ remove_all() {
# Function to check if a service is running and start it if needed
check_and_start_redis() {
	# Normal service management for non-container environments
	if [[ "${OSNAME}" == "ubuntu" ]] || [[ "${OSNAME}" == "debian" ]]; then
		# Handle Redis installation for GitHub Actions environment
		if is_github_actions; then
			# Import Redis GPG key
			curl -fsSL https://packages.redis.io/gpg | run_sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
			# Add Redis repository
			echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | run_sudo tee /etc/apt/sources.list.d/redis.list
			# Install Redis
			run_sudo apt-get update
			run_sudo apt-get install -y redis
			# Start Redis
			redis-server --daemonize yes
			# Print versions
			redis-cli --version
			redis-server --version
			return
		fi
		# Check if running inside a container
		if grep -q "/docker/" /proc/1/cgroup || [ ! -d "/run/systemd/system" ]; then
			echo "Running inside a container. Starting redis directly."
@@ -378,7 +440,7 @@ check_and_start_redis() {
			fi
			return
		fi
		if systemctl is-active --quiet "redis"; then
			echo "redis is already running."
		else
@@ -391,7 +453,7 @@ check_and_start_redis() {
				exit 1
			fi
		fi
	elif [[ "${OSNAME}" == "darwin"* ]]; then
		# Check if we're in GitHub Actions
		if is_github_actions; then
			echo "Running in GitHub Actions on macOS. Starting redis directly..."
@@ -416,14 +478,14 @@ check_and_start_redis() {
				brew services start redis
			fi
		fi
	elif [[ "${OSNAME}" == "alpine"* ]]; then
		if rc-service "redis" status | grep -q "running"; then
			echo "redis is already running."
		else
			echo "redis is not running. Starting it..."
			rc-service "redis" start
		fi
	elif [[ "${OSNAME}" == "arch"* ]]; then
		if systemctl is-active --quiet "redis"; then
			echo "redis is already running."
		else
@@ -437,7 +499,7 @@ check_and_start_redis() {
}

v-install() {
	# Check if v is already installed and in PATH
	if command_exists v; then
		echo "V is already installed and in PATH."
@@ -445,8 +507,8 @@ v-install() {
		# For now, just exit the function assuming it's okay
		return 0
	fi
	# Only clone and install if directory doesn't exist
	# Note: The original check was for ~/code/v, but the installation happens in ~/_code/v.
	if [ ! -d ~/_code/v ]; then
@@ -459,8 +521,8 @@ v-install() {
			exit 1
		fi
	fi
	# Only clone and install if directory doesn't exist
	# Note: The original check was for ~/code/v, but the installation happens in ~/_code/v.
	# Adjusting the check to the actual installation directory.
@@ -474,48 +536,48 @@ v-install() {
	fi
	# Check if the built executable can report its version
	if ! ~/_code/v/v -version > /dev/null 2>&1; then
		echo "Error: Built V executable (~/_code/v/v) failed to report version."
		exit 1
	fi
	echo "V built successfully. Creating symlink..."
	run_sudo ./v symlink
	# Verify v is in path
	if ! command_exists v; then
		echo "Error: V installation failed or not in PATH"
		echo "Please ensure ~/code/v is in your PATH"
		exit 1
	fi
	echo "V installation successful!"
}

v-analyzer() {
	set -ex
	# Install v-analyzer if requested
	if [ "$INSTALL_ANALYZER" = true ]; then
		echo "Installing v-analyzer..."
		cd /tmp
		v download -RD https://raw.githubusercontent.com/vlang/v-analyzer/main/install.vsh
		# Check if v-analyzer bin directory exists
		if [ ! -d "$HOME/.config/v-analyzer/bin" ]; then
			echo "Error: v-analyzer bin directory not found at $HOME/.config/v-analyzer/bin"
			echo "Please ensure v-analyzer was installed correctly"
			exit 1
		fi
		echo "v-analyzer installation successful!"
	fi
	# Add v-analyzer to PATH if installed
	if [ -d "$HOME/.config/v-analyzer/bin" ]; then
		V_ANALYZER_PATH='export PATH="$PATH:$HOME/.config/v-analyzer/bin"'
		# Function to add path to rc file if not present
		add_to_rc() {
			local RC_FILE="$1"
@@ -529,7 +591,7 @@ v-analyzer() {
			fi
		fi
	}
	# Add to both .zshrc and .bashrc if they exist
	add_to_rc ~/.zshrc
	if [ "$(uname)" = "Darwin" ] && [ -f ~/.bashrc ]; then
@@ -546,29 +608,23 @@ if [ "$REMOVE" = true ]; then
	exit 0
fi

-# Handle reset if requested
-if [ "$RESET" = true ]; then
-	remove_all
-	echo "Reset complete"
-fi

# Create code directory if it doesn't exist
mkdir -p ~/code

# Check if v needs to be installed
if [ "$RESET" = true ] || ! command_exists v; then
	os_update
	sshknownkeysadd
	# Install secp256k1
	v-install
fi

View File

@@ -3,9 +3,7 @@ module builder
import os
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
-import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.ui.console
-import v.embed_file

const heropath_ = os.dir(@FILE) + '/../'
@@ -52,10 +50,10 @@ pub mut:
pub fn (mut node Node) hero_install(args HeroInstallArgs) ! {
	console.print_debug('install hero')
-	mut bs := bootstrapper()
+	bootstrapper()
	myenv := node.environ_get()!
-	homedir := myenv['HOME'] or { return error("can't find HOME in env") }
+	_ := myenv['HOME'] or { return error("can't find HOME in env") }
	mut todo := []string{}
	if !args.compile {

View File

@@ -2,7 +2,7 @@ module builder
import freeflowuniverse.herolib.data.ipaddress

-type Executor = ExecutorLocal | ExecutorSSH
+type Executor = ExecutorLocal | ExecutorSSH | ExecutorCrun

pub struct ExecutorNewArguments {
pub mut:

217
lib/builder/executor_crun.v Normal file
View File

@@ -0,0 +1,217 @@
module builder

import os
import rand
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools

@[heap]
pub struct ExecutorCrun {
pub mut:
	container_id string // container ID for crun
	retry        int  = 1
	debug        bool = true
}

pub fn (mut executor ExecutorCrun) init() ! {
	// Verify container exists and is running
	result := osal.exec(cmd: 'crun state ${executor.container_id}', stdout: false) or {
		return error('Container ${executor.container_id} not found or not accessible')
	}
	// Parse state to ensure container is running
	if !result.output.contains('"status": "running"') {
		return error('Container ${executor.container_id} is not running')
	}
}

pub fn (mut executor ExecutorCrun) debug_on() {
	executor.debug = true
}

pub fn (mut executor ExecutorCrun) debug_off() {
	executor.debug = false
}

pub fn (mut executor ExecutorCrun) exec(args_ ExecArgs) !string {
	mut args := args_
	if executor.debug {
		console.print_debug('execute in container ${executor.container_id}: ${args.cmd}')
	}
	mut cmd := 'crun exec ${executor.container_id} ${args.cmd}'
	if args.cmd.contains('\n') {
		// For multiline commands, dedent the script, copy it into the
		// container, and run it there with bash
		script_content := texttools.dedent(args.cmd)
		executor.file_write('/tmp/exec_script.sh', script_content)!
		cmd = 'crun exec ${executor.container_id} bash /tmp/exec_script.sh'
	}
	res := osal.exec(cmd: cmd, stdout: args.stdout, debug: executor.debug)!
	return res.output
}

pub fn (mut executor ExecutorCrun) exec_interactive(args_ ExecArgs) ! {
	mut args := args_
	if args.cmd.contains('\n') {
		args.cmd = texttools.dedent(args.cmd)
		executor.file_write('/tmp/interactive_script.sh', args.cmd)!
		args.cmd = 'bash /tmp/interactive_script.sh'
	}
	cmd := 'crun exec -t ${executor.container_id} ${args.cmd}'
	console.print_debug(cmd)
	osal.execute_interactive(cmd)!
}

pub fn (mut executor ExecutorCrun) file_write(path string, text string) ! {
	if executor.debug {
		console.print_debug('Container ${executor.container_id} file write: ${path}')
	}
	// Write to temp file first, then copy into container
	temp_file := '/tmp/crun_file_${rand.uuid_v4()}'
	os.write_file(temp_file, text)!
	defer { os.rm(temp_file) or {} }
	// Use crun exec to copy file content
	cmd := 'cat ${temp_file} | crun exec -i ${executor.container_id} tee ${path} > /dev/null'
	osal.exec(cmd: cmd, stdout: false)!
}

pub fn (mut executor ExecutorCrun) file_read(path string) !string {
	if executor.debug {
		console.print_debug('Container ${executor.container_id} file read: ${path}')
	}
	return executor.exec(cmd: 'cat ${path}', stdout: false)
}

pub fn (mut executor ExecutorCrun) file_exists(path string) bool {
	if executor.debug {
		console.print_debug('Container ${executor.container_id} file exists: ${path}')
	}
	output := executor.exec(cmd: 'test -f ${path} && echo found || echo not found', stdout: false) or {
		return false
	}
	return output.trim_space() == 'found'
}

pub fn (mut executor ExecutorCrun) delete(path string) ! {
	if executor.debug {
		console.print_debug('Container ${executor.container_id} delete: ${path}')
	}
	executor.exec(cmd: 'rm -rf ${path}', stdout: false)!
}

pub fn (mut executor ExecutorCrun) upload(args SyncArgs) ! {
	// For container uploads, we need to copy files from host to container
	// Use crun exec with tar for efficient transfer
	mut src_path := pathlib.get(args.source)
	if !src_path.exists() {
		return error('Source path ${args.source} does not exist')
	}
	if src_path.is_dir() {
		// For directories, use tar to transfer
		temp_tar := '/tmp/crun_upload_${rand.uuid_v4()}.tar'
		osal.exec(
			cmd:    'tar -cf ${temp_tar} -C ${src_path.path_dir()} ${src_path.name()}'
			stdout: false
		)!
		defer { os.rm(temp_tar) or {} }
		// Extract in container
		cmd := 'cat ${temp_tar} | crun exec -i ${executor.container_id} tar -xf - -C ${args.dest}'
		osal.exec(cmd: cmd, stdout: args.stdout)!
	} else {
		// For single files
		executor.file_write(args.dest, src_path.read()!)!
	}
}

pub fn (mut executor ExecutorCrun) download(args SyncArgs) ! {
	// Download from container to host
	if executor.dir_exists(args.source) {
		// For directories
		temp_tar := '/tmp/crun_download_${rand.uuid_v4()}.tar'
		cmd := 'crun exec ${executor.container_id} tar -cf - -C ${args.source} . > ${temp_tar}'
		osal.exec(cmd: cmd, stdout: false)!
		defer { os.rm(temp_tar) or {} }
		// Extract on host
		osal.exec(
			cmd:    'mkdir -p ${args.dest} && tar -xf ${temp_tar} -C ${args.dest}'
			stdout: args.stdout
		)!
	} else {
		// For single files
		content := executor.file_read(args.source)!
		os.write_file(args.dest, content)!
	}
}

pub fn (mut executor ExecutorCrun) environ_get() !map[string]string {
	env := executor.exec(cmd: 'env', stdout: false) or {
		return error('Cannot get environment from container ${executor.container_id}')
	}
	mut res := map[string]string{}
	for line in env.split('\n') {
		if line.contains('=') {
			mut key, mut val := line.split_once('=') or { continue }
			key = key.trim(' ')
			val = val.trim(' ')
			res[key] = val
		}
	}
	return res
}

pub fn (mut executor ExecutorCrun) info() map[string]string {
	return {
		'category':     'crun'
		'container_id': executor.container_id
		'runtime':      'crun'
	}
}

pub fn (mut executor ExecutorCrun) shell(cmd string) ! {
	if cmd.len > 0 {
		osal.execute_interactive('crun exec -t ${executor.container_id} ${cmd}')!
	} else {
		osal.execute_interactive('crun exec -t ${executor.container_id} /bin/sh')!
	}
}

pub fn (mut executor ExecutorCrun) list(path string) ![]string {
	if !executor.dir_exists(path) {
		return error('Directory ${path} does not exist in container')
	}
	output := executor.exec(cmd: 'ls ${path}', stdout: false)!
	mut res := []string{}
	for line in output.split('\n') {
		line_trimmed := line.trim_space()
		if line_trimmed != '' {
			res << line_trimmed
		}
	}
	return res
}

pub fn (mut executor ExecutorCrun) dir_exists(path string) bool {
	output := executor.exec(cmd: 'test -d ${path} && echo found || echo not found', stdout: false) or {
		return false
	}
	return output.trim_space() == 'found'
}
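For orientation, a minimal usage sketch of the new executor follows; the container id is hypothetical, and in practice the executor would normally be obtained through the builder factory rather than constructed directly:

mut ex := ExecutorCrun{
	container_id: 'my_container'
}
ex.init()! // verifies the container exists and is running
out := ex.exec(cmd: 'uname -a')!
println(out)
ex.file_write('/tmp/hello.txt', 'hello from the host')!
assert ex.file_exists('/tmp/hello.txt')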

View File

@@ -14,6 +14,8 @@ pub fn (mut node Node) exec(args ExecArgs) !string {
	return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
} else if mut node.executor is ExecutorSSH {
	return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.exec(cmd: args.cmd, stdout: args.stdout)
}
panic('did not find right executor')
}
@@ -80,6 +82,8 @@ pub fn (mut node Node) exec_silent(cmd string) !string {
	return node.executor.exec(cmd: cmd, stdout: false)
} else if mut node.executor is ExecutorSSH {
	return node.executor.exec(cmd: cmd, stdout: false)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.exec(cmd: cmd, stdout: false)
}
panic('did not find right executor')
}
@@ -89,8 +93,11 @@ pub fn (mut node Node) exec_interactive(cmd_ string) ! {
	node.executor.exec_interactive(cmd: cmd_)!
} else if mut node.executor is ExecutorSSH {
	node.executor.exec_interactive(cmd: cmd_)!
+} else if mut node.executor is ExecutorCrun {
+	node.executor.exec_interactive(cmd: cmd_)!
+} else {
+	panic('did not find right executor')
}
-panic('did not find right executor')
}

pub fn (mut node Node) file_write(path string, text string) ! {
@@ -98,6 +105,8 @@ pub fn (mut node Node) file_write(path string, text string) ! {
	return node.executor.file_write(path, text)
} else if mut node.executor is ExecutorSSH {
	return node.executor.file_write(path, text)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.file_write(path, text)
}
panic('did not find right executor')
}
@@ -107,6 +116,8 @@ pub fn (mut node Node) file_read(path string) !string {
	return node.executor.file_read(path)
} else if mut node.executor is ExecutorSSH {
	return node.executor.file_read(path)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.file_read(path)
}
panic('did not find right executor')
}
@@ -116,6 +127,8 @@ pub fn (mut node Node) file_exists(path string) bool {
	return node.executor.file_exists(path)
} else if mut node.executor is ExecutorSSH {
	return node.executor.file_exists(path)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.file_exists(path)
}
panic('did not find right executor')
}
@@ -137,6 +150,8 @@ pub fn (mut node Node) delete(path string) ! {
	return node.executor.delete(path)
} else if mut node.executor is ExecutorSSH {
	return node.executor.delete(path)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.delete(path)
}
panic('did not find right executor')
}
@@ -179,6 +194,8 @@ pub fn (mut node Node) download(args_ SyncArgs) ! {
	return node.executor.download(args)
} else if mut node.executor is ExecutorSSH {
	return node.executor.download(args)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.download(args)
}
panic('did not find right executor')
}
@@ -208,6 +225,8 @@ pub fn (mut node Node) upload(args_ SyncArgs) ! {
	return node.executor.upload(args)
} else if mut node.executor is ExecutorSSH {
	return node.executor.upload(args)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.upload(args)
}
panic('did not find right executor')
}
@@ -224,6 +243,8 @@ pub fn (mut node Node) environ_get(args EnvGetParams) !map[string]string {
	return node.executor.environ_get()
} else if mut node.executor is ExecutorSSH {
	return node.executor.environ_get()
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.environ_get()
}
panic('did not find right executor')
}
@@ -235,6 +256,8 @@ pub fn (mut node Node) info() map[string]string {
	return node.executor.info()
} else if mut node.executor is ExecutorSSH {
	return node.executor.info()
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.info()
}
panic('did not find right executor')
}
@@ -244,6 +267,8 @@ pub fn (mut node Node) shell(cmd string) ! {
	return node.executor.shell(cmd)
} else if mut node.executor is ExecutorSSH {
	return node.executor.shell(cmd)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.shell(cmd)
}
panic('did not find right executor')
}
@@ -257,6 +282,8 @@ pub fn (mut node Node) list(path string) ![]string {
	return node.executor.list(path)
} else if mut node.executor is ExecutorSSH {
	return node.executor.list(path)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.list(path)
}
panic('did not find right executor')
}
@@ -266,6 +293,8 @@ pub fn (mut node Node) dir_exists(path string) bool {
	return node.executor.dir_exists(path)
} else if mut node.executor is ExecutorSSH {
	return node.executor.dir_exists(path)
+} else if mut node.executor is ExecutorCrun {
+	return node.executor.dir_exists(path)
}
panic('did not find right executor')
}
@@ -275,8 +304,11 @@ pub fn (mut node Node) debug_off() {
	node.executor.debug_off()
} else if mut node.executor is ExecutorSSH {
	node.executor.debug_off()
+} else if mut node.executor is ExecutorCrun {
+	node.executor.debug_off()
+} else {
+	panic('did not find right executor')
}
-panic('did not find right executor')
}

pub fn (mut node Node) debug_on() {
@@ -284,6 +316,9 @@ pub fn (mut node Node) debug_on() {
	node.executor.debug_on()
} else if mut node.executor is ExecutorSSH {
	node.executor.debug_on()
+} else if mut node.executor is ExecutorCrun {
+	node.executor.debug_on()
+} else {
+	panic('did not find right executor')
}
-panic('did not find right executor')
}

View File

@@ -18,6 +18,7 @@ pub mut:
pub fn this_remote_exec(args_ ThisRemoteArgs) !bool {
	mut args := args_
	if args.script.trim_space().starts_with('/tmp/remote_') {
+		// TODO: don't understand this
		return false // means we need to execute
	}
	addr := texttools.to_array(args.nodes)

View File

@@ -0,0 +1,4 @@
module builder
pub fn (mut node Node) ubuntu_sources_fix() {
}
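The function body above is an empty stub. One plausible implementation, sketched from the ubuntu_sources_fix shell function added to install_v.sh in this same commit (everything below is an illustrative assumption, not committed code):

// pub fn (mut node Node) ubuntu_sources_fix() ! {
// 	codename := node.exec(cmd: 'lsb_release -sc', stdout: false)!.trim_space()
// 	node.file_write('/etc/apt/sources.list.d/ubuntu.sources', 'Types: deb
// URIs: mirror://mirrors.ubuntu.com/mirrors.txt
// Suites: ${codename} ${codename}-updates ${codename}-backports ${codename}-security
// Components: main restricted universe multiverse
// ')!
// 	node.exec(cmd: 'apt update -qq')!
// }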

View File

@@ -1,13 +1,8 @@
module mycelium

import freeflowuniverse.herolib.osal.core as osal
-import freeflowuniverse.herolib.core
-import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.ui.console
-import freeflowuniverse.herolib.core.texttools
-import freeflowuniverse.herolib.ui
import os
-import time
import json

pub fn check() bool {

View File

@@ -2,7 +2,6 @@ module mycelium
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook { PlayBook }
-import freeflowuniverse.herolib.ui.console
import json

__global (

View File

@@ -150,21 +150,20 @@ pub fn plbook_code_get(cmd Command) !string {
// same as session_run_get but will also run the plbook
pub fn plbook_run(cmd Command) !(&playbook.PlayBook, string) {
	heroscript := cmd.flags.get_string('heroscript') or { '' }
	mut path := ''
	mut plbook := if heroscript.len > 0 {
		playbook.new(text: heroscript)!
	} else {
-		path
-		= plbook_code_get(cmd)!
+		path = plbook_code_get(cmd)!
		if path.len == 0 {
			return error(cmd.help_message())
		}
		// add all actions inside to the plbook
		playbook.new(path: path)!
	}
	dagu := cmd.flags.get_bool('dagu') or { false }
	playcmds.run(plbook: plbook)!

View File

@@ -11,7 +11,9 @@ fn testsuite_begin() {
}

fn test_logger() {
-	mut logger := new('/tmp/testlogs')!
+	mut logger := new(LoggerFactoryArgs{
+		path: '/tmp/testlogs'
+	})!
	// Test stdout logging
	logger.log(LogItemArgs{

View File

@@ -1,6 +1,6 @@
module playbook

-import freeflowuniverse.herolib.develop.gittools // Added import for gittools
+// import freeflowuniverse.herolib.develop.gittools // Added import for gittools

// REMARK: include is done in play_core

View File

@@ -25,6 +25,8 @@ pub fn encode[T](obj T) ![]u8 {
		d.add_u32(u32(obj.$(field.name)))
	} $else $if field.typ is u64 {
		d.add_u64(u64(obj.$(field.name)))
+	} $else $if field.typ is i64 {
+		d.add_i64(i64(obj.$(field.name)))
	} $else $if field.typ is time.Time {
		d.add_time(time.new(obj.$(field.name)))
	// Arrays of primitive types
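For illustration, the new branch means a struct field of type i64 now lands in the binary payload; a hypothetical call (struct and values invented):

struct Sample {
	count i64
}

data := encode[Sample](Sample{ count: -42 })! // count is written via d.add_i64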

View File

@@ -1,8 +1,6 @@
module encoderhero

-import time
import freeflowuniverse.herolib.data.paramsparser
-import freeflowuniverse.herolib.core.texttools

pub struct Decoder[T] {
pub mut:

View File

@@ -4,7 +4,6 @@ import freeflowuniverse.herolib.data.paramsparser
import time
import v.reflection
import freeflowuniverse.herolib.data.ourtime
-import freeflowuniverse.herolib.core.texttools
// import freeflowuniverse.herolib.ui.console

// Encoder encodes an `Any` type into HEROSCRIPT representation.

View File

@@ -1,7 +1,5 @@
module encoderhero

-import time

// byte array versions of the most common tokens/chars to avoid reallocations
const null_in_bytes = 'null'

View File

@@ -25,7 +25,7 @@ fn test_ping() {
mut addr := IPAddress{ mut addr := IPAddress{
addr: '127.0.0.1' addr: '127.0.0.1'
} }
assert addr.ping(timeout: 3)! assert addr.ping(nr_ok: 3)!
assert addr.port == 0 assert addr.port == 0
} }
@@ -33,7 +33,7 @@ fn test_ping_fails() {
mut addr := IPAddress{ mut addr := IPAddress{
addr: '22.22.22.22' addr: '22.22.22.22'
} }
assert addr.ping(timeout: 3)! == false assert addr.ping(nr_ok: 3)! == false
assert addr.port == 0 assert addr.port == 0
assert addr.addr == '22.22.22.22' assert addr.addr == '22.22.22.22'
} }
@@ -56,7 +56,7 @@ fn test_ipv6() {
mut addr := new('202:6a34:cd78:b0d7:5521:8de7:218e:6680') or { panic(err) } mut addr := new('202:6a34:cd78:b0d7:5521:8de7:218e:6680') or { panic(err) }
assert addr.cat == .ipv6 assert addr.cat == .ipv6
assert addr.port == 0 assert addr.port == 0
// assert addr.ping(timeout: 3)! == false // assert addr.ping(nr_ok: 3)! == false
} }
fn test_ipv6b() { fn test_ipv6b() {
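The two hunks above track a parameter rename on ping() from timeout to nr_ok (assumed here to mean the number of successful replies required before the call returns true). A hedged usage sketch:
mut addr := new('127.0.0.1')!
if addr.ping(nr_ok: 3)! {
	println('host replied to 3 pings')
}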

View File

@@ -23,7 +23,7 @@ pub mut:
} }
// is_running checks if the node is operational by pinging its address // is_running checks if the node is operational by pinging its address
fn (node &StreamerNode) is_running() bool { fn (node &StreamerNode) is_running() !bool {
return osal.ping(address: node.address, retry: 2)! return osal.ping(address: node.address, retry: 2)!
} }
@@ -198,7 +198,7 @@ pub fn (mut node StreamerNode) handle_ping_nodes() ! {
mut i := 0 mut i := 0
for i < node.workers.len { for i < node.workers.len {
worker := &node.workers[i] worker := &node.workers[i]
if !worker.is_running() { if !(worker.is_running() or { false }) {
log_event(event_type: 'logs', message: 'Worker ${worker.address} is not running') log_event(event_type: 'logs', message: 'Worker ${worker.address} is not running')
log_event(event_type: 'logs', message: 'Removing worker ${worker.public_key}') log_event(event_type: 'logs', message: 'Removing worker ${worker.public_key}')
node.workers.delete(i) node.workers.delete(i)
@@ -212,7 +212,7 @@ pub fn (mut node StreamerNode) handle_ping_nodes() ! {
} }
} }
} else { } else {
if !node.is_running() { if !(node.is_running() or { false }) {
return error('Worker node is not running') return error('Worker node is not running')
} }
if node.master_public_key.len == 0 { if node.master_public_key.len == 0 {
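Since is_running() now returns !bool, every call site has to decide what a transport error means. The hunks above consistently map an error to false; a minimal sketch of that pattern, using names from this diff:
running := worker.is_running() or { false } // treat a failed check as "not running"
if !running {
	node.workers.delete(i)
}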

View File

@@ -244,7 +244,7 @@ pub fn (mut self Streamer) add_worker(params StreamerNodeParams) !StreamerNode {
mut worker_node := self.new_node(params)! mut worker_node := self.new_node(params)!
if !worker_node.is_running() { if !(worker_node.is_running() or { false }) {
return error('Worker node is not running') return error('Worker node is not running')
} }

View File

@@ -175,7 +175,7 @@ fn test_get_u64_default() {
assert params.get_u64_default('key3', 17)! == 17 assert params.get_u64_default('key3', 17)! == 17
} }
fn test_get_u32()! { fn test_get_u32() ! {
text := ' text := '
key1: val1 key1: val1
key2: 19 key2: 19

View File

@@ -2,7 +2,7 @@ module gittools
import crypto.md5 import crypto.md5
import freeflowuniverse.herolib.core.pathlib import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.ui.console // import freeflowuniverse.herolib.ui.console
import os import os
import json import json

View File

@@ -65,7 +65,7 @@ pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string {
// means current dir // means current dir
args.path = os.getwd() args.path = os.getwd()
mut curdiro := pathlib.get_dir(path: args.path, create: false)! mut curdiro := pathlib.get_dir(path: args.path, create: false)!
mut parentpath := curdiro.parent_find('.git') or { pathlib.Path{} } // mut parentpath := curdiro.parent_find('.git') or { pathlib.Path{} }
args.path = curdiro.path args.path = curdiro.path
} }
if !os.exists(args.path) { if !os.exists(args.path) {

View File

@@ -1,8 +1,8 @@
module gittools module gittools
import freeflowuniverse.herolib.core.redisclient // import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.ui.console import freeflowuniverse.herolib.ui.console
import time // import time
// ReposGetArgs defines arguments to retrieve repositories from the git structure. // ReposGetArgs defines arguments to retrieve repositories from the git structure.
// It includes filters by name, account, provider, and an option to clone a missing repo. // It includes filters by name, account, provider, and an option to clone a missing repo.

View File

@@ -27,7 +27,7 @@ fn (mut repo GitRepo) cache_get() ! {
if repo_json.len > 0 { if repo_json.len > 0 {
mut cached := json.decode(GitRepo, repo_json)! mut cached := json.decode(GitRepo, repo_json)!
cached.gs = repo.gs cached.gs = repo.gs
cached.config.remote_check_period = 3600 * 24 * 7 cached.config.remote_check_period = 3600 * 24 * 7
repo = cached repo = cached
} }
} }

View File

@@ -2,7 +2,7 @@ module gittools
import freeflowuniverse.herolib.ui.console import freeflowuniverse.herolib.ui.console
import os import os
import freeflowuniverse.herolib.core.pathlib // import freeflowuniverse.herolib.core.pathlib
@[params] @[params]
pub struct GitCloneArgs { pub struct GitCloneArgs {
@@ -40,17 +40,17 @@ pub fn (mut gitstructure GitStructure) clone(args GitCloneArgs) !&GitRepo {
gitstructure.repos[key_] = &repo gitstructure.repos[key_] = &repo
if repo.exists() { if repo.exists() {
console.print_green("Repository already exists at ${repo.path()}") console.print_green('Repository already exists at ${repo.path()}')
// Load the existing repository status // Load the existing repository status
repo.load_internal() or { repo.load_internal() or {
console.print_debug('Could not load existing repository status: ${err}') console.print_debug('Could not load existing repository status: ${err}')
} }
return &repo return &repo
} }
// Check if path exists but is not a git repository // Check if path exists but is not a git repository
if os.exists(repo.path()) { if os.exists(repo.path()) {
return error("Path exists but is not a git repository: ${repo.path()}") return error('Path exists but is not a git repository: ${repo.path()}')
} }
if args.sshkey.len > 0 { if args.sshkey.len > 0 {

View File

@@ -2,7 +2,7 @@ module gittools
import time import time
import freeflowuniverse.herolib.ui.console import freeflowuniverse.herolib.ui.console
import os // import os
@[params] @[params]
pub struct StatusUpdateArgs { pub struct StatusUpdateArgs {

View File

@@ -182,7 +182,7 @@ pub fn (mut gs GitStructure) check_repos_exist(args ReposActionsArgs) !string {
account: args.account account: args.account
provider: args.provider provider: args.provider
)! )!
if repos.len > 0 { if repos.len > 0 {
// Repository exists - print path and return success // Repository exists - print path and return success
if !args.script { if !args.script {

110
lib/hero/herocluster/example/example.vsh Normal file → Executable file
View File

@@ -1,67 +1,107 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run #!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import crypto.ed25519
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.hero.herocluster
import os
import rand
mut ctx := base.context()!
redis := ctx.redis()!
if os.args.len < 3 { if os.args.len < 3 {
eprintln('Usage: ./prog <node_id> <status>') eprintln('Usage: ./prog <node_id> <status>')
eprintln(' status: active|buffer') eprintln(' status: active|buffer')
return return
} }
node_id := os.args[1] node_id := os.args[1]
status_str := os.args[2] status_str := os.args[2]
status := match status_str { status := match status_str {
'active' { NodeStatus.active } 'active' {
'buffer' { NodeStatus.buffer } herocluster.NodeStatus.active
else { }
eprintln('Invalid status. Use: active|buffer') 'buffer' {
return herocluster.NodeStatus.buffer
} }
else {
eprintln('Invalid status. Use: active|buffer')
return
}
} }
// --- Generate ephemeral keys for demo --- // --- Generate ephemeral keys for demo ---
// In real use: load from PEM files // In real use: load from PEM files
priv, pub := ed25519.generate_key(rand.reader) or { panic(err) } pub_, priv := ed25519.generate_key()!
mut pubkeys := map[string]ed25519.PublicKey{} mut pubkeys := map[string]ed25519.PublicKey{}
pubkeys[node_id] = pub pubkeys[node_id] = pub_
// TODO: load all pubkeys from config file so every node knows others // TODO: load all pubkeys from config file so every node knows others
// Initialize all nodes (in real scenario, load from config) // Initialize all nodes (in real scenario, load from config)
mut all_nodes := map[string]Node{} mut all_nodes := map[string]herocluster.Node{}
all_nodes['node1'] = Node{id: 'node1', status: .active} all_nodes['node1'] = herocluster.Node{
all_nodes['node2'] = Node{id: 'node2', status: .active} id: 'node1'
all_nodes['node3'] = Node{id: 'node3', status: .active} status: .active
all_nodes['node4'] = Node{id: 'node4', status: .buffer} }
all_nodes['node2'] = herocluster.Node{
id: 'node2'
status: .active
}
all_nodes['node3'] = herocluster.Node{
id: 'node3'
status: .active
}
all_nodes['node4'] = herocluster.Node{
id: 'node4'
status: .buffer
}
// Set current node status // Set current node status
all_nodes[node_id].status = status all_nodes[node_id].status = status
servers := ['127.0.0.1:6379', '127.0.0.1:6380', '127.0.0.1:6381', '127.0.0.1:6382'] servers := ['127.0.0.1:6379', '127.0.0.1:6380', '127.0.0.1:6381', '127.0.0.1:6382']
mut conns := []redis.Connection{} mut conns := []&redisclient.Redis{}
for s in servers { for s in servers {
mut c := redis.connect(redis.Options{ server: s }) or { redis_url := redisclient.get_redis_url(s) or {
panic('could not connect to redis $s: $err') eprintln('Warning: could not parse redis url ${s}: ${err}')
} continue
conns << c }
mut c := redisclient.core_get(redis_url) or {
eprintln('Warning: could not connect to redis ${s}: ${err}')
continue
}
conns << c
println('Connected to Redis server: ${s}')
} }
mut election := Election{ if conns.len == 0 {
clients: conns eprintln('Error: No Redis servers available. Please start at least one Redis server.')
pubkeys: pubkeys return
self: Node{
id: node_id
term: 0
leader: false
status: status
}
keys: Keys{ priv: priv, pub: pub }
all_nodes: all_nodes
buffer_nodes: ['node4'] // Initially node4 is buffer
} }
println('[$node_id] started as $status_str, connected to 4 redis servers.') mut election := &herocluster.Election{
clients: conns
pubkeys: pubkeys
self: herocluster.Node{
id: node_id
term: 0
leader: false
status: status
}
keys: herocluster.Keys{
priv: priv
pub: pub_
}
all_nodes: all_nodes
buffer_nodes: ['node4'] // Initially node4 is buffer
}
println('[${node_id}] started as ${status_str}, connected to 4 redis servers.')
// Start health monitoring in background // Start health monitoring in background
go election.health_monitor_loop() spawn election.health_monitor_loop()
// Start main heartbeat loop // Start main heartbeat loop
election.heartbeat_loop() election.heartbeat_loop()
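For reference, the demo's assumed invocation (node id and role come from os.args, matching the usage message at the top of the script):
// ./example.vsh node1 active   # an active voter
// ./example.vsh node4 buffer   # a standby that only monitors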

View File

@@ -1,10 +1,8 @@
module herocluster module herocluster
import db.redis import freeflowuniverse.herolib.core.redisclient
import crypto.ed25519 import crypto.ed25519
import crypto.rand
import encoding.hex import encoding.hex
import os
import time import time
const election_timeout_ms = 3000 const election_timeout_ms = 3000
@@ -14,295 +12,318 @@ const health_check_interval_ms = 30000 // 30 seconds
// --- Crypto helpers --- // --- Crypto helpers ---
struct Keys { pub struct Keys {
priv ed25519.PrivateKey pub mut:
pub ed25519.PublicKey priv ed25519.PrivateKey
pub ed25519.PublicKey
} }
// sign a message // sign a message
fn (k Keys) sign(msg string) string { fn (k Keys) sign(msg string) string {
sig := ed25519.sign(k.priv, msg.bytes()) sig := ed25519.sign(k.priv, msg.bytes()) or { panic('Failed to sign message: ${err}') }
return hex.encode(sig) return hex.encode(sig)
} }
// verify signature // verify signature
fn verify(pub ed25519.PublicKey, msg string, sig_hex string) bool { fn verify(pubkey ed25519.PublicKey, msg string, sig_hex string) bool {
sig := hex.decode(sig_hex) or { return false } sig := hex.decode(sig_hex) or { return false }
return ed25519.verify(pub, msg.bytes(), sig) return ed25519.verify(pubkey, msg.bytes(), sig) or { false }
} }
// --- Node & Election --- // --- Node & Election ---
enum NodeStatus { pub enum NodeStatus {
active active
buffer buffer
unavailable unavailable
} }
struct Node { pub struct Node {
id string pub:
mut: id string
term int pub mut:
leader bool term int
voted_for string leader bool
status NodeStatus voted_for string
last_seen i64 // timestamp status NodeStatus
last_seen i64 // timestamp
} }
struct HealthReport { struct HealthReport {
reporter_id string reporter_id string
target_id string target_id string
status string // "available" or "unavailable" status string // "available" or "unavailable"
timestamp i64 timestamp i64
signature string signature string
} }
struct Election { pub struct Election {
mut: pub mut:
clients []redis.Connection clients []&redisclient.Redis
pubkeys map[string]ed25519.PublicKey pubkeys map[string]ed25519.PublicKey
self Node self Node
keys Keys keys Keys
all_nodes map[string]Node all_nodes map[string]Node
buffer_nodes []string buffer_nodes []string
} }
// Redis keys // Redis keys
fn vote_key(term int, node_id string) string { return 'vote:${term}:${node_id}' } fn vote_key(term int, node_id string) string {
fn health_key(reporter_id string, target_id string) string { return 'health:${reporter_id}:${target_id}' } return 'vote:${term}:${node_id}'
fn node_status_key(node_id string) string { return 'node_status:${node_id}' } }
fn health_key(reporter_id string, target_id string) string {
return 'health:${reporter_id}:${target_id}'
}
fn node_status_key(node_id string) string {
return 'node_status:${node_id}'
}
// Write vote (signed) to ALL redis servers // Write vote (signed) to ALL redis servers
fn (mut e Election) vote_for(candidate string) { fn (mut e Election) vote_for(candidate string) {
msg := '${e.self.term}:${candidate}' msg := '${e.self.term}:${candidate}'
sig_hex := e.keys.sign(msg) sig_hex := e.keys.sign(msg)
for mut c in e.clients { for mut c in e.clients {
k := vote_key(e.self.term, e.self.id) k := vote_key(e.self.term, e.self.id)
c.hset(k, 'candidate', candidate) or {} c.hset(k, 'candidate', candidate) or {}
c.hset(k, 'sig', sig_hex) or {} c.hset(k, 'sig', sig_hex) or {}
c.expire(k, 5) or {} c.expire(k, 5) or {}
} }
println('[${e.self.id}] voted for $candidate (term=${e.self.term})') println('[${e.self.id}] voted for ${candidate} (term=${e.self.term})')
} }
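What vote_for leaves behind on each Redis server (key and field names from this function; term 3, node1 voting for itself shown as an example):
// HSET vote:3:node1 candidate node1
// HSET vote:3:node1 sig <hex ed25519 signature of "3:node1">
// EXPIRE vote:3:node1 5   -> stale votes vanish after 5 seconds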
// Report node health status // Report node health status
fn (mut e Election) report_node_health(target_id string, status string) { fn (mut e Election) report_node_health(target_id string, status string) {
now := time.now().unix() now := time.now().unix()
msg := '${target_id}:${status}:${now}' msg := '${target_id}:${status}:${now}'
sig_hex := e.keys.sign(msg) sig_hex := e.keys.sign(msg)
report := HealthReport{ _ := HealthReport{
reporter_id: e.self.id reporter_id: e.self.id
target_id: target_id target_id: target_id
status: status status: status
timestamp: now timestamp: now
signature: sig_hex signature: sig_hex
} }
for mut c in e.clients { for mut c in e.clients {
k := health_key(e.self.id, target_id) k := health_key(e.self.id, target_id)
c.hset(k, 'status', status) or {} c.hset(k, 'status', status) or {}
c.hset(k, 'timestamp', now.str()) or {} c.hset(k, 'timestamp', now.str()) or {}
c.hset(k, 'signature', sig_hex) or {} c.hset(k, 'signature', sig_hex) or {}
c.expire(k, 86400) or {} // expire after 24 hours c.expire(k, 86400) or {} // expire after 24 hours
} }
println('[${e.self.id}] reported $target_id as $status') println('[${e.self.id}] reported ${target_id} as ${status}')
} }
// Collect health reports and check for consensus on unavailable nodes // Collect health reports and check for consensus on unavailable nodes
fn (mut e Election) check_node_availability() { fn (mut e Election) check_node_availability() {
now := time.now().unix() now := time.now().unix()
mut unavailable_reports := map[string]map[string]i64{} // target_id -> reporter_id -> timestamp mut unavailable_reports := map[string]map[string]i64{} // target_id -> reporter_id -> timestamp
for mut c in e.clients { for mut c in e.clients {
keys := c.keys('health:*') or { continue } keys := c.keys('health:*') or { continue }
for k in keys { for k in keys {
parts := k.split(':') parts := k.split(':')
if parts.len != 3 { continue } if parts.len != 3 {
reporter_id := parts[1] continue
target_id := parts[2] }
reporter_id := parts[1]
vals := c.hgetall(k) or { continue } target_id := parts[2]
status := vals['status']
timestamp_str := vals['timestamp'] vals := c.hgetall(k) or { continue }
sig_hex := vals['signature'] status := vals['status']
timestamp_str := vals['timestamp']
if reporter_id !in e.pubkeys { continue } sig_hex := vals['signature']
timestamp := timestamp_str.i64() if reporter_id !in e.pubkeys {
msg := '${target_id}:${status}:${timestamp}' continue
}
if verify(e.pubkeys[reporter_id], msg, sig_hex) {
if status == 'unavailable' && (now - timestamp) >= (node_unavailable_threshold_ms / 1000) { timestamp := timestamp_str.i64()
if target_id !in unavailable_reports { msg := '${target_id}:${status}:${timestamp}'
unavailable_reports[target_id] = map[string]i64{}
} if verify(e.pubkeys[reporter_id], msg, sig_hex) {
unavailable_reports[target_id][reporter_id] = timestamp if status == 'unavailable'
} && (now - timestamp) >= (node_unavailable_threshold_ms / 1000) {
} if target_id !in unavailable_reports {
} unavailable_reports[target_id] = map[string]i64{}
} }
unavailable_reports[target_id][reporter_id] = timestamp
// Check for consensus (2 out of 3 active nodes agree) }
for target_id, reports in unavailable_reports { }
if reports.len >= 2 && target_id in e.all_nodes { }
if e.all_nodes[target_id].status == .active { }
println('[${e.self.id}] Consensus reached: $target_id is unavailable for >1 day')
e.promote_buffer_node(target_id) // Check for consensus (2 out of 3 active nodes agree)
} for target_id, reports in unavailable_reports {
} if reports.len >= 2 && target_id in e.all_nodes {
} if e.all_nodes[target_id].status == .active {
println('[${e.self.id}] Consensus reached: ${target_id} is unavailable for >1 day')
e.promote_buffer_node(target_id)
}
}
}
} }
// Promote a buffer node to active status // Promote a buffer node to active status
fn (mut e Election) promote_buffer_node(failed_node_id string) { fn (mut e Election) promote_buffer_node(failed_node_id string) {
if e.buffer_nodes.len == 0 { if e.buffer_nodes.len == 0 {
println('[${e.self.id}] No buffer nodes available for promotion') println('[${e.self.id}] No buffer nodes available for promotion')
return return
} }
// Select first available buffer node // Select first available buffer node
buffer_id := e.buffer_nodes[0] buffer_id := e.buffer_nodes[0]
// Update node statuses // Update node statuses
if failed_node_id in e.all_nodes { if failed_node_id in e.all_nodes {
e.all_nodes[failed_node_id].status = .unavailable e.all_nodes[failed_node_id].status = .unavailable
} }
if buffer_id in e.all_nodes { if buffer_id in e.all_nodes {
e.all_nodes[buffer_id].status = .active e.all_nodes[buffer_id].status = .active
} }
// Remove from buffer list // Remove from buffer list
e.buffer_nodes = e.buffer_nodes.filter(it != buffer_id) e.buffer_nodes = e.buffer_nodes.filter(it != buffer_id)
// Announce the promotion // Announce the promotion
for mut c in e.clients { for mut c in e.clients {
k := node_status_key(buffer_id) k := node_status_key(buffer_id)
c.hset(k, 'status', 'active') or {} c.hset(k, 'status', 'active') or {}
c.hset(k, 'promoted_at', time.now().unix().str()) or {} c.hset(k, 'promoted_at', time.now().unix().str()) or {}
c.hset(k, 'replaced_node', failed_node_id) or {} c.hset(k, 'replaced_node', failed_node_id) or {}
// Mark failed node as unavailable // Mark failed node as unavailable
failed_k := node_status_key(failed_node_id) failed_k := node_status_key(failed_node_id)
c.hset(failed_k, 'status', 'unavailable') or {} c.hset(failed_k, 'status', 'unavailable') or {}
c.hset(failed_k, 'failed_at', time.now().unix().str()) or {} c.hset(failed_k, 'failed_at', time.now().unix().str()) or {}
} }
println('[${e.self.id}] Promoted buffer node $buffer_id to replace failed node $failed_node_id') println('[${e.self.id}] Promoted buffer node ${buffer_id} to replace failed node ${failed_node_id}')
} }
// Collect votes from ALL redis servers, verify signatures (only from active nodes) // Collect votes from ALL redis servers, verify signatures (only from active nodes)
fn (mut e Election) collect_votes(term int) map[string]int { fn (mut e Election) collect_votes(term int) map[string]int {
mut counts := map[string]int{} mut counts := map[string]int{}
mut seen := map[string]bool{} // avoid double-counting same vote from multiple servers mut seen := map[string]bool{} // avoid double-counting same vote from multiple servers
for mut c in e.clients { for mut c in e.clients {
keys := c.keys('vote:${term}:*') or { continue } keys := c.keys('vote:${term}:*') or { continue }
for k in keys { for k in keys {
if seen[k] { continue } if seen[k] {
seen[k] = true continue
vals := c.hgetall(k) or { continue } }
candidate := vals['candidate'] seen[k] = true
sig_hex := vals['sig'] vals := c.hgetall(k) or { continue }
voter_id := k.split(':')[2] candidate := vals['candidate']
sig_hex := vals['sig']
// Only count votes from active nodes voter_id := k.split(':')[2]
if voter_id !in e.pubkeys || voter_id !in e.all_nodes { continue }
if e.all_nodes[voter_id].status != .active { continue } // Only count votes from active nodes
if voter_id !in e.pubkeys || voter_id !in e.all_nodes {
msg := '${term}:${candidate}' continue
if verify(e.pubkeys[voter_id], msg, sig_hex) { }
counts[candidate]++ if e.all_nodes[voter_id].status != .active {
} else { continue
println('[${e.self.id}] invalid signature from $voter_id') }
}
} msg := '${term}:${candidate}'
} if verify(e.pubkeys[voter_id], msg, sig_hex) {
return counts counts[candidate]++
} else {
println('[${e.self.id}] invalid signature from ${voter_id}')
}
}
}
return counts
} }
// Run election (only active nodes participate) // Run election (only active nodes participate)
fn (mut e Election) run_election() { fn (mut e Election) run_election() {
if e.self.status != .active { if e.self.status != .active {
return // Buffer nodes don't participate in elections return
} }
e.self.term++
e.vote_for(e.self.id)
// wait a bit for other nodes to also vote e.self.term++
time.sleep(500 * time.millisecond) e.vote_for(e.self.id)
votes := e.collect_votes(e.self.term) // wait a bit for other nodes to also vote
active_node_count := e.all_nodes.values().filter(it.status == .active).len time.sleep(500 * time.millisecond)
majority_threshold := (active_node_count / 2) + 1
votes := e.collect_votes(e.self.term)
for cand, cnt in votes { active_node_count := e.all_nodes.values().filter(it.status == .active).len
if cnt >= majority_threshold { majority_threshold := (active_node_count / 2) + 1
if cand == e.self.id {
println('[${e.self.id}] I AM LEADER (term=${e.self.term}, votes=$cnt, active_nodes=$active_node_count)') for cand, cnt in votes {
e.self.leader = true if cnt >= majority_threshold {
} else { if cand == e.self.id {
println('[${e.self.id}] sees LEADER = $cand (term=${e.self.term}, votes=$cnt, active_nodes=$active_node_count)') println('[${e.self.id}] I AM LEADER (term=${e.self.term}, votes=${cnt}, active_nodes=${active_node_count})')
e.self.leader = false e.self.leader = true
} } else {
} println('[${e.self.id}] sees LEADER = ${cand} (term=${e.self.term}, votes=${cnt}, active_nodes=${active_node_count})')
} e.self.leader = false
}
}
}
} }
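Worked example of the majority math above: with 3 active nodes, majority_threshold = (3 / 2) + 1 = 2 (integer division), so a candidate needs 2 verified votes; with 4 active nodes the threshold rises to 3. Buffer nodes never count toward either side.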
// Health monitoring loop (runs in background) // Health monitoring loop (runs in background)
fn (mut e Election) health_monitor_loop() { pub fn (mut e Election) health_monitor_loop() {
for { for {
if e.self.status == .active { if e.self.status == .active {
// Check health of other nodes // Check health of other nodes
for node_id, node in e.all_nodes { for node_id, _ in e.all_nodes {
if node_id == e.self.id { continue } if node_id == e.self.id {
continue
// Simple health check: try to read a heartbeat key }
mut is_available := false
for mut c in e.clients { // Simple health check: try to read a heartbeat key
heartbeat_key := 'heartbeat:${node_id}' mut is_available := false
val := c.get(heartbeat_key) or { continue } for mut c in e.clients {
last_heartbeat := val.i64() heartbeat_key := 'heartbeat:${node_id}'
if (time.now().unix() - last_heartbeat) < 60 { // 60 seconds threshold val := c.get(heartbeat_key) or { continue }
is_available = true last_heartbeat := val.i64()
break if (time.now().unix() - last_heartbeat) < 60 { // 60 seconds threshold
} is_available = true
} break
}
status := if is_available { 'available' } else { 'unavailable' } }
e.report_node_health(node_id, status)
} status := if is_available { 'available' } else { 'unavailable' }
e.report_node_health(node_id, status)
// Check for consensus on failed nodes }
e.check_node_availability()
} // Check for consensus on failed nodes
e.check_node_availability()
time.sleep(health_check_interval_ms * time.millisecond) }
}
time.sleep(health_check_interval_ms * time.millisecond)
}
} }
// Heartbeat loop // Heartbeat loop
fn (mut e Election) heartbeat_loop() { pub fn (mut e Election) heartbeat_loop() {
for { for {
// Update own heartbeat // Update own heartbeat
now := time.now().unix() now := time.now().unix()
for mut c in e.clients { for mut c in e.clients {
heartbeat_key := 'heartbeat:${e.self.id}' heartbeat_key := 'heartbeat:${e.self.id}'
c.set(heartbeat_key, now.str()) or {} c.set(heartbeat_key, now.str()) or {}
c.expire(heartbeat_key, 120) or {} // expire after 2 minutes c.expire(heartbeat_key, 120) or {} // expire after 2 minutes
} }
if e.self.status == .active { if e.self.status == .active {
if e.self.leader { if e.self.leader {
println('[${e.self.id}] Heartbeat term=${e.self.term} (LEADER)') println('[${e.self.id}] Heartbeat term=${e.self.term} (LEADER)')
} else { } else {
e.run_election() e.run_election()
} }
} else if e.self.status == .buffer { } else if e.self.status == .buffer {
println('[${e.self.id}] Buffer node monitoring cluster') println('[${e.self.id}] Buffer node monitoring cluster')
} }
time.sleep(heartbeat_interval_ms * time.millisecond) time.sleep(heartbeat_interval_ms * time.millisecond)
} }
} }
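Timing note, derived from the constants and loops above: each node rewrites heartbeat:<id> every heartbeat_interval_ms and the key expires after 120 s; the health monitor treats a heartbeat older than 60 s as unavailable, and only a consensus of reports older than node_unavailable_threshold_ms triggers a buffer promotion, so a single missed beat never removes a node.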

View File

@@ -6,24 +6,24 @@ import time
// Calendar represents a collection of events // Calendar represents a collection of events
@[heap] @[heap]
pub struct Calendar { pub struct Calendar {
Base Base
pub mut: pub mut:
group_id u32 // Associated group for permissions group_id u32 // Associated group for permissions
events []u32 // IDs of calendar events (changed to u32 to match CalendarEvent) events []u32 // IDs of calendar events (changed to u32 to match CalendarEvent)
color string // Hex color code color string // Hex color code
timezone string timezone string
is_public bool is_public bool
} }
@[params] @[params]
pub struct CalendarArgs { pub struct CalendarArgs {
BaseArgs BaseArgs
pub mut: pub mut:
group_id u32 group_id u32
events []u32 events []u32
color string color string
timezone string timezone string
is_public bool is_public bool
} }
pub fn calendar_new(args CalendarArgs) !Calendar { pub fn calendar_new(args CalendarArgs) !Calendar {
@@ -47,18 +47,18 @@ pub fn calendar_new(args CalendarArgs) !Calendar {
} }
pub fn (mut c Calendar) add_event(event_id u32) { // Changed event_id to u32 pub fn (mut c Calendar) add_event(event_id u32) { // Changed event_id to u32
if event_id !in c.events { if event_id !in c.events {
c.events << event_id c.events << event_id
c.updated_at = ourtime.now().unix() // Use Base's updated_at c.updated_at = ourtime.now().unix() // Use Base's updated_at
} }
} }
pub fn (mut c Calendar) dump() []u8 { pub fn (mut c Calendar) dump() []u8 {
//TODO: implement based on lib/data/encoder/readme.md // TODO: implement based on lib/data/encoder/readme.md
return []u8{} return []u8{}
} }
pub fn calendar_load(data []u8) Calendar { pub fn calendar_load(data []u8) Calendar {
//TODO: implement based on lib/data/encoder/readme.md // TODO: implement based on lib/data/encoder/readme.md
return Calendar{} return Calendar{}
} }

View File

@@ -9,256 +9,253 @@ import freeflowuniverse.herolib.core.redisclient
// CalendarEvent represents a single event in a calendar // CalendarEvent represents a single event in a calendar
@[heap] @[heap]
pub struct CalendarEvent { pub struct CalendarEvent {
Base Base
pub mut: pub mut:
title string title string
start_time i64 // Unix timestamp start_time i64 // Unix timestamp
end_time i64 // Unix timestamp end_time i64 // Unix timestamp
location string location string
attendees []u32 // IDs of user groups attendees []u32 // IDs of user groups
fs_items []u32 // IDs of linked files or dirs fs_items []u32 // IDs of linked files or dirs
calendar_id u32 // Associated calendar calendar_id u32 // Associated calendar
status EventStatus status EventStatus
is_all_day bool is_all_day bool
is_recurring bool is_recurring bool
recurrence []RecurrenceRule //normally empty recurrence []RecurrenceRule // normally empty
reminder_mins []int // Minutes before event for reminders reminder_mins []int // Minutes before event for reminders
color string // Hex color code color string // Hex color code
timezone string timezone string
} }
pub struct Attendee { pub struct Attendee {
pub mut: pub mut:
user_id u32 user_id u32
status AttendanceStatus status AttendanceStatus
role AttendeeRole role AttendeeRole
} }
pub enum AttendanceStatus { pub enum AttendanceStatus {
no_response no_response
accepted accepted
declined declined
tentative tentative
} }
pub enum AttendeeRole { pub enum AttendeeRole {
required required
optional optional
organizer organizer
} }
pub enum EventStatus { pub enum EventStatus {
draft draft
published published
cancelled cancelled
completed completed
} }
pub struct RecurrenceRule { pub struct RecurrenceRule {
pub mut: pub mut:
frequency RecurrenceFreq frequency RecurrenceFreq
interval int // Every N frequencies interval int // Every N frequencies
until i64 // End date (Unix timestamp) until i64 // End date (Unix timestamp)
count int // Number of occurrences count int // Number of occurrences
by_weekday []int // Days of week (0=Sunday) by_weekday []int // Days of week (0=Sunday)
by_monthday []int // Days of month by_monthday []int // Days of month
} }
pub enum RecurrenceFreq { pub enum RecurrenceFreq {
none none
daily daily
weekly weekly
monthly monthly
yearly yearly
} }
@[params] @[params]
pub struct CalendarEventArgs { pub struct CalendarEventArgs {
BaseArgs BaseArgs
pub mut: pub mut:
title string title string
start_time string // use ourtime module to go from string to epoch start_time string // use ourtime module to go from string to epoch
end_time string // use ourtime module to go from string to epoch end_time string // use ourtime module to go from string to epoch
location string location string
attendees []u32 // IDs of user groups attendees []u32 // IDs of user groups
fs_items []u32 // IDs of linked files or dirs fs_items []u32 // IDs of linked files or dirs
calendar_id u32 // Associated calendar calendar_id u32 // Associated calendar
status EventStatus status EventStatus
is_all_day bool is_all_day bool
is_recurring bool is_recurring bool
recurrence []RecurrenceRule recurrence []RecurrenceRule
reminder_mins []int // Minutes before event for reminders reminder_mins []int // Minutes before event for reminders
color string // Hex color code color string // Hex color code
timezone string timezone string
} }
pub fn calendar_event_new(args CalendarEventArgs) !CalendarEvent { pub fn calendar_event_new(args CalendarEventArgs) !CalendarEvent {
// Convert tags to u32 ID // Convert tags to u32 ID
tags_id := tags2id(args.tags)! tags_id := tags2id(args.tags)!
return CalendarEvent{ return CalendarEvent{
// Base fields // Base fields
id: args.id or { 0 } id: args.id or { 0 }
name: args.name name: args.name
description: args.description description: args.description
created_at: ourtime.now().unix() created_at: ourtime.now().unix()
updated_at: ourtime.now().unix() updated_at: ourtime.now().unix()
securitypolicy: args.securitypolicy or { 0 } securitypolicy: args.securitypolicy or { 0 }
tags: tags_id tags: tags_id
comments: comments2ids(args.comments)! comments: comments2ids(args.comments)!
// CalendarEvent specific fields // CalendarEvent specific fields
title: args.title title: args.title
start_time: ourtime.new(args.start_time)!.unix() start_time: ourtime.new(args.start_time)!.unix()
end_time: ourtime.new(args.end_time)!.unix() end_time: ourtime.new(args.end_time)!.unix()
location: args.location location: args.location
attendees: args.attendees attendees: args.attendees
fs_items: args.fs_items fs_items: args.fs_items
calendar_id: args.calendar_id calendar_id: args.calendar_id
status: args.status status: args.status
is_all_day: args.is_all_day is_all_day: args.is_all_day
is_recurring: args.is_recurring is_recurring: args.is_recurring
recurrence: args.recurrence recurrence: args.recurrence
reminder_mins: args.reminder_mins reminder_mins: args.reminder_mins
color: args.color color: args.color
timezone: args.timezone timezone: args.timezone
} }
} }
pub fn (mut e CalendarEvent) dump() ![]u8 { pub fn (mut e CalendarEvent) dump() ![]u8 {
// Create a new encoder // Create a new encoder
mut enc := encoder.new() mut enc := encoder.new()
// Add version byte // Add version byte
enc.add_u8(1) enc.add_u8(1)
// Encode Base fields // Encode Base fields
enc.add_u32(e.id) enc.add_u32(e.id)
enc.add_string(e.name) enc.add_string(e.name)
enc.add_string(e.description) enc.add_string(e.description)
enc.add_i64(e.created_at) enc.add_i64(e.created_at)
enc.add_i64(e.updated_at) enc.add_i64(e.updated_at)
enc.add_u32(e.securitypolicy) enc.add_u32(e.securitypolicy)
enc.add_u32(e.tags) enc.add_u32(e.tags)
enc.add_list_u32(e.comments) enc.add_list_u32(e.comments)
// Encode CalendarEvent specific fields // Encode CalendarEvent specific fields
enc.add_string(e.title) enc.add_string(e.title)
enc.add_string(e.description) enc.add_string(e.description)
enc.add_i64(e.start_time) enc.add_i64(e.start_time)
enc.add_i64(e.end_time) enc.add_i64(e.end_time)
enc.add_string(e.location) enc.add_string(e.location)
enc.add_list_u32(e.attendees) enc.add_list_u32(e.attendees)
enc.add_list_u32(e.fs_items) enc.add_list_u32(e.fs_items)
enc.add_u32(e.calendar_id) enc.add_u32(e.calendar_id)
enc.add_u8(u8(e.status)) enc.add_u8(u8(e.status))
enc.add_bool(e.is_all_day) enc.add_bool(e.is_all_day)
enc.add_bool(e.is_recurring) enc.add_bool(e.is_recurring)
// Encode recurrence array // Encode recurrence array
enc.add_u16(u16(e.recurrence.len)) enc.add_u16(u16(e.recurrence.len))
for rule in e.recurrence { for rule in e.recurrence {
enc.add_u8(u8(rule.frequency)) enc.add_u8(u8(rule.frequency))
enc.add_int(rule.interval) enc.add_int(rule.interval)
enc.add_i64(rule.until) enc.add_i64(rule.until)
enc.add_int(rule.count) enc.add_int(rule.count)
enc.add_list_int(rule.by_weekday) enc.add_list_int(rule.by_weekday)
enc.add_list_int(rule.by_monthday) enc.add_list_int(rule.by_monthday)
} }
enc.add_list_int(e.reminder_mins) enc.add_list_int(e.reminder_mins)
enc.add_string(e.color) enc.add_string(e.color)
enc.add_string(e.timezone) enc.add_string(e.timezone)
return enc.data return enc.data
} }
pub fn (ce CalendarEvent) load(data []u8) !CalendarEvent { pub fn (ce CalendarEvent) load(data []u8) !CalendarEvent {
// Create a new decoder // Create a new decoder
mut dec := encoder.decoder_new(data) mut dec := encoder.decoder_new(data)
// Read version byte // Read version byte
version := dec.get_u8()! version := dec.get_u8()!
if version != 1 { if version != 1 {
return error('wrong version in calendar event load') return error('wrong version in calendar event load')
} }
// Decode Base fields // Decode Base fields
id := dec.get_u32()! id := dec.get_u32()!
name := dec.get_string()! name := dec.get_string()!
description := dec.get_string()! description := dec.get_string()!
created_at := dec.get_i64()! created_at := dec.get_i64()!
updated_at := dec.get_i64()! updated_at := dec.get_i64()!
securitypolicy := dec.get_u32()! securitypolicy := dec.get_u32()!
tags := dec.get_u32()! tags := dec.get_u32()!
comments := dec.get_list_u32()! comments := dec.get_list_u32()!
// Decode CalendarEvent specific fields // Decode CalendarEvent specific fields
title := dec.get_string()! title := dec.get_string()!
description2 := dec.get_string()! // Second description field description2 := dec.get_string()! // Second description field
start_time := dec.get_i64()! start_time := dec.get_i64()!
end_time := dec.get_i64()! end_time := dec.get_i64()!
location := dec.get_string()! location := dec.get_string()!
attendees := dec.get_list_u32()! attendees := dec.get_list_u32()!
fs_items := dec.get_list_u32()! fs_items := dec.get_list_u32()!
calendar_id := dec.get_u32()! calendar_id := dec.get_u32()!
status := unsafe { EventStatus(dec.get_u8()!) } status := unsafe { EventStatus(dec.get_u8()!) }
is_all_day := dec.get_bool()! is_all_day := dec.get_bool()!
is_recurring := dec.get_bool()! is_recurring := dec.get_bool()!
// Decode recurrence array // Decode recurrence array
recurrence_len := dec.get_u16()! recurrence_len := dec.get_u16()!
mut recurrence := []RecurrenceRule{} mut recurrence := []RecurrenceRule{}
for _ in 0..recurrence_len { for _ in 0 .. recurrence_len {
frequency := unsafe{RecurrenceFreq(dec.get_u8()!)} frequency := unsafe { RecurrenceFreq(dec.get_u8()!) }
interval := dec.get_int()! interval := dec.get_int()!
until := dec.get_i64()! until := dec.get_i64()!
count := dec.get_int()! count := dec.get_int()!
by_weekday := dec.get_list_int()! by_weekday := dec.get_list_int()!
by_monthday := dec.get_list_int()! by_monthday := dec.get_list_int()!
recurrence << RecurrenceRule{ recurrence << RecurrenceRule{
frequency: frequency frequency: frequency
interval: interval interval: interval
until: until until: until
count: count count: count
by_weekday: by_weekday by_weekday: by_weekday
by_monthday: by_monthday by_monthday: by_monthday
} }
} }
reminder_mins := dec.get_list_int()! reminder_mins := dec.get_list_int()!
color := dec.get_string()! color := dec.get_string()!
timezone := dec.get_string()! timezone := dec.get_string()!
return CalendarEvent{ return CalendarEvent{
// Base fields // Base fields
id: id id: id
name: name name: name
description: description description: description
created_at: created_at created_at: created_at
updated_at: updated_at updated_at: updated_at
securitypolicy: securitypolicy securitypolicy: securitypolicy
tags: tags tags: tags
comments: comments comments: comments
// CalendarEvent specific fields // CalendarEvent specific fields
title: title title: title
start_time: start_time start_time: start_time
end_time: end_time end_time: end_time
location: location location: location
attendees: attendees attendees: attendees
fs_items: fs_items fs_items: fs_items
calendar_id: calendar_id calendar_id: calendar_id
status: status status: status
is_all_day: is_all_day is_all_day: is_all_day
is_recurring: is_recurring is_recurring: is_recurring
recurrence: recurrence recurrence: recurrence
reminder_mins: reminder_mins reminder_mins: reminder_mins
color: color color: color
timezone: timezone timezone: timezone
} }
} }
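A hedged roundtrip sketch for the versioned format above (assumes ourtime.new() accepts 'YYYY-MM-DD HH:MM' strings, and a local Redis for the tags/comments helpers that calendar_event_new calls):
mut ev := calendar_event_new(
	name:       'standup'
	title:      'Standup'
	start_time: '2025-01-01 09:00'
	end_time:   '2025-01-01 09:15'
)!
data := ev.dump()!
ev2 := CalendarEvent{}.load(data)!
assert ev2.title == ev.title
assert ev2.start_time == ev.start_time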

View File

@@ -8,57 +8,57 @@ import json
@[heap] @[heap]
pub struct ChatGroup { pub struct ChatGroup {
pub mut: pub mut:
id string // blake192 hash id string // blake192 hash
name string name string
description string description string
group_id string // Associated group for permissions group_id string // Associated group for permissions
chat_type ChatType chat_type ChatType
messages []string // IDs of chat messages messages []string // IDs of chat messages
created_at i64 created_at i64
updated_at i64 updated_at i64
last_activity i64 last_activity i64
is_archived bool is_archived bool
tags []string tags []string
} }
pub enum ChatType { pub enum ChatType {
public_channel public_channel
private_channel private_channel
direct_message direct_message
group_message group_message
} }
pub fn (mut c ChatGroup) calculate_id() { pub fn (mut c ChatGroup) calculate_id() {
content := json.encode(ChatGroupContent{ content := json.encode(ChatGroupContent{
name: c.name name: c.name
description: c.description description: c.description
group_id: c.group_id group_id: c.group_id
chat_type: c.chat_type chat_type: c.chat_type
is_archived: c.is_archived is_archived: c.is_archived
tags: c.tags tags: c.tags
}) })
hash := blake3.sum256(content.bytes()) hash := blake3.sum256(content.bytes())
c.id = hash.hex()[..48] c.id = hash.hex()[..48]
} }
struct ChatGroupContent { struct ChatGroupContent {
name string name string
description string description string
group_id string group_id string
chat_type ChatType chat_type ChatType
is_archived bool is_archived bool
tags []string tags []string
} }
pub fn new_chat_group(name string, group_id string, chat_type ChatType) ChatGroup { pub fn new_chat_group(name string, group_id string, chat_type ChatType) ChatGroup {
mut chat_group := ChatGroup{ mut chat_group := ChatGroup{
name: name name: name
group_id: group_id group_id: group_id
chat_type: chat_type chat_type: chat_type
created_at: time.now().unix() created_at: time.now().unix()
updated_at: time.now().unix() updated_at: time.now().unix()
last_activity: time.now().unix() last_activity: time.now().unix()
} }
chat_group.calculate_id() chat_group.calculate_id()
return chat_group return chat_group
} }

View File

@@ -8,97 +8,97 @@ import json
@[heap] @[heap]
pub struct ChatMessage { pub struct ChatMessage {
pub mut: pub mut:
id string // blake192 hash id string // blake192 hash
content string content string
chat_group_id string // Associated chat group chat_group_id string // Associated chat group
sender_id string // User ID of sender sender_id string // User ID of sender
parent_messages []MessageLink // Referenced/replied messages parent_messages []MessageLink // Referenced/replied messages
fs_files []string // IDs of linked files fs_files []string // IDs of linked files
message_type MessageType message_type MessageType
status MessageStatus status MessageStatus
created_at i64 created_at i64
updated_at i64 updated_at i64
edited_at i64 edited_at i64
deleted_at i64 deleted_at i64
reactions []MessageReaction reactions []MessageReaction
mentions []string // User IDs mentioned in message mentions []string // User IDs mentioned in message
tags []string tags []string
} }
pub struct MessageLink { pub struct MessageLink {
pub mut: pub mut:
message_id string message_id string
link_type MessageLinkType link_type MessageLinkType
} }
pub enum MessageLinkType { pub enum MessageLinkType {
reply reply
reference reference
forward forward
quote quote
} }
pub enum MessageType { pub enum MessageType {
text text
image image
file file
voice voice
video video
system system
announcement announcement
} }
pub enum MessageStatus { pub enum MessageStatus {
sent sent
delivered delivered
read read
failed failed
deleted deleted
} }
pub struct MessageReaction { pub struct MessageReaction {
pub mut: pub mut:
user_id string user_id string
emoji string emoji string
timestamp i64 timestamp i64
} }
pub fn (mut m ChatMessage) calculate_id() { pub fn (mut m ChatMessage) calculate_id() {
content := json.encode(MessageContent{ content := json.encode(MessageContent{
content: m.content content: m.content
chat_group_id: m.chat_group_id chat_group_id: m.chat_group_id
sender_id: m.sender_id sender_id: m.sender_id
parent_messages: m.parent_messages parent_messages: m.parent_messages
fs_files: m.fs_files fs_files: m.fs_files
message_type: m.message_type message_type: m.message_type
mentions: m.mentions mentions: m.mentions
tags: m.tags tags: m.tags
}) })
hash := blake3.sum256(content.bytes()) hash := blake3.sum256(content.bytes())
m.id = hash.hex()[..48] m.id = hash.hex()[..48]
} }
struct MessageContent { struct MessageContent {
content string content string
chat_group_id string chat_group_id string
sender_id string sender_id string
parent_messages []MessageLink parent_messages []MessageLink
fs_files []string fs_files []string
message_type MessageType message_type MessageType
mentions []string mentions []string
tags []string tags []string
} }
pub fn new_chat_message(content string, chat_group_id string, sender_id string) ChatMessage { pub fn new_chat_message(content string, chat_group_id string, sender_id string) ChatMessage {
mut message := ChatMessage{ mut message := ChatMessage{
content: content content: content
chat_group_id: chat_group_id chat_group_id: chat_group_id
sender_id: sender_id sender_id: sender_id
message_type: .text message_type: .text
status: .sent status: .sent
created_at: time.now().unix() created_at: time.now().unix()
updated_at: time.now().unix() updated_at: time.now().unix()
} }
message.calculate_id() message.calculate_id()
return message return message
} }

View File

@@ -0,0 +1,117 @@
module heromodels
import freeflowuniverse.herolib.data.encoder
import crypto.md5
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.ourtime
@[heap]
pub struct Comment {
Base
pub mut:
// id u32
comment string
parent u32 //id of parent comment if any, 0 means none
updated_at i64
author u32 //links to user
}
pub fn (self Comment) type_name() string {
return 'comments'
}
pub fn (self Comment) load(data []u8) !Comment {
return comment_load(data)!
}
pub fn (self Comment) dump() ![]u8{
// Create a new encoder
mut e := encoder.new()
e.add_u8(1)
e.add_u32(self.id)
e.add_string(self.comment)
e.add_u32(self.parent)
e.add_i64(self.updated_at)
e.add_u32(self.author)
return e.data
}
pub fn comment_load(data []u8) !Comment{
// Create a new decoder
mut e := encoder.decoder_new(data)
version := e.get_u8()!
if version != 1 {
panic("wrong version in comment load")
}
mut comment := Comment{}
comment.id = e.get_u32()!
comment.comment = e.get_string()!
comment.parent = e.get_u32()!
comment.updated_at = e.get_i64()!
comment.author = e.get_u32()!
return comment
}
pub struct CommentArg {
pub mut:
comment string
parent u32
author u32
}
pub fn comment_multiset(args []CommentArg) ![]u32 {
return comments2ids(args)!
}
pub fn comments2ids(args []CommentArg) ![]u32 {
return args.map(comment2id(it.comment)!)
}
pub fn comment2id(comment string) !u32 {
comment_fixed := comment.to_lower_ascii().trim_space()
mut redis := redisclient.core_get()!
return if comment_fixed.len > 0{
hash := md5.hexhash(comment_fixed)
comment_found := redis.hget("db:comments", hash)!
if comment_found == ""{
id := u32(redis.incr("db:comments:id")!)
redis.hset("db:comments", hash, id.str())!
redis.hset("db:comments", id.str(), comment_fixed)!
id
}else{
comment_found.u32()
}
} else { 0 }
}
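Hedged sketch of the dedup above: comments are lowercased and trimmed before hashing, so equivalent strings share one id (requires the Redis instance the helper connects to):
id1 := comment2id('Great work!')!
id2 := comment2id('  great work!  ')!
assert id1 == id2
assert comment2id('')! == 0 // empty comments map to the sentinel id 0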
// create a new Comment in memory; nothing is read from or written to the DB
pub fn comment_new(args CommentArg) !Comment{
mut o := Comment {
comment: args.comment
parent: args.parent
updated_at: ourtime.now().unix()
author: args.author
}
return o
}
pub fn comment_set(args CommentArg) !u32{
mut o := comment_new(args)!
// Use openrpcserver set function which now returns the ID
return set[Comment](mut o)!
}
pub fn comment_delete(id u32) ! {
delete[Comment](id)!
}
pub fn comment_exist(id u32) !bool{
return exists[Comment](id)!
}
pub fn comment_get(id u32) !Comment{
return get[Comment](id)!
}

View File

@@ -4,12 +4,13 @@ import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.encoder import freeflowuniverse.herolib.data.encoder
pub fn set[T](mut obj_ T) !u32 { pub fn set[T](mut obj_ T) !u32 {
// mut obj_ := T{...obj}
mut redis := redisclient.core_get()! mut redis := redisclient.core_get()!
id := u32(redis.llen(db_name[T]())!) id := u32(redis.llen(db_name[T]()) or {0})
obj_.id = id obj_.id = id
// data := encoder.encode(obj_)! data := encoder.encode(obj_) or {
redis.hset(db_name[T](),id.str(),'data.bytestr()')! return err
}
redis.hset(db_name[T](),id.str(),data.bytestr())!
return id return id
} }
@@ -40,11 +41,11 @@ pub fn list[T]() ![]T {
return result return result
} }
//make it easy to get a base object // make it easy to get a base object
pub fn new_from_base[T](args BaseArgs) !Base { pub fn new_from_base[T](args BaseArgs) !Base {
return T { Base: new_base(args)! } return T { Base: new_base(args)! }
} }
fn db_name[T]() string { fn db_name[T]() string {
return "db:${T.name}" return "db:${T.name}"
} }
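With the encoding bug fixed above (the old code stored the literal string 'data.bytestr()' instead of the encoded bytes), the generic pair can round-trip any Base-embedding type. A hedged sketch using Comment:
mut c := comment_new(CommentArg{
	comment: 'hello'
	author:  1
})!
id := set[Comment](mut c)!
assert comment_exist(id)!
loaded := get[Comment](id)!
assert loaded.comment == 'hello'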

View File

@@ -0,0 +1,93 @@
module heromodels
import crypto.md5
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.ourtime
// Group represents a collection of users with roles and permissions
@[heap]
pub struct Base {
pub mut:
id u32
name string
description string
created_at i64
updated_at i64
securitypolicy u32
tags u32 //when we set/get we always do so as []string, but the list can then be sorted and md5ed; this gives the unique id of the tags
comments []u32
}
@[heap]
pub struct SecurityPolicy {
pub mut:
id u32
read []u32 //links to users & groups
write []u32 //links to users & groups
delete []u32 //links to users & groups
public bool
md5 string //sort the read, write and delete u32 lists plus the public flag, then md5 hash them; this maps any read/write/delete/public config to a unique hash
}
@[heap]
pub struct Tags {
pub mut:
id u32
names []string //unique per id
md5 string //of the sorted names (each lowercased and made ascii), to make it easy to find the unique id
}
/////////////////
@[params]
pub struct BaseArgs {
pub mut:
id ?u32
name string
description string
securitypolicy ?u32
tags []string
comments []CommentArg
}
//make it easy to get a base object
pub fn new_base(args BaseArgs) !Base {
mut redis := redisclient.core_get()!
commentids:=comment_multiset(args.comments)!
tags:=tags2id(args.tags)!
return Base {
id: args.id or { 0 }
name: args.name
description: args.description
created_at: ourtime.now().unix()
updated_at: ourtime.now().unix()
securitypolicy: args.securitypolicy or { 0 }
tags: tags
comments: commentids
}
}
pub fn tags2id(tags []string) !u32 {
mut redis := redisclient.core_get()!
return if tags.len>0{
mut tags_fixed := tags.map(it.to_lower_ascii().trim_space()).filter(it != "")
tags_fixed.sort_ignore_case()
hash :=md5.hexhash(tags_fixed.join(","))
tags_found := redis.hget("db:tags", hash)!
return if tags_found == ""{
id := u32(redis.incr("db:tags:id")!)
redis.hset("db:tags", hash, id.str())!
redis.hset("db:tags", id.str(), tags_fixed.join(","))!
id
}else{
tags_found.u32()
}
} else {
0
}
}
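Hedged sketch of the normalization above: tags are lowercased, trimmed, de-emptied and sorted before hashing, so order and case do not affect the id (requires the Redis instance the helper connects to):
a := tags2id(['Urgent', 'work'])!
b := tags2id(['work ', 'urgent'])!
assert a == b
assert tags2id([]string{})! == 0 // no tags -> sentinel id 0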

View File

@@ -5,7 +5,6 @@ import json
import freeflowuniverse.herolib.ui.console import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.hero.heromodels.openrpc import freeflowuniverse.herolib.hero.heromodels.openrpc
fn send_request(mut conn unix.StreamConn, request openrpc.JsonRpcRequest) ! { fn send_request(mut conn unix.StreamConn, request openrpc.JsonRpcRequest) ! {
request_json := json.encode(request) request_json := json.encode(request)
conn.write_string(request_json)! conn.write_string(request_json)!
@@ -31,9 +30,9 @@ console.print_item('Connected to server')
console.print_header('Test 1: Discover OpenRPC Specification') console.print_header('Test 1: Discover OpenRPC Specification')
discover_request := openrpc.JsonRpcRequest{ discover_request := openrpc.JsonRpcRequest{
jsonrpc: '2.0' jsonrpc: '2.0'
method: 'discover' method: 'discover'
params: 'null' params: 'null'
id: '1' id: '1'
} }
send_request(mut conn, discover_request)! send_request(mut conn, discover_request)!
@@ -46,9 +45,9 @@ comment_json := '{"comment": "This is a test comment from OpenRPC client", "pare
create_request := openrpc.JsonRpcRequest{ create_request := openrpc.JsonRpcRequest{
jsonrpc: '2.0' jsonrpc: '2.0'
method: 'comment_set' method: 'comment_set'
params: comment_json params: comment_json
id: '2' id: '2'
} }
send_request(mut conn, create_request)! send_request(mut conn, create_request)!
@@ -59,9 +58,9 @@ console.print_item('Comment created: ${create_response}')
console.print_header('Test 3: List All Comments') console.print_header('Test 3: List All Comments')
list_request := openrpc.JsonRpcRequest{ list_request := openrpc.JsonRpcRequest{
jsonrpc: '2.0' jsonrpc: '2.0'
method: 'comment_list' method: 'comment_list'
params: 'null' params: 'null'
id: '3' id: '3'
} }
send_request(mut conn, list_request)! send_request(mut conn, list_request)!
@@ -74,9 +73,9 @@ get_args_json := '{"author": 1}'
get_request := openrpc.JsonRpcRequest{ get_request := openrpc.JsonRpcRequest{
jsonrpc: '2.0' jsonrpc: '2.0'
method: 'comment_get' method: 'comment_get'
params: get_args_json params: get_args_json
id: '4' id: '4'
} }
send_request(mut conn, get_request)! send_request(mut conn, get_request)!
@@ -84,5 +83,3 @@ get_response := read_response(mut conn)!
console.print_item('Comments by author: ${get_response}') console.print_item('Comments by author: ${get_response}')
console.print_header('All tests completed successfully!') console.print_header('All tests completed successfully!')

View File

@@ -1,6 +1,5 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run #!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
// Create a user // Create a user
mut user := new_user('John Doe', 'john@example.com') mut user := new_user('John Doe', 'john@example.com')
@@ -18,7 +17,8 @@ mut issue := new_project_issue('Fix login bug', project.id, user.id, .bug)
mut calendar := new_calendar('Team Calendar', group.id) mut calendar := new_calendar('Team Calendar', group.id)
// Create an event // Create an event
mut event := new_calendar_event('Sprint Planning', 1672531200, 1672534800, calendar.id, user.id) mut event := new_calendar_event('Sprint Planning', 1672531200, 1672534800, calendar.id,
user.id)
calendar.add_event(event.id) calendar.add_event(event.id)
// Create a filesystem // Create a filesystem
@@ -34,4 +34,4 @@ println('Issue ID: ${issue.id}')
println('Calendar ID: ${calendar.id}') println('Calendar ID: ${calendar.id}')
println('Event ID: ${event.id}') println('Event ID: ${event.id}')
println('Filesystem ID: ${fs.id}') println('Filesystem ID: ${fs.id}')
println('Blob ID: ${blob.id}') println('Blob ID: ${blob.id}')

View File

@@ -20,4 +20,4 @@ console.print_item('Press Ctrl+C to stop the server')
// Keep the main thread alive // Keep the main thread alive
for { for {
time.sleep(1 * time.second) time.sleep(1 * time.second)
} }

View File

@@ -8,45 +8,45 @@ import json
@[heap] @[heap]
pub struct Fs { pub struct Fs {
pub mut: pub mut:
id string // blake192 hash id string // blake192 hash
name string name string
description string description string
group_id string // Associated group for permissions group_id string // Associated group for permissions
root_dir_id string // ID of root directory root_dir_id string // ID of root directory
created_at i64 created_at i64
updated_at i64 updated_at i64
quota_bytes i64 // Storage quota in bytes quota_bytes i64 // Storage quota in bytes
used_bytes i64 // Current usage in bytes used_bytes i64 // Current usage in bytes
tags []string tags []string
} }
pub fn (mut f Fs) calculate_id() { pub fn (mut f Fs) calculate_id() {
content := json.encode(FsContent{ content := json.encode(FsContent{
name: f.name name: f.name
description: f.description description: f.description
group_id: f.group_id group_id: f.group_id
quota_bytes: f.quota_bytes quota_bytes: f.quota_bytes
tags: f.tags tags: f.tags
}) })
hash := blake3.sum256(content.bytes()) hash := blake3.sum256(content.bytes())
f.id = hash.hex()[..48] f.id = hash.hex()[..48]
} }
struct FsContent { struct FsContent {
name string name string
description string description string
group_id string group_id string
quota_bytes i64 quota_bytes i64
tags []string tags []string
} }
pub fn new_fs(name string, group_id string) Fs { pub fn new_fs(name string, group_id string) Fs {
mut fs := Fs{ mut fs := Fs{
name: name name: name
group_id: group_id group_id: group_id
created_at: time.now().unix() created_at: time.now().unix()
updated_at: time.now().unix() updated_at: time.now().unix()
} }
fs.calculate_id() fs.calculate_id()
return fs return fs
} }

View File

@@ -7,35 +7,35 @@ import crypto.blake3
@[heap] @[heap]
pub struct FsBlob { pub struct FsBlob {
pub mut: pub mut:
id string // blake192 hash of content id string // blake192 hash of content
data []u8 // Binary data (max 1MB) data []u8 // Binary data (max 1MB)
size_bytes int // Size in bytes size_bytes int // Size in bytes
created_at i64 created_at i64
mime_type string mime_type string
encoding string // e.g., "gzip", "none" encoding string // e.g., "gzip", "none"
} }
pub fn (mut b FsBlob) calculate_id() { pub fn (mut b FsBlob) calculate_id() {
hash := blake3.sum256(b.data) hash := blake3.sum256(b.data)
b.id = hash.hex()[..48] // blake192 = first 192 bits = 48 hex chars b.id = hash.hex()[..48] // blake192 = first 192 bits = 48 hex chars
} }
pub fn new_fs_blob(data []u8) !FsBlob { pub fn new_fs_blob(data []u8) !FsBlob {
if data.len > 1024 * 1024 { // 1MB limit if data.len > 1024 * 1024 { // 1MB limit
return error('Blob size exceeds 1MB limit') return error('Blob size exceeds 1MB limit')
} }
mut blob := FsBlob{ mut blob := FsBlob{
data: data data: data
size_bytes: data.len size_bytes: data.len
created_at: time.now().unix() created_at: time.now().unix()
encoding: 'none' encoding: 'none'
} }
blob.calculate_id() blob.calculate_id()
return blob return blob
} }
pub fn (b FsBlob) verify_integrity() bool { pub fn (b FsBlob) verify_integrity() bool {
hash := blake3.sum256(b.data) hash := blake3.sum256(b.data)
return hash.hex()[..48] == b.id return hash.hex()[..48] == b.id
} }
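Hedged sketch of the content addressing above: the id is the first 192 bits of the blake3 hash of the data, so any mutation is detectable:
mut blob := new_fs_blob('hello'.bytes())!
assert blob.verify_integrity()
blob.data[0] = u8(`H`) // tamper with one byte
assert !blob.verify_integrity()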

View File

@@ -8,46 +8,46 @@ import json
@[heap]
pub struct FsDir {
pub mut:
	id         string // blake192 hash
	name       string
	fs_id      string   // Associated filesystem
	parent_id  string   // Parent directory ID (empty for root)
	group_id   string   // Associated group for permissions
	children   []string // Child directory and file IDs
	created_at i64
	updated_at i64
	tags       []string
}

pub fn (mut d FsDir) calculate_id() {
	content := json.encode(DirContent{
		name:      d.name
		fs_id:     d.fs_id
		parent_id: d.parent_id
		group_id:  d.group_id
		tags:      d.tags
	})
	hash := blake3.sum256(content.bytes())
	d.id = hash.hex()[..48]
}

struct DirContent {
	name      string
	fs_id     string
	parent_id string
	group_id  string
	tags      []string
}

pub fn new_fs_dir(name string, fs_id string, parent_id string, group_id string) FsDir {
	mut dir := FsDir{
		name:       name
		fs_id:      fs_id
		parent_id:  parent_id
		group_id:   group_id
		created_at: time.now().unix()
		updated_at: time.now().unix()
	}
	dir.calculate_id()
	return dir
}

View File

@@ -8,58 +8,58 @@ import json
@[heap]
pub struct FsFile {
pub mut:
	id          string // blake192 hash
	name        string
	fs_id       string   // Associated filesystem
	directories []string // Directory IDs where this file exists
	blobs       []string // Blake192 IDs of file content blobs
	size_bytes  i64      // Total file size
	mime_type   string
	checksum    string // Overall file checksum
	created_at  i64
	updated_at  i64
	accessed_at i64
	tags        []string
	metadata    map[string]string // Custom metadata
}

pub fn (mut f FsFile) calculate_id() {
	content := json.encode(FileContent{
		name:        f.name
		fs_id:       f.fs_id
		directories: f.directories
		blobs:       f.blobs
		size_bytes:  f.size_bytes
		mime_type:   f.mime_type
		checksum:    f.checksum
		tags:        f.tags
		metadata:    f.metadata
	})
	hash := blake3.sum256(content.bytes())
	f.id = hash.hex()[..48]
}

struct FileContent {
	name        string
	fs_id       string
	directories []string
	blobs       []string
	size_bytes  i64
	mime_type   string
	checksum    string
	tags        []string
	metadata    map[string]string
}

pub fn new_fs_file(name string, fs_id string, directories []string) FsFile {
	mut file := FsFile{
		name:        name
		fs_id:       fs_id
		directories: directories
		created_at:  time.now().unix()
		updated_at:  time.now().unix()
		accessed_at: time.now().unix()
	}
	file.calculate_id()
	return file
}

View File

@@ -8,54 +8,54 @@ import json
@[heap]
pub struct FsSymlink {
pub mut:
	id          string // blake192 hash
	name        string
	fs_id       string // Associated filesystem
	parent_id   string // Parent directory ID
	target_id   string // ID of target file or directory
	target_type SymlinkTargetType
	created_at  i64
	updated_at  i64
	tags        []string
}

pub enum SymlinkTargetType {
	file
	directory
}

pub fn (mut s FsSymlink) calculate_id() {
	content := json.encode(SymlinkContent{
		name:        s.name
		fs_id:       s.fs_id
		parent_id:   s.parent_id
		target_id:   s.target_id
		target_type: s.target_type
		tags:        s.tags
	})
	hash := blake3.sum256(content.bytes())
	s.id = hash.hex()[..48]
}

struct SymlinkContent {
	name        string
	fs_id       string
	parent_id   string
	target_id   string
	target_type SymlinkTargetType
	tags        []string
}

pub fn new_fs_symlink(name string, fs_id string, parent_id string, target_id string, target_type SymlinkTargetType) FsSymlink {
	mut symlink := FsSymlink{
		name:        name
		fs_id:       fs_id
		parent_id:   parent_id
		target_id:   target_id
		target_type: target_type
		created_at:  time.now().unix()
		updated_at:  time.now().unix()
	}
	symlink.calculate_id()
	return symlink
}

View File

@@ -8,74 +8,74 @@ import json
@[heap]
pub struct Group {
pub mut:
	id           string // blake192 hash
	name         string
	description  string
	members      []GroupMember
	subgroups    []string // IDs of child groups
	parent_group string   // ID of parent group
	created_at   i64
	updated_at   i64
	is_public    bool
	tags         []string
}

pub struct GroupMember {
pub mut:
	user_id   string
	role      GroupRole
	joined_at i64
}

pub enum GroupRole {
	reader
	writer
	admin
	owner
}

pub fn (mut g Group) calculate_id() {
	content := json.encode(GroupContent{
		name:         g.name
		description:  g.description
		members:      g.members
		subgroups:    g.subgroups
		parent_group: g.parent_group
		is_public:    g.is_public
		tags:         g.tags
	})
	hash := blake3.sum256(content.bytes())
	g.id = hash.hex()[..48]
}

struct GroupContent {
	name         string
	description  string
	members      []GroupMember
	subgroups    []string
	parent_group string
	is_public    bool
	tags         []string
}

pub fn new_group(name string, description string) Group {
	mut group := Group{
		name:        name
		description: description
		created_at:  time.now().unix()
		updated_at:  time.now().unix()
		is_public:   false
	}
	group.calculate_id()
	return group
}

pub fn (mut g Group) add_member(user_id string, role GroupRole) {
	g.members << GroupMember{
		user_id:   user_id
		role:      role
		joined_at: time.now().unix()
	}
	g.updated_at = time.now().unix()
	g.calculate_id()
}
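
// Illustrative sketch (not part of this commit): add_member() recomputes the hash,
// so a group's id changes whenever its membership changes. Example values below.
fn example_group_usage() {
	mut g := new_group('devs', 'development team')
	id_before := g.id
	g.add_member('user-123', .writer)
	assert g.id != id_before // content-addressed: new membership, new id
}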

View File

@@ -44,4 +44,4 @@ import freeflowuniverse.herolib.schemas.openrpc
// pub fn new_base(args BaseArgs) !Base {
// 	return openrpcserver.new_base(args)!
// }

View File

@@ -8,80 +8,80 @@ import json
@[heap]
pub struct Project {
pub mut:
	id          string // blake192 hash
	name        string
	description string
	group_id    string // Associated group for permissions
	swimlanes   []Swimlane
	milestones  []Milestone
	issues      []string // IDs of project issues
	fs_files    []string // IDs of linked files
	status      ProjectStatus
	start_date  i64
	end_date    i64
	created_at  i64
	updated_at  i64
	tags        []string
}

pub struct Swimlane {
pub mut:
	id          string
	name        string
	description string
	order       int
	color       string
	is_done     bool
}

pub struct Milestone {
pub mut:
	id          string
	name        string
	description string
	due_date    i64
	completed   bool
	issues      []string // IDs of issues in this milestone
}

pub enum ProjectStatus {
	planning
	active
	on_hold
	completed
	cancelled
}

pub fn (mut p Project) calculate_id() {
	content := json.encode(ProjectContent{
		name:        p.name
		description: p.description
		group_id:    p.group_id
		swimlanes:   p.swimlanes
		milestones:  p.milestones
		issues:      p.issues
		fs_files:    p.fs_files
		status:      p.status
		start_date:  p.start_date
		end_date:    p.end_date
		tags:        p.tags
	})
	hash := blake3.sum256(content.bytes())
	p.id = hash.hex()[..48]
}

struct ProjectContent {
	name        string
	description string
	group_id    string
	swimlanes   []Swimlane
	milestones  []Milestone
	issues      []string
	fs_files    []string
	status      ProjectStatus
	start_date  i64
	end_date    i64
	tags        []string
}

pub struct NewProject {
@@ -107,4 +107,4 @@ pub fn new_project(params NewProject) !Project {
	}
	project.calculate_id()
	return project
}

View File

@@ -8,109 +8,109 @@ import json
@[heap]
pub struct ProjectIssue {
pub mut:
	id           string // blake192 hash
	title        string
	description  string
	project_id   string // Associated project
	issue_type   IssueType
	priority     IssuePriority
	status       IssueStatus
	swimlane_id  string   // Current swimlane
	assignees    []string // User IDs
	reporter     string   // User ID who created the issue
	milestone_id string   // Associated milestone
	deadline     i64      // Unix timestamp
	estimate     int      // Story points or hours
	fs_files     []string // IDs of linked files
	parent_id    string   // Parent issue ID (for sub-tasks)
	children     []string // Child issue IDs
	created_at   i64
	updated_at   i64
	tags         []string
}

pub enum IssueType {
	task
	story
	bug
	question
	epic
	subtask
}

pub enum IssuePriority {
	lowest
	low
	medium
	high
	highest
	critical
}

pub enum IssueStatus {
	open
	in_progress
	blocked
	review
	testing
	done
	closed
}

pub fn (mut i ProjectIssue) calculate_id() {
	content := json.encode(IssueContent{
		title:        i.title
		description:  i.description
		project_id:   i.project_id
		issue_type:   i.issue_type
		priority:     i.priority
		status:       i.status
		swimlane_id:  i.swimlane_id
		assignees:    i.assignees
		reporter:     i.reporter
		milestone_id: i.milestone_id
		deadline:     i.deadline
		estimate:     i.estimate
		fs_files:     i.fs_files
		parent_id:    i.parent_id
		children:     i.children
		tags:         i.tags
	})
	hash := blake3.sum256(content.bytes())
	i.id = hash.hex()[..48]
}

struct IssueContent {
	title        string
	description  string
	project_id   string
	issue_type   IssueType
	priority     IssuePriority
	status       IssueStatus
	swimlane_id  string
	assignees    []string
	reporter     string
	milestone_id string
	deadline     i64
	estimate     int
	fs_files     []string
	parent_id    string
	children     []string
	tags         []string
}

pub fn new_project_issue(title string, project_id string, reporter string, issue_type IssueType) ProjectIssue {
	mut issue := ProjectIssue{
		title:       title
		project_id:  project_id
		reporter:    reporter
		issue_type:  issue_type
		priority:    .medium
		status:      .open
		swimlane_id: 'todo'
		created_at:  time.now().unix()
		updated_at:  time.now().unix()
	}
	issue.calculate_id()
	return issue
}
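
// Illustrative sketch (not part of this commit): new issues default to priority
// .medium, status .open and swimlane 'todo'. The ids below are example values.
fn example_issue_usage() {
	mut issue := new_project_issue('Fix login bug', 'project-1', 'user-123', .bug)
	issue.assignees << 'user-456'
	issue.status = .in_progress
	issue.calculate_id() // re-hash after mutating fields
}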

View File

@@ -8,61 +8,61 @@ import json
@[heap]
pub struct User {
pub mut:
	id         string // blake192 hash
	name       string
	email      string
	public_key string // for encryption/signing
	phone      string
	address    string
	avatar_url string
	bio        string
	timezone   string
	created_at i64
	updated_at i64
	status     UserStatus
}

pub enum UserStatus {
	active
	inactive
	suspended
	pending
}

pub fn (mut u User) calculate_id() {
	content := json.encode(UserContent{
		name:       u.name
		email:      u.email
		public_key: u.public_key
		phone:      u.phone
		address:    u.address
		bio:        u.bio
		timezone:   u.timezone
		status:     u.status
	})
	hash := blake3.sum256(content.bytes())
	u.id = hash.hex()[..48] // blake192 = first 192 bits = 48 hex chars
}

struct UserContent {
	name       string
	email      string
	public_key string
	phone      string
	address    string
	bio        string
	timezone   string
	status     UserStatus
}

pub fn new_user(name string, email string) User {
	mut user := User{
		name:       name
		email:      email
		created_at: time.now().unix()
		updated_at: time.now().unix()
		status:     .active
	}
	user.calculate_id()
	return user
}
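
// Illustrative sketch (not part of this commit): two users with identical content
// get the same id, since the id is a blake3 hash of the encoded fields.
fn example_user_usage() {
	mut user := new_user('alice', 'alice@example.com') // example values
	user.timezone = 'UTC'
	user.calculate_id()
	println('user id: ${user.id}') // 48 hex chars
}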

View File

@@ -6,32 +6,32 @@ import time
@[heap]
pub struct VersionHistory {
pub mut:
	current_id   string // blake192 hash of current version
	previous_id  string // blake192 hash of previous version
	next_id      string // blake192 hash of next version (if exists)
	object_type  string // Type of object (User, Group, etc.)
	change_type  ChangeType
	changed_by   string // User ID who made the change
	changed_at   i64    // Unix timestamp
	change_notes string // Optional description of changes
}

pub enum ChangeType {
	create
	update
	delete
	restore
}

pub fn new_version_history(current_id string, previous_id string, object_type string, change_type ChangeType, changed_by string) VersionHistory {
	return VersionHistory{
		current_id:  current_id
		previous_id: previous_id
		object_type: object_type
		change_type: change_type
		changed_by:  changed_by
		changed_at:  time.now().unix()
	}
}

// Database indexes needed:
@@ -39,4 +39,4 @@ pub fn new_version_history(current_id string, previous_id string, object_type st
// - Index on previous_id for walking backward
// - Index on next_id for walking forward
// - Index on object_type for filtering by type
// - Index on changed_by for user activity tracking
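
// Illustrative sketch (not part of this commit): recording an update by linking the
// new content hash to the previous one, forming a doubly linked version chain.
// The ids below are made-up example values.
fn example_version_usage() {
	entry := new_version_history('new-id-48hex', 'old-id-48hex', 'User', .update, 'user-123')
	println('changed at: ${entry.changed_at}')
}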

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
    name:'herorunner'
    classname:'HeroRunner'
    singleton:0
    templates:0
    default:1
    title:''
    supported_platforms:''
    reset:0
    startupmanager:0
    hasconfig:0
    build:0

View File

@@ -0,0 +1,67 @@
module herorunner

import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.installers.ulist
import os

//////////////////// following actions are not specific to instance of the object

fn installed() !bool {
	return false
}

// get the Upload List of the files
fn ulist_get() !ulist.UList {
	return ulist.UList{}
}

fn upload() ! {
}

fn install() ! {
	console.print_header('install herorunner')
	osal.package_install('crun')!
	// osal.exec(
	//     cmd: '
	//     '
	//     stdout: true
	//     name: 'herorunner_install'
	// )!
}

fn destroy() ! {
	// mut systemdfactory := systemd.new()!
	// systemdfactory.destroy("zinit")!
	// osal.process_kill_recursive(name:'zinit')!
	// osal.cmd_delete('zinit')!

	// osal.package_remove('
	//     podman
	//     conmon
	//     buildah
	//     skopeo
	//     runc
	// ')!

	// //will remove all paths where go/bin is found
	// osal.profile_path_add_remove(paths2delete:"go/bin")!

	// osal.rm("
	//     podman
	//     conmon
	//     buildah
	//     skopeo
	//     runc
	//     /var/lib/containers
	//     /var/lib/podman
	//     /var/lib/buildah
	//     /tmp/podman
	//     /tmp/conmon
	// ")!
}

View File

@@ -0,0 +1,79 @@
module herorunner

import freeflowuniverse.herolib.core.playbook { PlayBook }
import freeflowuniverse.herolib.ui.console
import json
import freeflowuniverse.herolib.osal.startupmanager

__global (
	herorunner_global  map[string]&HeroRunner
	herorunner_default string
)

/////////FACTORY

@[params]
pub struct ArgsGet {
pub mut:
	name string = 'default'
}

pub fn new(args ArgsGet) !&HeroRunner {
	return &HeroRunner{}
}

pub fn get(args ArgsGet) !&HeroRunner {
	return new(args)!
}

pub fn play(mut plbook PlayBook) ! {
	if !plbook.exists(filter: 'herorunner.') {
		return
	}
	mut install_actions := plbook.find(filter: 'herorunner.configure')!
	if install_actions.len > 0 {
		return error("can't configure herorunner, because no configuration allowed for this installer.")
	}
	mut other_actions := plbook.find(filter: 'herorunner.')!
	for other_action in other_actions {
		if other_action.name in ['destroy', 'install', 'build'] {
			mut p := other_action.params
			reset := p.get_default_false('reset')
			if other_action.name == 'destroy' || reset {
				console.print_debug('install action herorunner.destroy')
				destroy()!
			}
			if other_action.name == 'install' {
				console.print_debug('install action herorunner.install')
				install()!
			}
		}
	}
}

////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////

@[params]
pub struct InstallArgs {
pub mut:
	reset bool
}

pub fn (mut self HeroRunner) install(args InstallArgs) ! {
	switch(self.name)
	if args.reset || (!installed()!) {
		install()!
	}
}

pub fn (mut self HeroRunner) destroy() ! {
	switch(self.name)
	destroy()!
}

// switch instance to be used for herorunner
pub fn switch(name string) {
	herorunner_default = name
}

View File

@@ -0,0 +1,34 @@
module herorunner

import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.data.encoderhero
import os

pub const version = '0.0.0'
const singleton = false
const default = true

// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct HeroRunner {
pub mut:
	name string = 'default'
}

// your checking & initialization code if needed
fn obj_init(mycfg_ HeroRunner) !HeroRunner {
	mut mycfg := mycfg_
	return mycfg
}

// called before start if done
fn configure() ! {
	// mut installer := get()!
}

/////////////NORMALLY NO NEED TO TOUCH

pub fn heroscript_loads(heroscript string) !HeroRunner {
	mut obj := encoderhero.decode[HeroRunner](heroscript)!
	return obj
}

View File

@@ -0,0 +1,44 @@
# herorunner

To get started

```vlang
import freeflowuniverse.herolib.installers.something.herorunner as herorunner_installer

heroscript := "
!!herorunner.configure name:'test'
    password: '1234'
    port: 7701

!!herorunner.start name:'test' reset:1
"

herorunner_installer.play(heroscript: heroscript)!

//or we can call the default and do a start with reset
//mut installer:= herorunner_installer.get()!
//installer.start(reset:true)!
```

## example heroscript

```hero
!!herorunner.configure
    homedir: '/home/user/herorunner'
    username: 'admin'
    password: 'secretpassword'
    title: 'Some Title'
    host: 'localhost'
    port: 8888
```

View File

@@ -1,9 +1,13 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.mcp.aitools.escalayer
import freeflowuniverse.herolib.core.redisclient
import os

fn main() {
	// Example of using redisclient module instead of old redis.Connection
	redis_example() or { println('Redis example failed: ${err}') }

	// Get the current directory where this script is located
	current_dir := os.dir(@FILE)
@@ -594,3 +598,64 @@ fn extract_functions_from_code(code string) []string {
	return functions
}

// Example function showing how to use redisclient module instead of old redis.Connection
fn redis_example() ! {
	// OLD WAY (don't use this):
	// mut conns := []redis.Connection{}
	// for s in servers {
	//     mut c := redis.connect(redis.Options{ server: s }) or {
	//         panic('could not connect to redis $s: $err')
	//     }
	//     conns << c
	// }

	// NEW WAY using redisclient module:
	servers := ['127.0.0.1:6379', '127.0.0.1:6380', '127.0.0.1:6381', '127.0.0.1:6382']
	mut redis_clients := []&redisclient.Redis{}
	for server in servers {
		// Parse server address
		redis_url := redisclient.get_redis_url(server) or {
			println('Failed to parse Redis URL ${server}: ${err}')
			continue
		}
		// Create Redis client using redisclient module
		mut redis_client := redisclient.core_get(redis_url) or {
			println('Failed to connect to Redis ${server}: ${err}')
			continue
		}
		// Test the connection
		redis_client.ping() or {
			println('Failed to ping Redis ${server}: ${err}')
			continue
		}
		redis_clients << redis_client
		println('Successfully connected to Redis server: ${server}')
	}

	// Example usage of Redis operations
	if redis_clients.len > 0 {
		mut redis := redis_clients[0]
		// Set a test key
		redis.set('test_key', 'test_value') or {
			println('Failed to set test key: ${err}')
			return
		}
		// Get the test key
		value := redis.get('test_key') or {
			println('Failed to get test key: ${err}')
			return
		}
		println('Redis test successful - key: test_key, value: ${value}')
		// Clean up
		redis.del('test_key') or { println('Failed to delete test key: ${err}') }
	}
}

View File

@@ -3,7 +3,7 @@ module core
import net
import time
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core as herolib_core
import math
import os
@@ -18,7 +18,7 @@ pub mut:
// if ping ok, return true
pub fn ping(args PingArgs) !bool {
	platform_ := herolib_core.platform()!
	mut cmd := 'ping'
	if args.address.contains(':') {
		cmd = 'ping6'
@@ -127,6 +127,64 @@ pub fn tcp_port_test(args TcpPortTestArgs) bool {
	return false
}

// return in milliseconds
pub fn http_ping(args TcpPortTestArgs) !int {
	start_time := time.now().unix_milli()

	// Try to establish TCP connection
	console.print_debug('Pinging HTTP server at ${args.address}:${args.port}...')
	mut sock := net.dial_tcp('${args.address}:${args.port}') or {
		return error('failed to establish TCP connection to ${args.address}:${args.port}')
	}
	console.print_debug('TCP connection established to ${args.address}:${args.port}')

	// Send a simple HTTP GET request
	http_request := 'GET / HTTP/1.1\r\nHost: ${args.address}\r\nConnection: close\r\n\r\n'
	sock.write_string(http_request) or {
		sock.close()!
		return error('failed to send HTTP request to ${args.address}:${args.port}')
	}
	console.print_debug('HTTP request sent to ${args.address}:${args.port}')

	// Read response (at least some bytes to confirm it's an HTTP server)
	mut buf := []u8{len: 1024}
	_ = sock.read(mut buf) or {
		sock.close()!
		return error('failed to read HTTP response from ${args.address}:${args.port}')
	}
	console.print_debug('HTTP response received from ${args.address}:${args.port}')

	sock.close()!
	console.print_debug('TCP connection closed for ${args.address}:${args.port}')

	// Calculate and return the round-trip time
	end_time := time.now().unix_milli()
	return int(end_time - start_time)
}

// Wait until a web server responds properly to HTTP requests
// Returns true when the server is responding, false on timeout
pub fn http_wait(args TcpPortTestArgs) bool {
	start_time := time.now().unix_milli()
	mut run_time := 0.0
	for true {
		run_time = time.now().unix_milli()
		if run_time > start_time + args.timeout {
			return false
		}
		// Try to ping the HTTP server
		_ = http_ping(args) or {
			// If http_ping fails, it means the server is not responding properly yet
			time.sleep(100 * time.millisecond)
			continue
		}
		// If http_ping succeeds, the server is responding properly
		return true
	}
	return false
}
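
// Illustrative sketch (not part of this commit): block until a local server answers
// HTTP, then measure one round trip. Assumes TcpPortTestArgs exposes the
// address/port/timeout fields used above; timeout is in milliseconds. The endpoint
// values are examples only.
fn example_http_wait() {
	if http_wait(address: '127.0.0.1', port: 8080, timeout: 5000) {
		rtt := http_ping(address: '127.0.0.1', port: 8080) or { -1 }
		console.print_debug('server up, round trip ${rtt} ms')
	}
}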
// Returns the public IP address as known on the public side
// Uses resolver4.opendns.com to fetch the IP address
pub fn ipaddr_pub_get() !string {
@@ -238,13 +296,14 @@ fn ssh_testrun_internal(args TcpPortTestArgs) !(string, SSHResult) {
	res := exec(cmd: cmd, ignore_error: true, stdout: false, debug: false)!
	// console.print_debug('ssh test ${res.exit_code}: ===== cmd:\n${cmd}\n=====\n${res.output}')
	res_output := res.output
	if res.exit_code == 0 {
		return res_output, SSHResult.ok
	} else if res.exit_code == 1 {
		return res_output, SSHResult.ssh
	} else if res.exit_code == 2 {
		return res_output, SSHResult.ping
	} else {
		return res_output, SSHResult.ssh
	}
}

View File

@@ -6,16 +6,16 @@ fn test_ipaddr_pub_get() {
}

fn test_ping() {
	x := ping(address: '127.0.0.1', retry: 1)!
	assert x == true
}

fn test_ping_timeout() ! {
	x := ping(address: '192.168.145.154', retry: 5, nr_ok: 1)!
	assert x == false
}

fn test_ping_unknownhost() ! {
	x := ping(address: '12.902.219.1', retry: 1, nr_ok: 1)!
	assert x == false
}

View File

@@ -3,33 +3,33 @@ module core
import freeflowuniverse.herolib.core

fn test_package_management() {
	// platform_ := core.platform()!
	// if platform_ == .osx {
	//     // Check if brew is installed
	//     if !cmd_exists('brew') {
	//         eprintln('WARNING: Homebrew is not installed. Please install it to run package management tests on OSX.')
	//         return
	//     }
	// }

	// is_wget_installed := cmd_exists('wget')
	// if is_wget_installed {
	//     // Clean up - remove wget
	//     package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' }
	//     assert !cmd_exists('wget')
	//     // Reinstalling wget as it was previously installed
	//     package_install('wget') or { assert false, 'Failed to install wget: ${err}' }
	//     assert cmd_exists('wget')
	//     return
	// }

	// // Install wget and verify it is installed
	// package_install('wget') or { assert false, 'Failed to install wget: ${err}' }
	// assert cmd_exists('wget')

	// // Clean up - remove wget
	// package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' }
	// assert !cmd_exists('wget')
}

View File

@@ -1,13 +1,5 @@
module linux
// import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.texttools
// import freeflowuniverse.herolib.screen
import os
import time
// import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
@[heap]
pub struct LinuxFactory {
pub mut:

View File

@@ -15,10 +15,10 @@ pub fn new() ServerManager {
}

fn (s ServerManager) execute(command string) bool {
	console.print_debug(command)
	r := os.execute(command)
	console.print_debug(r)
	return true
}

View File

@@ -1,7 +1,6 @@
module sshagent

import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.builder
// Check if SSH agent is properly configured and all is good
pub fn agent_check(mut agent SSHAgent) ! {

View File

@@ -58,7 +58,7 @@ pub fn (mut agent SSHAgent) is_agent_responsive() bool {
	return res.exit_code == 0 || res.exit_code == 1 // 1 means no keys, but agent is running
}

// cleanup orphaned ssh-agent processes, means all agents for the logged in user
pub fn (mut agent SSHAgent) cleanup_orphaned_agents() ! {
	user := os.getenv('USER')
@@ -77,6 +77,7 @@ pub fn (mut agent SSHAgent) cleanup_orphaned_agents() ! {
			}
		}
	}
	$dbg;
}

// check if specific agent PID is valid and responsive

View File

@@ -3,40 +3,185 @@ module main
import os
import io
import freeflowuniverse.herolib.core.logger
import freeflowuniverse.herolib.core.texttools

struct Args {
mut:
	logpath  string
	pane_id  string
	log      bool = true
	logreset bool
}

fn main() {
	args := parse_args() or {
		eprintln('Error: ${err}')
		print_usage()
		exit(1)
	}

	if !args.log {
		// If logging is disabled, just consume stdin and exit
		mut reader := io.new_buffered_reader(reader: os.stdin())
		for {
			reader.read_line() or { break }
		}
		return
	}

	// Determine the actual log directory path
	log_dir_path := determine_log_path(args) or {
		eprintln('Error determining log path: ${err}')
		exit(1)
	}

	// Handle log reset if requested
	if args.logreset {
		reset_logs(log_dir_path) or {
			eprintln('Error resetting logs: ${err}')
			exit(1)
		}
	}

	// Create logger - the logger factory expects a directory path
	mut l := logger.new(path: log_dir_path) or {
		eprintln('Failed to create logger: ${err}')
		exit(1)
	}

	// Read from stdin using a more direct approach that works with tmux pipe-pane
	// The issue is that tmux pipe-pane sends data differently than regular pipes
	mut buffer := []u8{len: 1024}
	mut line_buffer := ''
	for {
		// Read raw bytes from stdin - this is more compatible with tmux pipe-pane
		data, bytes_read := os.fd_read(0, buffer.len)
		if bytes_read == 0 {
			// No data available - for tmux pipe-pane this is normal, continue waiting
			continue
		}
		// Convert bytes to string and add to line buffer
		line_buffer += data
		// Process complete lines
		for line_buffer.contains('\n') {
			idx := line_buffer.index('\n') or { break }
			line := line_buffer[..idx].trim_space()
			line_buffer = line_buffer[idx + 1..]
			if line.len == 0 {
				continue
			}
			// Detect output type and set appropriate category
			category, logtype := categorize_output(line)
			// Log immediately - the logger handles its own file operations
			l.log(
				cat: category
				log: line
				logtype: logtype
			) or {
				eprintln('Failed to log line: ${err}')
				continue
			}
		}
	}

	// Process any remaining data in the buffer
	if line_buffer.trim_space().len > 0 {
		line := line_buffer.trim_space()
		category, logtype := categorize_output(line)
		l.log(
			cat: category
			log: line
			logtype: logtype
		) or { eprintln('Failed to log final line: ${err}') }
	}
}

fn parse_args() !Args {
	if os.args.len < 2 {
		return error('Missing required argument: logpath')
	}
	mut args := Args{
		logpath: os.args[1]
	}
	// Parse optional pane_id (second positional argument)
	if os.args.len >= 3 {
		args.pane_id = os.args[2]
	}
	// Parse optional flags
	for i in 3 .. os.args.len {
		arg := os.args[i]
		if arg == '--no-log' || arg == '--log=false' {
			args.log = false
		} else if arg == '--logreset' || arg == '--logreset=true' {
			args.logreset = true
		} else if arg.starts_with('--log=') {
			val := arg.all_after('=').to_lower()
			args.log = val == 'true' || val == '1' || val == 'yes'
		} else if arg.starts_with('--logreset=') {
			val := arg.all_after('=').to_lower()
			args.logreset = val == 'true' || val == '1' || val == 'yes'
		}
	}
	return args
}

fn determine_log_path(args Args) !string {
	mut log_path := args.logpath
	// Check if logpath is a directory or file
	if os.exists(log_path) && os.is_dir(log_path) {
		// It's an existing directory
		if args.pane_id == '' {
			return error('When logpath is a directory, pane_id must be provided')
		}
		// Create a subdirectory for this pane
		pane_dir := os.join_path(log_path, args.pane_id)
		return pane_dir
	} else if log_path.contains('.') && !log_path.ends_with('/') {
		// It looks like a file path, use parent directory
		parent_dir := os.dir(log_path)
		return parent_dir
	} else {
		// It's a directory path (may not exist yet)
		if args.pane_id == '' {
			return log_path
		}
		// Create a subdirectory for this pane
		pane_dir := os.join_path(log_path, args.pane_id)
		return pane_dir
	}
}

fn reset_logs(logpath string) ! {
	if !os.exists(logpath) {
		return
	}
	if os.is_dir(logpath) {
		// Remove all .log files in the directory
		files := os.ls(logpath) or { return }
		for file in files {
			if file.ends_with('.log') {
				full_path := os.join_path(logpath, file)
				os.rm(full_path) or { eprintln('Warning: Failed to remove ${full_path}: ${err}') }
			}
		}
	} else {
		// Remove the specific log file
		os.rm(logpath) or { return error('Failed to remove log file ${logpath}: ${err}') }
	}
}
fn categorize_output(line string) (string, logger.LogType) {
@@ -47,21 +192,41 @@ fn categorize_output(line string) (string, logger.LogType) {
		|| line_lower.contains('exception') || line_lower.contains('panic')
		|| line_lower.starts_with('e ') || line_lower.contains('fatal')
		|| line_lower.contains('critical') {
		return texttools.expand('error', 10, ' '), logger.LogType.error
	}

	// Warning patterns - use .stdout logtype but warning category
	if line_lower.contains('warning') || line_lower.contains('warn:')
		|| line_lower.contains('deprecated') {
		return texttools.expand('warning', 10, ' '), logger.LogType.stdout
	}

	// Info/debug patterns - use .stdout logtype
	if line_lower.contains('info:') || line_lower.contains('debug:')
		|| line_lower.starts_with('info ') || line_lower.starts_with('debug ') {
		return texttools.expand('info', 10, ' '), logger.LogType.stdout
	}

	// Default to stdout category and logtype
	return texttools.expand('stdout', 10, ' '), logger.LogType.stdout
}

fn print_usage() {
	eprintln('Usage: tmux_logger <logpath> [pane_id] [options]')
	eprintln('')
	eprintln('Arguments:')
	eprintln('  logpath     Directory or file path where logs will be stored')
	eprintln('  pane_id     Optional pane identifier (required if logpath is a directory)')
	eprintln('')
	eprintln('Options:')
	eprintln('  --log=true|false       Enable/disable logging (default: true)')
	eprintln('  --no-log               Disable logging (same as --log=false)')
	eprintln('  --logreset=true|false  Reset existing logs before starting (default: false)')
	eprintln('  --logreset             Reset existing logs (same as --logreset=true)')
	eprintln('')
	eprintln('Examples:')
	eprintln('  tmux_logger /tmp/logs pane1')
	eprintln('  tmux_logger /tmp/logs/session.log')
	eprintln('  tmux_logger /tmp/logs pane1 --logreset')
	eprintln('  tmux_logger /tmp/logs pane1 --no-log')
}

View File

@@ -673,7 +673,7 @@ fn play_pane_ensure(mut plbook PlayBook, mut tmux_instance Tmux) ! {
	name := p.get('name')!
	parsed := parse_pane_name(name)!
	cmd := p.get_default('cmd', '')!
	// label := p.get_default('label', '')!

	// Parse environment variables if provided
	mut env := map[string]string{}
@@ -721,7 +721,28 @@ fn play_pane_ensure(mut plbook PlayBook, mut tmux_instance Tmux) ! {
	// Find the target pane (by index, since tmux pane IDs can vary)
	if pane_number > 0 && pane_number <= window.panes.len {
		mut target_pane := window.panes[pane_number - 1] // Convert to 0-based index
		// Use declarative command logic for intelligent state management
		target_pane.send_command_declarative(cmd)!
	}
}

// Handle logging parameters - enable logging if requested
log_enabled := p.get_default_false('log')
if log_enabled {
	logpath := p.get_default('logpath', '')!
	logreset := p.get_default_false('logreset')
	// Find the target pane for logging
	if pane_number > 0 && pane_number <= window.panes.len {
		mut target_pane := window.panes[pane_number - 1] // Convert to 0-based index
		// Enable logging with automation (binary compilation, directory creation, etc.)
		target_pane.logging_enable(
			logpath:  logpath
			logreset: logreset
		) or {
			console.print_debug('Warning: Failed to enable logging for pane ${name}: ${err}')
		}
	}
}

View File

@@ -164,6 +164,59 @@ hero run -p <heroscript_file>
    label:'editor'    // Optional: descriptive label
    cmd:'vim'         // Optional: command to run
    env:'EDITOR=vim'  // Optional: environment variables

// Multi-line commands are supported using proper heroscript syntax
!!tmux.pane_ensure
    name:"mysession|mywindow|2"
    label:'setup'
    cmd:'
    echo "Starting setup..."
    mkdir -p /tmp/workspace
    cd /tmp/workspace
    echo "Setup complete"
    '
```

### Multi-line Commands

The tmux module supports multi-line commands in heroscripts using proper multi-line parameter syntax. Multi-line commands are automatically converted to temporary shell scripts for execution.

#### Syntax

Use the multi-line parameter format with quotes:

```heroscript
!!tmux.pane_ensure
    name:"session|window|pane"
    cmd:'
    command1
    command2
    command3
    '
```

#### Features

- **Automatic Script Generation**: Multi-line commands are converted to temporary shell scripts
- **Sequential Execution**: All commands execute in order within the same shell context
- **Error Handling**: Scripts include proper bash shebang and error handling
- **Temporary Files**: Scripts are stored in `/tmp/tmux/{session}/pane_{id}_script.sh`

#### Example

```heroscript
!!tmux.pane_ensure
    name:"dev|workspace|1"
    label:"setup"
    cmd:'
    echo "Setting up development environment..."
    mkdir -p /tmp/dev_workspace
    cd /tmp/dev_workspace
    git clone https://github.com/example/repo.git
    cd repo
    npm install
    echo "Development environment ready!"
    '
```
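
### Pane Logging

`pane_ensure` also accepts logging parameters (`log`, `logpath`, `logreset`), which pipe all pane output through the `tmux_logger` binary. A minimal sketch, assuming a writable log directory (the path below is an example, not a required location):

```heroscript
!!tmux.pane_ensure
    name:"mysession|mywindow|1"
    cmd:'tail -f /var/log/syslog'
    log:1
    logpath:'/tmp/tmux_logs'
    logreset:1
```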
### Pane Layout Categories

View File

@@ -2,6 +2,7 @@ module tmux
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.redisclient
// import freeflowuniverse.herolib.session
import os
import time
@@ -12,6 +13,7 @@ pub struct Tmux {
pub mut:
	sessions  []&Session
	sessionid string // unique link to job
	redis     &redisclient.Redis @[skip] // Redis client for command state tracking
}
// get session (session has windows) .
@@ -82,13 +84,18 @@ pub fn (mut t Tmux) session_create(args SessionCreateArgs) !&Session {
@[params]
pub struct TmuxNewArgs {
pub:
	sessionid string
}

// return tmux instance
pub fn new(args TmuxNewArgs) !Tmux {
	// Initialize Redis client for command state tracking
	mut redis := redisclient.core_get()!
	mut t := Tmux{
		sessionid: args.sessionid
		redis:     redis
	}
	// t.load()!
	t.scan()!

View File

@@ -7,7 +7,7 @@ import time
import os

@[heap]
pub struct Pane {
pub mut:
	window &Window @[str: skip]
	id     int // pane id (e.g., %1, %2)
@@ -112,7 +112,7 @@ pub fn (mut p Pane) output_wait(c_ string, timeoutsec int) ! {
	mut t := ourtime.now()
	start := t.unix()
	c := c_.replace('\n', '')
	for _ in 0 .. 2000 {
		entries := p.logs_get_new(reset: false)!
		for entry in entries {
			if entry.content.replace('\n', '').contains(c) {
@@ -146,9 +146,280 @@ pub fn (mut p Pane) processinfo_main() !osal.ProcessInfo {
}

// Send a command to this pane
// Supports both single-line and multi-line commands
pub fn (mut p Pane) send_command(command string) ! {
	// Check if command contains multiple lines
	if command.contains('\n') {
		// Multi-line command - create temporary script
		p.send_multiline_command(command)!
	} else {
		// Single-line command - send directly
		cmd := 'tmux send-keys -t ${p.window.session.name}:@${p.window.id}.%${p.id} "${command}" Enter'
		osal.execute_silent(cmd) or { return error('Cannot send command to pane %${p.id}: ${err}') }
	}
}

// Send command with declarative mode logic (intelligent state management)
// This method implements the full declarative logic:
// 1. Check if pane has previous command (Redis lookup)
// 2. If previous command exists:
//    a. Check if still running (process verification)
//    b. Compare MD5 hashes
//    c. If different command OR not running: proceed
//    d. If same command AND running: skip
// 3. If proceeding: kill existing processes, then start new command
pub fn (mut p Pane) send_command_declarative(command string) ! {
	console.print_debug('Declarative command for pane ${p.id}: ${command[..if command.len > 50 {
		50
	} else {
		command.len
	}]}...')
	// Step 1: Check if command has changed
	command_changed := p.has_command_changed(command)
	// Step 2: Check if stored command is still running
	stored_running := p.is_stored_command_running()
	// Step 3: Decide whether to proceed
	should_execute := command_changed || !stored_running
	if !should_execute {
		console.print_debug('Skipping command execution for pane ${p.id}: same command already running')
		return
	}
	// Step 4: If we have a running command that needs to be replaced, kill it
	if stored_running && command_changed {
		console.print_debug('Killing existing command in pane ${p.id} before starting new one')
		p.kill_running_command()!
		// Give processes time to die
		time.sleep(500 * time.millisecond)
	}
	// Step 5: Ensure bash is the parent process
	p.ensure_bash_parent()!
	// Step 6: Reset pane if it appears empty or needs cleanup
	p.reset_if_needed()!
	// Step 7: Execute the new command
	p.send_command(command)!
	// Step 8: Store the new command state
	// Get the PID of the command we just started (this is approximate)
	time.sleep(100 * time.millisecond) // Give command time to start
	p.store_command_state(command, 'running', p.pid)!
	console.print_debug('Successfully executed declarative command for pane ${p.id}')
}
pub fn (mut p Pane) kill_running_command() ! {
stored_state := p.get_command_state() or { return }
if stored_state.pid > 0 && osal.process_exists(stored_state.pid) {
// Kill the process and its children
osal.process_kill_recursive(pid: stored_state.pid)!
console.print_debug('Killed running command (PID: ${stored_state.pid}) in pane ${p.id}')
}
// Also try to kill any processes that might be running in the pane
p.kill_pane_process_group()!
// Update the command state to reflect that it's no longer running
p.update_command_status('killed')!
}
// Reset pane if it appears empty or needs cleanup
pub fn (mut p Pane) reset_if_needed() ! {
if p.is_pane_empty()! {
console.print_debug('Pane ${p.id} appears empty, sending reset')
p.send_reset()!
return
}
if !p.is_at_clean_prompt()! {
console.print_debug('Pane ${p.id} not at clean prompt, sending reset')
p.send_reset()!
}
}
// Check if pane is completely empty
pub fn (mut p Pane) is_pane_empty() !bool {
logs := p.logs_all() or { return true }
lines := logs.split_into_lines()
// Filter out empty lines
mut non_empty_lines := []string{}
for line in lines {
if line.trim_space().len > 0 {
non_empty_lines << line
}
}
return non_empty_lines.len == 0
}
// Check if pane is at a clean shell prompt
pub fn (mut p Pane) is_at_clean_prompt() !bool {
logs := p.logs_all() or { return false }
lines := logs.split_into_lines()
if lines.len == 0 {
return false
}
// Check last few lines for shell prompt indicators
check_lines := if lines.len > 5 { lines[lines.len - 5..] } else { lines }
for line in check_lines.reverse() {
line_clean := line.trim_space()
if line_clean.len == 0 {
continue
}
// Look for common shell prompt patterns
if line_clean.ends_with('$ ') || line_clean.ends_with('# ') || line_clean.ends_with('> ')
|| line_clean.ends_with('$') || line_clean.ends_with('#') || line_clean.ends_with('>') {
console.print_debug('Found clean prompt in pane ${p.id}: "${line_clean}"')
return true
}
// If we find a non-prompt line, we're not at a clean prompt
break
}
return false
}
// Send reset command to pane
pub fn (mut p Pane) send_reset() ! {
cmd := 'tmux send-keys -t ${p.window.session.name}:@${p.window.id}.%${p.id} "reset" Enter'
osal.execute_silent(cmd) or { return error('Cannot send reset to pane %${p.id}: ${err}') }
console.print_debug('Sent reset command to pane ${p.id}')
// Give reset time to complete
time.sleep(200 * time.millisecond)
}
// Verify that bash is the first process in this pane
pub fn (mut p Pane) verify_bash_parent() !bool {
if p.pid <= 0 {
return false
}
// Get process information for the pane's main process
proc_info := osal.processinfo_get(p.pid) or { return false }
// Check if the process command contains bash
if proc_info.cmd.contains('bash') || proc_info.cmd.contains('/bin/bash')
|| proc_info.cmd.contains('/usr/bin/bash') {
console.print_debug('Pane ${p.id} has bash as parent process (PID: ${p.pid})')
return true
}
console.print_debug('Pane ${p.id} does NOT have bash as parent process. Current: ${proc_info.cmd}')
return false
}
// Ensure bash is the first process in the pane
pub fn (mut p Pane) ensure_bash_parent() ! {
if p.verify_bash_parent()! {
return
}
console.print_debug('Ensuring bash is parent process for pane ${p.id}')
// Kill any existing processes in the pane
p.kill_pane_process_group()!
// Send a new bash command to establish bash as the parent
cmd := 'tmux send-keys -t ${p.window.session.name}:@${p.window.id}.%${p.id} "exec bash" Enter'
osal.execute_silent(cmd) or { return error('Cannot start bash in pane %${p.id}: ${err}') }
// Give bash time to start
time.sleep(500 * time.millisecond)
// Update pane information
p.window.scan()!
// Verify bash is now running
if !p.verify_bash_parent()! {
return error('Failed to establish bash as parent process in pane ${p.id}')
}
console.print_debug('Successfully established bash as parent process for pane ${p.id}')
}
// Get all child processes of this pane's main process
pub fn (mut p Pane) get_child_processes() ![]osal.ProcessInfo {
if p.pid <= 0 {
return []osal.ProcessInfo{}
}
children_map := osal.processinfo_children(p.pid)!
return children_map.processes
}
// Check if commands are running as children of bash
pub fn (mut p Pane) verify_command_hierarchy() !bool {
// First verify bash is the parent
if !p.verify_bash_parent()! {
return false
}
// Get child processes
children := p.get_child_processes()!
if children.len == 0 {
// No child processes, which is fine
return true
}
// Check if child processes have bash as their parent
for child in children {
if child.ppid != p.pid {
console.print_debug('Child process ${child.pid} (${child.cmd}) does not have pane process as parent')
return false
}
}
console.print_debug('Command hierarchy verified for pane ${p.id}: ${children.len} child processes')
return true
}
// Handle multi-line commands by creating a temporary script
fn (mut p Pane) send_multiline_command(command string) ! {
// Create temporary directory for tmux scripts
script_dir := '/tmp/tmux/${p.window.session.name}'
os.mkdir_all(script_dir) or { return error('Cannot create script directory: ${err}') }
// Create unique script file for this pane
script_path := '${script_dir}/pane_${p.id}_script.sh'
// Prepare script content with proper shebang and commands
script_content := '#!/bin/bash\n' + command.trim_space()
// Write script to file
os.write_file(script_path, script_content) or {
return error('Cannot write script file ${script_path}: ${err}')
}
// Make script executable
os.chmod(script_path, 0o755) or {
return error('Cannot make script executable ${script_path}: ${err}')
}
// Execute the script in the pane
cmd := 'tmux send-keys -t ${p.window.session.name}:@${p.window.id}.%${p.id} "${script_path}" Enter'
osal.execute_silent(cmd) or { return error('Cannot execute script in pane %${p.id}: ${err}') }
// Optional: Clean up script after a delay (commented out for debugging)
// spawn {
// time.sleep(5 * time.second)
// os.rm(script_path) or {}
// }
}
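A module-internal sketch of the multi-line path, with an illustrative command; given a pane p, the text below is written to /tmp/tmux/<session>/pane_<id>_script.sh and executed:
// sketch: multi-line commands run via a generated script, not send-keys line by line
p.send_multiline_command('echo start
sleep 1
echo done')!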
// Send raw keys to this pane (without Enter)
@@ -367,62 +638,23 @@ pub fn (mut p Pane) logging_enable(args PaneLoggingEnableArgs) ! {
			}
		}
	}
	// Use the simple and reliable tmux pipe-pane approach with tmux_logger binary
	// This is the proven approach that works perfectly

	// Determine the pane identifier for logging
	pane_log_id := 'pane${p.id}'

	// Set up tmux pipe-pane to send all output directly to tmux_logger
	pipe_cmd := 'tmux pipe-pane -t ${p.window.session.name}:@${p.window.id}.%${p.id} -o "${logger_binary} ${log_path} ${pane_log_id}"'

	console.print_debug('Starting real-time logging: ${pipe_cmd}')

	osal.exec(cmd: pipe_cmd, stdout: false, name: 'tmux_start_pipe_logging') or {
		return error("Can't start pipe logging for pane %${p.id}: ${err}")
	}

	// Wait a moment for the process to start
	time.sleep(500 * time.millisecond)

	// Update pane state
	p.log_enabled = true
@@ -442,14 +674,12 @@ pub fn (mut p Pane) logging_disable() ! {
	cmd := 'tmux pipe-pane -t ${p.window.session.name}:@${p.window.id}.%${p.id}'
	osal.exec(cmd: cmd, stdout: false, name: 'tmux_stop_logging', ignore_error: true) or {}

	// Kill the tmux_logger process for this pane
	pane_log_id := 'pane${p.id}'
	kill_cmd := 'pkill -f "tmux_logger.*${pane_log_id}"'
	osal.exec(cmd: kill_cmd, stdout: false, name: 'kill_tmux_logger', ignore_error: true) or {}

	// No temp files to clean up with the simple pipe approach

	// Update pane state
	p.log_enabled = false
@@ -466,3 +696,22 @@ pub fn (p Pane) logging_status() string {
	}
	return 'disabled'
}
pub fn (mut p Pane) clear() ! {
	// Interrupt the current process in the pane (Ctrl-C)
osal.exec(
cmd: 'tmux send-keys -t %${p.id} C-c'
stdout: false
name: 'tmux_pane_interrupt'
) or {}
// Reset pane by running a new bash
osal.exec(
cmd: "tmux send-keys -t %${p.id} '/bin/bash' Enter"
stdout: false
name: 'tmux_pane_reset_shell'
)!
// Update pane info
p.window.scan()!
}


@@ -1,86 +1,39 @@
module tmux

import freeflowuniverse.herolib.osal.core as osal
import rand

fn testsuite_begin() {
	mut tmux_instance := new()!
	if tmux_instance.is_running()! {
		tmux_instance.stop()!
	}
}

fn test_session_create() ! {
	// Create unique session names to avoid conflicts
	session_name1 := 'testsession_${rand.int()}'
	session_name2 := 'testsession2_${rand.int()}'

	mut tmux_instance := new()!
	tmux_instance.start()!

	// Create sessions using the proper API
	mut s := tmux_instance.session_create(name: session_name1)!
	mut s2 := tmux_instance.session_create(name: session_name2)!

	// Test that sessions were created successfully
	mut tmux_ls := osal.execute_silent('tmux ls') or { panic("can't exec: ${err}") }
	assert tmux_ls.contains(session_name1), 'Session 1 should exist'
	assert tmux_ls.contains(session_name2), 'Session 2 should exist'

	// Test session existence check
	assert tmux_instance.session_exist(session_name1), 'Session 1 should exist via API'
	assert tmux_instance.session_exist(session_name2), 'Session 2 should exist via API'

	// Clean up
	tmux_instance.session_delete(session_name1)!
	tmux_instance.session_delete(session_name2)!
	tmux_instance.stop()!
}

lib/osal/tmux/tmux_state.v (new file, 157 lines)

@@ -0,0 +1,157 @@
module tmux
import freeflowuniverse.herolib.osal.core as osal
import crypto.md5
import json
import time
import freeflowuniverse.herolib.ui.console
// Command state structure for Redis storage
pub struct CommandState {
pub mut:
cmd_md5 string // MD5 hash of the command
cmd_text string // Original command text
status string // running|finished|failed|unknown
pid int // Process ID of the command
started_at string // Timestamp when command started
last_check string // Last time status was checked
pane_id int // Pane ID for reference
}
// Generate Redis key for command state tracking
// Pattern: herotmux:${session}:${window}|${pane}
pub fn (p &Pane) get_state_key() string {
return 'herotmux:${p.window.session.name}:${p.window.name}|${p.id}'
}
// Generate MD5 hash for a command (normalized)
pub fn normalize_and_hash_command(cmd string) string {
// Normalize command: trim whitespace, normalize newlines
normalized := cmd.trim_space().replace('\r\n', '\n').replace('\r', '\n')
return md5.hexhash(normalized)
}
// Store command state in Redis
pub fn (mut p Pane) store_command_state(cmd string, status string, pid int) ! {
key := p.get_state_key()
cmd_hash := normalize_and_hash_command(cmd)
now := time.now().format_ss_milli()
state := CommandState{
cmd_md5: cmd_hash
cmd_text: cmd
status: status
pid: pid
started_at: now
last_check: now
pane_id: p.id
}
state_json := json.encode(state)
p.window.session.tmux.redis.set(key, state_json)!
console.print_debug('Stored command state for pane ${p.id}: ${cmd_hash[..8]}... status=${status}')
}
// Retrieve command state from Redis
pub fn (mut p Pane) get_command_state() ?CommandState {
key := p.get_state_key()
state_json := p.window.session.tmux.redis.get(key) or { return none }
if state_json.len == 0 {
return none
}
state := json.decode(CommandState, state_json) or {
console.print_debug('Failed to decode command state for pane ${p.id}: ${err}')
return none
}
return state
}
// Check if command has changed by comparing MD5 hashes
pub fn (mut p Pane) has_command_changed(new_cmd string) bool {
stored_state := p.get_command_state() or { return true }
new_hash := normalize_and_hash_command(new_cmd)
return stored_state.cmd_md5 != new_hash
}
// Update command status in Redis
pub fn (mut p Pane) update_command_status(status string) ! {
mut stored_state := p.get_command_state() or { return }
stored_state.status = status
stored_state.last_check = time.now().format_ss_milli()
key := p.get_state_key()
state_json := json.encode(stored_state)
p.window.session.tmux.redis.set(key, state_json)!
console.print_debug('Updated command status for pane ${p.id}: ${status}')
}
// Clear command state from Redis (when pane is reset or command is removed)
pub fn (mut p Pane) clear_command_state() ! {
key := p.get_state_key()
p.window.session.tmux.redis.del(key) or {
console.print_debug('Failed to clear command state for pane ${p.id}: ${err}')
}
console.print_debug('Cleared command state for pane ${p.id}')
}
// Check if stored command is currently running by verifying the PID
pub fn (mut p Pane) is_stored_command_running() bool {
stored_state := p.get_command_state() or { return false }
if stored_state.pid <= 0 {
return false
}
// Use osal to check if process exists
return osal.process_exists(stored_state.pid)
}
// Get all command states for a session (useful for debugging/monitoring)
pub fn (mut s Session) get_all_command_states() !map[string]CommandState {
mut states := map[string]CommandState{}
// Get all keys matching the session pattern
pattern := 'herotmux:${s.name}:*'
keys := s.tmux.redis.keys(pattern)!
for key in keys {
state_json := s.tmux.redis.get(key) or { continue }
if state_json.len == 0 {
continue
}
state := json.decode(CommandState, state_json) or {
console.print_debug('Failed to decode state for key ${key}: ${err}')
continue
}
states[key] = state
}
return states
}
// Clean up stale command states (for maintenance)
pub fn (mut s Session) cleanup_stale_command_states() ! {
states := s.get_all_command_states()!
for key, state in states {
// Check if the process is still running
if state.pid > 0 && !osal.process_exists(state.pid) {
// Process is dead, update status
mut updated_state := state
updated_state.status = 'finished'
updated_state.last_check = time.now().format_ss_milli()
state_json := json.encode(updated_state)
s.tmux.redis.set(key, state_json)!
console.print_debug('Updated stale command state ${key}: process ${state.pid} no longer exists')
}
}
}
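A possible end-to-end flow for this state tracking, given a Pane p and its Session s (assumes Redis is reachable through the tmux instance; the command text and pid are illustrative):
// sketch: record, refresh and clean command state for a pane
cmd := 'make build'
if p.has_command_changed(cmd) {
	p.store_command_state(cmd, 'running', 12345)! // 12345 = illustrative pid
}
if !p.is_stored_command_running() {
	p.update_command_status('finished')!
}
s.cleanup_stale_command_states()! // periodic maintenance per session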


@@ -29,8 +29,8 @@ fn test_start() ! {
	// test server is running after start()
	tmux.start() or { panic('cannot start tmux: ${err}') }
	mut tmux_ls := osal.execute_silent('tmux ls') or { panic('Cannot execute tmux ls: ${err}') }
	// test started tmux contains some session
	assert tmux_ls.len > 0, 'Tmux should have at least one session'
	tmux.stop() or { panic('cannot stop tmux: ${err}') }
}


@@ -406,3 +406,22 @@ pub fn (mut w Window) stop_ttyd(port int) ! {
	}
	println('ttyd stopped for window ${w.name} on port ${port} (if it was running)')
}
// Get a pane by its ID
pub fn (mut w Window) pane_get(id int) !&Pane {
w.scan()! // refresh info from tmux
for pane in w.panes {
if pane.id == id {
return pane
}
}
return error('Pane with id ${id} not found in window ${w.name}. Available panes: ${w.panes}')
}
// Create a new pane (just a split with default shell)
pub fn (mut w Window) pane_new() !&Pane {
return w.pane_split(
cmd: '/bin/bash'
horizontal: true
)
}
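A short sketch of the two helpers, assuming a session obtained from session_create:
// sketch: split a window, then re-resolve the pane by id after a rescan
mut w := session.window_new(name: 'demo')!
mut p := w.pane_new()! // horizontal split running /bin/bash
mut p2 := w.pane_get(p.id)! // pane_get rescans tmux before looking up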


@@ -1,65 +1,57 @@
module tmux

import rand
import time

// Simple tests for tmux functionality

// Test MD5 command hashing (doesn't require tmux)
fn test_md5_hashing() ! {
	// Test basic hashing
	cmd1 := 'echo "test"'
	cmd2 := 'echo "test"'
	cmd3 := 'echo "different"'

	hash1 := normalize_and_hash_command(cmd1)
	hash2 := normalize_and_hash_command(cmd2)
	hash3 := normalize_and_hash_command(cmd3)

	assert hash1 == hash2, 'Same commands should have same hash'
	assert hash1 != hash3, 'Different commands should have different hashes'

	// Test normalization
	cmd_with_spaces := '  echo "test"  '
	cmd_with_newlines := 'echo "test"\n'

	hash_spaces := normalize_and_hash_command(cmd_with_spaces)
	hash_newlines := normalize_and_hash_command(cmd_with_newlines)

	assert hash1 == hash_spaces, 'Commands with extra spaces should normalize to same hash'
	assert hash1 == hash_newlines, 'Commands with newlines should normalize to same hash'
}

// Test basic tmux functionality
fn test_tmux_basic() ! {
	// Create unique session name to avoid conflicts
	session_name := 'test_${rand.int()}'

	mut tmux_instance := new()!

	// Ensure tmux is running
	if !tmux_instance.is_running()! {
		tmux_instance.start()!
	}

	// Create session
	mut session := tmux_instance.session_create(name: session_name)!
	// Note: session name gets normalized by name_fix, so we check if it contains our unique part
	assert session.name.contains('test_'), 'Session name should contain test_ prefix'

	// Test window creation
	mut window := session.window_new(name: 'testwin')!
	assert window.name == 'testwin'
	assert session.window_exist(name: 'testwin')

	// Clean up - just stop tmux to clean everything
	tmux_instance.stop()!
}

lib/osal/ubuntu/mirrors.v (new file, 180 lines)

@@ -0,0 +1,180 @@
module ubuntu
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.texttools
import net.http
import os
import time
import net.urllib
import net
import sync
pub struct PerfResult {
pub mut:
url string
ping_ms int
speed f64
error string
}
// Fetch Ubuntu mirror list
fn fetch_mirrors() ![]string {
cmd := 'curl -s https://launchpad.net/ubuntu/+archivemirrors | grep -oP \'http[s]?://[^"]+\' | sort -u'
job := osal.exec(cmd: cmd)!
if job.exit_code != 0 {
return error('Failed to fetch mirror list: ${job.output}')
}
mut mirrors := texttools.remove_empty_lines(job.output).split_into_lines()
mirrors = mirrors.filter(it.contains('answers.launchpad.net') == false) // remove launchpad answers
return mirrors
}
// Test download speed (download a small file)
fn test_download_speed(mirror string) f64 {
	test_file := '${mirror}/dists/plucky/Release' // small file usually available, +-258KB
	start := time.now()
	resp := http.get(test_file) or { return -1.0 }
	if resp.status_code != 200 {
		return -1.0
	}
	elapsed := time.since(start).milliseconds()
	if elapsed == 0 {
		return -1.0
	}
	size_kb := f64(resp.body.len) / 1024.0
	return size_kb / (f64(elapsed) / 1000.0) // KB/sec (elapsed is in ms)
}
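For intuition: the plucky Release file is roughly 258 KB, so a fetch that takes 500 ms yields 258 / 0.5 ≈ 516 KB/s; dividing by the raw millisecond count instead would have under-reported this as ≈ 0.516.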
// Ping test (rough ICMP substitute using TCP connect on port 80); pushes a PerfResult with the ping in ms onto the channel
fn test_ping(mirror string, mut wg sync.WaitGroup, ch chan PerfResult) ! {
defer { wg.done() }
u := urllib.parse(mirror) or {
ch <- PerfResult{
url: mirror
ping_ms: -1
speed: 0.0
}
return
}
host := u.host
	mut err_msg := ''
	result := osal.http_ping(address: host, port: 80, timeout: 5000) or {
		err_msg = err.msg()
		0
	}
	if result > 0 {
		ch <- PerfResult{
			url:     mirror
			ping_ms: result
			speed:   0.0
		}
	} else {
		ch <- PerfResult{
			url:   mirror
			error: err_msg
}
}
}
// pub fn fix_mirrors() ! {
// println('Fetching Ubuntu mirrors...')
// mirrors := fetch_mirrors() or {
// print_backtrace()
// eprintln(err)
// return
// }
// // mut results := []PerfResult{}
// // mut c := 0
// // for m in mirrors {
// // c++
// // ping := test_ping(m)
// // println('Ping: ${ping} ms - ${mirrors.len} - ${c} ${m}')
// // $dbg;
// // }
// // for m in mirrors {
// // println('Speed: ${test_download_speed(m)} KB/s - ${m}')
// // $dbg;
// // speed := test_download_speed(m)
// // if speed > 0 {
// // ping := 0
// // results << PerfResult{
// // url: m
// // ping_ms: ping
// // speed: speed
// // }
// // println('✅ ${m} | ping: ${ping} ms | speed: ${speed:.2f} KB/s')
// // } else {
// // println('❌ ${m} skipped (unreachable or slow)')
// // }
// // $dbg;
// // }
// // println('\n🏆 Best mirrors:')
// // results.sort_with_compare(fn (a &PerfResult, b &PerfResult) int {
// // // Rank primarily by speed, secondarily by ping
// // if a.speed > b.speed {
// // return -1
// // } else if a.speed < b.speed {
// // return 1
// // } else {
// // return a.ping_ms - b.ping_ms
// // }
// // })
// // for r in results[..results.len.min(10)] {
// // println('${r.url} | ${r.ping_ms} ms | ${r.speed:.2f} KB/s')
// // }
// // println(results)
// // $dbg;
// }
pub fn fix_mirrors() ! {
	mut mirrors := ['http://ftp.mirror.tw/pub/ubuntu/ubuntu/']
	// mirrors := fetch_mirrors() or {
	// 	print_backtrace()
	// 	eprintln(err)
	// 	return
	// }

	// Create wait group sized to the number of mirrors we actually ping
	mut wg := sync.new_waitgroup()
	wg.add(mirrors.len)
	ch := chan PerfResult{cap: 1000}

	mut c := 0
	mut results := []PerfResult{}
	for m in mirrors {
		c++
		println('Start background ping - ${mirrors.len} - ${c} ${m} - Queue len: ${ch.len} / ${ch.cap}')
		for ch.len > ch.cap - 2 { // if the queue is full, wait (re-check the length on every pass)
			println('Queue full, wait till some are done')
			time.sleep(1 * time.second)
		}
		spawn test_ping(m, mut wg, ch)
	}

	// Close the channel once every ping reported, so the receive loop below can terminate
	spawn fn (mut wg sync.WaitGroup, ch chan PerfResult) {
		wg.wait()
		ch.close()
	}(mut wg, ch)

	for {
		value := <-ch or { // receive/pop values from the channel; fails once it is closed
			println('Channel closed')
			break
		}
		println('Received: ${value}')
		results << value
	}
	println('All pings done: ${results.len} results')
}


@@ -94,6 +94,7 @@ pub fn (mut c Client) send[T, D](request RequestGeneric[T], params SendParams) !
	myerror := response.error_ or {
		return error('Failed to get error from response:\nRequest: ${request.encode()}\nResponse: ${response_json}\n${err}')
	}
	// print_backtrace()
	mut myreq := request.encode()
	if c.transport is UnixSocketTransport {


@@ -78,11 +78,10 @@ pub fn (mut t UnixSocketTransport) send(request string, params SendParams) !stri
		// Append the newly read data to the total response
		res_total << res[..n]
		// here we need to check we are at end
		if res.bytestr().contains('\n') {
			break
		}
	}
	unix.shutdown(socket.sock.handle)
	socket.close() or {}


@@ -6,7 +6,7 @@ import freeflowuniverse.herolib.schemas.jsonschema { Reference, decode_schemaref
pub fn decode_json_any(data string) !Any {
	// mut o:=decode(data)!
	return json2.decode[Any](data)!
}

pub fn decode_json_string(data string) !string {
@@ -14,8 +14,6 @@ pub fn decode_json_string(data string) !string {
	return json.encode(o)
}

pub fn decode(data string) !OpenRPC {
	// mut object := json.decode[OpenRPC](data) or { return error('Failed to decode json\n=======\n${data}\n===========\n${err}') }
	mut object := json.decode(OpenRPC, data) or {


@@ -3,117 +3,115 @@ module openrpcserver
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.core.redisclient // needed by comment2id below
import crypto.md5 // needed by comment2id below

@[heap]
pub struct Comment {
pub mut:
	id         u32
	comment    string
	parent     u32 // id of parent comment if any, 0 means none
	updated_at i64
	author     u32 // links to user
}

pub fn (self Comment) type_name() string {
	return 'comments'
}

pub fn (self Comment) load(data []u8) !Comment {
	return comment_load(data)!
}

pub fn (self Comment) dump() ![]u8 {
	// Create a new encoder
	mut e := encoder.new()
	e.add_u8(1)
	e.add_u32(self.id)
	e.add_string(self.comment)
	e.add_u32(self.parent)
	e.add_i64(self.updated_at)
	e.add_u32(self.author)
	return e.data
}

pub fn comment_load(data []u8) !Comment {
	// Create a new decoder
	mut e := encoder.decoder_new(data)
	version := e.get_u8()!
	if version != 1 {
		panic('wrong version in comment load')
	}
	mut comment := Comment{}
	comment.id = e.get_u32()!
	comment.comment = e.get_string()!
	comment.parent = e.get_u32()!
	comment.updated_at = e.get_i64()!
	comment.author = e.get_u32()!
	return comment
}

pub struct CommentArg {
pub mut:
	comment string
	parent  u32
	author  u32
}

// map comment texts to their deduplicated ids (text-level store, see comment2id)
pub fn comments2ids(args []CommentArg) ![]u32 {
	return args.map(comment2id(it.comment)!)
}

pub fn comment2id(comment string) !u32 {
	comment_fixed := comment.to_lower_ascii().trim_space()
	mut redis := redisclient.core_get()!
	return if comment_fixed.len > 0 {
		hash := md5.hexhash(comment_fixed)
		comment_found := redis.hget('db:comments', hash)!
		if comment_found == '' {
			id := u32(redis.incr('db:comments:id')!)
			redis.hset('db:comments', hash, id.str())!
			redis.hset('db:comments', id.str(), comment_fixed)!
			id
		} else {
			comment_found.u32()
		}
	} else {
		0
	}
}

// get new comment, not from the DB
pub fn comment_new(args CommentArg) !Comment {
	mut o := Comment{
		comment:    args.comment
		parent:     args.parent
		updated_at: ourtime.now().unix()
		author:     args.author
	}
	return o
}

pub fn comment_multiset(args []CommentArg) ![]u32 {
	mut ids := []u32{}
	for comment in args {
		ids << comment_set(comment)!
	}
	return ids
}

pub fn comment_set(args CommentArg) !u32 {
	mut o := comment_new(args)!
	// Use openrpcserver set function which now returns the ID
	return set[Comment](mut o)!
}

pub fn comment_exist(id u32) !bool {
	return exists[Comment](id)!
}

pub fn comment_get(id u32) !Comment {
	return get[Comment](id)!
}
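A minimal roundtrip sketch for this API (assumes a Redis reachable via redisclient.core_get; the comment text and author id are illustrative):
// sketch: persist a comment and read it back by id
id := comment_set(CommentArg{ comment: 'looks good', author: 7 })!
c := comment_get(id)!
assert c.comment == 'looks good'
found := comment_exist(id)!
assert found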


@@ -3,55 +3,57 @@ module openrpcserver
import freeflowuniverse.herolib.core.redisclient

pub fn set[T](mut obj T) !u32 {
	name := T{}.type_name()
	mut redis := redisclient.core_get()!

	// Generate ID if not set
	if obj.id == 0 {
		myid := redis.incr('db:${name}:id')!
		obj.id = u32(myid)
	}

	data := obj.dump()!
	redis.hset('db:${name}', obj.id.str(), data.bytestr())!
	return obj.id
}

pub fn get[T](id u32) !T {
	name := T{}.type_name()
	mut redis := redisclient.core_get()!
	data := redis.hget('db:${name}', id.str())!
	if data.len > 0 {
		return T{}.load(data.bytes())!
	} else {
		return error("Can't find ${name} with id: ${id}")
	}
}

pub fn exists[T](id u32) !bool {
	name := T{}.type_name()
	mut redis := redisclient.core_get()!
	return redis.hexists('db:${name}', id.str())!
}

pub fn delete[T](id u32) ! {
	name := T{}.type_name()
	mut redis := redisclient.core_get()!
	redis.hdel('db:${name}', id.str())!
}

pub fn list[T]() ![]T {
	name := T{}.type_name()
	mut redis := redisclient.core_get()!
	all_data := redis.hgetall('db:${name}')!
	mut result := []T{}
	for _, data in all_data {
		result << T{}.load(data.bytes())!
	}
	return result
}

// make it easy to get a base object
pub fn new_from_base[T](args BaseArgs) !T {
	return T{
		Base: new_base(args)!
	}
}
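A sketch of the generic store in use with Comment as T (assumes Redis is up); every type lives under its own db:<type_name> hash:
// sketch: generic CRUD over Redis hashes
mut c := comment_new(CommentArg{ comment: 'hello' })!
id := set[Comment](mut c)! // allocates db:comments:id because c.id == 0
loaded := get[Comment](id)!
assert loaded.comment == 'hello'
println(list[Comment]()!.len) // every stored comment, decoded via Comment.load
delete[Comment](id)!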


@@ -1,7 +1,6 @@
module openrpcserver

import crypto.md5
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.data.ourtime
@@ -9,85 +8,83 @@ import freeflowuniverse.herolib.data.ourtime
@[heap]
pub struct Base {
pub mut:
	id             u32
	name           string
	description    string
	created_at     i64
	updated_at     i64
	securitypolicy u32
	tags           u32 // set/get always works with []string, which gets sorted and md5ed; that hash gives the unique id of the tags
	comments       []u32
}

@[heap]
pub struct SecurityPolicy {
pub mut:
	id     u32
	read   []u32 // links to users & groups
	write  []u32 // links to users & groups
	delete []u32 // links to users & groups
	public bool
	md5    string // sort the read/write/delete u32's plus public, then md5; this maps any read/write/delete/public config to one hash
}

@[heap]
pub struct Tags {
pub mut:
	id    u32
	names []string // unique per id
	md5   string // md5 of the sorted names (each lowercased, ascii), to make the unique id easy to find
}

/////////////////

@[params]
pub struct BaseArgs {
pub mut:
	id             ?u32
	name           string
	description    string
	securitypolicy ?u32
	tags           []string
	comments       []CommentArg
}

// make it easy to get a base object
pub fn new_base(args BaseArgs) !Base {
	commentids := comment_multiset(args.comments)!
	tags := tags2id(args.tags)!
	return Base{
		id:             args.id or { 0 }
		name:           args.name
		description:    args.description
		created_at:     ourtime.now().unix()
		updated_at:     ourtime.now().unix()
		securitypolicy: args.securitypolicy or { 0 }
		tags:           tags
		comments:       commentids
	}
}

pub fn tags2id(tags []string) !u32 {
	mut redis := redisclient.core_get()!
	return if tags.len > 0 {
		mut tags_fixed := tags.map(it.to_lower_ascii().trim_space()).filter(it != '')
		tags_fixed.sort_ignore_case()
		hash := md5.hexhash(tags_fixed.join(','))
		tags_found := redis.hget('db:tags', hash)!
		if tags_found == '' {
			id := u32(redis.incr('db:tags:id')!)
			redis.hset('db:tags', hash, id.str())!
			redis.hset('db:tags', id.str(), tags_fixed.join(','))!
			id
		} else {
			tags_found.u32()
		}
	} else {
		0
	}
}
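Since the names are lowercased, trimmed, sorted and md5-hashed before lookup, reordered or re-cased tag lists resolve to the same id:
// sketch: tag ids are order- and case-insensitive
id1 := tags2id(['Web', 'backend'])!
id2 := tags2id(['backend ', 'web'])! // normalizes to the same sorted list
assert id1 == id2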

Some files were not shown because too many files have changed in this diff.