Co-authored-by: Omdanii <mahmmoud.hassanein@gmail.com>
This commit is contained in:
2025-09-07 15:15:41 +04:00
parent 53552b03c2
commit cb125e8114
26 changed files with 1135 additions and 365 deletions

26
lib/virt/crun/crun_test.v Normal file
View File

@@ -0,0 +1,26 @@
module crun
// Verifies that new() registers a config under the given name and that the
// spec is seeded with the default OCI version from create_default_spec().
fn test_factory_creation() {
	config := new(name: 'test')!
	assert config.name == 'test'
	assert config.spec.version == '1.0.0'
}
// Verifies that to_json() emits the default ociVersion and platform OS
// fields in the generated OCI spec JSON.
fn test_json_generation() {
	mut config := new(name: 'test')!
	json_str := config.to_json()!
	assert json_str.contains('"ociVersion": "1.0.0"')
	assert json_str.contains('"os": "linux"')
}
// Verifies the chainable setters mutate the underlying spec:
// set_command -> process.args, set_working_dir -> process.cwd,
// set_hostname -> spec.hostname.
fn test_configuration_methods() {
	mut config := new(name: 'test')!
	config.set_command(['/bin/echo', 'hello'])
		.set_working_dir('/tmp')
		.set_hostname('test-host')
	assert config.spec.process.args == ['/bin/echo', 'hello']
	assert config.spec.process.cwd == '/tmp'
	assert config.spec.hostname == 'test-host'
}

44
lib/virt/crun/example.v Normal file
View File

@@ -0,0 +1,44 @@
module crun
import freeflowuniverse.herolib.core.pathlib
// Demonstrates the full fluent configuration API: process, user, env,
// rootfs, hostname, resource limits, mounts and capabilities, then
// prints the generated OCI JSON and saves it to /tmp/config.json.
pub fn example_factory() ! {
	// Create a new container configuration
	mut config := new(name: 'mycontainer')!
	// Configure the container (every setter returns &CrunConfig for chaining)
	config.set_command(['/bin/bash', '-c', 'echo "Hello from container"'])
		.set_working_dir('/app')
		.set_user(1000, 1000, [1001, 1002])
		.add_env('MY_VAR', 'my_value')
		.add_env('ANOTHER_VAR', 'another_value')
		.set_rootfs('/path/to/rootfs', false)
		.set_hostname('my-container')
		.set_memory_limit(1024 * 1024 * 1024) // 1GB
		.set_cpu_limits(100000, 50000, 1024) // period, quota, shares
		// add_mount(destination, source, ...): destination is the path INSIDE
		// the container — the original example had the two paths swapped.
		.add_mount('/container/path', '/host/path', .bind, [.rw])
		.add_capability(.cap_sys_admin)
		.remove_capability(.cap_net_raw)
	// Generate and print JSON
	json_output := config.to_json()!
	println(json_output)
	// Save to file
	config.save_to_file('/tmp/config.json')!
	println('Configuration saved to /tmp/config.json')
}
// Minimal example: a shell container with only command, rootfs and
// hostname configured; prints the resulting OCI JSON.
pub fn example_simple() ! {
	// Simple container for running a shell
	mut config := new(name: 'shell')!
	config.set_command(['/bin/sh'])
		.set_rootfs('/path/to/alpine/rootfs', false)
		.set_hostname('alpine-shell')
	// Get the JSON
	json_str := config.to_json()!
	println('Simple container config:')
	println(json_str)
}

203
lib/virt/crun/factory.v Normal file
View File

@@ -0,0 +1,203 @@
module crun
import freeflowuniverse.herolib.core.texttools
// Global registry of named configurations, populated by new() and read by get().
__global (
	crun_configs map[string]&CrunConfig
)

// Arguments for new()/get(); the name is normalized via texttools.name_fix.
@[params]
pub struct FactoryArgs {
pub mut:
	name string = "default"
}

// A named OCI container configuration wrapping a runtime-spec Spec.
pub struct CrunConfig {
pub mut:
	name string
	spec Spec
}
// Process configuration
// Sets the container entrypoint argv; returns the config for chaining.
pub fn (mut config CrunConfig) set_command(args []string) &CrunConfig {
	config.spec.process.args = args
	return config
}
// Sets the working directory of the container process; chainable.
pub fn (mut config CrunConfig) set_working_dir(cwd string) &CrunConfig {
	config.spec.process.cwd = cwd
	return config
}
// Sets the uid/gid (plus supplementary group ids) the process runs as; chainable.
pub fn (mut config CrunConfig) set_user(uid u32, gid u32, additional_gids []u32) &CrunConfig {
	config.spec.process.user = User{
		uid: uid
		gid: gid
		additional_gids: additional_gids
	}
	return config
}
// Appends a KEY=value pair to the process environment; chainable.
// NOTE(review): duplicate keys are not deduplicated — later entries are
// simply appended.
pub fn (mut config CrunConfig) add_env(key string, value string) &CrunConfig {
	config.spec.process.env << '${key}=${value}'
	return config
}
// Root filesystem configuration
// Sets the rootfs path and whether it is mounted read-only; chainable.
pub fn (mut config CrunConfig) set_rootfs(path string, readonly bool) &CrunConfig {
	config.spec.root = Root{
		path: path
		readonly: readonly
	}
	return config
}
// Hostname
// Sets the container hostname (requires a UTS namespace to take effect); chainable.
pub fn (mut config CrunConfig) set_hostname(hostname string) &CrunConfig {
	config.spec.hostname = hostname
	return config
}
// Resource limits
// Sets the memory limit in bytes (cgroup memory.limit); chainable.
pub fn (mut config CrunConfig) set_memory_limit(limit_bytes u64) &CrunConfig {
	config.spec.linux.resources.memory_limit = limit_bytes
	return config
}
// Sets CFS cpu period/quota and cpu shares (cgroup cpu controller); chainable.
pub fn (mut config CrunConfig) set_cpu_limits(period u64, quota i64, shares u64) &CrunConfig {
	config.spec.linux.resources.cpu_period = period
	config.spec.linux.resources.cpu_quota = quota
	config.spec.linux.resources.cpu_shares = shares
	return config
}
// Add mount
// Appends a mount entry; `destination` is the path inside the container,
// `source` the path/device on the host. Chainable.
pub fn (mut config CrunConfig) add_mount(destination string, source string, typ MountType, options []MountOption) &CrunConfig {
	config.spec.mounts << Mount{
		destination: destination
		typ: typ
		source: source
		options: options
	}
	return config
}
// Add capability
// Adds `cap` (if not already present) to the bounding, effective and
// permitted sets; the inheritable and ambient sets are left untouched.
// Chainable.
pub fn (mut config CrunConfig) add_capability(cap Capability) &CrunConfig {
	if cap !in config.spec.process.capabilities.bounding {
		config.spec.process.capabilities.bounding << cap
	}
	if cap !in config.spec.process.capabilities.effective {
		config.spec.process.capabilities.effective << cap
	}
	if cap !in config.spec.process.capabilities.permitted {
		config.spec.process.capabilities.permitted << cap
	}
	return config
}
// Remove capability
// Removes `cap` from the bounding, effective and permitted sets
// (mirroring add_capability; inheritable/ambient are not touched).
// Chainable.
// Fix: the original had a stray extra `}` after this function, which
// made the file unparseable.
pub fn (mut config CrunConfig) remove_capability(cap Capability) &CrunConfig {
	config.spec.process.capabilities.bounding = config.spec.process.capabilities.bounding.filter(it != cap)
	config.spec.process.capabilities.effective = config.spec.process.capabilities.effective.filter(it != cap)
	config.spec.process.capabilities.permitted = config.spec.process.capabilities.permitted.filter(it != cap)
	return config
}
// Creates a config with default OCI spec settings, registers it in the
// global crun_configs map under the normalized name and returns it.
// An existing config with the same name is silently replaced.
pub fn new(args FactoryArgs) !&CrunConfig {
	name := texttools.name_fix(args.name)
	// Create default spec
	default_spec := create_default_spec()
	mut config := &CrunConfig{
		name: name
		spec: default_spec
	}
	crun_configs[name] = config
	return config
}
// Returns a previously created config by normalized name, or an error
// when no config with that name exists.
pub fn get(args FactoryArgs) !&CrunConfig {
	name := texttools.name_fix(args.name)
	return crun_configs[name] or {
		return error('crun config with name "${name}" does not exist')
	}
}
// Builds the baseline OCI Spec used for every new config: /bin/sh as the
// process, root user, a conservative default capability set, standard PATH,
// default proc/dev/sys mounts and fresh pid/network/ipc/uts/mount
// namespaces. Callers customize it via the chainable setters.
fn create_default_spec() Spec {
	return Spec{
		version: '1.0.0'
		platform: Platform{
			os: .linux
			arch: .amd64
		}
		process: Process{
			terminal: true
			user: User{
				uid: 0
				gid: 0
				additional_gids: []
			}
			args: ['/bin/sh']
			env: ['PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin']
			cwd: '/'
			// Same default capability list in bounding/effective/permitted;
			// inheritable and ambient start empty.
			capabilities: Capabilities{
				bounding: [.cap_chown, .cap_dac_override, .cap_fsetid, .cap_fowner, .cap_mknod, .cap_net_raw, .cap_setgid, .cap_setuid, .cap_setfcap, .cap_setpcap, .cap_net_bind_service, .cap_sys_chroot, .cap_kill, .cap_audit_write]
				effective: [.cap_chown, .cap_dac_override, .cap_fsetid, .cap_fowner, .cap_mknod, .cap_net_raw, .cap_setgid, .cap_setuid, .cap_setfcap, .cap_setpcap, .cap_net_bind_service, .cap_sys_chroot, .cap_kill, .cap_audit_write]
				inheritable: []
				permitted: [.cap_chown, .cap_dac_override, .cap_fsetid, .cap_fowner, .cap_mknod, .cap_net_raw, .cap_setgid, .cap_setuid, .cap_setfcap, .cap_setpcap, .cap_net_bind_service, .cap_sys_chroot, .cap_kill, .cap_audit_write]
				ambient: []
			}
			rlimits: []
		}
		root: Root{
			path: 'rootfs'
			readonly: false
		}
		hostname: 'container'
		mounts: create_default_mounts()
		linux: Linux{
			namespaces: create_default_namespaces()
			resources: LinuxResource{}
			devices: []
		}
		hooks: Hooks{}
	}
}
// Builds the default set of Linux namespaces (pid, network, ipc, uts,
// mount). Each entry has an empty path, so the runtime creates a new
// namespace instead of joining an existing one.
fn create_default_namespaces() []LinuxNamespace {
	mut namespaces := []LinuxNamespace{}
	for kind in ['pid', 'network', 'ipc', 'uts', 'mount'] {
		namespaces << LinuxNamespace{
			typ: kind
			path: ''
		}
	}
	return namespaces
}
// Builds the standard mounts every container gets: /proc (procfs),
// /dev (tmpfs) and a read-only /sys (sysfs).
fn create_default_mounts() []Mount {
	return [
		Mount{
			destination: '/proc'
			typ: .proc
			source: 'proc'
			options: [.nosuid, .noexec, .nodev]
		},
		Mount{
			destination: '/dev'
			typ: .tmpfs
			source: 'tmpfs'
			options: [.nosuid]
		},
		Mount{
			destination: '/sys'
			typ: .sysfs
			source: 'sysfs'
			// read-only: containers should not modify host sysfs
			options: [.nosuid, .noexec, .nodev, .ro]
		},
	]
}

View File

@@ -1,4 +1,4 @@
module runc
module crun
struct LinuxNamespace {
typ string

4
lib/virt/crun/readme.md Normal file
View File

@@ -0,0 +1,4 @@
Implements the OCI runtime specification; see https://github.com/opencontainers/runtime-spec for the full config.json schema.

280
lib/virt/crun/tojson.v Normal file
View File

@@ -0,0 +1,280 @@
module crun
import json
import freeflowuniverse.herolib.core.pathlib
// Convert enum values to their string representations
// Maps OSType to the lowercase OS name used in the OCI spec JSON.
fn (os OSType) to_json_string() string {
	return match os {
		.linux { 'linux' }
		.windows { 'windows' }
		.darwin { 'darwin' }
		.solaris { 'solaris' }
	}
}
// Maps ArchType to the GOARCH-style architecture string used in the spec JSON.
fn (arch ArchType) to_json_string() string {
	return match arch {
		.amd64 { 'amd64' }
		.arm64 { 'arm64' }
		.arm { 'arm' }
		.ppc64 { 'ppc64' }
		.s390x { 's390x' }
	}
}
// Maps MountType to the filesystem type string used in the mounts section.
fn (mount_type MountType) to_json_string() string {
	return match mount_type {
		.bind { 'bind' }
		.tmpfs { 'tmpfs' }
		.nfs { 'nfs' }
		.overlay { 'overlay' }
		.devpts { 'devpts' }
		.proc { 'proc' }
		.sysfs { 'sysfs' }
	}
}
// Maps MountOption to the mount option string used in the mounts section.
fn (option MountOption) to_json_string() string {
	return match option {
		.rw { 'rw' }
		.ro { 'ro' }
		.noexec { 'noexec' }
		.nosuid { 'nosuid' }
		.nodev { 'nodev' }
		.rbind { 'rbind' }
		.relatime { 'relatime' }
	}
}
// Maps Capability to the upper-case CAP_* name expected by the OCI spec.
fn (cap Capability) to_json_string() string {
	return match cap {
		.cap_chown { 'CAP_CHOWN' }
		.cap_dac_override { 'CAP_DAC_OVERRIDE' }
		.cap_dac_read_search { 'CAP_DAC_READ_SEARCH' }
		.cap_fowner { 'CAP_FOWNER' }
		.cap_fsetid { 'CAP_FSETID' }
		.cap_kill { 'CAP_KILL' }
		.cap_setgid { 'CAP_SETGID' }
		.cap_setuid { 'CAP_SETUID' }
		.cap_setpcap { 'CAP_SETPCAP' }
		.cap_linux_immutable { 'CAP_LINUX_IMMUTABLE' }
		.cap_net_bind_service { 'CAP_NET_BIND_SERVICE' }
		.cap_net_broadcast { 'CAP_NET_BROADCAST' }
		.cap_net_admin { 'CAP_NET_ADMIN' }
		.cap_net_raw { 'CAP_NET_RAW' }
		.cap_ipc_lock { 'CAP_IPC_LOCK' }
		.cap_ipc_owner { 'CAP_IPC_OWNER' }
		.cap_sys_module { 'CAP_SYS_MODULE' }
		.cap_sys_rawio { 'CAP_SYS_RAWIO' }
		.cap_sys_chroot { 'CAP_SYS_CHROOT' }
		.cap_sys_ptrace { 'CAP_SYS_PTRACE' }
		.cap_sys_pacct { 'CAP_SYS_PACCT' }
		.cap_sys_admin { 'CAP_SYS_ADMIN' }
		.cap_sys_boot { 'CAP_SYS_BOOT' }
		.cap_sys_nice { 'CAP_SYS_NICE' }
		.cap_sys_resource { 'CAP_SYS_RESOURCE' }
		.cap_sys_time { 'CAP_SYS_TIME' }
		.cap_sys_tty_config { 'CAP_SYS_TTY_CONFIG' }
		.cap_mknod { 'CAP_MKNOD' }
		.cap_lease { 'CAP_LEASE' }
		.cap_audit_write { 'CAP_AUDIT_WRITE' }
		.cap_audit_control { 'CAP_AUDIT_CONTROL' }
		.cap_setfcap { 'CAP_SETFCAP' }
		.cap_mac_override { 'CAP_MAC_OVERRIDE' }
		.cap_mac_admin { 'CAP_MAC_ADMIN' }
		.cap_syslog { 'CAP_SYSLOG' }
		.cap_wake_alarm { 'CAP_WAKE_ALARM' }
		.cap_block_suspend { 'CAP_BLOCK_SUSPEND' }
		.cap_audit_read { 'CAP_AUDIT_READ' }
	}
}
// Maps RlimitType to the upper-case RLIMIT_* name expected by the OCI spec.
fn (rlimit RlimitType) to_json_string() string {
	return match rlimit {
		.rlimit_cpu { 'RLIMIT_CPU' }
		.rlimit_fsize { 'RLIMIT_FSIZE' }
		.rlimit_data { 'RLIMIT_DATA' }
		.rlimit_stack { 'RLIMIT_STACK' }
		.rlimit_core { 'RLIMIT_CORE' }
		.rlimit_rss { 'RLIMIT_RSS' }
		.rlimit_nproc { 'RLIMIT_NPROC' }
		.rlimit_nofile { 'RLIMIT_NOFILE' }
		.rlimit_memlock { 'RLIMIT_MEMLOCK' }
		.rlimit_as { 'RLIMIT_AS' }
		.rlimit_lock { 'RLIMIT_LOCK' }
		.rlimit_sigpending { 'RLIMIT_SIGPENDING' }
		.rlimit_msgqueue { 'RLIMIT_MSGQUEUE' }
		.rlimit_nice { 'RLIMIT_NICE' }
		.rlimit_rtprio { 'RLIMIT_RTPRIO' }
		.rlimit_rttime { 'RLIMIT_RTTIME' }
	}
}
// Main method to generate complete OCI spec JSON
// Serializes the configured Spec into an OCI runtime-spec style JSON
// document. Optional sections (capabilities, rlimits, hostname, mounts,
// namespaces, resources) are emitted only when non-empty / non-zero.
// Fix: every map/array that is populated after declaration, and the
// reassigned has_resources flag, must be declared `mut` in V.
// NOTE(review): in current V, `json.Any` and `encode_pretty` live in the
// `x.json2` module rather than `json` — confirm which module the
// top-of-file `import json` resolves to.
pub fn (config CrunConfig) to_json() !string {
	mut spec_map := map[string]json.Any{}
	// Basic spec fields
	spec_map['ociVersion'] = config.spec.version
	// Platform
	spec_map['platform'] = map[string]json.Any{
		'os': config.spec.platform.os.to_json_string()
		'arch': config.spec.platform.arch.to_json_string()
	}
	// Process
	mut process_map := map[string]json.Any{}
	process_map['terminal'] = config.spec.process.terminal
	process_map['user'] = map[string]json.Any{
		'uid': int(config.spec.process.user.uid)
		'gid': int(config.spec.process.user.gid)
		'additionalGids': config.spec.process.user.additional_gids.map(int(it))
	}
	process_map['args'] = config.spec.process.args
	process_map['env'] = config.spec.process.env
	process_map['cwd'] = config.spec.process.cwd
	// Capabilities: only emitted when at least one set is non-empty
	if config.spec.process.capabilities.bounding.len > 0 ||
		config.spec.process.capabilities.effective.len > 0 ||
		config.spec.process.capabilities.inheritable.len > 0 ||
		config.spec.process.capabilities.permitted.len > 0 ||
		config.spec.process.capabilities.ambient.len > 0 {
		mut capabilities_map := map[string]json.Any{}
		if config.spec.process.capabilities.bounding.len > 0 {
			capabilities_map['bounding'] = config.spec.process.capabilities.bounding.map(it.to_json_string())
		}
		if config.spec.process.capabilities.effective.len > 0 {
			capabilities_map['effective'] = config.spec.process.capabilities.effective.map(it.to_json_string())
		}
		if config.spec.process.capabilities.inheritable.len > 0 {
			capabilities_map['inheritable'] = config.spec.process.capabilities.inheritable.map(it.to_json_string())
		}
		if config.spec.process.capabilities.permitted.len > 0 {
			capabilities_map['permitted'] = config.spec.process.capabilities.permitted.map(it.to_json_string())
		}
		if config.spec.process.capabilities.ambient.len > 0 {
			capabilities_map['ambient'] = config.spec.process.capabilities.ambient.map(it.to_json_string())
		}
		process_map['capabilities'] = capabilities_map
	}
	// Rlimits
	if config.spec.process.rlimits.len > 0 {
		mut rlimits_array := []json.Any{}
		for rlimit in config.spec.process.rlimits {
			rlimits_array << map[string]json.Any{
				'type': rlimit.typ.to_json_string()
				'hard': int(rlimit.hard)
				'soft': int(rlimit.soft)
			}
		}
		process_map['rlimits'] = rlimits_array
	}
	spec_map['process'] = process_map
	// Root
	spec_map['root'] = map[string]json.Any{
		'path': config.spec.root.path
		'readonly': config.spec.root.readonly
	}
	// Hostname (optional)
	if config.spec.hostname != '' {
		spec_map['hostname'] = config.spec.hostname
	}
	// Mounts
	if config.spec.mounts.len > 0 {
		mut mounts_array := []json.Any{}
		for mount in config.spec.mounts {
			mut mount_map := map[string]json.Any{
				'destination': mount.destination
				'type': mount.typ.to_json_string()
				'source': mount.source
			}
			if mount.options.len > 0 {
				mount_map['options'] = mount.options.map(it.to_json_string())
			}
			mounts_array << mount_map
		}
		spec_map['mounts'] = mounts_array
	}
	// Linux specific configuration
	mut linux_map := map[string]json.Any{}
	// Namespaces (path is only emitted when joining an existing namespace)
	if config.spec.linux.namespaces.len > 0 {
		mut namespaces_array := []json.Any{}
		for ns in config.spec.linux.namespaces {
			mut ns_map := map[string]json.Any{
				'type': ns.typ
			}
			if ns.path != '' {
				ns_map['path'] = ns.path
			}
			namespaces_array << ns_map
		}
		linux_map['namespaces'] = namespaces_array
	}
	// Resources: only emitted when at least one limit is configured
	mut resources_map := map[string]json.Any{}
	mut has_resources := false
	if config.spec.linux.resources.memory_limit > 0 {
		mut memory_map := map[string]json.Any{
			'limit': int(config.spec.linux.resources.memory_limit)
		}
		if config.spec.linux.resources.memory_reservation > 0 {
			memory_map['reservation'] = int(config.spec.linux.resources.memory_reservation)
		}
		if config.spec.linux.resources.memory_swap_limit > 0 {
			memory_map['swap'] = int(config.spec.linux.resources.memory_swap_limit)
		}
		resources_map['memory'] = memory_map
		has_resources = true
	}
	if config.spec.linux.resources.cpu_period > 0 || config.spec.linux.resources.cpu_quota > 0 || config.spec.linux.resources.cpu_shares > 0 {
		mut cpu_map := map[string]json.Any{}
		if config.spec.linux.resources.cpu_period > 0 {
			cpu_map['period'] = int(config.spec.linux.resources.cpu_period)
		}
		if config.spec.linux.resources.cpu_quota > 0 {
			cpu_map['quota'] = int(config.spec.linux.resources.cpu_quota)
		}
		if config.spec.linux.resources.cpu_shares > 0 {
			cpu_map['shares'] = int(config.spec.linux.resources.cpu_shares)
		}
		resources_map['cpu'] = cpu_map
		has_resources = true
	}
	if config.spec.linux.resources.pids_limit > 0 {
		resources_map['pids'] = map[string]json.Any{
			'limit': int(config.spec.linux.resources.pids_limit)
		}
		has_resources = true
	}
	if has_resources {
		linux_map['resources'] = resources_map
	}
	spec_map['linux'] = linux_map
	return json.encode_pretty(spec_map)
}
// Convenience method to save JSON to file
// Generates the OCI spec JSON and writes it to `path`, creating the file
// when it does not exist.
// Fix: V does not allow `import` statements inside a function body — the
// pathlib import must live at the top of the module instead.
pub fn (config CrunConfig) save_to_file(path string) ! {
	json_content := config.to_json()!
	mut file := pathlib.get_file(path: path, create: true)!
	file.write(json_content)!
}

View File

@@ -0,0 +1,119 @@
{
"ociVersion": "1.0.2",
"process": {
"terminal": true,
"user": {
"uid": 0,
"gid": 0
},
"args": [
"/bin/sh"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"effective": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"inheritable": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"permitted": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "${rootfs_path}",
"readonly": false
},
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
}
],
"linux": {
"namespaces": [
{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"maskedPaths": [
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}

View File

@@ -1,4 +1,4 @@
module herorun
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.tmux

View File

@@ -0,0 +1,98 @@
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.core.texttools
import os
// Updated enum to be more flexible
// Built-in base images plus a `custom` variant for images pulled via podman.
pub enum ContainerImageType {
	alpine_3_20
	ubuntu_24_04
	ubuntu_25_04
	custom // For custom images downloaded via podman
}
// Arguments for ContainerFactory.new.
// custom_image_name is required (and docker_url optional) when image == .custom.
@[params]
pub struct ContainerNewArgs {
pub:
	name string @[required]
	image ContainerImageType = .alpine_3_20
	custom_image_name string // Used when image = .custom
	docker_url string // Docker image URL for new images
	reset bool
}
// Creates (or returns the cached) container. Resolves the rootfs path for
// the selected image, for .custom downloads the image via image_new when a
// docker_url is given, writes the OCI config bundle and calls `crun create`.
pub fn (mut self ContainerFactory) new(args ContainerNewArgs) !&Container {
	// Return cached container unless a reset was requested
	if args.name in self.containers && !args.reset {
		return self.containers[args.name]
	}
	// Determine image to use
	mut image_name := ''
	mut rootfs_path := ''
	match args.image {
		.alpine_3_20 {
			image_name = 'alpine'
			rootfs_path = '/containers/images/alpine/rootfs'
		}
		.ubuntu_24_04 {
			image_name = 'ubuntu_24_04'
			rootfs_path = '/containers/images/ubuntu/24.04/rootfs'
		}
		.ubuntu_25_04 {
			image_name = 'ubuntu_25_04'
			rootfs_path = '/containers/images/ubuntu/25.04/rootfs'
		}
		.custom {
			if args.custom_image_name == '' {
				return error('custom_image_name is required when using custom image type')
			}
			image_name = args.custom_image_name
			rootfs_path = '/containers/images/${image_name}/rootfs'
			// Check if image exists, if not and docker_url provided, create it
			if !os.is_dir(rootfs_path) && args.docker_url != '' {
				console.print_debug('Creating new image ${image_name} from ${args.docker_url}')
				_ = self.image_new(
					image_name: image_name
					docker_url: args.docker_url
					reset: args.reset
				)!
			}
		}
	}
	// Verify rootfs exists
	if !os.is_dir(rootfs_path) {
		return error('Image rootfs not found: ${rootfs_path}. Please ensure the image is available.')
	}
	// Create container config
	self.create_container_config(args.name, rootfs_path)!
	// Create container using crun (bundle dir written by create_container_config)
	osal.exec(cmd: 'crun create --bundle /containers/configs/${args.name} ${args.name}', stdout: true)!
	mut container := &Container{
		name: args.name
		factory: &self
	}
	self.containers[args.name] = container
	return container
}
// Writes the OCI bundle for a container: creates
// /containers/configs/<name>/ and renders config_template.json into it.
// The template interpolates `container_name` and `rootfs_path` at comptime
// via $tmpl.
fn (self ContainerFactory) create_container_config(container_name string, rootfs_path string) ! {
	config_dir := '/containers/configs/${container_name}'
	osal.exec(cmd: 'mkdir -p ${config_dir}', stdout: false)!
	// Generate OCI config.json using template
	config_content := $tmpl('config_template.json')
	config_path := '${config_dir}/config.json'
	mut p := pathlib.get_file(path: config_path, create: true)!
	p.write(config_content)!
}

View File

@@ -0,0 +1,294 @@
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.core.texttools
import os
import json
// A downloaded/extracted container image rooted at
// /containers/images/<image_name>/rootfs, with cached size/timestamp metadata.
@[heap]
pub struct ContainerImage {
pub mut:
	image_name string @[required] // image is located in /containers/images/<image_name>/rootfs
	docker_url string // optional docker image URL
	rootfs_path string // path to the extracted rootfs
	size_mb f64 // size in MB
	created_at string // creation timestamp
	factory &ContainerFactory @[skip; str: skip]
}
// Arguments for ContainerFactory.image_new.
@[params]
pub struct ContainerImageArgs {
pub mut:
	image_name string @[required] // image is located in /containers/images/<image_name>/rootfs
	docker_url string // docker image URL like "alpine:3.20" or "ubuntu:24.04"
	reset bool
}
// Arguments for ContainerImage.export.
// NOTE(review): compress_level is declared but not used by export() below.
@[params]
pub struct ImageExportArgs {
pub mut:
	dest_path string @[required] // destination .tgz file path
	compress_level int = 6 // compression level 1-9
}
// Arguments for ContainerFactory.image_import.
@[params]
pub struct ImageImportArgs {
pub mut:
	source_path string @[required] // source .tgz file path
	reset bool // overwrite if exists
}
// Create new image or get existing
// Returns the cached image unless reset is set; otherwise builds a
// ContainerImage, downloading and extracting via podman when docker_url is
// given, or validating an already-present rootfs otherwise. Requires podman.
// NOTE(review): `mut image_name` is never reassigned — plain `:=` would do.
pub fn (mut self ContainerFactory) image_new(args ContainerImageArgs) !&ContainerImage {
	mut image_name := texttools.name_fix(args.image_name)
	rootfs_path := '/containers/images/${image_name}/rootfs'
	// Check if image already exists
	if image_name in self.images && !args.reset {
		return self.images[image_name]
	}
	// Ensure podman is installed
	if !osal.cmd_exists('podman') {
		return error('Podman is required for image management. Please install podman first.')
	}
	mut image := &ContainerImage{
		image_name: image_name
		docker_url: args.docker_url
		rootfs_path: rootfs_path
		factory: &self
	}
	// If docker_url is provided, download and extract the image
	if args.docker_url != '' {
		image.download_from_docker(args.docker_url, args.reset)!
	} else {
		// Check if rootfs directory exists
		if !os.is_dir(rootfs_path) {
			return error('Image rootfs not found at ${rootfs_path} and no docker_url provided')
		}
	}
	// Update image metadata
	image.update_metadata()!
	self.images[image_name] = image
	return image
}
// Download image from docker registry using podman
// Pulls the image, creates a stopped temp container from it, exports its
// filesystem as a tar, extracts it into self.rootfs_path, then removes the
// temp container, tar and (best-effort) the pulled podman image.
fn (mut self ContainerImage) download_from_docker(docker_url string, reset bool) ! {
	console.print_header('Downloading image: ${docker_url}')
	// Clean image name for local storage
	image_dir := '/containers/images/${self.image_name}'
	// Remove existing if reset is true
	if reset && os.is_dir(image_dir) {
		osal.exec(cmd: 'rm -rf ${image_dir}', stdout: false)!
	}
	// Create image directory
	osal.exec(cmd: 'mkdir -p ${image_dir}', stdout: false)!
	// Pull image using podman
	console.print_debug('Pulling image: ${docker_url}')
	osal.exec(cmd: 'podman pull ${docker_url}', stdout: true)!
	// Create container from image (without running it)
	temp_container := 'temp_${self.image_name}_extract'
	osal.exec(cmd: 'podman create --name ${temp_container} ${docker_url}', stdout: false)!
	// Export container filesystem
	tar_file := '${image_dir}/rootfs.tar'
	osal.exec(cmd: 'podman export ${temp_container} -o ${tar_file}', stdout: true)!
	// Extract to rootfs directory
	osal.exec(cmd: 'mkdir -p ${self.rootfs_path}', stdout: false)!
	osal.exec(cmd: 'tar -xf ${tar_file} -C ${self.rootfs_path}', stdout: true)!
	// Clean up temporary container and tar file (errors intentionally ignored)
	osal.exec(cmd: 'podman rm ${temp_container}', stdout: false) or {}
	osal.exec(cmd: 'rm -f ${tar_file}', stdout: false) or {}
	// Remove the pulled image from podman to save space (optional)
	osal.exec(cmd: 'podman rmi ${docker_url}', stdout: false) or {}
	console.print_green('Image ${docker_url} extracted to ${self.rootfs_path}')
}
// Update image metadata (size, creation time, etc.)
// Computes rootfs size via `du -sm` and mtime via `stat`.
// NOTE(review): the result of osal.exec is used directly as a string here
// (.split / .trim_space) — confirm osal.exec returns the command output
// rather than a result struct, as elsewhere in herolib it may differ.
fn (mut self ContainerImage) update_metadata() ! {
	if !os.is_dir(self.rootfs_path) {
		return error('Rootfs path does not exist: ${self.rootfs_path}')
	}
	// Calculate size (du -sm prints "<MB>\t<path>")
	result := osal.exec(cmd: 'du -sm ${self.rootfs_path}', stdout: false)!
	size_str := result.split('\t')[0].trim_space()
	self.size_mb = size_str.f64()
	// Get creation time (unix mtime of the rootfs dir, as a string)
	stat_result := osal.exec(cmd: 'stat -c "%Y" ${self.rootfs_path}', stdout: false)!
	self.created_at = stat_result.trim_space()
}
// List all available images
// Scans /containers/images for directories containing a rootfs/, lazily
// creating and caching ContainerImage objects for any not yet in the cache.
// Images whose metadata cannot be read are skipped with a warning.
pub fn (mut self ContainerFactory) images_list() ![]&ContainerImage {
	mut images := []&ContainerImage{}
	images_base_dir := '/containers/images'
	if !os.is_dir(images_base_dir) {
		return images
	}
	// Scan for image directories
	dirs := os.ls(images_base_dir)!
	for dir in dirs {
		full_path := '${images_base_dir}/${dir}'
		if os.is_dir(full_path) {
			rootfs_path := '${full_path}/rootfs'
			if os.is_dir(rootfs_path) {
				// Create image object if not in cache
				if dir !in self.images {
					mut image := &ContainerImage{
						image_name: dir
						rootfs_path: rootfs_path
						factory: &self
					}
					image.update_metadata() or {
						console.print_stderr('Failed to update metadata for image ${dir}: ${err}')
						continue
					}
					self.images[dir] = image
				}
				images << self.images[dir]
			}
		}
	}
	return images
}
// Export image to .tgz file
// Archives the image rootfs (as a top-level "rootfs" entry) into
// args.dest_path using tar+gzip.
// NOTE(review): args.compress_level is ignored (gzip default is used), and
// paths are interpolated into the shell command unquoted — spaces or shell
// metacharacters in paths would break or be interpreted by the shell.
pub fn (mut self ContainerImage) export(args ImageExportArgs) ! {
	if !os.is_dir(self.rootfs_path) {
		return error('Image rootfs not found: ${self.rootfs_path}')
	}
	console.print_header('Exporting image ${self.image_name} to ${args.dest_path}')
	// Ensure destination directory exists
	dest_dir := os.dir(args.dest_path)
	osal.exec(cmd: 'mkdir -p ${dest_dir}', stdout: false)!
	// Create compressed archive
	cmd := 'tar -czf ${args.dest_path} -C ${os.dir(self.rootfs_path)} ${os.base(self.rootfs_path)}'
	osal.exec(cmd: cmd, stdout: true)!
	console.print_green('Image exported successfully to ${args.dest_path}')
}
// Import image from .tgz file
// Derives the image name from the archive filename, extracts the archive
// (expected to contain a top-level rootfs/ as produced by export()) into
// /containers/images/<name>/, caches and returns the ContainerImage.
// Errors if the image already exists unless reset is set.
pub fn (mut self ContainerFactory) image_import(args ImageImportArgs) !&ContainerImage {
	if !os.exists(args.source_path) {
		return error('Source file not found: ${args.source_path}')
	}
	// Extract image name from filename
	filename := os.base(args.source_path)
	image_name := filename.replace('.tgz', '').replace('.tar.gz', '')
	image_name_clean := texttools.name_fix(image_name)
	console.print_header('Importing image from ${args.source_path}')
	image_dir := '/containers/images/${image_name_clean}'
	rootfs_path := '${image_dir}/rootfs'
	// Check if image already exists
	if os.is_dir(rootfs_path) && !args.reset {
		return error('Image ${image_name_clean} already exists. Use reset=true to overwrite.')
	}
	// Remove existing if reset
	if args.reset && os.is_dir(image_dir) {
		osal.exec(cmd: 'rm -rf ${image_dir}', stdout: false)!
	}
	// Create directories
	osal.exec(cmd: 'mkdir -p ${image_dir}', stdout: false)!
	// Extract archive
	osal.exec(cmd: 'tar -xzf ${args.source_path} -C ${image_dir}', stdout: true)!
	// Create image object
	mut image := &ContainerImage{
		image_name: image_name_clean
		rootfs_path: rootfs_path
		factory: &self
	}
	image.update_metadata()!
	self.images[image_name_clean] = image
	console.print_green('Image imported successfully: ${image_name_clean}')
	return image
}
// Delete image
// Removes the whole image directory (parent of rootfs) from disk and drops
// the entry from the owning factory's cache.
pub fn (mut self ContainerImage) delete() ! {
	console.print_header('Deleting image: ${self.image_name}')
	image_dir := os.dir(self.rootfs_path)
	if os.is_dir(image_dir) {
		osal.exec(cmd: 'rm -rf ${image_dir}', stdout: true)!
	}
	// Remove from factory cache
	if self.image_name in self.factory.images {
		self.factory.images.delete(self.image_name)
	}
	console.print_green('Image ${self.image_name} deleted successfully')
}
// Get image info as map
// Returns a stringly-typed snapshot of the image fields (for display/logging).
pub fn (self ContainerImage) info() map[string]string {
	return {
		'name': self.image_name
		'docker_url': self.docker_url
		'rootfs_path': self.rootfs_path
		'size_mb': self.size_mb.str()
		'created_at': self.created_at
	}
}
// List available docker images that can be downloaded
// Purely informational: these references are known to work as `docker_url`
// values for image_new. Base distributions first, then application images.
pub fn list_available_docker_images() []string {
	base_images := [
		'alpine:3.20',
		'alpine:3.19',
		'alpine:latest',
		'ubuntu:24.04',
		'ubuntu:22.04',
		'ubuntu:20.04',
		'ubuntu:latest',
		'debian:12',
		'debian:11',
		'debian:latest',
		'fedora:39',
		'fedora:38',
		'fedora:latest',
		'archlinux:latest',
		'centos:stream9',
		'rockylinux:9',
	]
	app_images := [
		'nginx:alpine',
		'redis:alpine',
		'postgres:15-alpine',
		'node:20-alpine',
		'python:3.12-alpine',
	]
	mut all_images := []string{}
	all_images << base_images
	all_images << app_images
	return all_images
}

View File

@@ -1,4 +1,4 @@
module herorun
module heropods
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.tmux
@@ -12,13 +12,14 @@ pub struct ContainerFactory {
pub mut:
tmux_session string // tmux session name if used
containers map[string]&Container
images map[string]&Image
images map[string]&ContainerImage // Added images map
}
@[params]
pub struct FactoryInitArgs {
pub:
reset bool
use_podman bool = true // Use podman for image management
}
pub fn new(args FactoryInitArgs) !ContainerFactory {
@@ -31,6 +32,56 @@ fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
// Ensure base directories exist
osal.exec(cmd: 'mkdir -p /containers/images /containers/configs /containers/runtime', stdout: false)!
if args.use_podman {
// Check if podman is installed
if !osal.cmd_exists('podman') {
console.print_stderr('Warning: podman not found. Installing podman is recommended for better image management.')
console.print_debug('You can install podman with: apt install podman (Ubuntu) or brew install podman (macOS)')
} else {
console.print_debug('Using podman for image management')
}
}
// Load existing images into cache
self.load_existing_images()!
// Setup default images if they don't exist
if !args.use_podman {
self.setup_default_images_legacy(args.reset)!
}
}
// Load existing images from filesystem into cache
// Scans /containers/images at factory init and caches a ContainerImage for
// every directory that contains a rootfs/. All errors are non-fatal: a
// missing base dir or unreadable metadata just skips the entry.
fn (mut self ContainerFactory) load_existing_images() ! {
	images_base_dir := '/containers/images'
	if !os.is_dir(images_base_dir) {
		return
	}
	dirs := os.ls(images_base_dir) or { return }
	for dir in dirs {
		full_path := '${images_base_dir}/${dir}'
		if os.is_dir(full_path) {
			rootfs_path := '${full_path}/rootfs'
			if os.is_dir(rootfs_path) {
				mut image := &ContainerImage{
					image_name: dir
					rootfs_path: rootfs_path
					factory: &self
				}
				image.update_metadata() or {
					console.print_stderr('Failed to load image metadata for ${dir}')
					continue
				}
				self.images[dir] = image
				console.print_debug('Loaded existing image: ${dir}')
			}
		}
	}
}
// Legacy method for downloading images directly (fallback if no podman)
fn (mut self ContainerFactory) setup_default_images_legacy(reset bool) ! {
// Setup for all supported images
images := [ContainerImage.alpine_3_20, .ubuntu_24_04, .ubuntu_25_04]
@@ -43,7 +94,7 @@ fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
alpine_dest := '/containers/images/alpine/${alpine_file}'
alpine_rootfs := '/containers/images/alpine/rootfs'
if args.reset || !os.exists(alpine_rootfs) {
if reset || !os.exists(alpine_rootfs) {
osal.download(
url: alpine_url
dest: alpine_dest
@@ -64,7 +115,7 @@ fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
dest := '/containers/images/ubuntu/${ver}/${file}'
rootfs := '/containers/images/ubuntu/${ver}/rootfs'
if args.reset || !os.exists(rootfs) {
if reset || !os.exists(rootfs) {
osal.download(
url: url
dest: dest
@@ -85,7 +136,7 @@ fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
dest := '/containers/images/ubuntu/${ver}/${file}'
rootfs := '/containers/images/ubuntu/${ver}/rootfs'
if args.reset || !os.exists(rootfs) {
if reset || !os.exists(rootfs) {
osal.download(
url: url
dest: dest
@@ -102,7 +153,6 @@ fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
}
}
pub fn (mut self ContainerFactory) get(args ContainerNewArgs) !&Container {
if args.name !in self.containers {
return error('Container ${args.name} does not exist')
@@ -110,6 +160,14 @@ pub fn (mut self ContainerFactory) get(args ContainerNewArgs) !&Container {
return self.containers[args.name]
}
// Get image by name
// Returns the cached image, or an error when it is not in the cache
// (the cache is filled by load_existing_images/image_new/image_import).
pub fn (mut self ContainerFactory) image_get(name string) !&ContainerImage {
	if name !in self.images {
		return error('Image ${name} does not exist')
	}
	return self.images[name]
}
pub fn (self ContainerFactory) list() ![]Container {
mut containers := []Container{}
result := osal.exec(cmd: 'crun list --format json', stdout: false) or { '[]' }
@@ -130,5 +188,4 @@ pub fn (self ContainerFactory) list() ![]Container {
}
}
return containers
}
}

View File

@@ -1,75 +0,0 @@
module herorun
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.tmux
import freeflowuniverse.herolib.osal.core as osal
import time
import freeflowuniverse.herolib.builder
import json
pub enum ContainerImage {
alpine_3_20
ubuntu_24_04
ubuntu_25_04
}
@[params]
pub struct ContainerNewArgs {
pub:
name string @[required]
image ContainerImage = .alpine_3_20
reset bool
}
// Create a container with the given name, or return the cached one.
// If a container named args.name already exists and args.reset is false,
// the existing instance is returned unchanged. Otherwise an OCI bundle is
// written under /containers/configs/<name> and `crun create` is invoked.
// NOTE(review): when args.reset is true, the pre-existing crun container is
// not deleted before `crun create` runs — confirm crun tolerates re-creating
// an existing container id.
pub fn (mut self ContainerFactory) new(args ContainerNewArgs) !&Container {
	if args.name in self.containers && !args.reset {
		return self.containers[args.name]
	}
	// Create container config
	self.create_container_config(args)!
	// Create container using crun
	osal.exec(cmd: 'crun create --bundle /containers/configs/${args.name} ${args.name}', stdout: true)!
	mut container := &Container{
		name: args.name
		factory: &self
	}
	self.containers[args.name] = container
	return container
}
// Write an OCI runtime bundle (config.json) for args into
// /containers/configs/<name>.
// The rootfs path is chosen from the requested base image; the JSON content
// is rendered from the compile-time template config_template.json, which
// sees this function's locals (rootfs_path, args, ...) in scope.
// NOTE(review): pathlib is used below but is not among this module's visible
// imports — confirm `freeflowuniverse.herolib.core.pathlib` is imported at
// the top of the file.
fn (self ContainerFactory) create_container_config(args ContainerNewArgs) ! {
	// Determine rootfs path based on image
	mut rootfs_path := ''
	match args.image {
		.alpine_3_20 {
			rootfs_path = '/containers/images/alpine/rootfs'
		}
		.ubuntu_24_04 {
			rootfs_path = '/containers/images/ubuntu/24.04/rootfs'
		}
		.ubuntu_25_04 {
			rootfs_path = '/containers/images/ubuntu/25.04/rootfs'
		}
	}
	config_dir := '/containers/configs/${args.name}'
	osal.exec(cmd: 'mkdir -p ${config_dir}', stdout: false)!
	// Generate OCI config.json
	config_content := $tmpl('config_template.json')
	config_path := '${config_dir}/config.json'
	mut p := pathlib.get_file(path: config_path, create: true)!
	p.write(config_content)!
}

View File

@@ -1,52 +0,0 @@
module herorun
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.tmux
import freeflowuniverse.herolib.osal.core as osal
import time
import freeflowuniverse.herolib.builder
import json
// A named, on-disk container image managed by the factory.
@[heap]
pub struct ContainerImage {
pub:
	image_name string @[required] //image is located in /containers/images/<image_name>/rootfs
	docker_unc string //optional
}
// Arguments for registering a new image (ContainerFactory.image_new).
pub struct ContainerImageArgs {
pub:
	image_name string @[required] //image is located in /containers/images/<image_name>/rootfs
	docker_unc string // optional docker/OCI reference to pull the image from
	reset bool // when true, re-download/re-extract even if the rootfs already exists
}
// Register (and, when docker_unc is set, download) a container image.
// Intended behavior (per the original TODO comments): pull the image with
// podman, extract its rootfs to /containers/images/<image_name>/rootfs, and
// attach the resulting ContainerImage to self.images.
// Not implemented yet. The original body was empty, which does not satisfy
// the declared `!&ContainerImage` return type (V requires a value or error on
// every path); returning an explicit error keeps the module compiling.
pub fn (mut self ContainerFactory) image_new(args ContainerImageArgs) !&ContainerImage {
	// if docker unc is given, we need to download the image and extract it to
	// /containers/images/<image_name>/rootfs, use podman for it
	// if podman not installed give error
	// attach image to self.factory.images ..
	return error('ContainerFactory.image_new is not implemented yet (image: ${args.image_name})')
}
// List all images known to the factory.
// Not implemented yet. The original body was empty, which does not satisfy
// the declared `![]&ContainerImage` return type; returning an explicit error
// keeps the module compiling until the implementation lands.
pub fn (mut self ContainerFactory) images_list() ![]&ContainerImage {
	// TODO: scan /containers/images and/or return self.images.values()
	return error('ContainerFactory.images_list is not implemented yet')
}
// Export this image's rootfs to a .tgz archive at `dest`.
// Errors if the image directory does not exist.
// Not implemented yet. The original signature used `(...)`, which is not
// valid V parameter syntax; a concrete destination-path parameter replaces it
// so the module compiles.
pub fn (mut self ContainerImage) export(dest string) ! {
	// export dir if exist to the defined path, if not exist then error
	return error('ContainerImage.export is not implemented yet (dest: ${dest})')
}
// Import an image from a .tgz file at `src` into
// /containers/images/<image_name>/rootfs. Errors if the rootfs already
// exists, unless `reset` is true.
// Not implemented yet. The original was named `import` with `(...)` params —
// `import` is a V keyword and `...` is not valid parameter syntax — so the
// method is renamed `import_` (keyword-collision convention) with a concrete
// signature so the module compiles.
pub fn (mut self ContainerImage) import_(src string, reset bool) ! {
	// import from .tgz file to /containers/images/<image_name>/rootfs,
	// if already exist give error, unless if we specify reset
	return error('ContainerImage.import_ is not implemented yet (src: ${src})')
}
// Delete this image's on-disk rootfs.
// TODO: not implemented — currently a no-op that always succeeds
// (a void `!` function may fall through without an explicit return).
pub fn (mut self ContainerImage) delete() !{
	//TODO:
}

View File

@@ -1,68 +0,0 @@
module runc
// Demonstrates assembling a complete OCI runtime Spec by hand:
// root filesystem, process (user, args, env, capabilities, rlimits),
// Linux namespaces/resources, and the top-level Spec, then prints it.
fn example() {
	// Read-only root filesystem mounted from /rootfs
	root := Root{
		path: '/rootfs'
		readonly: true
	}
	// Container init process: interactive bash running as root
	process := Process{
		terminal: true
		user: User{
			uid: 0
			gid: 0
			additional_gids: [u32(0)]
		}
		args: ['/bin/bash']
		env: ['PATH=/usr/bin']
		cwd: '/'
		capabilities: Capabilities{
			bounding: [Capability.cap_chown, Capability.cap_dac_override]
			effective: [Capability.cap_chown]
			inheritable: []
			permitted: [Capability.cap_chown]
			ambient: []
		}
		rlimits: [
			Rlimit{
				typ: .rlimit_nofile
				hard: 1024
				soft: 1024
			},
		]
	}
	// Linux-specific settings: a fresh PID namespace plus cgroup resource caps
	linux := Linux{
		namespaces: [
			LinuxNamespace{
				typ: 'pid'
				path: ''
			},
		]
		resources: LinuxResource{
			blkio_weight: 1000
			cpu_period: 100000
			cpu_quota: 50000
			cpu_shares: 1024
			devices: []
			memory_limit: 1024 * 1024 * 1024 // 1GB
		}
		devices: []
	}
	// Top-level OCI spec tying everything together
	spec := Spec{
		version: '1.0.0'
		platform: Platform{
			os: .linux
			arch: .amd64
		}
		process: process
		root: root
		hostname: 'my-container'
		mounts: []
		linux: linux
		hooks: Hooks{}
	}
	println(spec)
}

View File

@@ -1,7 +0,0 @@
Specs: https://github.com/opencontainers/runtime-spec
Use https://github.com/containers/youki to test the implementation: wrap it as part of the runc module,
and create an installer for it.

View File

@@ -1,153 +0,0 @@
module runc
import json
// Helper functions to convert enums to strings
// Returns the canonical uppercase Linux capability name (e.g. 'CAP_CHOWN')
// as used in OCI config.json. The match is exhaustive, so adding an enum
// variant without a mapping is a compile error.
fn (cap Capability) str() string {
	return match cap {
		.cap_chown { 'CAP_CHOWN' }
		.cap_dac_override { 'CAP_DAC_OVERRIDE' }
		.cap_dac_read_search { 'CAP_DAC_READ_SEARCH' }
		.cap_fowner { 'CAP_FOWNER' }
		.cap_fsetid { 'CAP_FSETID' }
		.cap_kill { 'CAP_KILL' }
		.cap_setgid { 'CAP_SETGID' }
		.cap_setuid { 'CAP_SETUID' }
		.cap_setpcap { 'CAP_SETPCAP' }
		.cap_linux_immutable { 'CAP_LINUX_IMMUTABLE' }
		.cap_net_bind_service { 'CAP_NET_BIND_SERVICE' }
		.cap_net_broadcast { 'CAP_NET_BROADCAST' }
		.cap_net_admin { 'CAP_NET_ADMIN' }
		.cap_net_raw { 'CAP_NET_RAW' }
		.cap_ipc_lock { 'CAP_IPC_LOCK' }
		.cap_ipc_owner { 'CAP_IPC_OWNER' }
		.cap_sys_module { 'CAP_SYS_MODULE' }
		.cap_sys_rawio { 'CAP_SYS_RAWIO' }
		.cap_sys_chroot { 'CAP_SYS_CHROOT' }
		.cap_sys_ptrace { 'CAP_SYS_PTRACE' }
		.cap_sys_pacct { 'CAP_SYS_PACCT' }
		.cap_sys_admin { 'CAP_SYS_ADMIN' }
		.cap_sys_boot { 'CAP_SYS_BOOT' }
		.cap_sys_nice { 'CAP_SYS_NICE' }
		.cap_sys_resource { 'CAP_SYS_RESOURCE' }
		.cap_sys_time { 'CAP_SYS_TIME' }
		.cap_sys_tty_config { 'CAP_SYS_TTY_CONFIG' }
		.cap_mknod { 'CAP_MKNOD' }
		.cap_lease { 'CAP_LEASE' }
		.cap_audit_write { 'CAP_AUDIT_WRITE' }
		.cap_audit_control { 'CAP_AUDIT_CONTROL' }
		.cap_setfcap { 'CAP_SETFCAP' }
		.cap_mac_override { 'CAP_MAC_OVERRIDE' }
		.cap_mac_admin { 'CAP_MAC_ADMIN' }
		.cap_syslog { 'CAP_SYSLOG' }
		.cap_wake_alarm { 'CAP_WAKE_ALARM' }
		.cap_block_suspend { 'CAP_BLOCK_SUSPEND' }
		.cap_audit_read { 'CAP_AUDIT_READ' }
	}
}
// Returns the canonical uppercase rlimit name (e.g. 'RLIMIT_NOFILE') as used
// in OCI config.json. The match is exhaustive over RlimitType.
fn (rlimit RlimitType) str() string {
	return match rlimit {
		.rlimit_cpu { 'RLIMIT_CPU' }
		.rlimit_fsize { 'RLIMIT_FSIZE' }
		.rlimit_data { 'RLIMIT_DATA' }
		.rlimit_stack { 'RLIMIT_STACK' }
		.rlimit_core { 'RLIMIT_CORE' }
		.rlimit_rss { 'RLIMIT_RSS' }
		.rlimit_nproc { 'RLIMIT_NPROC' }
		.rlimit_nofile { 'RLIMIT_NOFILE' }
		.rlimit_memlock { 'RLIMIT_MEMLOCK' }
		.rlimit_as { 'RLIMIT_AS' }
		.rlimit_lock { 'RLIMIT_LOCK' }
		.rlimit_sigpending { 'RLIMIT_SIGPENDING' }
		.rlimit_msgqueue { 'RLIMIT_MSGQUEUE' }
		.rlimit_nice { 'RLIMIT_NICE' }
		.rlimit_rtprio { 'RLIMIT_RTPRIO' }
		.rlimit_rttime { 'RLIMIT_RTTIME' }
	}
}
// Convert a Capabilities struct into a JSON-ready map: each capability set
// ('bounding', 'effective', 'inheritable', 'permitted', 'ambient') maps to
// its list of capability names (via Capability.str, e.g. 'CAP_CHOWN').
fn (cap Capabilities) to_json() map[string][]string {
	mut sets := map[string][]string{}
	sets['bounding'] = cap.bounding.map(it.str())
	sets['effective'] = cap.effective.map(it.str())
	sets['inheritable'] = cap.inheritable.map(it.str())
	sets['permitted'] = cap.permitted.map(it.str())
	sets['ambient'] = cap.ambient.map(it.str())
	return sets
}
// Function to convert Rlimit struct to JSON
// NOTE(review): `json.Any` is not provided by V's builtin `json` module —
// the dynamic Any type lives in `x.json2`. As written this likely does not
// compile; confirm the intended module. Also the map mixes a string value
// ('type') with integer values ('hard'/'soft'), which a map[string]json.Any
// would require explicit casts/wrapping for.
fn (rlimit Rlimit) to_json() map[string]json.Any {
	return {
		'type': rlimit.typ.str()
		'hard': rlimit.hard
		'soft': rlimit.soft
	}
}
// Example function to generate the Process JSON
// Builds a JSON-ready map for an OCI `process` object (terminal, user, args,
// env, cwd, capabilities, rlimits) and pretty-prints it.
// NOTE(review): this map literal mixes value types (bool, nested map,
// []string, string, ...) — V map literals require a single value type, so as
// written this most likely does not compile; a typed struct or x.json2.Any
// values would be needed. Confirm against the targeted V version.
fn generate_process_json(proc Process) string {
	// Convert the Process object to JSON
	process_json := {
		'terminal': proc.terminal
		'user': {
			'uid': proc.user.uid
			'gid': proc.user.gid
			'additionalGids': proc.user.additional_gids
		}
		'args': proc.args
		'env': proc.env
		'cwd': proc.cwd
		'capabilities': proc.capabilities.to_json()
		'rlimits': proc.rlimits.map(it.to_json())
	}
	// Convert the entire process map to JSON string
	return json.encode_pretty(process_json)
}
// Demonstrates building a Process (user, capabilities, rlimits) and printing
// its JSON representation via generate_process_json.
pub fn example_json() {
	// Example instantiation using enums and Process structure
	user := User{
		uid: 1000
		gid: 1000
		// NOTE(review): plain int literals — elsewhere additional_gids uses an
		// explicit cast ([u32(0)]); confirm this coerces to the field's type.
		additional_gids: [1001, 1002]
	}
	// Minimal capability sets: chown everywhere, dac_override only bounding
	capabilities := Capabilities{
		bounding: [Capability.cap_chown, Capability.cap_dac_override]
		effective: [Capability.cap_chown]
		inheritable: []
		permitted: [Capability.cap_chown]
		ambient: []
	}
	// Two resource limits: open files and CPU time
	rlimits := [
		Rlimit{
			typ: RlimitType.rlimit_nofile
			hard: 1024
			soft: 1024
		},
		Rlimit{
			typ: RlimitType.rlimit_cpu
			hard: 1000
			soft: 500
		},
	]
	// Interactive bash process running as uid/gid 1000
	process := Process{
		terminal: true
		user: user
		args: ['/bin/bash']
		env: ['PATH=/usr/bin']
		cwd: '/'
		capabilities: capabilities
		rlimits: rlimits
	}
	// Generate the JSON for Process object
	json_output := generate_process_json(process)
	println(json_output)
}