2025-09-07 15:27:41 +04:00
parent cb125e8114
commit 145c6d8714
7 changed files with 587 additions and 592 deletions

View File

@@ -1,20 +1,31 @@
module crun
import json
fn test_factory_creation() {
config := new(name: 'test')!
mut configs := map[string]&CrunConfig{}
config := new(mut configs, name: 'test')!
assert config.name == 'test'
assert config.spec.version == '1.0.0'
assert config.spec.oci_version == '1.0.2'
}
fn test_json_generation() {
mut config := new(name: 'test')!
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'test')!
json_str := config.to_json()!
assert json_str.contains('"ociVersion": "1.0.0"')
assert json_str.contains('"os": "linux"')
// Parse back to verify structure
parsed := json.decode(map[string]json.Any, json_str)!
assert parsed['ociVersion']! as string == '1.0.2'
process := parsed['process']! as map[string]json.Any
assert process['terminal']! as bool == true
}
fn test_configuration_methods() {
mut config := new(name: 'test')!
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'test')!
config.set_command(['/bin/echo', 'hello'])
.set_working_dir('/tmp')
@@ -23,4 +34,41 @@ fn test_configuration_methods() {
assert config.spec.process.args == ['/bin/echo', 'hello']
assert config.spec.process.cwd == '/tmp'
assert config.spec.hostname == 'test-host'
}
fn test_validation() {
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'test')!
// Should validate successfully with defaults
config.validate()!
// Should fail with empty args
config.spec.process.args = []
if _ := config.validate() {
assert false, 'validation should have failed'
} else {
// Expected to fail
}
}
fn test_heropods_compatibility() {
mut configs := map[string]&CrunConfig{}
mut config := new(mut configs, name: 'heropods')!
// The default config should match the heropods template structure
json_str := config.to_json()!
parsed := json.decode(map[string]json.Any, json_str)!
// Check key fields match template
assert parsed['ociVersion']! as string == '1.0.2'
process := parsed['process']! as map[string]json.Any
assert process['noNewPrivileges']! as bool == true
capabilities := process['capabilities']! as map[string]json.Any
bounding := capabilities['bounding']! as []json.Any
assert 'CAP_AUDIT_WRITE' in bounding.map(it as string)
assert 'CAP_KILL' in bounding.map(it as string)
assert 'CAP_NET_BIND_SERVICE' in bounding.map(it as string)
}

View File

@@ -1,44 +1,67 @@
module crun
import freeflowuniverse.herolib.core.pathlib
pub fn example_factory() ! {
// Create a new container configuration
mut config := new(name: 'mycontainer')!
pub fn example_heropods_compatible() ! {
mut configs := map[string]&CrunConfig{}
// Create a container configuration compatible with the heropods template
mut config := new(mut configs, name: 'heropods-example')!
// Configure the container
config.set_command(['/bin/bash', '-c', 'echo "Hello from container"'])
.set_working_dir('/app')
.set_user(1000, 1000, [1001, 1002])
.add_env('MY_VAR', 'my_value')
.add_env('ANOTHER_VAR', 'another_value')
.set_rootfs('/path/to/rootfs', false)
.set_hostname('my-container')
.set_memory_limit(1024 * 1024 * 1024) // 1GB
.set_cpu_limits(100000, 50000, 1024) // period, quota, shares
.add_mount('/host/path', '/container/path', .bind, [.rw])
.add_capability(.cap_sys_admin)
.remove_capability(.cap_net_raw)
// Configure to match the template
config.set_command(['/bin/sh'])
.set_working_dir('/')
.set_user(0, 0, [])
.add_env('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin')
.add_env('TERM', 'xterm')
.set_rootfs(r'${rootfs_path}', false) // raw string: kept as a literal placeholder, replaced by the actual path later
.set_hostname('container')
.set_no_new_privileges(true)
// Add the specific rlimit from template
config.add_rlimit(.rlimit_nofile, 1024, 1024)
// Validate the configuration
config.validate()!
// Generate and print JSON
json_output := config.to_json()!
println(json_output)
// Save to file
config.save_to_file('/tmp/config.json')!
println('Configuration saved to /tmp/config.json')
config.save_to_file('/tmp/heropods_config.json')!
println('Heropods-compatible configuration saved to /tmp/heropods_config.json')
}
pub fn example_simple() ! {
// Simple container for running a shell
mut config := new(name: 'shell')!
pub fn example_custom() ! {
mut configs := map[string]&CrunConfig{}
// Create a more complex container configuration
mut config := new(mut configs, name: 'custom-container')!
config.set_command(['/bin/sh'])
.set_rootfs('/path/to/alpine/rootfs', false)
.set_hostname('alpine-shell')
config.set_command(['/usr/bin/my-app', '--config', '/etc/myapp/config.yaml'])
.set_working_dir('/app')
.set_user(1000, 1000, [1001, 1002])
.add_env('MY_VAR', 'my_value')
.add_env('ANOTHER_VAR', 'another_value')
.set_rootfs('/path/to/rootfs', false)
.set_hostname('my-custom-container')
.set_memory_limit(1024 * 1024 * 1024) // 1GB
.set_cpu_limits(100000, 50000, 1024) // period, quota, shares
.set_pids_limit(500)
.add_mount('/host/path', '/container/path', .bind, [.rw])
.add_mount('/tmp/cache', '/app/cache', .tmpfs, [.rw, .noexec])
.add_capability(.cap_sys_admin)
.remove_capability(.cap_net_raw)
.add_rlimit(.rlimit_nproc, 100, 50)
.set_no_new_privileges(true)
// Add some additional security hardening
config.add_masked_path('/proc/kcore')
.add_readonly_path('/proc/sys')
// Validate before use
config.validate()!
// Get the JSON
json_str := config.to_json()!
println('Simple container config:')
println('Custom container config:')
println(json_str)
}

View File

@@ -2,9 +2,6 @@ module crun
import freeflowuniverse.herolib.core.texttools
__global (
crun_configs map[string]&CrunConfig
)
@[params]
pub struct FactoryArgs {
@@ -16,12 +13,103 @@ pub struct CrunConfig {
pub mut:
name string
spec Spec
}
// Process configuration
// Convert enum values to their string representations
pub fn (mount_type MountType) to_string() string {
return match mount_type {
.bind { 'bind' }
.tmpfs { 'tmpfs' }
.proc { 'proc' }
.sysfs { 'sysfs' }
.devpts { 'devpts' }
.nfs { 'nfs' }
.overlay { 'overlay' }
}
}
pub fn (option MountOption) to_string() string {
return match option {
.rw { 'rw' }
.ro { 'ro' }
.noexec { 'noexec' }
.nosuid { 'nosuid' }
.nodev { 'nodev' }
.rbind { 'rbind' }
.relatime { 'relatime' }
.strictatime { 'strictatime' }
.mode { 'mode=755' } // fixed default; the value is not configurable through this enum
.size { 'size=65536k' } // fixed default; the value is not configurable through this enum
}
}
pub fn (cap Capability) to_string() string {
return match cap {
.cap_chown { 'CAP_CHOWN' }
.cap_dac_override { 'CAP_DAC_OVERRIDE' }
.cap_dac_read_search { 'CAP_DAC_READ_SEARCH' }
.cap_fowner { 'CAP_FOWNER' }
.cap_fsetid { 'CAP_FSETID' }
.cap_kill { 'CAP_KILL' }
.cap_setgid { 'CAP_SETGID' }
.cap_setuid { 'CAP_SETUID' }
.cap_setpcap { 'CAP_SETPCAP' }
.cap_linux_immutable { 'CAP_LINUX_IMMUTABLE' }
.cap_net_bind_service { 'CAP_NET_BIND_SERVICE' }
.cap_net_broadcast { 'CAP_NET_BROADCAST' }
.cap_net_admin { 'CAP_NET_ADMIN' }
.cap_net_raw { 'CAP_NET_RAW' }
.cap_ipc_lock { 'CAP_IPC_LOCK' }
.cap_ipc_owner { 'CAP_IPC_OWNER' }
.cap_sys_module { 'CAP_SYS_MODULE' }
.cap_sys_rawio { 'CAP_SYS_RAWIO' }
.cap_sys_chroot { 'CAP_SYS_CHROOT' }
.cap_sys_ptrace { 'CAP_SYS_PTRACE' }
.cap_sys_pacct { 'CAP_SYS_PACCT' }
.cap_sys_admin { 'CAP_SYS_ADMIN' }
.cap_sys_boot { 'CAP_SYS_BOOT' }
.cap_sys_nice { 'CAP_SYS_NICE' }
.cap_sys_resource { 'CAP_SYS_RESOURCE' }
.cap_sys_time { 'CAP_SYS_TIME' }
.cap_sys_tty_config { 'CAP_SYS_TTY_CONFIG' }
.cap_mknod { 'CAP_MKNOD' }
.cap_lease { 'CAP_LEASE' }
.cap_audit_write { 'CAP_AUDIT_WRITE' }
.cap_audit_control { 'CAP_AUDIT_CONTROL' }
.cap_setfcap { 'CAP_SETFCAP' }
.cap_mac_override { 'CAP_MAC_OVERRIDE' }
.cap_mac_admin { 'CAP_MAC_ADMIN' }
.cap_syslog { 'CAP_SYSLOG' }
.cap_wake_alarm { 'CAP_WAKE_ALARM' }
.cap_block_suspend { 'CAP_BLOCK_SUSPEND' }
.cap_audit_read { 'CAP_AUDIT_READ' }
}
}
pub fn (rlimit RlimitType) to_string() string {
return match rlimit {
.rlimit_cpu { 'RLIMIT_CPU' }
.rlimit_fsize { 'RLIMIT_FSIZE' }
.rlimit_data { 'RLIMIT_DATA' }
.rlimit_stack { 'RLIMIT_STACK' }
.rlimit_core { 'RLIMIT_CORE' }
.rlimit_rss { 'RLIMIT_RSS' }
.rlimit_nproc { 'RLIMIT_NPROC' }
.rlimit_nofile { 'RLIMIT_NOFILE' }
.rlimit_memlock { 'RLIMIT_MEMLOCK' }
.rlimit_as { 'RLIMIT_AS' }
.rlimit_lock { 'RLIMIT_LOCK' }
.rlimit_sigpending { 'RLIMIT_SIGPENDING' }
.rlimit_msgqueue { 'RLIMIT_MSGQUEUE' }
.rlimit_nice { 'RLIMIT_NICE' }
.rlimit_rtprio { 'RLIMIT_RTPRIO' }
.rlimit_rttime { 'RLIMIT_RTTIME' }
}
}
// Configuration methods with builder pattern
pub fn (mut config CrunConfig) set_command(args []string) &CrunConfig {
config.spec.process.args = args
config.spec.process.args = args.clone()
return config
}
@@ -34,7 +122,7 @@ pub fn (mut config CrunConfig) set_user(uid u32, gid u32, additional_gids []u32)
config.spec.process.user = User{
uid: uid
gid: gid
additional_gids: additional_gids
additional_gids: additional_gids.clone()
}
return config
}
@@ -44,7 +132,6 @@ pub fn (mut config CrunConfig) add_env(key string, value string) &CrunConfig {
return config
}
// Root filesystem configuration
pub fn (mut config CrunConfig) set_rootfs(path string, readonly bool) &CrunConfig {
config.spec.root = Root{
path: path
@@ -53,106 +140,143 @@ pub fn (mut config CrunConfig) set_rootfs(path string, readonly bool) &CrunConfi
return config
}
// Hostname
pub fn (mut config CrunConfig) set_hostname(hostname string) &CrunConfig {
config.spec.hostname = hostname
return config
}
// Resource limits
pub fn (mut config CrunConfig) set_memory_limit(limit_bytes u64) &CrunConfig {
config.spec.linux.resources.memory_limit = limit_bytes
config.spec.linux.resources.memory.limit = limit_bytes
return config
}
pub fn (mut config CrunConfig) set_cpu_limits(period u64, quota i64, shares u64) &CrunConfig {
config.spec.linux.resources.cpu_period = period
config.spec.linux.resources.cpu_quota = quota
config.spec.linux.resources.cpu_shares = shares
config.spec.linux.resources.cpu.period = period
config.spec.linux.resources.cpu.quota = quota
config.spec.linux.resources.cpu.shares = shares
return config
}
pub fn (mut config CrunConfig) set_pids_limit(limit i64) &CrunConfig {
config.spec.linux.resources.pids.limit = limit
return config
}
// Add mount
pub fn (mut config CrunConfig) add_mount(destination string, source string, typ MountType, options []MountOption) &CrunConfig {
config.spec.mounts << Mount{
destination: destination
typ: typ
typ: typ.to_string()
source: source
options: options
options: options.map(it.to_string())
}
return config
}
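Since Mount now stores plain strings, add_mount converts both the type and the options with to_string() before appending; a small sketch of what ends up in the spec, written in the style of the tests above (container name and paths are illustrative, not part of the commit):

fn example_add_mount_stores_strings() ! {
	mut configs := map[string]&CrunConfig{}
	mut cfg := new(mut configs, name: 'mounts-demo')!
	// the enum arguments keep the call site type-safe
	cfg.add_mount('/data', '/host/data', .bind, [.rw, .rbind])
	// but the stored Mount holds the converted strings
	assert cfg.spec.mounts.last().typ == 'bind'
	assert 'rbind' in cfg.spec.mounts.last().options
}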
// Add capability
pub fn (mut config CrunConfig) add_capability(cap Capability) &CrunConfig {
if cap !in config.spec.process.capabilities.bounding {
config.spec.process.capabilities.bounding << cap
}
if cap !in config.spec.process.capabilities.effective {
config.spec.process.capabilities.effective << cap
}
if cap !in config.spec.process.capabilities.permitted {
config.spec.process.capabilities.permitted << cap
}
return config
}
// Remove capability
pub fn (mut config CrunConfig) remove_capability(cap Capability) &CrunConfig {
config.spec.process.capabilities.bounding = config.spec.process.capabilities.bounding.filter(it != cap)
config.spec.process.capabilities.effective = config.spec.process.capabilities.effective.filter(it != cap)
config.spec.process.capabilities.permitted = config.spec.process.capabilities.permitted.filter(it != cap)
return config
}
}
pub fn new(args FactoryArgs) !&CrunConfig {
name := texttools.name_fix(args.name)
cap_str := cap.to_string()
// Create default spec
default_spec := create_default_spec()
if cap_str !in config.spec.process.capabilities.bounding {
config.spec.process.capabilities.bounding << cap_str
}
if cap_str !in config.spec.process.capabilities.effective {
config.spec.process.capabilities.effective << cap_str
}
if cap_str !in config.spec.process.capabilities.permitted {
config.spec.process.capabilities.permitted << cap_str
}
return config
}
pub fn (mut config CrunConfig) remove_capability(cap Capability) &CrunConfig {
cap_str := cap.to_string()
config.spec.process.capabilities.bounding = config.spec.process.capabilities.bounding.filter(it != cap_str)
config.spec.process.capabilities.effective = config.spec.process.capabilities.effective.filter(it != cap_str)
config.spec.process.capabilities.permitted = config.spec.process.capabilities.permitted.filter(it != cap_str)
return config
}
pub fn (mut config CrunConfig) add_rlimit(typ RlimitType, hard u64, soft u64) &CrunConfig {
config.spec.process.rlimits << Rlimit{
typ: typ.to_string()
hard: hard
soft: soft
}
return config
}
pub fn (mut config CrunConfig) set_no_new_privileges(value bool) &CrunConfig {
config.spec.process.no_new_privileges = value
return config
}
pub fn (mut config CrunConfig) add_masked_path(path string) &CrunConfig {
if path !in config.spec.linux.masked_paths {
config.spec.linux.masked_paths << path
}
return config
}
pub fn (mut config CrunConfig) add_readonly_path(path string) &CrunConfig {
if path !in config.spec.linux.readonly_paths {
config.spec.linux.readonly_paths << path
}
return config
}
pub fn new(mut configs map[string]&CrunConfig, args FactoryArgs) !&CrunConfig {
name := texttools.name_fix(args.name)
mut config := &CrunConfig{
name: name
spec: default_spec
spec: create_default_spec()
}
crun_configs[name] = config
configs[name] = config
return config
}
pub fn get(args FactoryArgs) !&CrunConfig {
pub fn get(configs map[string]&CrunConfig, args FactoryArgs) !&CrunConfig {
name := texttools.name_fix(args.name)
return crun_configs[name] or {
return configs[name] or {
return error('crun config with name "${name}" does not exist')
}
}
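With the __global crun_configs registry removed, callers own the map and pass it to both new and get; a minimal sketch of that flow in the style of the tests above (names and values are illustrative only):

fn example_registry_lookup() ! {
	mut configs := map[string]&CrunConfig{}
	// the caller-owned map replaces the old module-level global
	mut cfg := new(mut configs, name: 'demo')!
	cfg.set_hostname('demo-host')
	// get reads from the same map, so it returns the same reference
	existing := get(configs, name: 'demo')!
	assert existing.name == 'demo'
	assert existing.spec.hostname == 'demo-host'
}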
fn create_default_spec() Spec {
return Spec{
version: '1.0.0'
// Create default spec that matches the heropods template
mut spec := Spec{
oci_version: '1.0.2' // Set default here
platform: Platform{
os: .linux
arch: .amd64
os: 'linux'
arch: 'amd64'
}
process: Process{
terminal: true
user: User{
uid: 0
gid: 0
additional_gids: []
}
args: ['/bin/sh']
env: ['PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin']
env: [
'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
'TERM=xterm'
]
cwd: '/'
capabilities: Capabilities{
bounding: [.cap_chown, .cap_dac_override, .cap_fsetid, .cap_fowner, .cap_mknod, .cap_net_raw, .cap_setgid, .cap_setuid, .cap_setfcap, .cap_setpcap, .cap_net_bind_service, .cap_sys_chroot, .cap_kill, .cap_audit_write]
effective: [.cap_chown, .cap_dac_override, .cap_fsetid, .cap_fowner, .cap_mknod, .cap_net_raw, .cap_setgid, .cap_setuid, .cap_setfcap, .cap_setpcap, .cap_net_bind_service, .cap_sys_chroot, .cap_kill, .cap_audit_write]
inheritable: []
permitted: [.cap_chown, .cap_dac_override, .cap_fsetid, .cap_fowner, .cap_mknod, .cap_net_raw, .cap_setgid, .cap_setuid, .cap_setfcap, .cap_setpcap, .cap_net_bind_service, .cap_sys_chroot, .cap_kill, .cap_audit_write]
ambient: []
bounding: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
effective: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
inheritable: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
permitted: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
}
rlimits: []
rlimits: [
Rlimit{
typ: 'RLIMIT_NOFILE'
hard: 1024
soft: 1024
}
]
no_new_privileges: true // No JSON annotation needed here
}
root: Root{
path: 'rootfs'
@@ -162,20 +286,38 @@ fn create_default_spec() Spec {
mounts: create_default_mounts()
linux: Linux{
namespaces: create_default_namespaces()
resources: LinuxResource{}
devices: []
masked_paths: [
'/proc/acpi',
'/proc/kcore',
'/proc/keys',
'/proc/latency_stats',
'/proc/timer_list',
'/proc/timer_stats',
'/proc/sched_debug',
'/proc/scsi',
'/sys/firmware'
]
readonly_paths: [
'/proc/asound',
'/proc/bus',
'/proc/fs',
'/proc/irq',
'/proc/sys',
'/proc/sysrq-trigger'
]
}
hooks: Hooks{}
}
return spec
}
fn create_default_namespaces() []LinuxNamespace {
return [
LinuxNamespace{typ: 'pid', path: ''},
LinuxNamespace{typ: 'network', path: ''},
LinuxNamespace{typ: 'ipc', path: ''},
LinuxNamespace{typ: 'uts', path: ''},
LinuxNamespace{typ: 'mount', path: ''},
LinuxNamespace{typ: 'pid'},
LinuxNamespace{typ: 'network'},
LinuxNamespace{typ: 'ipc'},
LinuxNamespace{typ: 'uts'},
LinuxNamespace{typ: 'mount'},
]
}
@@ -183,21 +325,20 @@ fn create_default_mounts() []Mount {
return [
Mount{
destination: '/proc'
typ: .proc
typ: 'proc'
source: 'proc'
options: ['nosuid', 'noexec', 'nodev']
},
Mount{
destination: '/dev'
typ: .tmpfs
typ: 'tmpfs'
source: 'tmpfs'
options: [.nosuid]
options: ['nosuid', 'strictatime', 'mode=755', 'size=65536k']
},
Mount{
destination: '/sys'
typ: .sysfs
typ: 'sysfs'
source: 'sysfs'
options: [.nosuid, .noexec, .nodev, .ro]
options: ['nosuid', 'noexec', 'nodev', 'ro']
},
]
}

View File

@@ -1,158 +1,170 @@
module crun
struct LinuxNamespace {
typ string
path string
// OCI Runtime Spec structures that can be directly encoded to JSON
pub struct Spec {
pub mut:
oci_version string
platform Platform
process Process
root Root
hostname string
mounts []Mount
linux Linux
hooks Hooks
}
struct LinuxIDMapping {
container_id u32
host_id u32
size u32
pub struct Platform {
pub mut:
os string = 'linux'
arch string = 'amd64'
}
struct LinuxResource {
blkio_weight u16
blkio_weight_device []string
blkio_throttle_read_bps_device []string
blkio_throttle_write_bps_device []string
blkio_throttle_read_iops_device []string
blkio_throttle_write_iops_device []string
cpu_period u64
cpu_quota i64
cpu_shares u64
cpuset_cpus string
cpuset_mems string
devices []string
memory_limit u64
memory_reservation u64
memory_swap_limit u64
memory_kernel_limit u64
memory_swappiness i64
pids_limit i64
pub struct Process {
pub mut:
terminal bool = true
user User
args []string
env []string
cwd string = '/'
capabilities Capabilities
rlimits []Rlimit
no_new_privileges bool
}
struct LinuxDevice {
typ string
major int
minor int
permissions string
file_mode u32
uid u32
gid u32
}
struct Hooks {
prestart []string
poststart []string
poststop []string
}
// see https://github.com/opencontainers/runtime-spec/blob/main/config.md#process
struct Process {
terminal bool
user User
args []string
env []string // do as dict
cwd string
capabilities Capabilities
rlimits []Rlimit
}
// Enum for Rlimit types
enum RlimitType {
rlimit_cpu
rlimit_fsize
rlimit_data
rlimit_stack
rlimit_core
rlimit_rss
rlimit_nproc
rlimit_nofile
rlimit_memlock
rlimit_as
rlimit_lock
rlimit_sigpending
rlimit_msgqueue
rlimit_nice
rlimit_rtprio
rlimit_rttime
}
// Struct for Rlimit using enumerator
struct Rlimit {
typ RlimitType
hard u64
soft u64
}
struct User {
pub struct User {
pub mut:
uid u32
gid u32
additional_gids []u32
}
struct Root {
pub struct Capabilities {
pub mut:
bounding []string
effective []string
inheritable []string
permitted []string
ambient []string
}
pub struct Rlimit {
pub mut:
typ string
hard u64
soft u64
}
pub struct Root {
pub mut:
path string
readonly bool
}
struct Linux {
namespaces []LinuxNamespace
resources LinuxResource
devices []LinuxDevice
pub struct Mount {
pub mut:
destination string
typ string
source string
options []string
}
struct Spec {
version string
platform Platform
process Process
root Root
hostname string
mounts []Mount
linux Linux
hooks Hooks
pub struct Linux {
pub mut:
namespaces []LinuxNamespace
resources LinuxResources
devices []LinuxDevice
masked_paths []string
readonly_paths []string
uid_mappings []LinuxIDMapping
gid_mappings []LinuxIDMapping
}
// Enum for supported operating systems
enum OSType {
linux
windows
darwin
solaris
// Add other OS types as needed
pub struct LinuxNamespace {
pub mut:
typ string
path string
}
// Enum for supported architectures
enum ArchType {
amd64
arm64
arm
ppc64
s390x
// Add other architectures as needed
pub struct LinuxResources {
pub mut:
memory Memory
cpu CPU
pids Pids
blkio BlockIO
}
// Struct for Platform using enums
struct Platform {
os OSType
arch ArchType
pub struct Memory {
pub mut:
limit u64
reservation u64
swap u64
kernel u64
swappiness i64
}
// Enum for mount types
enum MountType {
pub struct CPU {
pub mut:
shares u64
quota i64
period u64
cpus string
mems string
}
pub struct Pids {
pub mut:
limit i64
}
pub struct BlockIO {
pub mut:
weight u16
}
pub struct LinuxDevice {
pub mut:
path string
typ string
major i64
minor i64
file_mode u32
uid u32
gid u32
}
pub struct LinuxIDMapping {
pub mut:
container_id u32
host_id u32
size u32
}
pub struct Hooks {
pub mut:
prestart []Hook
poststart []Hook
poststop []Hook
}
pub struct Hook {
pub mut:
path string
args []string
env []string
}
// Enums for type safety but convert to strings
pub enum MountType {
bind
tmpfs
nfs
overlay
devpts
proc
sysfs
// Add other mount types as needed
devpts
nfs
overlay
}
// Enum for mount options
enum MountOption {
pub enum MountOption {
rw
ro
noexec
@@ -160,18 +172,12 @@ enum MountOption {
nodev
rbind
relatime
// Add other options as needed
strictatime
mode
size
}
// Struct for Mount using enums
struct Mount {
destination string
typ MountType
source string
options []MountOption
}
enum Capability {
pub enum Capability {
cap_chown
cap_dac_override
cap_dac_read_search
@@ -212,10 +218,21 @@ enum Capability {
cap_audit_read
}
struct Capabilities {
bounding []Capability
effective []Capability
inheritable []Capability
permitted []Capability
ambient []Capability
}
pub enum RlimitType {
rlimit_cpu
rlimit_fsize
rlimit_data
rlimit_stack
rlimit_core
rlimit_rss
rlimit_nproc
rlimit_nofile
rlimit_memlock
rlimit_as
rlimit_lock
rlimit_sigpending
rlimit_msgqueue
rlimit_nice
rlimit_rtprio
rlimit_rttime
}

View File

@@ -1,280 +1,40 @@
module crun
import json
import freeflowuniverse.herolib.core.pathlib
// Convert enum values to their string representations
fn (os OSType) to_json_string() string {
return match os {
.linux { 'linux' }
.windows { 'windows' }
.darwin { 'darwin' }
.solaris { 'solaris' }
}
}
fn (arch ArchType) to_json_string() string {
return match arch {
.amd64 { 'amd64' }
.arm64 { 'arm64' }
.arm { 'arm' }
.ppc64 { 'ppc64' }
.s390x { 's390x' }
}
}
fn (mount_type MountType) to_json_string() string {
return match mount_type {
.bind { 'bind' }
.tmpfs { 'tmpfs' }
.nfs { 'nfs' }
.overlay { 'overlay' }
.devpts { 'devpts' }
.proc { 'proc' }
.sysfs { 'sysfs' }
}
}
fn (option MountOption) to_json_string() string {
return match option {
.rw { 'rw' }
.ro { 'ro' }
.noexec { 'noexec' }
.nosuid { 'nosuid' }
.nodev { 'nodev' }
.rbind { 'rbind' }
.relatime { 'relatime' }
}
}
fn (cap Capability) to_json_string() string {
return match cap {
.cap_chown { 'CAP_CHOWN' }
.cap_dac_override { 'CAP_DAC_OVERRIDE' }
.cap_dac_read_search { 'CAP_DAC_READ_SEARCH' }
.cap_fowner { 'CAP_FOWNER' }
.cap_fsetid { 'CAP_FSETID' }
.cap_kill { 'CAP_KILL' }
.cap_setgid { 'CAP_SETGID' }
.cap_setuid { 'CAP_SETUID' }
.cap_setpcap { 'CAP_SETPCAP' }
.cap_linux_immutable { 'CAP_LINUX_IMMUTABLE' }
.cap_net_bind_service { 'CAP_NET_BIND_SERVICE' }
.cap_net_broadcast { 'CAP_NET_BROADCAST' }
.cap_net_admin { 'CAP_NET_ADMIN' }
.cap_net_raw { 'CAP_NET_RAW' }
.cap_ipc_lock { 'CAP_IPC_LOCK' }
.cap_ipc_owner { 'CAP_IPC_OWNER' }
.cap_sys_module { 'CAP_SYS_MODULE' }
.cap_sys_rawio { 'CAP_SYS_RAWIO' }
.cap_sys_chroot { 'CAP_SYS_CHROOT' }
.cap_sys_ptrace { 'CAP_SYS_PTRACE' }
.cap_sys_pacct { 'CAP_SYS_PACCT' }
.cap_sys_admin { 'CAP_SYS_ADMIN' }
.cap_sys_boot { 'CAP_SYS_BOOT' }
.cap_sys_nice { 'CAP_SYS_NICE' }
.cap_sys_resource { 'CAP_SYS_RESOURCE' }
.cap_sys_time { 'CAP_SYS_TIME' }
.cap_sys_tty_config { 'CAP_SYS_TTY_CONFIG' }
.cap_mknod { 'CAP_MKNOD' }
.cap_lease { 'CAP_LEASE' }
.cap_audit_write { 'CAP_AUDIT_WRITE' }
.cap_audit_control { 'CAP_AUDIT_CONTROL' }
.cap_setfcap { 'CAP_SETFCAP' }
.cap_mac_override { 'CAP_MAC_OVERRIDE' }
.cap_mac_admin { 'CAP_MAC_ADMIN' }
.cap_syslog { 'CAP_SYSLOG' }
.cap_wake_alarm { 'CAP_WAKE_ALARM' }
.cap_block_suspend { 'CAP_BLOCK_SUSPEND' }
.cap_audit_read { 'CAP_AUDIT_READ' }
}
}
fn (rlimit RlimitType) to_json_string() string {
return match rlimit {
.rlimit_cpu { 'RLIMIT_CPU' }
.rlimit_fsize { 'RLIMIT_FSIZE' }
.rlimit_data { 'RLIMIT_DATA' }
.rlimit_stack { 'RLIMIT_STACK' }
.rlimit_core { 'RLIMIT_CORE' }
.rlimit_rss { 'RLIMIT_RSS' }
.rlimit_nproc { 'RLIMIT_NPROC' }
.rlimit_nofile { 'RLIMIT_NOFILE' }
.rlimit_memlock { 'RLIMIT_MEMLOCK' }
.rlimit_as { 'RLIMIT_AS' }
.rlimit_lock { 'RLIMIT_LOCK' }
.rlimit_sigpending { 'RLIMIT_SIGPENDING' }
.rlimit_msgqueue { 'RLIMIT_MSGQUEUE' }
.rlimit_nice { 'RLIMIT_NICE' }
.rlimit_rtprio { 'RLIMIT_RTPRIO' }
.rlimit_rttime { 'RLIMIT_RTTIME' }
}
}
// Main method to generate complete OCI spec JSON
// Simple JSON generation using V's built-in json module
pub fn (config CrunConfig) to_json() !string {
spec_map := map[string]json.Any{}
// Basic spec fields
spec_map['ociVersion'] = config.spec.version
// Platform
spec_map['platform'] = map[string]json.Any{
'os': config.spec.platform.os.to_json_string()
'arch': config.spec.platform.arch.to_json_string()
}
// Process
process_map := map[string]json.Any{}
process_map['terminal'] = config.spec.process.terminal
process_map['user'] = map[string]json.Any{
'uid': int(config.spec.process.user.uid)
'gid': int(config.spec.process.user.gid)
'additionalGids': config.spec.process.user.additional_gids.map(int(it))
}
process_map['args'] = config.spec.process.args
process_map['env'] = config.spec.process.env
process_map['cwd'] = config.spec.process.cwd
// Capabilities
if config.spec.process.capabilities.bounding.len > 0 ||
config.spec.process.capabilities.effective.len > 0 ||
config.spec.process.capabilities.inheritable.len > 0 ||
config.spec.process.capabilities.permitted.len > 0 ||
config.spec.process.capabilities.ambient.len > 0 {
capabilities_map := map[string]json.Any{}
if config.spec.process.capabilities.bounding.len > 0 {
capabilities_map['bounding'] = config.spec.process.capabilities.bounding.map(it.to_json_string())
}
if config.spec.process.capabilities.effective.len > 0 {
capabilities_map['effective'] = config.spec.process.capabilities.effective.map(it.to_json_string())
}
if config.spec.process.capabilities.inheritable.len > 0 {
capabilities_map['inheritable'] = config.spec.process.capabilities.inheritable.map(it.to_json_string())
}
if config.spec.process.capabilities.permitted.len > 0 {
capabilities_map['permitted'] = config.spec.process.capabilities.permitted.map(it.to_json_string())
}
if config.spec.process.capabilities.ambient.len > 0 {
capabilities_map['ambient'] = config.spec.process.capabilities.ambient.map(it.to_json_string())
}
process_map['capabilities'] = capabilities_map
}
// Rlimits
if config.spec.process.rlimits.len > 0 {
rlimits_array := []json.Any{}
for rlimit in config.spec.process.rlimits {
rlimits_array << map[string]json.Any{
'type': rlimit.typ.to_json_string()
'hard': int(rlimit.hard)
'soft': int(rlimit.soft)
}
}
process_map['rlimits'] = rlimits_array
}
spec_map['process'] = process_map
// Root
spec_map['root'] = map[string]json.Any{
'path': config.spec.root.path
'readonly': config.spec.root.readonly
}
// Hostname
if config.spec.hostname != '' {
spec_map['hostname'] = config.spec.hostname
}
// Mounts
if config.spec.mounts.len > 0 {
mounts_array := []json.Any{}
for mount in config.spec.mounts {
mount_map := map[string]json.Any{
'destination': mount.destination
'type': mount.typ.to_json_string()
'source': mount.source
}
if mount.options.len > 0 {
mount_map['options'] = mount.options.map(it.to_json_string())
}
mounts_array << mount_map
}
spec_map['mounts'] = mounts_array
}
// Linux specific configuration
linux_map := map[string]json.Any{}
// Namespaces
if config.spec.linux.namespaces.len > 0 {
namespaces_array := []json.Any{}
for ns in config.spec.linux.namespaces {
ns_map := map[string]json.Any{
'type': ns.typ
}
if ns.path != '' {
ns_map['path'] = ns.path
}
namespaces_array << ns_map
}
linux_map['namespaces'] = namespaces_array
}
// Resources
resources_map := map[string]json.Any{}
has_resources := false
if config.spec.linux.resources.memory_limit > 0 {
memory_map := map[string]json.Any{
'limit': int(config.spec.linux.resources.memory_limit)
}
if config.spec.linux.resources.memory_reservation > 0 {
memory_map['reservation'] = int(config.spec.linux.resources.memory_reservation)
}
if config.spec.linux.resources.memory_swap_limit > 0 {
memory_map['swap'] = int(config.spec.linux.resources.memory_swap_limit)
}
resources_map['memory'] = memory_map
has_resources = true
}
if config.spec.linux.resources.cpu_period > 0 || config.spec.linux.resources.cpu_quota > 0 || config.spec.linux.resources.cpu_shares > 0 {
cpu_map := map[string]json.Any{}
if config.spec.linux.resources.cpu_period > 0 {
cpu_map['period'] = int(config.spec.linux.resources.cpu_period)
}
if config.spec.linux.resources.cpu_quota > 0 {
cpu_map['quota'] = int(config.spec.linux.resources.cpu_quota)
}
if config.spec.linux.resources.cpu_shares > 0 {
cpu_map['shares'] = int(config.spec.linux.resources.cpu_shares)
}
resources_map['cpu'] = cpu_map
has_resources = true
}
if config.spec.linux.resources.pids_limit > 0 {
resources_map['pids'] = map[string]json.Any{
'limit': int(config.spec.linux.resources.pids_limit)
}
has_resources = true
}
if has_resources {
linux_map['resources'] = resources_map
}
spec_map['linux'] = linux_map
return json.encode_pretty(spec_map)
return json.encode_pretty(config.spec)
}
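Note that json.encode_pretty(config.spec) only emits the OCI camelCase keys the tests check for (ociVersion, noNewPrivileges) if the struct fields carry JSON name attributes, which the hunks above do not show; a hedged illustration of that attribute on a throwaway struct (not the repo's own types):

// Hypothetical struct, only to show V's @[json: '...'] rename attribute.
struct DemoProcess {
	no_new_privileges bool @[json: 'noNewPrivileges']
	cwd               string
}

fn demo_encode_names() string {
	p := DemoProcess{
		no_new_privileges: true
		cwd: '/'
	}
	// pretty output would contain "noNewPrivileges": true rather than "no_new_privileges"
	return json.encode_pretty(p)
}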
// Convenience method to save JSON to file
pub fn (config CrunConfig) save_to_file(path string) ! {
json_content := config.to_json()!
mut file := pathlib.get_file(path: path, create: true)!
file.write(json_content)!
}
// Validate the configuration
pub fn (config CrunConfig) validate() ! {
if config.spec.oci_version == '' {
return error('ociVersion cannot be empty')
}
if config.spec.process.args.len == 0 {
return error('process.args cannot be empty')
}
if config.spec.root.path == '' {
return error('root.path cannot be empty')
}
// Validate that required capabilities are present
required_caps := ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
for cap in required_caps {
if cap !in config.spec.process.capabilities.bounding {
return error('missing required capability: ${cap}')
}
}
}

View File

@@ -10,12 +10,12 @@ import json
@[heap]
pub struct ContainerImage {
pub mut:
image_name string @[required] // image is located in /containers/images/<image_name>/rootfs
docker_url string // optional docker image URL
image_name string @[required] // image is located in /containers/images/<image_name>/rootfs
docker_url string // optional docker image URL
rootfs_path string // path to the extracted rootfs
size_mb f64 // size in MB
created_at string // creation timestamp
factory &ContainerFactory @[skip; str: skip]
size_mb f64 // size in MB
created_at string // creation timestamp
factory &ContainerFactory @[skip; str: skip]
}
@[params]
@@ -23,13 +23,13 @@ pub struct ContainerImageArgs {
pub mut:
image_name string @[required] // image is located in /containers/images/<image_name>/rootfs
docker_url string // docker image URL like "alpine:3.20" or "ubuntu:24.04"
reset bool
reset bool
}
@[params]
pub struct ImageExportArgs {
pub mut:
dest_path string @[required] // destination .tgz file path
dest_path string @[required] // destination .tgz file path
compress_level int = 6 // compression level 1-9
}
@@ -37,31 +37,31 @@ pub mut:
pub struct ImageImportArgs {
pub mut:
source_path string @[required] // source .tgz file path
reset bool // overwrite if exists
reset bool // overwrite if exists
}
// Create new image or get existing
pub fn (mut self ContainerFactory) image_new(args ContainerImageArgs) !&ContainerImage {
mut image_name := texttools.name_fix(args.image_name)
rootfs_path := '/containers/images/${image_name}/rootfs'
// Check if image already exists
if image_name in self.images && !args.reset {
return self.images[image_name]
return self.images[image_name] or { panic('bug') }
}
// Ensure podman is installed
if !osal.cmd_exists('podman') {
return error('Podman is required for image management. Please install podman first.')
}
mut image := &ContainerImage{
image_name: image_name
docker_url: args.docker_url
image_name: image_name
docker_url: args.docker_url
rootfs_path: rootfs_path
factory: &self
factory: &self
}
// If docker_url is provided, download and extract the image
if args.docker_url != '' {
image.download_from_docker(args.docker_url, args.reset)!
@@ -71,10 +71,10 @@ pub fn (mut self ContainerFactory) image_new(args ContainerImageArgs) !&Containe
return error('Image rootfs not found at ${rootfs_path} and no docker_url provided')
}
}
// Update image metadata
image.update_metadata()!
self.images[image_name] = image
return image
}
@@ -82,41 +82,41 @@ pub fn (mut self ContainerFactory) image_new(args ContainerImageArgs) !&Containe
// Download image from docker registry using podman
fn (mut self ContainerImage) download_from_docker(docker_url string, reset bool) ! {
console.print_header('Downloading image: ${docker_url}')
// Clean image name for local storage
image_dir := '/containers/images/${self.image_name}'
// Remove existing if reset is true
if reset && os.is_dir(image_dir) {
osal.exec(cmd: 'rm -rf ${image_dir}', stdout: false)!
}
// Create image directory
osal.exec(cmd: 'mkdir -p ${image_dir}', stdout: false)!
// Pull image using podman
console.print_debug('Pulling image: ${docker_url}')
osal.exec(cmd: 'podman pull ${docker_url}', stdout: true)!
// Create container from image (without running it)
temp_container := 'temp_${self.image_name}_extract'
osal.exec(cmd: 'podman create --name ${temp_container} ${docker_url}', stdout: false)!
// Export container filesystem
tar_file := '${image_dir}/rootfs.tar'
osal.exec(cmd: 'podman export ${temp_container} -o ${tar_file}', stdout: true)!
// Extract to rootfs directory
osal.exec(cmd: 'mkdir -p ${self.rootfs_path}', stdout: false)!
osal.exec(cmd: 'tar -xf ${tar_file} -C ${self.rootfs_path}', stdout: true)!
// Clean up temporary container and tar file
osal.exec(cmd: 'podman rm ${temp_container}', stdout: false) or {}
osal.exec(cmd: 'rm -f ${tar_file}', stdout: false) or {}
// Remove the pulled image from podman to save space (optional)
osal.exec(cmd: 'podman rmi ${docker_url}', stdout: false) or {}
console.print_green('Image ${docker_url} extracted to ${self.rootfs_path}')
}
@@ -125,26 +125,27 @@ fn (mut self ContainerImage) update_metadata() ! {
if !os.is_dir(self.rootfs_path) {
return error('Rootfs path does not exist: ${self.rootfs_path}')
}
// Calculate size
result := osal.exec(cmd: 'du -sm ${self.rootfs_path}', stdout: false)!
size_str := result.split('\t')[0].trim_space()
result_parts := result.output.split_by_space()[0] or { panic('bug') }
size_str := result_parts.trim_space()
self.size_mb = size_str.f64()
// Get creation time
stat_result := osal.exec(cmd: 'stat -c "%Y" ${self.rootfs_path}', stdout: false)!
self.created_at = stat_result.trim_space()
self.created_at = stat_result.output.trim_space() // TODO: should this be ourtime?
}
// List all available images
pub fn (mut self ContainerFactory) images_list() ![]&ContainerImage {
mut images := []&ContainerImage{}
images_base_dir := '/containers/images'
if !os.is_dir(images_base_dir) {
return images
}
// Scan for image directories
dirs := os.ls(images_base_dir)!
for dir in dirs {
@@ -155,9 +156,9 @@ pub fn (mut self ContainerFactory) images_list() ![]&ContainerImage {
// Create image object if not in cache
if dir !in self.images {
mut image := &ContainerImage{
image_name: dir
image_name: dir
rootfs_path: rootfs_path
factory: &self
factory: &self
}
image.update_metadata() or {
console.print_stderr('Failed to update metadata for image ${dir}: ${err}')
@@ -165,11 +166,11 @@ pub fn (mut self ContainerFactory) images_list() ![]&ContainerImage {
}
self.images[dir] = image
}
images << self.images[dir]
images << self.images[dir] or { panic('bug') }
}
}
}
return images
}
@@ -178,17 +179,17 @@ pub fn (mut self ContainerImage) export(args ImageExportArgs) ! {
if !os.is_dir(self.rootfs_path) {
return error('Image rootfs not found: ${self.rootfs_path}')
}
console.print_header('Exporting image ${self.image_name} to ${args.dest_path}')
// Ensure destination directory exists
dest_dir := os.dir(args.dest_path)
osal.exec(cmd: 'mkdir -p ${dest_dir}', stdout: false)!
// Create compressed archive
cmd := 'tar -czf ${args.dest_path} -C ${os.dir(self.rootfs_path)} ${os.base(self.rootfs_path)}'
osal.exec(cmd: cmd, stdout: true)!
console.print_green('Image exported successfully to ${args.dest_path}')
}
@@ -197,43 +198,43 @@ pub fn (mut self ContainerFactory) image_import(args ImageImportArgs) !&Containe
if !os.exists(args.source_path) {
return error('Source file not found: ${args.source_path}')
}
// Extract image name from filename
filename := os.base(args.source_path)
image_name := filename.replace('.tgz', '').replace('.tar.gz', '')
image_name_clean := texttools.name_fix(image_name)
console.print_header('Importing image from ${args.source_path}')
image_dir := '/containers/images/${image_name_clean}'
rootfs_path := '${image_dir}/rootfs'
// Check if image already exists
if os.is_dir(rootfs_path) && !args.reset {
return error('Image ${image_name_clean} already exists. Use reset=true to overwrite.')
}
// Remove existing if reset
if args.reset && os.is_dir(image_dir) {
osal.exec(cmd: 'rm -rf ${image_dir}', stdout: false)!
}
// Create directories
osal.exec(cmd: 'mkdir -p ${image_dir}', stdout: false)!
// Extract archive
osal.exec(cmd: 'tar -xzf ${args.source_path} -C ${image_dir}', stdout: true)!
// Create image object
mut image := &ContainerImage{
image_name: image_name_clean
image_name: image_name_clean
rootfs_path: rootfs_path
factory: &self
factory: &self
}
image.update_metadata()!
self.images[image_name_clean] = image
console.print_green('Image imported successfully: ${image_name_clean}')
return image
}
@@ -241,28 +242,28 @@ pub fn (mut self ContainerFactory) image_import(args ImageImportArgs) !&Containe
// Delete image
pub fn (mut self ContainerImage) delete() ! {
console.print_header('Deleting image: ${self.image_name}')
image_dir := os.dir(self.rootfs_path)
if os.is_dir(image_dir) {
osal.exec(cmd: 'rm -rf ${image_dir}', stdout: true)!
}
// Remove from factory cache
if self.image_name in self.factory.images {
self.factory.images.delete(self.image_name)
}
console.print_green('Image ${self.image_name} deleted successfully')
}
// Get image info as map
pub fn (self ContainerImage) info() map[string]string {
return {
'name': self.image_name
'docker_url': self.docker_url
'name': self.image_name
'docker_url': self.docker_url
'rootfs_path': self.rootfs_path
'size_mb': self.size_mb.str()
'created_at': self.created_at
'size_mb': self.size_mb.str()
'created_at': self.created_at
}
}
@@ -291,4 +292,4 @@ pub fn list_available_docker_images() []string {
'node:20-alpine',
'python:3.12-alpine',
]
}
}