Merge branch 'development' into development_kristof

This commit is contained in:
2025-01-31 15:40:18 +03:00
33 changed files with 661 additions and 228 deletions

View File

@@ -4,7 +4,10 @@ import freeflowuniverse.herolib.data.ipaddress
// get node connection to local machine
pub fn (mut bldr BuilderFactory) node_local() !&Node {
return bldr.node_new(name: 'localhost')
return bldr.node_new(
name: 'localhost'
ipaddr: '127.0.0.1'
)
}
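// usage sketch (assumed caller, mirrors the openssl usage later in this
// changeset): with the explicit address the local node resolves to 127.0.0.1:
//   mut b := builder.new()!
//   mut node := b.node_local()!
//   node.exec(cmd: 'uname -a')!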
// format ipaddr: localhost:7777 .
@@ -64,7 +67,6 @@ pub fn (mut bldr BuilderFactory) node_new(args_ NodeArguments) !&Node {
mut iadd := ipaddress.new(args.ipaddr)!
node.name = iadd.toname()!
}
wasincache := node.load()!
if wasincache && args.reload {

View File

@@ -5,6 +5,7 @@ import json
@[params]
pub struct OpenSSLGenerateArgs {
pub:
name string = 'default'
domain string = 'myregistry.domain.com'
reset bool
@@ -22,6 +23,7 @@ pub fn (mut ossl OpenSSL) generate(args OpenSSLGenerateArgs) !OpenSSLKey {
'
mut b := builder.new()!
println('b: ${b}')
mut node := b.node_local()!
node.exec(cmd: cmd)!

View File

@@ -4,7 +4,7 @@ import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.osal.screen
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.core.httpconnection
import os
@[params]

View File

@@ -1,4 +1,3 @@
!!hero_code.generate_installer
name:'buildah'
classname:'BuildahInstaller'
@@ -10,4 +9,4 @@
reset:0
startupmanager:0
hasconfig:0
build:1
build:0

View File

@@ -2,59 +2,26 @@ module buildah
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.installers.ulist
import freeflowuniverse.herolib.installers.lang.golang
import os
import freeflowuniverse.herolib.core
// checks if a certain version or above is installed
fn installed_() !bool {
res := os.execute('${osal.profile_path_source_and()!} buildah -v')
if res.exit_code != 0 {
return false
}
r := res.output.split_into_lines().filter(it.trim_space().len > 0)
if r.len != 1 {
return error("couldn't parse herocontainers version, expected 'buildah -v' on 1 row.\n${res.output}")
}
v := texttools.version(r[0].all_after('version').all_before('(').replace('-dev', ''))
if texttools.version(version) == v {
return true
}
return false
fn installed() !bool {
osal.execute_silent('buildah -v') or { return false }
return true
}
fn install_() ! {
fn install() ! {
console.print_header('install buildah')
build()!
}
if core.platform()! != .ubuntu {
return error('Only ubuntu is supported for now')
}
fn build_() ! {
console.print_header('build buildah')
osal.package_install('runc,bats,btrfs-progs,git,go-md2man,libapparmor-dev,libglib2.0-dev,libgpgme11-dev,libseccomp-dev,libselinux1-dev,make,skopeo,libbtrfs-dev')!
mut g := golang.get()!
g.install()!
cmd := '
cd /tmp
rm -rf buildah
git clone https://github.com/containers/buildah
cd buildah
make SECURITYTAGS="apparmor seccomp"
'
cmd := 'sudo apt-get -y update && sudo apt-get -y install buildah'
osal.execute_stdout(cmd)!
// now copy to the default bin path
osal.cmd_add(
cmdname: 'buildah'
source: '/tmp/buildah/bin/buildah'
)!
osal.rm('
/tmp/buildah
')!
console.print_header('Buildah Installed Successfully')
}
// get the Upload List of the files
@@ -64,17 +31,6 @@ fn ulist_get() !ulist.UList {
return ulist.UList{}
}
fn destroy_() ! {
osal.package_remove('
buildah
')!
// will remove all paths where go/bin is found
osal.profile_path_add_remove(paths2delete: 'go/bin')!
osal.rm('
buildah
/var/lib/buildah
/tmp/buildah
')!
fn destroy() ! {
osal.execute_stdout('sudo apt remove --purge -y buildah')!
}

View File

@@ -14,29 +14,61 @@ __global (
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string
}
pub fn get(args_ ArgsGet) !&BuildahInstaller {
return &BuildahInstaller{}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.zinit {
console.print_debug('startupmanager: zinit')
return startupmanager.get(cat: .zinit)!
}
.systemd {
console.print_debug('startupmanager: systemd')
return startupmanager.get(cat: .systemd)!
}
else {
console.print_debug('startupmanager: auto')
return startupmanager.get()!
}
}
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn install(args InstallArgs) ! {
if args.reset {
destroy()!
}
if !(installed_()!) {
install_()!
pub fn (mut self BuildahInstaller) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn destroy() ! {
destroy_()!
pub fn (mut self BuildahInstaller) destroy() ! {
switch(self.name)
destroy()!
}
pub fn build() ! {
build_()!
// switch instance to be used for buildah
pub fn switch(name string) {
buildah_default = name
}

View File

@@ -1,18 +1,22 @@
module buildah
pub const version = '1.38.0'
const singleton = true
const default = true
// THIS IS THE SOURCE OF THE INFORMATION FOR THIS FILE; THE CONFIG OBJECT IS CONFIGURED AND MODELLED HERE
@[heap]
pub struct BuildahInstaller {
pub mut:
name string = 'default'
}
fn obj_init(obj_ BuildahInstaller) !BuildahInstaller {
// never call get here, only thing we can do here is work on object itself
mut obj := obj_
return obj
}
// called before start if done
fn configure() ! {
// mut installer := get()!
}

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'docker'
classname:'DockerInstaller'
singleton:0
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:1
hasconfig:0
build:0

View File

@@ -1,64 +0,0 @@
module docker
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.ui.console
// install docker will return true if it was already installed
pub fn install_() ! {
console.print_header('package install install docker')
if core.platform() != .ubuntu {
return error('only support ubuntu for now')
}
base.install()!
if !osal.done_exists('install_docker') && !osal.cmd_exists('docker') {
// osal.upgrade()!
osal.package_install('mc,wget,htop,apt-transport-https,ca-certificates,curl,software-properties-common')!
cmd := '
rm -f /usr/share/keyrings/docker-archive-keyring.gpg
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
apt update
apt-cache policy docker-ce
#systemctl status docker
'
osal.execute_silent(cmd)!
osal.package_install('docker-ce')!
check()!
osal.done_set('install_docker', 'OK')!
}
console.print_header('docker already done')
}
pub fn check() ! {
// todo: do a monitoring check to see if it works
cmd := '
# Check if docker command exists
if ! command -v docker &> /dev/null; then
echo "Error: Docker command-line tool is not installed."
exit 1
fi
# Check if Docker daemon is running
if ! pgrep -f "dockerd" &> /dev/null; then
echo "Error: Docker daemon is not running."
exit 1
fi
# Run the hello-world Docker container
output=$(docker run hello-world 2>&1)
if [[ "\$output" == *"Hello from Docker!"* ]]; then
echo "Docker is installed and running properly."
else
echo "Error: Failed to run the Docker hello-world container."
echo "Output: \$output"
exit 1
fi
'
r := osal.execute_silent(cmd)!
console.print_debug(r)
}

View File

@@ -0,0 +1,106 @@
module docker
import freeflowuniverse.herolib.core
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.installers.ulist
fn startupcmd() ![]zinit.ZProcessNewArgs {
mut res := []zinit.ZProcessNewArgs{}
res << zinit.ZProcessNewArgs{
name: 'docker'
cmd: 'dockerd'
}
return res
}
fn running() !bool {
console.print_header('Checking if Docker is running')
is_installed := installed() or {
return error('Cannot execute the docker command; check that docker is installed or call the `install()` method: ${err}')
}
if !is_installed {
return false
}
// Check whether the Docker daemon responds
cmd := 'docker ps'
osal.execute_stdout(cmd) or { return false }
console.print_header('Docker is running')
return true
}
fn start_pre() ! {
}
fn start_post() ! {
}
fn stop_pre() ! {
}
fn stop_post() ! {
}
//////////////////// following actions are not specific to instance of the object
// checks if docker is installed (presence only, no version check)
fn installed() !bool {
console.print_header('Checking if Docker is installed')
cmd := 'docker -v'
osal.execute_stdout(cmd) or { return false }
console.print_header('Docker is installed')
return true
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {}
fn install() ! {
console.print_header('Installing Docker')
if core.platform()! != .ubuntu {
return error('only Ubuntu is supported for now')
}
mut cmd := '
sudo apt-get update -y
sudo apt-get install -y ca-certificates curl
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "\$VERSION_CODENAME") stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update -y
'
osal.execute_stdout(cmd) or { return error('Cannot install docker due to: ${err}') }
cmd = 'sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin'
osal.execute_stdout(cmd) or { return error('Cannot install docker due to: ${err}') }
console.print_header('Docker installed successfully')
}
fn destroy() ! {
console.print_header('Removing Docker')
// Uninstall the Docker Engine, CLI, containerd, and Docker Compose packages:
mut cmd := 'sudo apt-get purge -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras'
osal.execute_stdout(cmd) or { return error('Cannot uninstall docker due to: ${err}') }
// Images, containers, volumes, or custom configuration files on your host aren't automatically removed. To delete all images, containers, and volumes:
cmd = 'sudo rm -rf /var/lib/docker && sudo rm -rf /var/lib/containerd'
osal.execute_stdout(cmd) or { return error('Cannot uninstall docker due to: ${err}') }
// Remove source list and keyrings
cmd = 'sudo rm /etc/apt/sources.list.d/docker.list && sudo rm /etc/apt/keyrings/docker.asc'
osal.execute_stdout(cmd) or { return error('Cannot uninstall docker due to: ${err}') }
console.print_header('Docker is removed')
}

View File

@@ -0,0 +1,147 @@
module docker
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import time
__global (
docker_global map[string]&DockerInstaller
docker_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string
}
pub fn get(args_ ArgsGet) !&DockerInstaller {
return &DockerInstaller{}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.zinit {
console.print_debug('startupmanager: zinit')
return startupmanager.get(cat: .zinit)!
}
.systemd {
console.print_debug('startupmanager: systemd')
return startupmanager.get(cat: .systemd)!
}
else {
console.print_debug('startupmanager: auto')
return startupmanager.get()!
}
}
}
pub fn (mut self DockerInstaller) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('docker start')
if !installed()! {
install()!
}
configure()!
start_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('starting docker with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
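// waits up to ~5 seconds (50 x 100ms) for the daemon to report running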
return error('docker did not start properly.')
}
pub fn (mut self DockerInstaller) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self DockerInstaller) stop() ! {
switch(self.name)
stop_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
stop_post()!
}
pub fn (mut self DockerInstaller) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self DockerInstaller) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self DockerInstaller) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn (mut self DockerInstaller) destroy() ! {
switch(self.name)
self.stop() or {}
destroy()!
}
// switch instance to be used for docker
pub fn switch(name string) {
docker_default = name
}

View File

@@ -0,0 +1,23 @@
module docker
pub const version = '1.14.3'
const singleton = false
const default = true
// THIS IS THE SOURCE OF THE INFORMATION FOR THIS FILE; THE CONFIG OBJECT IS CONFIGURED AND MODELLED HERE
@[heap]
pub struct DockerInstaller {
pub mut:
name string = 'default'
}
fn obj_init(obj_ DockerInstaller) !DockerInstaller {
// never call get here, only thing we can do here is work on object itself
mut obj := obj_
return obj
}
// called before start if done
fn configure() ! {
// mut installer := get()!
}

View File

@@ -0,0 +1,42 @@
# docker
To get started:
```vlang
import freeflowuniverse.herolib.installers.something.docker as docker_installer
heroscript:="
!!docker.configure name:'test'
password: '1234'
port: 7701
!!docker.start name:'test' reset:1
"
docker_installer.play(heroscript=heroscript)!
//or we can call the default and do a start with reset
//mut installer:= docker_installer.get()!
//installer.start(reset:true)!
```
## example heroscript
```hero
!!docker.configure
homedir: '/home/user/docker'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```
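As a minimal sketch (same generated names as above, import path kept as generated), the non-heroscript route is:

```vlang
import freeflowuniverse.herolib.installers.something.docker as docker_installer

// get the default installer instance, then install (resetting any previous state) and start dockerd
mut installer := docker_installer.get()!
installer.install_start(reset: true)!
```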

View File

@@ -1,6 +1,6 @@
module rmb
// import freeflowuniverse.herolib.clients.httpconnection
// import freeflowuniverse.herolib.core.httpconnection
import freeflowuniverse.herolib.core.redisclient { RedisURL }
import os

View File

@@ -1,7 +1,6 @@
module tfgrid3deployer
import freeflowuniverse.herolib.threefold.grid.models as grid_models
import freeflowuniverse.herolib.threefold.gridproxy.model as gridproxy_models
import freeflowuniverse.herolib.threefold.grid
import freeflowuniverse.herolib.ui.console
import compress.zlib
@@ -9,7 +8,6 @@ import encoding.hex
import x.crypto.chacha20
import crypto.sha256
import json
import rand
struct GridContracts {
pub mut:
@@ -51,7 +49,7 @@ pub fn new_deployment(name string) !TFDeployment {
kvstore := KVStoreFS{}
if _ := kvstore.get(name) {
return error('Deployment with the same name is already exist.')
return error('Deployment with the same name "${name}" already exists.')
}
deployer := get_deployer()!
@@ -114,6 +112,7 @@ pub fn (mut self TFDeployment) deploy() ! {
}
fn (mut self TFDeployment) set_nodes() ! {
// TODO: each request should run in a separate thread
for mut vm in self.vms {
if vm.node_id != 0 {
continue
@@ -284,10 +283,10 @@ fn (mut self TFDeployment) finalize_deployment(setup DeploymentSetup) ! {
}
}
self.update_state(name_contracts_map, returned_deployments)!
self.update_state(setup, name_contracts_map, returned_deployments)!
}
fn (mut self TFDeployment) update_state(name_contracts_map map[string]u64, dls map[u32]&grid_models.Deployment) ! {
fn (mut self TFDeployment) update_state(setup DeploymentSetup, name_contracts_map map[string]u64, dls map[u32]&grid_models.Deployment) ! {
mut workloads := map[u32]map[string]&grid_models.Workload{}
for node_id, deployment in dls {
@@ -338,6 +337,10 @@ fn (mut self TFDeployment) update_state(name_contracts_map map[string]u64, dls m
wn.node_contract_id = dls[wn.node_id].contract_id
wn.name_contract_id = name_contracts_map[wn.requirements.name]
}
self.network.ip_range = setup.network_handler.ip_range
self.network.mycelium = setup.network_handler.mycelium
self.network.user_access_configs = setup.network_handler.user_access_configs.clone()
}
pub fn (mut self TFDeployment) vm_get(vm_name string) !VMachine {
@@ -512,3 +515,11 @@ pub fn (mut self TFDeployment) list_deployments() !map[u32]grid_models.Deploymen
return dls
}
pub fn (mut self TFDeployment) configure_network(req NetworkRequirements) ! {
self.network.requirements = req
}
pub fn (mut self TFDeployment) get_user_access_configs() []UserAccessConfig {
return self.network.user_access_configs
}

View File

@@ -32,14 +32,15 @@ fn new_deployment_setup(network_specs NetworkSpecs, vms []VMachine, zdbs []ZDB,
mut dls := DeploymentSetup{
deployer: deployer
network_handler: NetworkHandler{
deployer: deployer
network_name: network_specs.name
mycelium: network_specs.mycelium
ip_range: network_specs.ip_range
req: network_specs.requirements
deployer: deployer
mycelium: network_specs.mycelium
ip_range: network_specs.ip_range
user_access_configs: network_specs.user_access_configs.clone()
}
}
dls.setup_network_workloads(vms, old_deployments)!
dls.setup_network_workloads(vms, webnames, old_deployments)!
dls.setup_vm_workloads(vms)!
dls.setup_zdb_workloads(zdbs)!
dls.setup_webname_workloads(webnames)!
@@ -67,9 +68,9 @@ fn (mut self DeploymentSetup) match_versions(old_dls map[u32]grid_models.Deploym
// - st: Modified DeploymentSetup struct with network workloads set up
// Returns:
// - None
fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, old_deployments map[u32]grid_models.Deployment) ! {
fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, webnames []WebName, old_deployments map[u32]grid_models.Deployment) ! {
st.network_handler.load_network_state(old_deployments)!
st.network_handler.create_network(vms)!
st.network_handler.create_network(vms, webnames)!
data := st.network_handler.generate_workloads()!
for node_id, workload in data {
@@ -176,6 +177,11 @@ fn (mut self DeploymentSetup) setup_webname_workloads(webnames []WebName) ! {
tls_passthrough: req.tls_passthrough
backends: [req.backend]
name: gw_name
network: if wn.requirements.use_wireguard_network {
self.network_handler.req.name
} else {
none
}
}
self.workloads[wn.node_id] << gw.to_workload(
@@ -205,7 +211,7 @@ fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip
network: grid_models.ZmachineNetwork{
interfaces: [
grid_models.ZNetworkInterface{
network: self.network_handler.network_name
network: self.network_handler.req.name
ip: if vmachine.wireguard_ip.len > 0 {
used_ip_octets[vmachine.node_id] << vmachine.wireguard_ip.all_after_last('.').u8()
vmachine.wireguard_ip
@@ -218,7 +224,7 @@ fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip
planetary: vmachine.requirements.planetary
mycelium: if mycelium := vmachine.requirements.mycelium {
grid_models.MyceliumIP{
network: self.network_handler.network_name
network: self.network_handler.req.name
hex_seed: mycelium.hex_seed
}
} else {

View File

@@ -1,23 +1,54 @@
module tfgrid3deployer
import freeflowuniverse.herolib.threefold.grid.models as grid_models
import freeflowuniverse.herolib.threefold.gridproxy
import freeflowuniverse.herolib.threefold.grid
import freeflowuniverse.herolib.ui.console
import json
import rand
// NetworkInfo struct to represent network details
@[params]
pub struct NetworkRequirements {
pub mut:
name string = 'net' + rand.string(5)
user_access_endpoints int
}
@[params]
pub struct NetworkSpecs {
pub mut:
name string = 'net' + rand.string(5)
ip_range string = '10.10.0.0/16'
mycelium string = rand.hex(64)
requirements NetworkRequirements
ip_range string = '10.10.0.0/16'
mycelium string = rand.hex(64)
user_access_configs []UserAccessConfig
}
pub struct UserAccessConfig {
pub:
ip string
secret_key string
public_key string
peer_public_key string
network_ip_range string
public_node_endpoint string
}
pub fn (c UserAccessConfig) print_wg_config() string {
return '[Interface]
Address = ${c.ip}
PrivateKey = ${c.secret_key}
[Peer]
PublicKey = ${c.peer_public_key}
AllowedIPs = ${c.network_ip_range}, 100.64.0.0/16
PersistentKeepalive = 25
Endpoint = ${c.public_node_endpoint}'
}
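// usage sketch (assumed caller, names illustrative): write each user-access
// config to a file consumable by `wg-quick up`:
//   for i, cfg in deployment.get_user_access_configs() {
//       os.write_file('/tmp/wg${i}.conf', cfg.print_wg_config())!
//   }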
struct NetworkHandler {
mut:
network_name string
req NetworkRequirements
// network_name string
nodes []u32
ip_range string
wg_ports map[u32]u16
@@ -29,11 +60,14 @@ mut:
none_accessible_ip_ranges []string
mycelium string
// user_access_endopoints int
user_access_configs []UserAccessConfig
deployer &grid.Deployer @[skip; str: skip]
}
// TODO: maybe rename to fill_network or something similar
fn (mut self NetworkHandler) create_network(vmachines []VMachine) ! {
fn (mut self NetworkHandler) create_network(vmachines []VMachine, webnames []WebName) ! {
// Set nodes
self.nodes = []
@@ -43,9 +77,16 @@ fn (mut self NetworkHandler) create_network(vmachines []VMachine) ! {
}
}
for webname in webnames {
if webname.requirements.use_wireguard_network && !self.nodes.contains(webname.node_id) {
self.nodes << webname.node_id
}
}
console.print_header('Network nodes: ${self.nodes}.')
self.setup_wireguard_data()!
self.setup_access_node()!
self.setup_user_access()!
}
fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models.Peer, mycleium_hex_key string) !grid_models.Workload {
@@ -62,7 +103,7 @@ fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models.
}
return network_workload.to_workload(
name: self.network_name
name: self.req.name
description: 'VGridClient network workload'
)
}
@@ -70,10 +111,11 @@ fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models.
fn (mut self NetworkHandler) prepare_hidden_node_peers(node_id u32) ![]grid_models.Peer {
mut peers := []grid_models.Peer{}
if self.public_node != 0 {
ip_range_oct := self.ip_range.all_before('/').split('.')
peers << grid_models.Peer{
subnet: self.wg_subnet[self.public_node]
wireguard_public_key: self.wg_keys[self.public_node][1]
allowed_ips: [self.ip_range, '100.64.0.0/16']
allowed_ips: [self.ip_range, '100.64.${ip_range_oct[1]}.${ip_range_oct[2]}/24']
endpoint: '${self.endpoints[self.public_node]}:${self.wg_ports[self.public_node]}'
}
}
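// e.g. with the default ip_range '10.10.0.0/16' the hidden-node peer now allows
// '100.64.10.0/24' instead of the whole '100.64.0.0/16' (illustrative values)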
@@ -81,15 +123,7 @@ fn (mut self NetworkHandler) prepare_hidden_node_peers(node_id u32) ![]grid_mode
}
fn (mut self NetworkHandler) setup_access_node() ! {
// Case 1: Deployment on 28 which is hidden node
// - Setup access node
// Case 2: Deployment on 11 which is public node
// - Already have the access node
// Case 3: if the saved state has already public node.
// - Check the new deployment if its node is hidden take the saved one
// - if the access node is already set, that means we have set its values e.g. the wireguard port, keys
if self.hidden_nodes.len < 1 || self.nodes.len == 1 {
if self.req.user_access_endpoints == 0 && (self.hidden_nodes.len < 1 || self.nodes.len == 1) {
self.public_node = 0
return
}
@@ -144,8 +178,27 @@ fn (mut self NetworkHandler) setup_access_node() ! {
self.endpoints[self.public_node] = access_node.public_config.ipv4.split('/')[0]
}
fn (mut self NetworkHandler) setup_user_access() ! {
to_create_user_access := self.req.user_access_endpoints - self.user_access_configs.len
if to_create_user_access < 0 {
// TODO: support removing user access
return error('removing user access is not supported')
}
for i := 0; i < to_create_user_access; i++ {
wg_keys := self.deployer.client.generate_wg_priv_key()!
self.user_access_configs << UserAccessConfig{
ip: self.calculate_subnet()!
secret_key: wg_keys[0]
public_key: wg_keys[1]
peer_public_key: self.wg_keys[self.public_node][1]
public_node_endpoint: '${self.endpoints[self.public_node]}:${self.wg_ports[self.public_node]}'
network_ip_range: self.ip_range
}
}
}
fn (mut self NetworkHandler) setup_wireguard_data() ! {
// TODO: We need to set the extra node
console.print_header('Setting up network workload.')
self.hidden_nodes, self.none_accessible_ip_ranges = [], []
@@ -230,6 +283,17 @@ fn (mut self NetworkHandler) prepare_public_node_peers(node_id u32) ![]grid_mode
endpoint: ''
}
}
for user_access in self.user_access_configs {
routing_ip := wireguard_routing_ip(user_access.ip)
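// e.g. a user-access subnet '10.10.3.0/24' maps to a '100.64.10.3/32' routing
// ip (illustrative; the exact mapping is whatever wireguard_routing_ip returns)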
peers << grid_models.Peer{
subnet: user_access.ip
wireguard_public_key: user_access.public_key
allowed_ips: [user_access.ip, routing_ip]
endpoint: ''
}
}
}
return peers
@@ -237,10 +301,16 @@ fn (mut self NetworkHandler) prepare_public_node_peers(node_id u32) ![]grid_mode
fn (mut self NetworkHandler) calculate_subnet() !string {
mut parts := self.ip_range.split('/')[0].split('.')
user_access_subnets := self.user_access_configs.map(it.ip)
node_subnets := self.wg_subnet.values()
mut used_subnets := []string{}
used_subnets << node_subnets.clone()
used_subnets << user_access_subnets.clone()
for i := 2; i <= 255; i += 1 {
parts[2] = '${i}'
candidate := parts.join('.') + '/24'
if !self.wg_subnet.values().contains(candidate) {
if !used_subnets.contains(candidate) {
return candidate
}
}
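// e.g. with ip_range '10.10.0.0/16' this yields '10.10.2.0/24', '10.10.3.0/24', ...
// skipping subnets already used by nodes or user-access peers (illustrative)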
@@ -269,7 +339,7 @@ fn (mut self NetworkHandler) load_network_state(dls map[u32]grid_models.Deployme
continue
}
self.network_name = network_name
self.req.name = network_name
self.nodes << node_id
self.ip_range = znet.ip_range
self.wg_ports[node_id] = znet.wireguard_listen_port
@@ -289,7 +359,11 @@ fn (mut self NetworkHandler) load_network_state(dls map[u32]grid_models.Deployme
}
for subnet, endpoint in subnet_to_endpoint {
node_id := subnet_node[subnet]
node_id := subnet_node[subnet] or {
// this may be a user access, not a node
continue
}
if endpoint == '' {
self.hidden_nodes << node_id
continue
@@ -318,9 +392,3 @@ fn (mut self NetworkHandler) generate_workloads() !map[u32]grid_models.Workload
return workloads
}
fn (mut n NetworkHandler) remove_node(node_id u32) ! {
}
fn (mut n NetworkHandler) add_node() ! {
}

View File

@@ -5,10 +5,12 @@ import json
@[params]
pub struct WebNameRequirements {
pub mut:
name string @[required]
node_id ?u32
name string @[required]
node_id ?u32
use_wireguard_network bool
// must be in the format ip:port if tls_passthrough is set, otherwise the format should be http://ip[:port]
backend string @[required]
use_wireguard bool
tls_passthrough bool
}

View File

@@ -2,7 +2,7 @@ module zerohub
import net.http
// import freeflowuniverse.herolib.clients.httpconnection
// import freeflowuniverse.herolib.core.httpconnection
// TODO: curl -H "Authorization: bearer 6Pz6giOpHSaA3KdYI6LLpGSLmDmzmRkVdwvc7S-E5PVB0-iRfgDKW9Rb_ZTlj-xEW4_uSCa5VsyoRsML7DunA1sia3Jpc3RvZi4zYm90IiwgMTY3OTIxNTc3MF0=" https://hub.grid.tf/api/flist/

View File

@@ -1,6 +1,7 @@
module docker
import freeflowuniverse.herolib.osal { exec }
import freeflowuniverse.herolib.virt.utils
pub fn (mut e DockerEngine) container_create(args DockerContainerCreateArgs) !&DockerContainer {
mut ports := ''
@@ -33,7 +34,7 @@ pub fn (mut e DockerEngine) container_create(args DockerContainerCreateArgs) !&D
privileged := if args.privileged { '--privileged' } else { '' }
// if the forwarded ports passed in the args don't contain a mapping to ssh (22), create one
if !contains_ssh_port(args.forwarded_ports) {
if !utils.contains_ssh_port(args.forwarded_ports) {
// find random free port in the node
mut port := e.get_free_port() or { panic('No free port.') }
ports += '-p ${port}:22/tcp'

View File

@@ -4,6 +4,7 @@ import freeflowuniverse.herolib.osal { exec }
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.virt.utils
import freeflowuniverse.herolib.core
import time
// import freeflowuniverse.herolib.installers.swarm
@@ -54,6 +55,29 @@ pub fn (mut e DockerEngine) load() ! {
e.containers_load()!
}
// load all images, they can be consulted in e.images
// see obj: DockerImage as result in e.images
pub fn (mut e DockerEngine) images_load() ! {
e.images = []DockerImage{}
mut lines := osal.execute_silent("docker images --format '{{.ID}}||{{.Repository}}||{{.Tag}}||{{.Digest}}||{{.Size}}||{{.CreatedAt}}'")!
for line in lines.split_into_lines() {
fields := line.split('||').map(utils.clear_str)
if fields.len != 6 {
panic('docker images output needs 6 fields per line.\n${fields}')
}
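// a raw line from the --format template above looks like (illustrative):
// 'a1b2c3d4e5f6||ubuntu||22.04||<none>||72.8MB||2024-01-01 10:00:00 +0000 UTC'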
mut image := DockerImage{
engine: &e
}
image.id = fields[0]
image.repo = fields[1]
image.tag = fields[2]
image.digest = utils.parse_digest(fields[3]) or { '' }
image.size = utils.parse_size_mb(fields[4]) or { 0 }
image.created = utils.parse_time(fields[5]) or { time.now() }
e.images << image
}
}
// load all containers, they can be consulted in e.containers
// see obj: DockerContainer as result in e.containers
pub fn (mut e DockerEngine) containers_load() ! {
@@ -65,7 +89,7 @@ pub fn (mut e DockerEngine) containers_load() ! {
stdout: false
)!
lines := ljob.output
for line in lines {
for line in lines.split_into_lines() {
if line.trim_space() == '' {
continue
}
@@ -142,22 +166,23 @@ pub fn (err ContainerGetError) code() int {
// image_id string
pub fn (mut e DockerEngine) containers_get(args_ ContainerGetArgs) ![]&DockerContainer {
mut args := args_
args.name = texttools.name_fix(args.name)
e.containers_load()!
mut res := []&DockerContainer{}
for _, c in e.containers {
for i, c in e.containers {
container := c
if args.name.contains('*') || args.name.contains('?') || args.name.contains('[') {
if c.name.match_glob(args.name) {
res << &c
if container.name.match_glob(args.name) {
res << &e.containers[i]
continue
}
} else {
if c.name == args.name || c.id == args.id {
res << &c
if container.name == args.name || container.id == args.id {
res << &e.containers[i]
continue
}
}
if args.image_id.len > 0 && c.image.id == args.image_id {
res << &c
if args.image_id.len > 0 && container.image.id == args.image_id {
res << &e.containers[i]
}
}
if res.len == 0 {
@@ -172,8 +197,8 @@ pub fn (mut e DockerEngine) containers_get(args_ ContainerGetArgs) ![]&DockerCon
// get container from memory, can use match_glob see https://modules.vlang.io/index.html#string.match_glob
pub fn (mut e DockerEngine) container_get(args_ ContainerGetArgs) !&DockerContainer {
mut args := args_
args.name = texttools.name_fix(args.name)
mut res := e.containers_get(args)!
if res.len > 1 {
return ContainerGetError{
args: args
@@ -211,7 +236,7 @@ pub fn (mut e DockerEngine) containers_delete(args ContainerGetArgs) ! {
// import a container into an image, run docker container with it
// image_repo examples ['myimage', 'myimage:latest']
// if DockerContainerCreateArgs contains a name, container will be created and restarted
pub fn (mut e DockerEngine) container_import(path string, mut args DockerContainerCreateArgs) !&DockerContainer {
pub fn (mut e DockerEngine) container_import(path string, args DockerContainerCreateArgs) !&DockerContainer {
mut image := args.image_repo
if args.image_tag != '' {
image = image + ':${args.image_tag}'

View File

@@ -89,7 +89,7 @@ pub fn (err ImageGetError) code() int {
pub fn (mut e DockerEngine) image_get(args ImageGetArgs) !&DockerImage {
mut counter := 0
mut result_digest := ''
for i in e.images {
for mut i in e.images {
if i.digest == args.digest {
return &i
}

View File

@@ -19,13 +19,10 @@ pub mut:
pub fn (mut r DockerBuilderRecipe) add_codeget(args_ CodeGetArgs) ! {
mut args := args_
mut gs := gittools.get(coderoot: '${r.path()}/code')!
locator := gs.locator_new(args.url)!
mut gr := gs.repo_get(locator: locator, pull: args.pull, reset: args.reset)!
mut gr := gs.get_repo(url: args.url, pull: args.pull, reset: args.reset)!
if args.name == '' {
args.name = gr.addr.name
args.name = gr.name
}
if args.dest == '' {
@@ -43,7 +40,7 @@ pub fn (mut r DockerBuilderRecipe) add_codeget(args_ CodeGetArgs) ! {
return error("dest is to short (min 3): now '${args.dest}'")
}
commonpath := gr.path_relative()
commonpath := gr.path()
if commonpath.contains('..') {
panic('bug should not be')
}

View File

@@ -1,8 +1,8 @@
module docker
import freeflowuniverse.herolib.crypt.openssl
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.osal { exec }
import freeflowuniverse.herolib.core.httpconnection
import freeflowuniverse.herolib.osal
import os
import freeflowuniverse.herolib.ui.console
@@ -84,30 +84,22 @@ pub fn (mut e DockerEngine) registry_add(args DockerRegistryArgs) ! {
e.registries << registry
// delete all previous containers, uses wildcards see https://modules.vlang.io/index.html#string.match_glob
e.container_delete(name: 'docker_registry*')!
e.container_delete(name: 'docker_registry*') or {
if !(err as ContainerGetError).notfound {
return err
}
println('No containers matching docker registry to delete')
}
composer.start()!
exec(cmd: 'curl https://localhost:5000/v2/ -k', retry: 4) or {
return error('could not start docker registry, did not answer')
}
mut conn := httpconnection.new(
name: 'localdockerhub'
url: 'https://localhost:5000/v2/'
retry: 10
)!
// r := conn.get_json_dict(mut prefix: 'errors')!
r := conn.get(method: .get)!
console.print_debug('Sdsd')
console.print_debug(r)
if true {
panic('sdsd')
}
// now we need to check if we can connect
res := conn.get()!
println(res)
}

View File

@@ -1,7 +1,7 @@
module hetzner
import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.core.httpconnection
pub const version = '1.14.3'
const singleton = false