From 8b0f6926737399deefd203b642f3025c76406521 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 28 Jan 2025 14:06:24 +0100 Subject: [PATCH 1/5] fix: Fix docker examples - Moved `httpconnection` import from `clients` to `core`. - Changed `tfgrid-sdk-ts` dashboard to playground. - Added ipaddr to node_local(). - Added public keyword to OpenSSLGenerateArgs. - Improved DockerEngine image and container loading. - Added utils.contains_ssh_port. - Improved error handling in DockerEngine. - Improved Docker registry handling. Co-authored-by: mariobassem12 Co-authored-by: omda --- .../core/openapi/gitea/gitea_client/factory.v | 2 +- examples/virt/docker/docker_dev_tools.vsh | 0 examples/virt/docker/docker_init.vsh | 0 examples/virt/docker/docker_registry.vsh | 0 examples/virt/docker/presearch_docker.vsh | 0 examples/virt/docker/tf_dashboard.vsh | 12 ++--- lib/builder/node_factory.v | 6 ++- lib/crypt/openssl/generate.v | 2 + lib/installers/infra/coredns/cdns_install.v | 2 +- lib/threefold/rmb/rmb_client.v | 2 +- lib/threefold/zerohub/zerohub.v | 2 +- lib/virt/docker/docker_container_create.v | 3 +- lib/virt/docker/docker_engine.v | 47 ++++++++++++++----- lib/virt/docker/docker_image.v | 2 +- lib/virt/docker/docker_recipe_code.v | 9 ++-- lib/virt/docker/docker_registry.v | 30 +++++------- lib/virt/hetzner/hetzner_model.v | 2 +- 17 files changed, 70 insertions(+), 51 deletions(-) mode change 100644 => 100755 examples/virt/docker/docker_dev_tools.vsh mode change 100644 => 100755 examples/virt/docker/docker_init.vsh mode change 100644 => 100755 examples/virt/docker/docker_registry.vsh mode change 100644 => 100755 examples/virt/docker/presearch_docker.vsh mode change 100644 => 100755 examples/virt/docker/tf_dashboard.vsh diff --git a/examples/core/openapi/gitea/gitea_client/factory.v b/examples/core/openapi/gitea/gitea_client/factory.v index 7d0f48a3..00a52a5b 100644 --- a/examples/core/openapi/gitea/gitea_client/factory.v +++ b/examples/core/openapi/gitea/gitea_client/factory.v @@ -1,7 +1,7 @@ module dagu // import os -import freeflowuniverse.herolib.clients.httpconnection +import freeflowuniverse.herolib.core.httpconnection import os struct GiteaClient[T] { diff --git a/examples/virt/docker/docker_dev_tools.vsh b/examples/virt/docker/docker_dev_tools.vsh old mode 100644 new mode 100755 diff --git a/examples/virt/docker/docker_init.vsh b/examples/virt/docker/docker_init.vsh old mode 100644 new mode 100755 diff --git a/examples/virt/docker/docker_registry.vsh b/examples/virt/docker/docker_registry.vsh old mode 100644 new mode 100755 diff --git a/examples/virt/docker/presearch_docker.vsh b/examples/virt/docker/presearch_docker.vsh old mode 100644 new mode 100755 diff --git a/examples/virt/docker/tf_dashboard.vsh b/examples/virt/docker/tf_dashboard.vsh old mode 100644 new mode 100755 index 01bbd0c1..82d60d39 --- a/examples/virt/docker/tf_dashboard.vsh +++ b/examples/virt/docker/tf_dashboard.vsh @@ -17,25 +17,25 @@ recipe.add_run(cmd: 'npm i -g yarn')! recipe.add_run( cmd: ' git clone https://github.com/threefoldtech/tfgrid-sdk-ts.git /app - cd /app/packages/dashboard + cd /app/packages/playground yarn install yarn lerna run build --no-private - yarn workspace @threefold/dashboard build + yarn workspace @threefold/playground build ' )! 
recipe.add_run( cmd: ' rm /etc/nginx/conf.d/default.conf - cp /app/packages/dashboard/nginx.conf /etc/nginx/conf.d + cp /app/packages/playground/nginx.conf /etc/nginx/conf.d apk add --no-cache bash - chmod +x /app/packages/dashboard/scripts/build-env.sh - cp -r /app/packages/dashboard/dist /usr/share/nginx/html + chmod +x /app/packages/playground/scripts/build-env.sh + cp -r /app/packages/playground/dist /usr/share/nginx/html ' )! recipe.add_run(cmd: 'echo "daemon off;" >> /etc/nginx/nginx.conf')! -recipe.add_cmd(cmd: '/bin/bash -c /app/packages/dashboard/scripts/build-env.sh')! +recipe.add_cmd(cmd: '/bin/bash -c /app/packages/playground/scripts/build-env.sh')! recipe.add_entrypoint(cmd: 'nginx')! recipe.build(false)! diff --git a/lib/builder/node_factory.v b/lib/builder/node_factory.v index b5d661c1..0793ac21 100644 --- a/lib/builder/node_factory.v +++ b/lib/builder/node_factory.v @@ -4,7 +4,10 @@ import freeflowuniverse.herolib.data.ipaddress // get node connection to local machine pub fn (mut bldr BuilderFactory) node_local() !&Node { - return bldr.node_new(name: 'localhost') + return bldr.node_new( + name: 'localhost' + ipaddr: '127.0.0.1' + ) } // format ipaddr: localhost:7777 . @@ -64,7 +67,6 @@ pub fn (mut bldr BuilderFactory) node_new(args_ NodeArguments) !&Node { mut iadd := ipaddress.new(args.ipaddr)! node.name = iadd.toname()! } - wasincache := node.load()! if wasincache && args.reload { diff --git a/lib/crypt/openssl/generate.v b/lib/crypt/openssl/generate.v index 151bdd5b..356dab4e 100644 --- a/lib/crypt/openssl/generate.v +++ b/lib/crypt/openssl/generate.v @@ -5,6 +5,7 @@ import json @[params] pub struct OpenSSLGenerateArgs { +pub: name string = 'default' domain string = 'myregistry.domain.com' reset bool @@ -22,6 +23,7 @@ pub fn (mut ossl OpenSSL) generate(args OpenSSLGenerateArgs) !OpenSSLKey { ' mut b := builder.new()! + println('b: ${b}') mut node := b.node_local()! node.exec(cmd: cmd)! 
diff --git a/lib/installers/infra/coredns/cdns_install.v b/lib/installers/infra/coredns/cdns_install.v index 5cbf9f29..ded49d5a 100644 --- a/lib/installers/infra/coredns/cdns_install.v +++ b/lib/installers/infra/coredns/cdns_install.v @@ -4,7 +4,7 @@ import freeflowuniverse.herolib.osal import freeflowuniverse.herolib.osal.screen import freeflowuniverse.herolib.ui.console import freeflowuniverse.herolib.core.texttools -import freeflowuniverse.herolib.clients.httpconnection +import freeflowuniverse.herolib.core.httpconnection import os @[params] diff --git a/lib/threefold/rmb/rmb_client.v b/lib/threefold/rmb/rmb_client.v index 46fab50a..2922f9c1 100644 --- a/lib/threefold/rmb/rmb_client.v +++ b/lib/threefold/rmb/rmb_client.v @@ -1,6 +1,6 @@ module rmb -// import freeflowuniverse.herolib.clients.httpconnection +// import freeflowuniverse.herolib.core.httpconnection import freeflowuniverse.herolib.core.redisclient { RedisURL } import os diff --git a/lib/threefold/zerohub/zerohub.v b/lib/threefold/zerohub/zerohub.v index 080fc41d..2829643f 100644 --- a/lib/threefold/zerohub/zerohub.v +++ b/lib/threefold/zerohub/zerohub.v @@ -2,7 +2,7 @@ module zerohub import net.http -// import freeflowuniverse.herolib.clients.httpconnection +// import freeflowuniverse.herolib.core.httpconnection // TODO: curl -H "Authorization: bearer 6Pz6giOpHSaA3KdYI6LLpGSLmDmzmRkVdwvc7S-E5PVB0-iRfgDKW9Rb_ZTlj-xEW4_uSCa5VsyoRsML7DunA1sia3Jpc3RvZi4zYm90IiwgMTY3OTIxNTc3MF0=" https://hub.grid.tf/api/flist/ diff --git a/lib/virt/docker/docker_container_create.v b/lib/virt/docker/docker_container_create.v index 68338a69..4ce9732f 100644 --- a/lib/virt/docker/docker_container_create.v +++ b/lib/virt/docker/docker_container_create.v @@ -1,6 +1,7 @@ module docker import freeflowuniverse.herolib.osal { exec } +import freeflowuniverse.herolib.virt.utils pub fn (mut e DockerEngine) container_create(args DockerContainerCreateArgs) !&DockerContainer { mut ports := '' @@ -33,7 +34,7 @@ pub fn (mut e DockerEngine) container_create(args DockerContainerCreateArgs) !&D privileged := if args.privileged { '--privileged' } else { '' } // if forwarded ports passed in the args not containing mapping tp ssh (22) create one - if !contains_ssh_port(args.forwarded_ports) { + if !utils.contains_ssh_port(args.forwarded_ports) { // find random free port in the node mut port := e.get_free_port() or { panic('No free port.') } ports += '-p ${port}:22/tcp' diff --git a/lib/virt/docker/docker_engine.v b/lib/virt/docker/docker_engine.v index eda2016f..67818004 100644 --- a/lib/virt/docker/docker_engine.v +++ b/lib/virt/docker/docker_engine.v @@ -4,6 +4,7 @@ import freeflowuniverse.herolib.osal { exec } import freeflowuniverse.herolib.core.texttools import freeflowuniverse.herolib.virt.utils import freeflowuniverse.herolib.core +import time // import freeflowuniverse.herolib.installers.swarm @@ -54,6 +55,29 @@ pub fn (mut e DockerEngine) load() ! { e.containers_load()! } +// load all images, they can be consulted in e.images +// see obj: DockerImage as result in e.images +pub fn (mut e DockerEngine) images_load() ! { + e.images = []DockerImage{} + mut lines := osal.execute_silent("docker images --format '{{.ID}}||{{.Repository}}||{{.Tag}}||{{.Digest}}||{{.Size}}||{{.CreatedAt}}'")! 
+ for line in lines.split_into_lines() { + fields := line.split('||').map(utils.clear_str) + if fields.len != 6 { + panic('docker image needs to output 6 parts.\n${fields}') + } + mut image := DockerImage{ + engine: &e + } + image.id = fields[0] + image.repo = fields[1] + image.tag = fields[2] + image.digest = utils.parse_digest(fields[3]) or { '' } + image.size = utils.parse_size_mb(fields[4]) or { 0 } + image.created = utils.parse_time(fields[5]) or { time.now() } + e.images << image + } +} + // load all containers, they can be consulted in e.containers // see obj: DockerContainer as result in e.containers pub fn (mut e DockerEngine) containers_load() ! { @@ -65,7 +89,7 @@ pub fn (mut e DockerEngine) containers_load() ! { stdout: false )! lines := ljob.output - for line in lines { + for line in lines.split_into_lines() { if line.trim_space() == '' { continue } @@ -142,22 +166,23 @@ pub fn (err ContainerGetError) code() int { // image_id string pub fn (mut e DockerEngine) containers_get(args_ ContainerGetArgs) ![]&DockerContainer { mut args := args_ - args.name = texttools.name_fix(args.name) + e.containers_load()! mut res := []&DockerContainer{} - for _, c in e.containers { + for i, c in e.containers { + container := c if args.name.contains('*') || args.name.contains('?') || args.name.contains('[') { - if c.name.match_glob(args.name) { - res << &c + if container.name.match_glob(args.name) { + res << &e.containers[i] continue } } else { - if c.name == args.name || c.id == args.id { - res << &c + if container.name == args.name || container.id == args.id { + res << &e.containers[i] continue } } - if args.image_id.len > 0 && c.image.id == args.image_id { - res << &c + if args.image_id.len > 0 && container.image.id == args.image_id { + res << &e.containers[i] } } if res.len == 0 { @@ -172,8 +197,8 @@ pub fn (mut e DockerEngine) containers_get(args_ ContainerGetArgs) ![]&DockerCon // get container from memory, can use match_glob see https://modules.vlang.io/index.html#string.match_glob pub fn (mut e DockerEngine) container_get(args_ ContainerGetArgs) !&DockerContainer { mut args := args_ - args.name = texttools.name_fix(args.name) mut res := e.containers_get(args)! + if res.len > 1 { return ContainerGetError{ args: args @@ -211,7 +236,7 @@ pub fn (mut e DockerEngine) containers_delete(args ContainerGetArgs) ! 
{ // import a container into an image, run docker container with it // image_repo examples ['myimage', 'myimage:latest'] // if DockerContainerCreateArgs contains a name, container will be created and restarted -pub fn (mut e DockerEngine) container_import(path string, mut args DockerContainerCreateArgs) !&DockerContainer { +pub fn (mut e DockerEngine) container_import(path string, args DockerContainerCreateArgs) !&DockerContainer { mut image := args.image_repo if args.image_tag != '' { image = image + ':${args.image_tag}' diff --git a/lib/virt/docker/docker_image.v b/lib/virt/docker/docker_image.v index cc6d1a8f..c488b1f0 100644 --- a/lib/virt/docker/docker_image.v +++ b/lib/virt/docker/docker_image.v @@ -89,7 +89,7 @@ pub fn (err ImageGetError) code() int { pub fn (mut e DockerEngine) image_get(args ImageGetArgs) !&DockerImage { mut counter := 0 mut result_digest := '' - for i in e.images { + for mut i in e.images { if args.digest == args.digest { return &i } diff --git a/lib/virt/docker/docker_recipe_code.v b/lib/virt/docker/docker_recipe_code.v index 6e4459d2..2601185c 100644 --- a/lib/virt/docker/docker_recipe_code.v +++ b/lib/virt/docker/docker_recipe_code.v @@ -19,13 +19,10 @@ pub mut: pub fn (mut r DockerBuilderRecipe) add_codeget(args_ CodeGetArgs) ! { mut args := args_ mut gs := gittools.get(coderoot: '${r.path()}/code')! - - locator := gs.locator_new(args.url)! - - mut gr := gs.repo_get(locator: locator, pull: args.pull, reset: args.reset)! + mut gr := gs.get_repo(url: args.url, pull: args.pull, reset: args.reset)! if args.name == '' { - args.name = gr.addr.name + args.name = gr.name } if args.dest == '' { @@ -43,7 +40,7 @@ pub fn (mut r DockerBuilderRecipe) add_codeget(args_ CodeGetArgs) ! { return error("dest is to short (min 3): now '${args.dest}'") } - commonpath := gr.path_relative() + commonpath := gr.path() if commonpath.contains('..') { panic('bug should not be') } diff --git a/lib/virt/docker/docker_registry.v b/lib/virt/docker/docker_registry.v index 5eef303a..f0edb175 100644 --- a/lib/virt/docker/docker_registry.v +++ b/lib/virt/docker/docker_registry.v @@ -1,8 +1,8 @@ module docker import freeflowuniverse.herolib.crypt.openssl -import freeflowuniverse.herolib.clients.httpconnection -import freeflowuniverse.herolib.osal { exec } +import freeflowuniverse.herolib.core.httpconnection +import freeflowuniverse.herolib.osal import os import freeflowuniverse.herolib.ui.console @@ -84,30 +84,22 @@ pub fn (mut e DockerEngine) registry_add(args DockerRegistryArgs) ! { e.registries << registry // delete all previous containers, uses wildcards see https://modules.vlang.io/index.html#string.match_glob - e.container_delete(name: 'docker_registry*')! + + e.container_delete(name: 'docker_registry*') or { + if !(err as ContainerGetError).notfound { + return err + } + println('No containers to matching docker registry') + } composer.start()! - exec(cmd: 'curl https://localhost:5000/v2/ -k', retry: 4) or { - return error('could not start docker registry, did not answer') - } - mut conn := httpconnection.new( name: 'localdockerhub' url: 'https://localhost:5000/v2/' retry: 10 )! - // r := conn.get_json_dict(mut prefix: 'errors')! - - // r := conn.get_json_dict(mut prefix: 'errors')! - r := conn.get(method: .get)! - console.print_debug('Sdsd') - console.print_debug(r) - - if true { - panic('sdsd') - } - - // now we need to check if we can connect + res := conn.get()! 
+ println(res) } diff --git a/lib/virt/hetzner/hetzner_model.v b/lib/virt/hetzner/hetzner_model.v index a6295450..b2bcf51b 100644 --- a/lib/virt/hetzner/hetzner_model.v +++ b/lib/virt/hetzner/hetzner_model.v @@ -1,7 +1,7 @@ module hetzner import freeflowuniverse.herolib.data.paramsparser -import freeflowuniverse.herolib.clients.httpconnection +import freeflowuniverse.herolib.core.httpconnection pub const version = '1.14.3' const singleton = false From 0f095a691d396e116c3c18fc252cb4dde37d449a Mon Sep 17 00:00:00 2001 From: root Date: Tue, 28 Jan 2025 16:17:33 +0100 Subject: [PATCH 2/5] feat: add docker installer - Add a new docker installer. - Includes functionality for installing, starting, stopping, and removing docker. Co-authored-by: mariobassem12 Co-authored-by: omda --- examples/installers/docker.vsh | 11 ++ lib/installers/virt/docker/.heroscript | 13 ++ lib/installers/virt/docker/docker.v | 64 -------- lib/installers/virt/docker/docker_actions.v | 106 +++++++++++++ lib/installers/virt/docker/docker_factory_.v | 147 +++++++++++++++++++ lib/installers/virt/docker/docker_model.v | 23 +++ lib/installers/virt/docker/readme.md | 42 ++++++ 7 files changed, 342 insertions(+), 64 deletions(-) create mode 100755 examples/installers/docker.vsh create mode 100644 lib/installers/virt/docker/.heroscript delete mode 100644 lib/installers/virt/docker/docker.v create mode 100644 lib/installers/virt/docker/docker_actions.v create mode 100644 lib/installers/virt/docker/docker_factory_.v create mode 100644 lib/installers/virt/docker/docker_model.v create mode 100644 lib/installers/virt/docker/readme.md diff --git a/examples/installers/docker.vsh b/examples/installers/docker.vsh new file mode 100755 index 00000000..1a0fe523 --- /dev/null +++ b/examples/installers/docker.vsh @@ -0,0 +1,11 @@ +#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.installers.virt.docker as docker_installer + +mut docker := docker_installer.get()! + +// To install +docker.install()! + +// To remove +docker.destroy()! diff --git a/lib/installers/virt/docker/.heroscript b/lib/installers/virt/docker/.heroscript new file mode 100644 index 00000000..09192ecd --- /dev/null +++ b/lib/installers/virt/docker/.heroscript @@ -0,0 +1,13 @@ + +!!hero_code.generate_installer + name:'docker' + classname:'DockerInstaller' + singleton:0 + templates:0 + default:1 + title:'' + supported_platforms:'' + reset:0 + startupmanager:1 + hasconfig:0 + build:0 \ No newline at end of file diff --git a/lib/installers/virt/docker/docker.v b/lib/installers/virt/docker/docker.v deleted file mode 100644 index 5a9bc665..00000000 --- a/lib/installers/virt/docker/docker.v +++ /dev/null @@ -1,64 +0,0 @@ -module docker - -import freeflowuniverse.herolib.osal -import freeflowuniverse.herolib.installers.base -import freeflowuniverse.herolib.ui.console - -// install docker will return true if it was already installed -pub fn install_() ! { - console.print_header('package install install docker') - if core.platform() != .ubuntu { - return error('only support ubuntu for now') - } - - base.install()! - - if !osal.done_exists('install_docker') && !osal.cmd_exists('docker') { - // osal.upgrade()! - osal.package_install('mc,wget,htop,apt-transport-https,ca-certificates,curl,software-properties-common')! 
- cmd := ' - rm -f /usr/share/keyrings/docker-archive-keyring.gpg - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg - echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - apt update - apt-cache policy docker-ce - #systemctl status docker - ' - osal.execute_silent(cmd)! - osal.package_install('docker-ce')! - check()! - osal.done_set('install_docker', 'OK')! - } - console.print_header('docker already done') -} - -pub fn check() ! { - // todo: do a monitoring check to see if it works - cmd := ' - # Check if docker command exists - if ! command -v docker &> /dev/null; then - echo "Error: Docker command-line tool is not installed." - exit 1 - fi - - # Check if Docker daemon is running - if ! pgrep -f "dockerd" &> /dev/null; then - echo "Error: Docker daemon is not running." - exit 1 - fi - - # Run the hello-world Docker container - output=$(docker run hello-world 2>&1) - - if [[ "\$output" == *"Hello from Docker!"* ]]; then - echo "Docker is installed and running properly." - else - echo "Error: Failed to run the Docker hello-world container." - echo "Output: \$output" - exit 1 - fi - - ' - r := osal.execute_silent(cmd)! - console.print_debug(r) -} diff --git a/lib/installers/virt/docker/docker_actions.v b/lib/installers/virt/docker/docker_actions.v new file mode 100644 index 00000000..5b4212b1 --- /dev/null +++ b/lib/installers/virt/docker/docker_actions.v @@ -0,0 +1,106 @@ +module docker + +import freeflowuniverse.herolib.core +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.osal.zinit +import freeflowuniverse.herolib.installers.ulist + +fn startupcmd() ![]zinit.ZProcessNewArgs { + mut res := []zinit.ZProcessNewArgs{} + res << zinit.ZProcessNewArgs{ + name: 'docker' + cmd: 'dockerd' + } + + return res +} + +fn running() !bool { + console.print_header('Checking if Docker is running') + is_installed := installed() or { + return error('Cannot execute command docker, check if the docker is installed or call the `install()` method: ${err}') + } + + if !is_installed { + return false + } + + // Checking if the docker server responed + cmd := 'docker ps' + osal.execute_stdout(cmd) or { return false } + + console.print_header('Docker is running') + return true +} + +fn start_pre() ! { +} + +fn start_post() ! { +} + +fn stop_pre() ! { +} + +fn stop_post() ! { +} + +//////////////////// following actions are not specific to instance of the object + +// checks if a certain version or above is installed +fn installed() !bool { + console.print_header('Checking if Docker is installed') + cmd := 'docker -v' + osal.execute_stdout(cmd) or { return false } + console.print_header('Docker is installed') + return true +} + +// get the Upload List of the files +fn ulist_get() !ulist.UList { + return ulist.UList{} +} + +// uploads to S3 server if configured +fn upload() ! {} + +fn install() ! { + console.print_header('Installing Docker') + if core.platform()! 
!= .ubuntu { + return error('only support ubuntu for now') + } + + mut cmd := ' + sudo apt-get update -y + sudo apt-get install -y ca-certificates curl + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "\$VERSION_CODENAME") stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update -y +' + + osal.execute_stdout(cmd) or { return error('Cannot install docker due to: ${err}') } + + cmd = 'sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin' + osal.execute_stdout(cmd) or { return error('Cannot install docker due to: ${err}') } + console.print_header('Docker installed sucessfully') +} + +fn destroy() ! { + console.print_header('Removing Docker') + // Uninstall the Docker Engine, CLI, containerd, and Docker Compose packages: + mut cmd := 'sudo apt-get purge -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras' + osal.execute_stdout(cmd) or { return error('Cannot uninstall docker due to: ${err}') } + + // Images, containers, volumes, or custom configuration files on your host aren't automatically removed. To delete all images, containers, and volumes: + cmd = 'sudo rm -rf /var/lib/docker && sudo rm -rf /var/lib/containerd' + osal.execute_stdout(cmd) or { return error('Cannot uninstall docker due to: ${err}') } + + // Remove source list and keyrings + cmd = 'sudo rm /etc/apt/sources.list.d/docker.list && sudo rm /etc/apt/keyrings/docker.asc' + osal.execute_stdout(cmd) or { return error('Cannot uninstall docker due to: ${err}') } + console.print_header('Docker is removed') +} diff --git a/lib/installers/virt/docker/docker_factory_.v b/lib/installers/virt/docker/docker_factory_.v new file mode 100644 index 00000000..58e91414 --- /dev/null +++ b/lib/installers/virt/docker/docker_factory_.v @@ -0,0 +1,147 @@ + +module docker + +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.sysadmin.startupmanager +import freeflowuniverse.herolib.osal.zinit +import time + +__global ( + docker_global map[string]&DockerInstaller + docker_default string +) + +/////////FACTORY + +@[params] +pub struct ArgsGet { +pub mut: + name string +} + +pub fn get(args_ ArgsGet) !&DockerInstaller { + return &DockerInstaller{} +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS /////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////////////////////////// + +fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager { + // unknown + // screen + // zinit + // tmux + // systemd + match cat { + .zinit { + console.print_debug('startupmanager: zinit') + return startupmanager.get(cat: .zinit)! + } + .systemd { + console.print_debug('startupmanager: systemd') + return startupmanager.get(cat: .systemd)! + } + else { + console.print_debug('startupmanager: auto') + return startupmanager.get()! + } + } +} + +pub fn (mut self DockerInstaller) start() ! { + switch(self.name) + if self.running()! { + return + } + + console.print_header('docker start') + + if !installed()! { + install()! 
+ } + + configure()! + + start_pre()! + + for zprocess in startupcmd()! { + mut sm := startupmanager_get(zprocess.startuptype)! + + console.print_debug('starting docker with ${zprocess.startuptype}...') + + sm.new(zprocess)! + + sm.start(zprocess.name)! + } + + start_post()! + + for _ in 0 .. 50 { + if self.running()! { + return + } + time.sleep(100 * time.millisecond) + } + return error('docker did not install properly.') +} + +pub fn (mut self DockerInstaller) install_start(args InstallArgs) ! { + switch(self.name) + self.install(args)! + self.start()! +} + +pub fn (mut self DockerInstaller) stop() ! { + switch(self.name) + stop_pre()! + for zprocess in startupcmd()! { + mut sm := startupmanager_get(zprocess.startuptype)! + sm.stop(zprocess.name)! + } + stop_post()! +} + +pub fn (mut self DockerInstaller) restart() ! { + switch(self.name) + self.stop()! + self.start()! +} + +pub fn (mut self DockerInstaller) running() !bool { + switch(self.name) + + // walk over the generic processes, if not running return + for zprocess in startupcmd()! { + mut sm := startupmanager_get(zprocess.startuptype)! + r := sm.running(zprocess.name)! + if r == false { + return false + } + } + return running()! +} + +@[params] +pub struct InstallArgs { +pub mut: + reset bool +} + +pub fn (mut self DockerInstaller) install(args InstallArgs) ! { + switch(self.name) + if args.reset || (!installed()!) { + install()! + } +} + +pub fn (mut self DockerInstaller) destroy() ! { + switch(self.name) + self.stop() or {} + destroy()! +} + +// switch instance to be used for docker +pub fn switch(name string) { + docker_default = name +} diff --git a/lib/installers/virt/docker/docker_model.v b/lib/installers/virt/docker/docker_model.v new file mode 100644 index 00000000..86c08bbb --- /dev/null +++ b/lib/installers/virt/docker/docker_model.v @@ -0,0 +1,23 @@ +module docker + +pub const version = '1.14.3' +const singleton = false +const default = true + +// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED +@[heap] +pub struct DockerInstaller { +pub mut: + name string = 'default' +} + +fn obj_init(obj_ DockerInstaller) !DockerInstaller { + // never call get here, only thing we can do here is work on object itself + mut obj := obj_ + return obj +} + +// called before start if done +fn configure() ! { + // mut installer := get()! +} diff --git a/lib/installers/virt/docker/readme.md b/lib/installers/virt/docker/readme.md new file mode 100644 index 00000000..8cb368a3 --- /dev/null +++ b/lib/installers/virt/docker/readme.md @@ -0,0 +1,42 @@ +# docker + +To get started + +```vlang + + +import freeflowuniverse.herolib.installers.something.docker as docker_installer + +heroscript:=" +!!docker.configure name:'test' + password: '1234' + port: 7701 + +!!docker.start name:'test' reset:1 +" + +docker_installer.play(heroscript=heroscript)! + +//or we can call the default and do a start with reset +//mut installer:= docker_installer.get()! +//installer.start(reset:true)! + + + + +``` + +## example heroscript + +```hero +!!docker.configure + homedir: '/home/user/docker' + username: 'admin' + password: 'secretpassword' + title: 'Some Title' + host: 'localhost' + port: 8888 + +``` + + From 112f5eecb2b73e6318ca24d3cb6edd1dfd42e26b Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Jan 2025 11:13:11 +0100 Subject: [PATCH 3/5] feat: Add Buildah installer - Added a Buildah installer to the project. - The installer can install and remove Buildah. 
- Updated the installer to use the latest Buildah version. Co-authored-by: mahmmoud.hassanein Co-authored-by: mariobassem --- examples/installers/buildah.vsh | 11 +++ lib/installers/virt/buildah/.heroscript | 3 +- lib/installers/virt/buildah/buildah_actions.v | 70 ++++--------------- .../virt/buildah/buildah_factory_.v | 52 +++++++++++--- lib/installers/virt/buildah/buildah_model.v | 6 +- 5 files changed, 72 insertions(+), 70 deletions(-) create mode 100755 examples/installers/buildah.vsh diff --git a/examples/installers/buildah.vsh b/examples/installers/buildah.vsh new file mode 100755 index 00000000..a5ca6978 --- /dev/null +++ b/examples/installers/buildah.vsh @@ -0,0 +1,11 @@ +#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.installers.virt.buildah as buildah_installer + +mut buildah := buildah_installer.get()! + +// To install +buildah.install()! + +// To remove +buildah.destroy()! diff --git a/lib/installers/virt/buildah/.heroscript b/lib/installers/virt/buildah/.heroscript index b3cadebe..405414b2 100644 --- a/lib/installers/virt/buildah/.heroscript +++ b/lib/installers/virt/buildah/.heroscript @@ -1,4 +1,3 @@ - !!hero_code.generate_installer name:'buildah' classname:'BuildahInstaller' @@ -10,4 +9,4 @@ reset:0 startupmanager:0 hasconfig:0 - build:1 \ No newline at end of file + build:0 \ No newline at end of file diff --git a/lib/installers/virt/buildah/buildah_actions.v b/lib/installers/virt/buildah/buildah_actions.v index dd0e9581..7e941bc1 100644 --- a/lib/installers/virt/buildah/buildah_actions.v +++ b/lib/installers/virt/buildah/buildah_actions.v @@ -2,59 +2,26 @@ module buildah import freeflowuniverse.herolib.osal import freeflowuniverse.herolib.ui.console -import freeflowuniverse.herolib.core.texttools import freeflowuniverse.herolib.installers.ulist -import freeflowuniverse.herolib.installers.lang.golang -import os +import freeflowuniverse.herolib.core // checks if a certain version or above is installed -fn installed_() !bool { - res := os.execute('${osal.profile_path_source_and()!} buildah -v') - if res.exit_code != 0 { - return false - } - r := res.output.split_into_lines().filter(it.trim_space().len > 0) - if r.len != 1 { - return error("couldn't parse herocontainers version, expected 'buildah -v' on 1 row.\n${res.output}") - } - v := texttools.version(r[0].all_after('version').all_before('(').replace('-dev', '')) - if texttools.version(version) == v { - return true - } - return false +fn installed() !bool { + osal.execute_silent('buildah -v') or { return false } + + return true } -fn install_() ! { +fn install() ! { console.print_header('install buildah') - build()! -} + if core.platform()! != .ubuntu { + return error('Only ubuntu is supported for now') + } -fn build_() ! { - console.print_header('build buildah') - - osal.package_install('runc,bats,btrfs-progs,git,go-md2man,libapparmor-dev,libglib2.0-dev,libgpgme11-dev,libseccomp-dev,libselinux1-dev,make,skopeo,libbtrfs-dev')! - - mut g := golang.get()! - g.install()! - - cmd := ' - cd /tmp - rm -rf buildah - git clone https://github.com/containers/buildah - cd buildah - make SECURITYTAGS="apparmor seccomp" - ' + cmd := 'sudo apt-get -y update && sudo apt-get -y install buildah' osal.execute_stdout(cmd)! - // now copy to the default bin path - osal.cmd_add( - cmdname: 'buildah' - source: '/tmp/buildah/bin/buildah' - )! - - osal.rm(' - /tmp/buildah - ')! 
+ console.print_header('Buildah Installed Successfuly') } // get the Upload List of the files @@ -64,17 +31,6 @@ fn ulist_get() !ulist.UList { return ulist.UList{} } -fn destroy_() ! { - osal.package_remove(' - buildah - ')! - - // will remove all paths where go/bin is found - osal.profile_path_add_remove(paths2delete: 'go/bin')! - - osal.rm(' - buildah - /var/lib/buildah - /tmp/buildah - ')! +fn destroy() ! { + osal.execute_stdout('sudo apt remove --purge -y buildah')! } diff --git a/lib/installers/virt/buildah/buildah_factory_.v b/lib/installers/virt/buildah/buildah_factory_.v index 846eab53..f659ae7e 100644 --- a/lib/installers/virt/buildah/buildah_factory_.v +++ b/lib/installers/virt/buildah/buildah_factory_.v @@ -14,29 +14,61 @@ __global ( /////////FACTORY +@[params] +pub struct ArgsGet { +pub mut: + name string +} + +pub fn get(args_ ArgsGet) !&BuildahInstaller { + return &BuildahInstaller{} +} + //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS /////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// +fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager { + // unknown + // screen + // zinit + // tmux + // systemd + match cat { + .zinit { + console.print_debug('startupmanager: zinit') + return startupmanager.get(cat: .zinit)! + } + .systemd { + console.print_debug('startupmanager: systemd') + return startupmanager.get(cat: .systemd)! + } + else { + console.print_debug('startupmanager: auto') + return startupmanager.get()! + } + } +} + @[params] pub struct InstallArgs { pub mut: reset bool } -pub fn install(args InstallArgs) ! { - if args.reset { - destroy()! - } - if !(installed_()!) { - install_()! +pub fn (mut self BuildahInstaller) install(args InstallArgs) ! { + switch(self.name) + if args.reset || (!installed()!) { + install()! } } -pub fn destroy() ! { - destroy_()! +pub fn (mut self BuildahInstaller) destroy() ! { + switch(self.name) + destroy()! } -pub fn build() ! { - build_()! +// switch instance to be used for buildah +pub fn switch(name string) { + buildah_default = name } diff --git a/lib/installers/virt/buildah/buildah_model.v b/lib/installers/virt/buildah/buildah_model.v index e2195c0f..6e8d67ae 100644 --- a/lib/installers/virt/buildah/buildah_model.v +++ b/lib/installers/virt/buildah/buildah_model.v @@ -1,18 +1,22 @@ module buildah -pub const version = '1.38.0' const singleton = true const default = true +// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED +@[heap] pub struct BuildahInstaller { pub mut: name string = 'default' } fn obj_init(obj_ BuildahInstaller) !BuildahInstaller { + // never call get here, only thing we can do here is work on object itself mut obj := obj_ return obj } +// called before start if done fn configure() ! { + // mut installer := get()! 
} From 6a8bd5c2051596a9b175988f5f5aa506c323e415 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Jan 2025 18:12:16 +0100 Subject: [PATCH 4/5] wip: support multiple user access endpoints Co-authored-by: mahmoud Co-authored-by: mario --- .../gw_over_wireguard/gw_over_wireguard.vsh | 36 ++++++++++ lib/threefold/tfgrid3deployer/deployment.v | 8 ++- .../tfgrid3deployer/deployment_setup.v | 1 + lib/threefold/tfgrid3deployer/network.v | 68 ++++++++++++++++--- lib/threefold/tfgrid3deployer/webnames.v | 1 + 5 files changed, 102 insertions(+), 12 deletions(-) create mode 100644 examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh diff --git a/examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh b/examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh new file mode 100644 index 00000000..117ca7ec --- /dev/null +++ b/examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh @@ -0,0 +1,36 @@ +#!/usr/bin/env -S v -gc none -no-retry-compilation -d use_openssl -enable-globals -cg run + +//#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals -cg run +import freeflowuniverse.herolib.threefold.gridproxy +import freeflowuniverse.herolib.threefold.tfgrid3deployer +import freeflowuniverse.herolib.installers.threefold.griddriver +import os +import time + +griddriver.install()! + +v := tfgrid3deployer.get()! +println('cred: ${v}') +deployment_name := 'vm_caddy1' +mut deployment := tfgrid3deployer.new_deployment(deployment_name)! +deployment.add_network(ip_range: '1.1.1.1/16', user_access: 5) +deployment.add_machine( + name: 'vm_caddy1' + cpu: 1 + memory: 2 + planetary: false + public_ip4: true + size: 10 // 10 gig + mycelium: tfgrid3deployer.Mycelium{} +) +deployment.deploy()! + +vm1 := deployment.vm_get('vm_caddy1')! +println('vm1 info: ${vm1}') + +vm1_public_ip4 := vm1.public_ip4.all_before('/') + +deployment.add_webname(name: 'gwnamecaddy', backend: 'http://${vm1_public_ip4}:80', use_wireguard: true) +deployment.deploy()! +gw1 := deployment.webname_get('gwnamecaddy')! +println('gw info: ${gw1}') diff --git a/lib/threefold/tfgrid3deployer/deployment.v b/lib/threefold/tfgrid3deployer/deployment.v index 9157bcfc..3c6612c3 100644 --- a/lib/threefold/tfgrid3deployer/deployment.v +++ b/lib/threefold/tfgrid3deployer/deployment.v @@ -51,7 +51,7 @@ pub fn new_deployment(name string) !TFDeployment { kvstore := KVStoreFS{} if _ := kvstore.get(name) { - return error('Deployment with the same name is already exist.') + return error('Deployment with the same name "${name}" already exists.') } deployer := get_deployer()! @@ -114,6 +114,7 @@ pub fn (mut self TFDeployment) deploy() ! { } fn (mut self TFDeployment) set_nodes() ! 
{ + // TODO: each request should run in a separate thread for mut vm in self.vms { if vm.node_id != 0 { continue @@ -512,3 +513,8 @@ pub fn (mut self TFDeployment) list_deployments() !map[u32]grid_models.Deploymen return dls } + + +pub fn (mut self TFDeployment) configure_network(req NetworkRequirements)!{ + self.network.requirements = req +} \ No newline at end of file diff --git a/lib/threefold/tfgrid3deployer/deployment_setup.v b/lib/threefold/tfgrid3deployer/deployment_setup.v index d09e9afa..a88263cb 100644 --- a/lib/threefold/tfgrid3deployer/deployment_setup.v +++ b/lib/threefold/tfgrid3deployer/deployment_setup.v @@ -36,6 +36,7 @@ fn new_deployment_setup(network_specs NetworkSpecs, vms []VMachine, zdbs []ZDB, network_name: network_specs.name mycelium: network_specs.mycelium ip_range: network_specs.ip_range + user_access_configs: network_specs.user_access_configs.clone() } } diff --git a/lib/threefold/tfgrid3deployer/network.v b/lib/threefold/tfgrid3deployer/network.v index ce6055c4..7a4a1442 100644 --- a/lib/threefold/tfgrid3deployer/network.v +++ b/lib/threefold/tfgrid3deployer/network.v @@ -1,23 +1,38 @@ module tfgrid3deployer import freeflowuniverse.herolib.threefold.grid.models as grid_models -import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.threefold.grid import freeflowuniverse.herolib.ui.console import json import rand // NetworkInfo struct to represent network details + +pub struct NetworkRequirements{ +pub: + name string = 'net' + rand.string(5) + user_access_endpoints int +} +@[params] pub struct NetworkSpecs { pub mut: - name string = 'net' + rand.string(5) + requirements NetworkRequirements ip_range string = '10.10.0.0/16' mycelium string = rand.hex(64) + user_access_configs []UserAccessConfig +} + +struct UserAccessConfig{ +pub: + ip string + secret_key string + public_key string } struct NetworkHandler { mut: - network_name string + req NetworkRequirements + // network_name string nodes []u32 ip_range string wg_ports map[u32]u16 @@ -29,6 +44,9 @@ mut: none_accessible_ip_ranges []string mycelium string + // user_access_endopoints int + user_access_configs []UserAccessConfig + deployer &grid.Deployer @[skip; str: skip] } @@ -62,7 +80,7 @@ fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models. } return network_workload.to_workload( - name: self.network_name + name: self.req.name description: 'VGridClient network workload' ) } @@ -89,7 +107,7 @@ fn (mut self NetworkHandler) setup_access_node() ! { // - Check the new deployment if its node is hidden take the saved one // - if the access node is already set, that means we have set its values e.g. the wireguard port, keys - if self.hidden_nodes.len < 1 || self.nodes.len == 1 { + if self.req.user_access_endpoints == 0 && (self.hidden_nodes.len < 1 || self.nodes.len == 1) { self.public_node = 0 return } @@ -145,7 +163,6 @@ fn (mut self NetworkHandler) setup_access_node() ! { } fn (mut self NetworkHandler) setup_wireguard_data() ! { - // TODO: We need to set the extra node console.print_header('Setting up network workload.') self.hidden_nodes, self.none_accessible_ip_ranges = [], [] @@ -194,6 +211,22 @@ fn (mut self NetworkHandler) setup_wireguard_data() ! 
{ self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id]) } } + + to_create_user_access := self.req.user_access_endpoints - self.user_access_configs.len + if to_create_user_access < 0{ + // TODO: support removing user access + return error('removing user access is not supported') + } + + for i := 0; i Date: Thu, 30 Jan 2025 18:07:58 +0200 Subject: [PATCH 5/5] feat (tfgrid3deployer): add more support for wireguard - support adding multiple user access endpoints to a network - support connecting gateways over wireguard Co-authored-by: mahmoud --- .../gw_over_wireguard/gw_over_wireguard.vsh | 25 +++-- lib/threefold/tfgrid3deployer/deployment.v | 19 ++-- .../tfgrid3deployer/deployment_setup.v | 23 ++-- lib/threefold/tfgrid3deployer/network.v | 106 +++++++++++------- lib/threefold/tfgrid3deployer/webnames.v | 7 +- 5 files changed, 112 insertions(+), 68 deletions(-) mode change 100644 => 100755 examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh diff --git a/examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh b/examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh old mode 100644 new mode 100755 index 117ca7ec..e62bc498 --- a/examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh +++ b/examples/threefold/tfgrid3deployer/gw_over_wireguard/gw_over_wireguard.vsh @@ -11,11 +11,12 @@ griddriver.install()! v := tfgrid3deployer.get()! println('cred: ${v}') -deployment_name := 'vm_caddy1' +deployment_name := 'wireguard_dep_example' mut deployment := tfgrid3deployer.new_deployment(deployment_name)! -deployment.add_network(ip_range: '1.1.1.1/16', user_access: 5) + +deployment.configure_network(user_access_endpoints: 3)! deployment.add_machine( - name: 'vm_caddy1' + name: 'vm1' cpu: 1 memory: 2 planetary: false @@ -25,12 +26,22 @@ deployment.add_machine( ) deployment.deploy()! -vm1 := deployment.vm_get('vm_caddy1')! +vm1 := deployment.vm_get('vm1')! println('vm1 info: ${vm1}') -vm1_public_ip4 := vm1.public_ip4.all_before('/') +user_access_configs := deployment.get_user_access_configs() +for config in user_access_configs { + println('config:\n------\n${config.print_wg_config()}\n------\n') +} -deployment.add_webname(name: 'gwnamecaddy', backend: 'http://${vm1_public_ip4}:80', use_wireguard: true) +deployment.add_webname( + name: 'gwoverwg' + backend: 'http://${vm1.wireguard_ip}:8000' + use_wireguard_network: true +) deployment.deploy()! -gw1 := deployment.webname_get('gwnamecaddy')! + +gw1 := deployment.webname_get('gwoverwg')! println('gw info: ${gw1}') + +// tfgrid3deployer.delete_deployment(deployment_name)! diff --git a/lib/threefold/tfgrid3deployer/deployment.v b/lib/threefold/tfgrid3deployer/deployment.v index 3c6612c3..2e221007 100644 --- a/lib/threefold/tfgrid3deployer/deployment.v +++ b/lib/threefold/tfgrid3deployer/deployment.v @@ -1,7 +1,6 @@ module tfgrid3deployer import freeflowuniverse.herolib.threefold.grid.models as grid_models -import freeflowuniverse.herolib.threefold.gridproxy.model as gridproxy_models import freeflowuniverse.herolib.threefold.grid import freeflowuniverse.herolib.ui.console import compress.zlib @@ -9,7 +8,6 @@ import encoding.hex import x.crypto.chacha20 import crypto.sha256 import json -import rand struct GridContracts { pub mut: @@ -285,10 +283,10 @@ fn (mut self TFDeployment) finalize_deployment(setup DeploymentSetup) ! { } } - self.update_state(name_contracts_map, returned_deployments)! + self.update_state(setup, name_contracts_map, returned_deployments)! 
} -fn (mut self TFDeployment) update_state(name_contracts_map map[string]u64, dls map[u32]&grid_models.Deployment) ! { +fn (mut self TFDeployment) update_state(setup DeploymentSetup, name_contracts_map map[string]u64, dls map[u32]&grid_models.Deployment) ! { mut workloads := map[u32]map[string]&grid_models.Workload{} for node_id, deployment in dls { @@ -339,6 +337,10 @@ fn (mut self TFDeployment) update_state(name_contracts_map map[string]u64, dls m wn.node_contract_id = dls[wn.node_id].contract_id wn.name_contract_id = name_contracts_map[wn.requirements.name] } + + self.network.ip_range = setup.network_handler.ip_range + self.network.mycelium = setup.network_handler.mycelium + self.network.user_access_configs = setup.network_handler.user_access_configs.clone() } pub fn (mut self TFDeployment) vm_get(vm_name string) !VMachine { @@ -514,7 +516,10 @@ pub fn (mut self TFDeployment) list_deployments() !map[u32]grid_models.Deploymen return dls } - -pub fn (mut self TFDeployment) configure_network(req NetworkRequirements)!{ +pub fn (mut self TFDeployment) configure_network(req NetworkRequirements) ! { self.network.requirements = req -} \ No newline at end of file +} + +pub fn (mut self TFDeployment) get_user_access_configs() []UserAccessConfig { + return self.network.user_access_configs +} diff --git a/lib/threefold/tfgrid3deployer/deployment_setup.v b/lib/threefold/tfgrid3deployer/deployment_setup.v index a88263cb..fa2f6581 100644 --- a/lib/threefold/tfgrid3deployer/deployment_setup.v +++ b/lib/threefold/tfgrid3deployer/deployment_setup.v @@ -32,15 +32,15 @@ fn new_deployment_setup(network_specs NetworkSpecs, vms []VMachine, zdbs []ZDB, mut dls := DeploymentSetup{ deployer: deployer network_handler: NetworkHandler{ - deployer: deployer - network_name: network_specs.name - mycelium: network_specs.mycelium - ip_range: network_specs.ip_range + req: network_specs.requirements + deployer: deployer + mycelium: network_specs.mycelium + ip_range: network_specs.ip_range user_access_configs: network_specs.user_access_configs.clone() } } - dls.setup_network_workloads(vms, old_deployments)! + dls.setup_network_workloads(vms, webnames, old_deployments)! dls.setup_vm_workloads(vms)! dls.setup_zdb_workloads(zdbs)! dls.setup_webname_workloads(webnames)! @@ -68,9 +68,9 @@ fn (mut self DeploymentSetup) match_versions(old_dls map[u32]grid_models.Deploym // - st: Modified DeploymentSetup struct with network workloads set up // Returns: // - None -fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, old_deployments map[u32]grid_models.Deployment) ! { +fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, webnames []WebName, old_deployments map[u32]grid_models.Deployment) ! { st.network_handler.load_network_state(old_deployments)! - st.network_handler.create_network(vms)! + st.network_handler.create_network(vms, webnames)! data := st.network_handler.generate_workloads()! for node_id, workload in data { @@ -177,6 +177,11 @@ fn (mut self DeploymentSetup) setup_webname_workloads(webnames []WebName) ! 
{ tls_passthrough: req.tls_passthrough backends: [req.backend] name: gw_name + network: if wn.requirements.use_wireguard_network { + self.network_handler.req.name + } else { + none + } } self.workloads[wn.node_id] << gw.to_workload( @@ -206,7 +211,7 @@ fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip network: grid_models.ZmachineNetwork{ interfaces: [ grid_models.ZNetworkInterface{ - network: self.network_handler.network_name + network: self.network_handler.req.name ip: if vmachine.wireguard_ip.len > 0 { used_ip_octets[vmachine.node_id] << vmachine.wireguard_ip.all_after_last('.').u8() vmachine.wireguard_ip @@ -219,7 +224,7 @@ fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip planetary: vmachine.requirements.planetary mycelium: if mycelium := vmachine.requirements.mycelium { grid_models.MyceliumIP{ - network: self.network_handler.network_name + network: self.network_handler.req.name hex_seed: mycelium.hex_seed } } else { diff --git a/lib/threefold/tfgrid3deployer/network.v b/lib/threefold/tfgrid3deployer/network.v index 7a4a1442..07ef3cc6 100644 --- a/lib/threefold/tfgrid3deployer/network.v +++ b/lib/threefold/tfgrid3deployer/network.v @@ -7,26 +7,42 @@ import json import rand // NetworkInfo struct to represent network details - -pub struct NetworkRequirements{ -pub: - name string = 'net' + rand.string(5) +@[params] +pub struct NetworkRequirements { +pub mut: + name string = 'net' + rand.string(5) user_access_endpoints int } + @[params] pub struct NetworkSpecs { pub mut: - requirements NetworkRequirements - ip_range string = '10.10.0.0/16' - mycelium string = rand.hex(64) + requirements NetworkRequirements + ip_range string = '10.10.0.0/16' + mycelium string = rand.hex(64) user_access_configs []UserAccessConfig } -struct UserAccessConfig{ -pub: - ip string +pub struct UserAccessConfig { +pub: + ip string secret_key string public_key string + + peer_public_key string + network_ip_range string + public_node_endpoint string +} + +pub fn (c UserAccessConfig) print_wg_config() string { + return '[Interface] +Address = ${c.ip} +PrivateKey = ${c.secret_key} +[Peer] +PublicKey = ${c.peer_public_key} +AllowedIPs = ${c.network_ip_range}, 100.64.0.0/16 +PersistentKeepalive = 25 +Endpoint = ${c.public_node_endpoint}' } struct NetworkHandler { @@ -51,7 +67,7 @@ mut: } // TODO: maybe rename to fill_network or something similar -fn (mut self NetworkHandler) create_network(vmachines []VMachine) ! { +fn (mut self NetworkHandler) create_network(vmachines []VMachine, webnames []WebName) ! { // Set nodes self.nodes = [] @@ -61,9 +77,16 @@ fn (mut self NetworkHandler) create_network(vmachines []VMachine) ! { } } + for webname in webnames { + if webname.requirements.use_wireguard_network && !self.nodes.contains(webname.node_id) { + self.nodes << webname.node_id + } + } + console.print_header('Network nodes: ${self.nodes}.') self.setup_wireguard_data()! self.setup_access_node()! + self.setup_user_access()! } fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models.Peer, mycleium_hex_key string) !grid_models.Workload { @@ -88,10 +111,11 @@ fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models. 
fn (mut self NetworkHandler) prepare_hidden_node_peers(node_id u32) ![]grid_models.Peer { mut peers := []grid_models.Peer{} if self.public_node != 0 { + ip_range_oct := self.ip_range.all_before('/').split('.') peers << grid_models.Peer{ subnet: self.wg_subnet[self.public_node] wireguard_public_key: self.wg_keys[self.public_node][1] - allowed_ips: [self.ip_range, '100.64.0.0/16'] + allowed_ips: [self.ip_range, '100.64.${ip_range_oct[1]}.${ip_range_oct[2]}/24'] endpoint: '${self.endpoints[self.public_node]}:${self.wg_ports[self.public_node]}' } } @@ -99,14 +123,6 @@ fn (mut self NetworkHandler) prepare_hidden_node_peers(node_id u32) ![]grid_mode } fn (mut self NetworkHandler) setup_access_node() ! { - // Case 1: Deployment on 28 which is hidden node - // - Setup access node - // Case 2: Deployment on 11 which is public node - // - Already have the access node - // Case 3: if the saved state has already public node. - // - Check the new deployment if its node is hidden take the saved one - // - if the access node is already set, that means we have set its values e.g. the wireguard port, keys - if self.req.user_access_endpoints == 0 && (self.hidden_nodes.len < 1 || self.nodes.len == 1) { self.public_node = 0 return @@ -162,6 +178,26 @@ fn (mut self NetworkHandler) setup_access_node() ! { self.endpoints[self.public_node] = access_node.public_config.ipv4.split('/')[0] } +fn (mut self NetworkHandler) setup_user_access() ! { + to_create_user_access := self.req.user_access_endpoints - self.user_access_configs.len + if to_create_user_access < 0 { + // TODO: support removing user access + return error('removing user access is not supported') + } + + for i := 0; i < to_create_user_access; i++ { + wg_keys := self.deployer.client.generate_wg_priv_key()! + self.user_access_configs << UserAccessConfig{ + ip: self.calculate_subnet()! + secret_key: wg_keys[0] + public_key: wg_keys[1] + peer_public_key: self.wg_keys[self.public_node][1] + public_node_endpoint: '${self.endpoints[self.public_node]}:${self.wg_ports[self.public_node]}' + network_ip_range: self.ip_range + } + } +} + fn (mut self NetworkHandler) setup_wireguard_data() ! { console.print_header('Setting up network workload.') self.hidden_nodes, self.none_accessible_ip_ranges = [], [] @@ -211,22 +247,6 @@ fn (mut self NetworkHandler) setup_wireguard_data() ! { self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id]) } } - - to_create_user_access := self.req.user_access_endpoints - self.user_access_configs.len - if to_create_user_access < 0{ - // TODO: support removing user access - return error('removing user access is not supported') - } - - for i := 0; i