This commit is contained in:
2024-12-25 10:11:52 +01:00
parent 38aaba018e
commit 37d2501067
145 changed files with 12629 additions and 0 deletions

View File

@@ -0,0 +1,74 @@
module base
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.core.texttools
import os
const scriptspath = os.dir(@FILE) + '/../../../scripts'
// script_write renders a shell script named '${name}.sh' in scriptspath:
// the shared base library text first, then the dedented command lines.
// The resulting file is made executable for all users.
fn script_write(mybase string, name string, cmd_ string) ! {
	// assemble all lines first, then join once
	mut lines := [mybase]
	lines << texttools.dedent(cmd_).split_into_lines()
	out := lines.join('\n') + '\n'
	mut p := pathlib.get_file(path: '${scriptspath}/${name}.sh', create: true)!
	p.write(out)!
	// scripts need to be executable by anyone
	os.chmod(p.path, 0o777)!
}
// bash_installers_package concatenates all numbered base shell-library
// fragments (in their numeric load order) into one library text, then writes
// that library plus several entry-point scripts (installer, build_hero,
// install_hero, githubactions) into scriptspath.
// Returns the path of the scripts directory.
pub fn bash_installers_package() !string {
	// ordered list of library fragments; the numeric prefix defines load order
	l := '
	1_init.sh
	2_myplatform.sh
	3_gittools.sh
	4_package.sh
	5_exec.sh
	6_reset.sh
	7_zinit.sh
	8_osupdate.sh
	9_redis.sh
	10_installer_v.sh
	11_installer_herolib.sh
	12_installer_hero.sh
	13_s3.sh
	20_installers.sh
	'
	// concatenate all fragments into one library text
	mut out := ''
	for mut name in l.split_into_lines() {
		name = name.trim_space()
		if name == '' {
			continue
		}
		// fragments must already exist under scriptspath/lib (create: false)
		mut p := pathlib.get_file(path: '${scriptspath}/lib/${name}', create: false)!
		c := p.read()!
		out += c
	}
	// library only, no commands appended
	script_write(out, 'baselib', '')!
	// entry-point scripts: library + the specific command(s) to run
	script_write(out, 'installer', "
	freeflow_dev_env_install
	echo 'V & hero INSTALL OK'
	")!
	script_write(out, 'build_hero', "
	hero_build
	echo 'BUILD HERO OK'
	")!
	script_write(out, 'install_hero', "
	hero_install
	echo 'INSTALL HERO OK'
	")!
	script_write(out, 'githubactions', "
	hero_build
	hero_test
	hero_upload
	echo 'OK'
	")!
	mut p4 := pathlib.get_dir(path: '${scriptspath}', create: false)!
	return p4.path
}

View File

@@ -0,0 +1,133 @@
module base
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import os
@[params]
pub struct InstallArgs {
pub mut:
	reset   bool // redo the preparation even if already marked done
	develop bool // also install the development toolchain (see develop())
}
// install prepares the base platform (packages, tooling) for the current OS.
// Idempotent: returns early when 'platform_prepare' was already marked done,
// unless reset is requested. Supports OSX, Ubuntu, Alpine and Arch.
pub fn install(args_ InstallArgs) ! {
	console.print_header('install base (reset: ${args_.reset})')
	pl := osal.platform()
	mut args := args_
	// on OSX without brew we always need the full preparation
	if pl == .osx && !osal.cmd_exists('brew') {
		args.reset = true
	}
	if args.reset == false && osal.done_exists('platform_prepare') {
		console.print_header('Platform already prepared')
		return
	}
	if pl == .osx {
		console.print_header(' - OSX prepare')
		if !osal.cmd_exists('brew') {
			console.print_header(' -Install Brew')
			// brew install requires a shell restart afterwards; the script
			// ends with 'reset' and instructions, so exec never returns normally
			osal.exec(
				cmd: '
				/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
				(echo; echo \'eval "$(/opt/homebrew/bin/brew shellenv)"\') >> ${os.home_dir()}/.zprofile
				reset
				echo
				echo
				echo "execute: \'source ~/.zprofile\'"
				echo "or restart your shell"
				echo "and execute the hero command again"
				echo
				'
				stdout: true
				shell: true
			) or { return error('cannot install brew, something went wrong.\n${err}') }
		}
		osal.package_install('mc,tmux,git,rsync,curl,screen,wget,git-lfs')!
		// uv: python package/env manager
		if !osal.cmd_exists('uv') {
			osal.exec(cmd: 'curl -LsSf https://astral.sh/uv/install.sh | sh')!
		}
	} else if pl == .ubuntu {
		console.print_header(' - Ubuntu prepare')
		osal.package_refresh()!
		osal.package_install('autoconf,libtool,iputils-ping,net-tools,git,rsync,curl,mc,tmux,libsqlite3-dev,xz-utils,git,git-lfs,redis-server,ufw')!
	} else if pl == .alpine {
		console.print_header(' - Alpine prepare')
		osal.package_refresh()!
		osal.package_install('git,curl,mc,tmux,screen,git-lfs,redis-server')!
	} else if pl == .arch {
		console.print_header(' - Arch prepare')
		osal.package_refresh()!
		osal.package_install('rsync,ncdu, git,curl,mc,tmux,screen,git-lfs,redis,unzip,sudo,wget,htop,arch-install-scripts,ufw')!
	} else {
		panic('only ubuntu, arch, alpine and osx supported for now')
	}
	if args.develop {
		develop(reset: args.reset)!
	}
	sshkeysinstall()!
	console.print_header('platform prepare DONE')
	// remember success so subsequent calls are a no-op
	osal.done_set('platform_prepare', 'OK')!
}
// sshkeysinstall seeds ~/.ssh/known_hosts with the host keys of
// github.com and git.ourworld.tf (so git over ssh does not prompt),
// and sets the global git pull strategy to merge (no rebase).
// NOTE(review): the args parameter is currently unused.
pub fn sshkeysinstall(args InstallArgs) ! {
	cmd := '
	mkdir -p ~/.ssh
	if ! grep github.com ~/.ssh/known_hosts > /dev/null
	then
		ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
	fi

	if ! grep git.ourworld.tf ~/.ssh/known_hosts > /dev/null
	then
		ssh-keyscan -t rsa git.ourworld.tf >> ~/.ssh/known_hosts
	fi
	git config --global pull.rebase false
	'
	osal.exec(cmd: cmd, stdout: false)!
}
// develop installs the development toolchain (compilers, build deps) for the
// current platform. Idempotent via the 'hero_development' done-flag unless
// reset is requested. Supports OSX, Ubuntu, Alpine and Arch.
pub fn develop(args InstallArgs) ! {
	console.print_header('install base develop (reset: ${args.reset})')
	pl := osal.platform()
	if args.reset == false && osal.done_exists('hero_development') {
		return
	}
	if pl == .osx {
		console.print_header(' - OSX prepare for development.')
		osal.package_install('bdw-gc,libpq')!
		// clang comes with the xcode command line tools
		if !osal.cmd_exists('clang') {
			osal.execute_silent('xcode-select --install') or {
				return error('cannot install xcode-select --install, something went wrong.\n${err}')
			}
		}
	} else if pl == .ubuntu {
		console.print_header(' - Ubuntu prepare')
		osal.package_install('libgc-dev,make,libpq-dev,build-essential,gcc,tcc')!
		// secp256k1 build kept for reference, currently disabled
		// osal.exec(
		// 	cmd: '
		// 	cd /tmp
		// 	wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.4.1.tar.gz
		// 	tar -xvf v0.4.1.tar.gz
		// 	cd secp256k1-0.4.1/
		// 	./autogen.sh
		// 	./configure
		// 	make -j 5
		// 	make install
		// 	'
		// )!
	} else if pl == .alpine {
		osal.package_install('libpq-dev,make')!
	} else if pl == .arch {
		osal.package_install('gcc,tcc,make,postgresql-libs')!
	} else {
		panic('only arch, alpine, ubuntu and osx supported for now')
	}
	osal.done_set('hero_development', 'OK')!
	console.print_header('platform development DONE')
}

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Uninstall Homebrew and everything it installed (macOS cleanup helper).
# -u: error on unset vars; -x: trace commands; pipefail: propagate pipe errors.
set -ux -o pipefail

# Function to check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

if command_exists brew; then
    # Uninstall Homebrew and all installed packages
    echo "Uninstalling Homebrew and all packages..."
    /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/uninstall.sh)"

    # Verify removal and cleanup any remaining files
    echo "Cleaning up remaining files and verifying removal..."
    brew cleanup 2>/dev/null
    sudo rm -rf /opt/homebrew
    echo "Uninstallation process completed for brew."
fi

View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Remove all container tooling from a macOS host: Docker Desktop (containers,
# images, volumes, app bundle, user data), hero container binaries, and Lima VMs.
# Deliberately no -e: each cleanup step is best-effort.
set -u -o pipefail

# Function to check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Stop all running Docker containers and remove Docker images, containers, and volumes
if command_exists docker; then
    echo "Removing all Docker containers, images, and volumes..."
    docker stop $(docker ps -a -q) 2>/dev/null
    docker rm $(docker ps -a -q) 2>/dev/null
    docker rmi $(docker images -q) 2>/dev/null
    docker volume rm $(docker volume ls -q) 2>/dev/null
    docker system prune -a -f --volumes 2>/dev/null
    # quit the Docker Desktop app and remove its files
    osascript -e 'quit app "Docker"'
    sudo rm -rf /Applications/Docker.app
    rm -f ~/Library/Preferences/com.docker.docker.plist
    rm -rf ~/Library/Saved\ Application\ State/com.electron.docker-frontend.savedState
    rm -rf ~/Library/Containers/com.docker.docker
    rm -rf ~/Library/Containers/com.docker.helper
    rm -rf ~/Library/Application\ Support/Docker\ Desktop
    rm -rf ~/.docker
else
    echo "Docker is not installed. Skipping Docker cleanup."
fi

# Remove binaries from ~/hero/bin
echo "Removing binaries from ~/hero/bin..."
rm -f ~/hero/bin/lima*
rm -f ~/hero/bin/docker*
rm -f ~/hero/bin/herocontainers*
rm -f ~/hero/bin/kube*

# Remove Lima VMs
if command_exists limactl; then
    echo "Removing Lima VMs..."
    limactl stop $(limactl list --quiet) 2>/dev/null
    limactl delete --force $(limactl list --quiet) 2>/dev/null
    limactl list
    rm -rf ~/.lima
else
    echo "limactl is not installed. Skipping Lima VM removal."
fi

echo "Remove containers process completed."

View File

@@ -0,0 +1,43 @@
module base
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import os
const templatespath = os.dir(@FILE) + '/templates'
// osx_uninstall_containers removes Docker, container binaries and Lima VMs
// from an OSX host by running the embedded containers_uninstall.sh script.
// Errors if called on any other platform.
pub fn osx_uninstall_containers() ! {
	console.print_header('Uninstall Containers')
	pl := osal.platform()
	if pl != .osx {
		return error('only support OSX')
	}
	// script is embedded at compile time, no filesystem dependency at runtime
	containers_uninstall_file := $embed_file('templates/containers_uninstall.sh')
	osal.exec(
		cmd: containers_uninstall_file.to_string()
		stdout: false
		ignore_error: false
		shell: true
	) or { return error('cannot uninstall containers, something went wrong.\n${err}') }
	// will never come here because of shell: true
	console.print_header('Removed brew and docker ...')
}
// pub fn osx_uninstall_brew() ! {
// console.print_header('Uninstall Brew')
// pl := osal.platform()
// if pl != .osx {
// return error("only support OSX")
// }
// return error('cannot uninstall containers, something went wrong.\n${err}')
// brew_uninstall_file:=$embed_file('templates/brew_uninstall.sh')
// osal.exec(cmd: brew_uninstall_file.to_string(), stdout: true, ignore_error:false, shell: true
// ) or { return error('cannot uninstall brew, something went wrong.\n${err}') }
// //will never come here because of shell: true
// console.print_header('Removed brew and docker ...')
// }

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'meilisearchinstaller'
classname:'MeilisearchServer'
singleton:0
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:1
hasconfig:1
build:1

View File

@@ -0,0 +1,168 @@
module meilisearchinstaller
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.installers.ulist
// import freeflowuniverse.herolib.installers.lang.rust
import os
// installed reports whether the meilisearch binary is available on PATH
// (via the osal profile) AND matches the version pinned in this module.
// A version mismatch counts as "not installed" so install() re-downloads.
fn installed() !bool {
	res := os.execute('${osal.profile_path_source_and()} meilisearch -V')
	if res.exit_code != 0 {
		return false
	}
	// expect exactly one non-empty line like: 'meilisearch <version>'
	r := res.output.split_into_lines().filter(it.trim_space().len > 0)
	if r.len != 1 {
		return error("couldn't parse meilisearch version.\n${res.output}")
	}
	r2 := r[0].all_after('meilisearch').trim(' ')
	if texttools.version(version) != texttools.version(r2) {
		return false
	}
	return true
}
// install downloads the meilisearch release binary matching the current
// platform/architecture (linux/osx, arm/intel) and registers it as the
// 'meilisearch' command. Errors on any other platform.
fn install() ! {
	console.print_header('install meilisearch')
	mut url := ''
	if osal.is_linux_arm() {
		url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-linux-aarch64'
	} else if osal.is_linux_intel() {
		url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-linux-amd64'
	} else if osal.is_osx_arm() {
		url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-macos-apple-silicon'
	} else if osal.is_osx_intel() {
		url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-macos-amd64'
	} else {
		// BUGFIX: was misspelled 'unsported platform'
		return error('unsupported platform')
	}
	// download with a minimum-size sanity check (binary is > ~100MB)
	mut dest := osal.download(
		url: url
		minsize_kb: 100000
		expand_dir: '/tmp/meilisearch'
	)!
	// dest.moveup_single_subdir()!
	mut binpath := dest.file_get('meilisearch')!
	osal.cmd_add(
		cmdname: 'meilisearch'
		source: binpath.path
	)!
}
// build would compile meilisearch from source with cargo; currently a no-op
// stub, the commented code below is kept as a template for a future build path.
fn build() ! {
	// mut installer := get()!
	// url := 'https://github.com/threefoldtech/meilisearch'
	// console.print_header('compile meilisearch')
	// rust.install()!
	// mut dest_on_os := '${os.home_dir()}/hero/bin'
	// if osal.is_linux() {
	// 	dest_on_os = '/usr/local/bin'
	// }
	// console.print_debug(' - dest path for meilisearchs is on: ${dest_on_os}')
	// //osal.package_install('pkg-config,openssl')!
	// cmd := '
	// echo "start meilisearch installer"
	// set +ex
	// source ~/.cargo/env > /dev/null 2>&1
	// //TODO
	// cargo install meilisearch
	// cp ${os.home_dir()}/.cargo/bin/mdb* ${dest_on_os}/
	// '
	// defer {
	// 	destroy()!
	// }
	// osal.execute_stdout(cmd)!
	// osal.done_set('install_meilisearch', 'OK')!
	// console.print_header('meilisearch installed')
}
// get the Upload List of the files
// get the Upload List of the files; currently empty (nothing is built locally,
// the binary is downloaded from the official releases).
fn ulist_get() !ulist.UList {
	// mut installer := get()!
	// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
	return ulist.UList{}
}
// uploads to S3 server if configured
// uploads build artefacts to an S3 server if configured; no-op because
// nothing is built locally (see build()).
fn upload() ! {
	// mut installer := get()!
	// installers.upload(
	// 	cmdname: 'meilisearch'
	// 	source: '${gitpath}/target/x86_64-unknown-linux-musl/release/meilisearch'
	// )!
}
// startupcmd builds the startup-manager process definition for meilisearch,
// wiring host/port/db-path/master-key and env mode from the active config.
fn startupcmd() ![]zinit.ZProcessNewArgs {
	mut res := []zinit.ZProcessNewArgs{}
	mut installer := get()!
	// meilisearch --env accepts 'development' or 'production'
	mut env := 'development'
	if installer.production {
		env = 'production'
	}
	res << zinit.ZProcessNewArgs{
		name: 'meilisearch'
		cmd: 'meilisearch --no-analytics --http-addr ${installer.host}:${installer.port} --env ${env} --db-path ${installer.path} --master-key ${installer.masterkey}'
	}
	return res
}
// running should report whether meilisearch answers on its HTTP endpoint.
// NOTE(review): currently a stub that always returns false — the health-check
// template below needs to be adapted for meilisearch; as long as this returns
// false, start() will always time out with an error.
fn running() !bool {
	mut installer := get()!
	// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
	// this checks health of meilisearch
	// curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
	// url:='http://127.0.0.1:${cfg.port}/api/v1'
	// mut conn := httpconnection.new(name: 'meilisearch', url: url)!
	// if cfg.secret.len > 0 {
	// 	conn.default_header.add(.authorization, 'Bearer ${cfg.secret}')
	// }
	// conn.default_header.add(.content_type, 'application/json')
	// console.print_debug("curl -X 'GET' '${url}'/tags --oauth2-bearer ${cfg.secret}")
	// r := conn.get_json_dict(prefix: 'tags', debug: false) or {return false}
	// println(r)
	// if true{panic("ssss")}
	// tags := r['Tags'] or { return false }
	// console.print_debug(tags)
	// console.print_debug('meilisearch is answering.')
	return false
}
// hooks invoked by the generic lifecycle methods around start/stop;
// intentionally empty for meilisearch.
fn start_pre() ! {
}

fn start_post() ! {
}

fn stop_pre() ! {
}

fn stop_post() ! {
}
// destroy kills any running meilisearch process, removes the registered
// command and best-effort removes a package-manager install of it.
fn destroy() ! {
	// mut systemdfactory := systemd.new()!
	// systemdfactory.destroy("meilisearch")!

	osal.process_kill_recursive(name: 'meilisearch')!
	osal.cmd_delete('meilisearch')!
	// best effort: ignore failure when no package was installed
	osal.package_remove('
	meilisearch
	') or { println('') }

	// osal.rm("
	// ")!
}

View File

@@ -0,0 +1,271 @@
module meilisearchinstaller
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
meilisearchinstaller_global map[string]&MeilisearchServer
meilisearchinstaller_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
	name string // instance name; empty means "use the module default"
}
// args_get resolves the effective instance name:
// explicit name > module default > literal 'default'.
fn args_get(args_ ArgsGet) ArgsGet {
	mut args := args_
	if args.name.len == 0 {
		args.name = meilisearchinstaller_default
	}
	if args.name.len == 0 {
		args.name = 'default'
	}
	return args
}
// get returns the configured MeilisearchServer instance for the given name,
// loading (and for 'default' optionally auto-creating) its config from the
// hero context on first access. Panics if no config can be resolved.
pub fn get(args_ ArgsGet) !&MeilisearchServer {
	mut args := args_get(args_)
	if args.name !in meilisearchinstaller_global {
		if args.name == 'default' {
			// auto-save the default config once, when the module allows it
			if !config_exists(args) {
				if default {
					config_save(args)!
				}
			}
			config_load(args)!
		}
	}
	return meilisearchinstaller_global[args.name] or {
		println(meilisearchinstaller_global)
		panic('could not get config for meilisearchinstaller with name:${args.name}')
	}
}
// config_exists reports whether a saved config exists for this instance name.
fn config_exists(args_ ArgsGet) bool {
	mut args := args_get(args_)
	mut context := base.context() or { panic('bug') }
	return context.hero_config_exists('meilisearchinstaller', args.name)
}

// config_load reads the saved heroscript config and replays it,
// which populates meilisearchinstaller_global via set().
fn config_load(args_ ArgsGet) ! {
	mut args := args_get(args_)
	mut context := base.context()!
	mut heroscript := context.hero_config_get('meilisearchinstaller', args.name)!
	play(heroscript: heroscript)!
}

// config_save persists the default heroscript config for this instance name.
fn config_save(args_ ArgsGet) ! {
	mut args := args_get(args_)
	mut context := base.context()!
	context.hero_config_set('meilisearchinstaller', args.name, heroscript_default()!)!
}

// set initializes the object and registers it as the module default.
fn set(o MeilisearchServer) ! {
	mut o2 := obj_init(o)!
	meilisearchinstaller_global[o.name] = &o2
	meilisearchinstaller_default = o.name
}
// PlayArgs for play(): either a ready playbook or heroscript text
// from which a playbook is created.
@[params]
pub struct PlayArgs {
pub mut:
	heroscript string // if filled in then plbook will be made out of it
	plbook     ?playbook.PlayBook
	reset      bool
}

// play processes all 'meilisearchinstaller.*' actions of the playbook:
// configure actions create named config objects; destroy/install/build and
// start/stop/restart act on the named instance.
pub fn play(args_ PlayArgs) ! {
	mut args := args_
	if args.heroscript == '' {
		args.heroscript = heroscript_default()!
	}
	mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }

	// first pass: configuration actions
	mut install_actions := plbook.find(filter: 'meilisearchinstaller.configure')!
	if install_actions.len > 0 {
		for install_action in install_actions {
			mut p := install_action.params
			mycfg := cfg_play(p)!
			console.print_debug('install action meilisearchinstaller.configure\n${mycfg}')
			set(mycfg)!
		}
	}

	// second pass: lifecycle actions
	mut other_actions := plbook.find(filter: 'meilisearchinstaller.')!
	for other_action in other_actions {
		if other_action.name in ['destroy', 'install', 'build'] {
			mut p := other_action.params
			reset := p.get_default_false('reset')
			// destroy also runs when install/build asks for a reset
			if other_action.name == 'destroy' || reset {
				console.print_debug('install action meilisearchinstaller.destroy')
				destroy()!
			}
			if other_action.name == 'install' {
				console.print_debug('install action meilisearchinstaller.install')
				install()!
			}
		}
		if other_action.name in ['start', 'stop', 'restart'] {
			mut p := other_action.params
			name := p.get('name')!
			mut meilisearchinstaller_obj := get(name: name)!
			console.print_debug('action object:\n${meilisearchinstaller_obj}')
			if other_action.name == 'start' {
				console.print_debug('install action meilisearchinstaller.${other_action.name}')
				meilisearchinstaller_obj.start()!
			}
			if other_action.name == 'stop' {
				console.print_debug('install action meilisearchinstaller.${other_action.name}')
				meilisearchinstaller_obj.stop()!
			}
			if other_action.name == 'restart' {
				console.print_debug('install action meilisearchinstaller.${other_action.name}')
				meilisearchinstaller_obj.restart()!
			}
		}
	}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a process's requested startup type to a concrete
// startup manager; anything other than zinit/systemd falls back to
// auto-detection by the startupmanager module.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	// unknown
	// screen
	// zinit
	// tmux
	// systemd
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
// load from disk and make sure is properly intialized
pub fn (mut self MeilisearchServer) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self MeilisearchServer) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('meilisearchinstaller start')
if !installed()! {
install()!
}
configure()!
start_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('starting meilisearchinstaller with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
return error('meilisearchinstaller did not install properly.')
}
pub fn (mut self MeilisearchServer) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self MeilisearchServer) stop() ! {
switch(self.name)
stop_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
stop_post()!
}
pub fn (mut self MeilisearchServer) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self MeilisearchServer) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self MeilisearchServer) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn (mut self MeilisearchServer) build() ! {
switch(self.name)
build()!
}
pub fn (mut self MeilisearchServer) destroy() ! {
switch(self.name)
self.stop() or {}
destroy()!
}
// switch instance to be used for meilisearchinstaller
pub fn switch(name string) {
meilisearchinstaller_default = name
}

View File

@@ -0,0 +1,59 @@
module meilisearchinstaller
import freeflowuniverse.herolib.data.paramsparser
pub const version = '1.11.3'
const singleton = false
const default = true
// heroscript_default returns the default configuration heroscript.
// BUGFIX: the action was '!!meilisearch.configure', which never matches the
// 'meilisearchinstaller.configure' filter used in play(), so the default
// config was never applied and get() would panic.
pub fn heroscript_default() !string {
	heroscript := "
	!!meilisearchinstaller.configure
		name:'default'
		masterkey: '1234'
		host: 'localhost'
		port: 7700
		production: 0
	"
	return heroscript
}
// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
pub struct MeilisearchServer {
pub mut:
	name       string = 'default' // instance name
	path       string // db path on disk
	masterkey  string @[secret] // API master key
	host       string // listen host
	port       int    // listen port
	production bool   // run with --env production when true
}
// cfg_play builds a MeilisearchServer config object from heroscript
// action params, applying defaults for every missing field.
fn cfg_play(p paramsparser.Params) !MeilisearchServer {
	name := p.get_default('name', 'default')!
	mut mycfg := MeilisearchServer{
		name: name
		// '{HOME}' is a placeholder expanded by the params/path layer
		path: p.get_default('path', '{HOME}/hero/var/meilisearch/${name}')!
		host: p.get_default('host', 'localhost')!
		masterkey: p.get_default('masterkey', '1234')!
		port: p.get_int_default('port', 7700)!
		production: p.get_default_false('production')
	}
	return mycfg
}
// obj_init normalizes a freshly loaded config object; currently a pass-through.
fn obj_init(obj_ MeilisearchServer) !MeilisearchServer {
	// never call get here, only thing we can do here is work on object itself
	mut obj := obj_
	return obj
}
// called before start if done
fn configure() ! {
// mut installer := get()!
// mut mycode := $tmpl('templates/atemplate.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
}

View File

@@ -0,0 +1,44 @@
# meilisearch
To get started
```vlang
import freeflowuniverse.herolib.installers.db.meilisearch as meilisearchinstaller
heroscript:="
!!meilisearchinstaller.configure name:'test'
masterkey: '1234'
port: 7701
!!meilisearchinstaller.start name:'test' reset:1
"
meilisearchinstaller.play(heroscript: heroscript)!
//or we can call the default and do a start with reset
//mut installer:= meilisearchinstaller.get()!
//installer.start(reset:true)!
```
## example heroscript
```hero
!!meilisearchinstaller.configure
name:'default'
path: '{HOME}/hero/var/meilisearch/default'
masterkey: ''
host: 'localhost'
port: 7700
production: 0
```

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'postgresql'
classname:'Postgresql'
singleton:1
templates:1
default:1
title:''
supported_platforms:'linux'
reset:0
startupmanager:1
hasconfig:1
build:0

View File

@@ -0,0 +1,16 @@
module postgresql
// import freeflowuniverse.herolib.osal
// import freeflowuniverse.herolib.ui.console
// import freeflowuniverse.herolib.installers.virt.docker
// pub fn requirements() ! {
// if !osal.done_exists('postgres_install') {
// panic('to implement, check is ubuntu and then install, for now only ubuntu')
// osal.package_install('libpq-dev,postgresql-client')!
// osal.done_set('postgres_install', 'OK')!
// console.print_header('postgresql installed')
// } else {
// console.print_header('postgresql already installed')
// }
// }

View File

@@ -0,0 +1,8 @@
```bash
brew install postgresql
brew services start postgresql
psql postgres -c "CREATE ROLE postgres WITH LOGIN SUPERUSER PASSWORD 'mypasswd';"
psql -U postgres
```

View File

@@ -0,0 +1,88 @@
module postgresql
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.osal.zinit
// installed always reports true: postgresql runs from a podman image,
// there is no local binary to probe.
// NOTE(review): this means install() (the image pull) only runs via an
// explicit install/reset action, never automatically from start() — confirm intended.
fn installed() !bool {
	return true
}
// install pulls the official postgres container image via podman.
fn install() ! {
	osal.execute_silent('podman pull docker.io/library/postgres:latest')!
}
// startupcmd builds the zinit process definition that runs postgres in a
// podman container, with the data dir mounted from cfg.path and a
// pg_isready-based container health check.
fn startupcmd() ![]zinit.ZProcessNewArgs {
	mut cfg := get()!
	mut res := []zinit.ZProcessNewArgs{}
	db_user := 'root'
	cmd := "
	mkdir -p ${cfg.path}
	podman run --name ${cfg.name} -e POSTGRES_USER=${db_user} -e POSTGRES_PASSWORD=\"${cfg.passwd}\" -v ${cfg.path}:/var/lib/postgresql/data -p 5432:5432 --health-cmd=\"pg_isready -U ${db_user}\" postgres:latest
	"
	res << zinit.ZProcessNewArgs{
		name: 'postgresql'
		cmd: cmd
		workdir: cfg.path
		startuptype: .zinit
	}
	return res
}
// running reports whether the server answers: check() must succeed
// (SQL query + container health check).
fn running() !bool {
	mut mydb := get()!
	mydb.check() or { return false }
	return true
}
// hooks invoked by the generic lifecycle methods around start/stop;
// intentionally empty for postgresql.
fn start_pre() ! {
}

fn start_post() ! {
}

fn stop_pre() ! {
}

fn stop_post() ! {
}
// destroy delegates to the server object's destroy; the commented code below
// documents how a native (non-container) postgresql would be purged.
fn destroy() ! {
	mut mydb := get()!
	mydb.destroy()!
	// mut cfg := get()!
	// osal.rm("
	// 	${cfg.path}
	// 	/etc/postgresql/
	// 	/etc/postgresql-common/
	// 	/var/lib/postgresql/
	// 	/etc/systemd/system/multi-user.target.wants/postgresql
	// 	/lib/systemd/system/postgresql.service
	// 	/lib/systemd/system/postgresql@.service
	// ")!
	// c := '
	// #dont die
	// set +e

	// # Stop the PostgreSQL service
	// sudo systemctl stop postgresql

	// # Purge PostgreSQL packages
	// sudo apt-get purge -y postgresql* pgdg-keyring

	// # Remove all data and configurations
	// sudo userdel -r postgres
	// sudo groupdel postgres

	// # Reload systemd configurations and reset failed systemd entries
	// sudo systemctl daemon-reload
	// sudo systemctl reset-failed

	// echo "PostgreSQL has been removed completely"
	// '
	// osal.exec(cmd: c)!
}

View File

@@ -0,0 +1,103 @@
module postgresql
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import db.pg
import os
import net
// path_config returns (and creates if needed) the config dir of this server.
pub fn (mut server Postgresql) path_config() !pathlib.Path {
	return pathlib.get_dir(path: '${server.path}/config', create: true)!
}

// path_data returns (and creates if needed) the data dir of this server.
pub fn (mut server Postgresql) path_data() !pathlib.Path {
	return pathlib.get_dir(path: '${server.path}/data', create: true)!
}

// path_export returns (and creates if needed) the exports dir of this server.
pub fn (mut server Postgresql) path_export() !pathlib.Path {
	return pathlib.get_dir(path: '${server.path}/exports', create: true)!
}
// is_port_open reports whether a TCP connection to host:port succeeds.
fn is_port_open(host string, port int) bool {
	mut socket := net.dial_tcp('${host}:${port}') or { return false }
	// BUGFIX: the connection succeeded, so the port is open regardless of
	// whether closing the socket errors; previously a failed close()
	// wrongly reported the port as closed.
	socket.close() or {}
	return true
}
// db opens a connection to the postgres database as user 'root' on
// localhost:5432, after a quick TCP reachability check.
// NOTE(review): port 5432 is hardcoded here and in startupcmd — keep in sync.
pub fn (mut server Postgresql) db() !pg.DB {
	if is_port_open('localhost', 5432) == false {
		return error('PostgreSQL is not listening on port 5432')
	}
	conn_string := 'postgresql://root:${server.passwd}@localhost:5432/postgres?connect_timeout=5'
	mut db := pg.connect_with_conninfo(conn_string)!
	// console.print_header("Database connected: ${db}")
	return db
}
// check verifies the server answers SQL queries and that its podman
// container passes its health check, then caches the container id.
pub fn (mut server Postgresql) check() ! {
	mut db := server.db() or { return error('failed to check server: ${err}') }
	db.exec('SELECT version();') or { return error('postgresql could not do select version') }
	cmd := 'podman healthcheck run ${server.name}'
	result := os.execute(cmd)
	if result.exit_code != 0 {
		return error("Postgresql container isn't healthy: ${result.output}")
	}
	// BUGFIX: previously inspected the hardcoded 'default' container, reported
	// the wrong result on error, and stored the inspect COMMAND string in
	// server.container_id instead of the command's output.
	container_id_cmd := 'podman container inspect ${server.name} --format {{.Id}}'
	container_id_result := os.execute(container_id_cmd)
	if container_id_result.exit_code != 0 {
		return error('Cannot get the container ID: ${container_id_result.output}')
	}
	server.container_id = container_id_result.output.trim_space()
	console.print_header('Container ID: ${server.container_id}')
}
// db_exists reports whether a database with the given (exact) name exists.
// Errors if pg_database unexpectedly holds more than one row for the name.
pub fn (mut server Postgresql) db_exists(name_ string) !bool {
	mut db := server.db()!
	// SELECT datname FROM pg_database WHERE datname='gitea';
	r := db.exec("SELECT datname FROM pg_database WHERE datname='${name_}';")!
	if r.len == 1 {
		console.print_header('db exists: ${name_}')
		return true
	}
	if r.len > 1 {
		return error('should not have more than 1 db with name ${name_}')
	}
	return false
}
// db_create creates database name_ (normalized via name_fix) if it does not
// exist yet, and verifies the creation afterwards.
pub fn (mut server Postgresql) db_create(name_ string) ! {
	name := texttools.name_fix(name_)
	server.check()!
	mut db := server.db()!
	// BUGFIX: existence must be checked with the normalized name — the same
	// one used in CREATE DATABASE — not the raw input name_.
	db_exists := server.db_exists(name)!
	if !db_exists {
		console.print_header('db create: ${name}')
		db.exec('CREATE DATABASE ${name};')!
	}
	db_exists2 := server.db_exists(name)!
	if !db_exists2 {
		return error('Could not create db: ${name}, could not find in DB.')
	}
}
// db_delete drops database name_ (normalized via name_fix) if it exists,
// and verifies the removal afterwards.
pub fn (mut server Postgresql) db_delete(name_ string) ! {
	name := texttools.name_fix(name_)
	server.check()!
	mut db := server.db()!
	// BUGFIX: existence must be checked with the normalized name — the same
	// one used in DROP DATABASE — not the raw input name_.
	db_exists := server.db_exists(name)!
	if db_exists {
		console.print_header('db delete: ${name}')
		db.exec('DROP DATABASE ${name};')!
	}
	db_exists2 := server.db_exists(name)!
	if db_exists2 {
		return error('Could not delete db: ${name}, it still exists in DB.')
	}
}

View File

@@ -0,0 +1,266 @@
module postgresql
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
postgresql_global map[string]&Postgresql
postgresql_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
	name string // instance name; empty means "use the module default"
}

// args_get resolves the effective instance name:
// explicit name > module default > literal 'default'.
fn args_get(args_ ArgsGet) ArgsGet {
	mut args := args_
	if args.name == '' {
		args.name = postgresql_default
	}
	if args.name == '' {
		args.name = 'default'
	}
	return args
}
// get returns the configured Postgresql instance for the given name, loading
// (and for 'default' optionally auto-creating) its config from the hero
// context on first access. Panics if no config can be resolved.
pub fn get(args_ ArgsGet) !&Postgresql {
	mut args := args_get(args_)
	if args.name !in postgresql_global {
		if args.name == 'default' {
			// auto-save the default config once, when the module allows it
			if !config_exists(args) {
				if default {
					config_save(args)!
				}
			}
			config_load(args)!
		}
	}
	return postgresql_global[args.name] or {
		println(postgresql_global)
		panic('could not get config for postgresql with name:${args.name}')
	}
}
// config_exists reports whether a saved config exists for this instance name.
fn config_exists(args_ ArgsGet) bool {
	mut args := args_get(args_)
	mut context := base.context() or { panic('bug') }
	return context.hero_config_exists('postgresql', args.name)
}

// config_load reads the saved heroscript config and replays it,
// which populates postgresql_global via set().
fn config_load(args_ ArgsGet) ! {
	mut args := args_get(args_)
	mut context := base.context()!
	mut heroscript := context.hero_config_get('postgresql', args.name)!
	play(heroscript: heroscript)!
}

// config_save persists the default heroscript config for this instance name.
fn config_save(args_ ArgsGet) ! {
	mut args := args_get(args_)
	mut context := base.context()!
	context.hero_config_set('postgresql', args.name, heroscript_default()!)!
}

// set initializes the object and registers it as the module default.
fn set(o Postgresql) ! {
	mut o2 := obj_init(o)!
	postgresql_global[o.name] = &o2
	postgresql_default = o.name
}
// PlayArgs for play(): either a ready playbook or heroscript text
// from which a playbook is created.
@[params]
pub struct PlayArgs {
pub mut:
	heroscript string // if filled in then plbook will be made out of it
	plbook     ?playbook.PlayBook
	reset      bool
}

// play processes all 'postgresql.*' actions of the playbook: configure
// actions create named config objects; destroy/install/build and
// start/stop/restart act on the named instance.
pub fn play(args_ PlayArgs) ! {
	mut args := args_
	if args.heroscript == '' {
		args.heroscript = heroscript_default()!
	}
	mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }

	// first pass: configuration actions
	mut install_actions := plbook.find(filter: 'postgresql.configure')!
	if install_actions.len > 0 {
		for install_action in install_actions {
			mut p := install_action.params
			mycfg := cfg_play(p)!
			console.print_debug('install action postgresql.configure\n${mycfg}')
			set(mycfg)!
		}
	}

	// second pass: lifecycle actions
	mut other_actions := plbook.find(filter: 'postgresql.')!
	for other_action in other_actions {
		if other_action.name in ['destroy', 'install', 'build'] {
			mut p := other_action.params
			reset := p.get_default_false('reset')
			// destroy also runs when install/build asks for a reset
			if other_action.name == 'destroy' || reset {
				console.print_debug('install action postgresql.destroy')
				destroy()!
			}
			if other_action.name == 'install' {
				console.print_debug('install action postgresql.install')
				install()!
			}
		}
		if other_action.name in ['start', 'stop', 'restart'] {
			mut p := other_action.params
			name := p.get('name')!
			mut postgresql_obj := get(name: name)!
			console.print_debug('action object:\n${postgresql_obj}')
			if other_action.name == 'start' {
				console.print_debug('install action postgresql.${other_action.name}')
				postgresql_obj.start()!
			}
			if other_action.name == 'stop' {
				console.print_debug('install action postgresql.${other_action.name}')
				postgresql_obj.stop()!
			}
			if other_action.name == 'restart' {
				console.print_debug('install action postgresql.${other_action.name}')
				postgresql_obj.restart()!
			}
		}
	}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a process's requested startup type to a concrete
// startup manager; anything other than zinit/systemd falls back to
// auto-detection by the startupmanager module.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	// unknown
	// screen
	// zinit
	// tmux
	// systemd
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
// load from disk and make sure is properly intialized
pub fn (mut self Postgresql) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self Postgresql) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('postgresql start')
if !installed()! {
install()!
}
configure()!
start_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('starting postgresql with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
return error('postgresql did not install properly.')
}
pub fn (mut self Postgresql) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self Postgresql) stop() ! {
switch(self.name)
stop_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
stop_post()!
}
pub fn (mut self Postgresql) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
// Report whether the postgresql instance is running: every declared startup
// process must be running according to its startup manager, and the
// module-level running() health check must pass as well.
pub fn (mut self Postgresql) running() !bool {
	switch(self.name)
	// walk over the generic processes, if not running return
	for zprocess in startupcmd()! {
		mut manager := startupmanager_get(zprocess.startuptype)!
		if !manager.running(zprocess.name)! {
			return false
		}
	}
	return running()!
}
// Arguments for install(): set reset to force a reinstall even when the
// server is already installed.
@[params]
pub struct InstallArgs {
pub mut:
	reset bool
}
// Install postgresql for this instance. A no-op when already installed,
// unless args.reset forces a reinstall.
pub fn (mut self Postgresql) install(args InstallArgs) ! {
	switch(self.name)
	if !args.reset && installed()! {
		// already present and no reset requested
		return
	}
	install()!
}
// Destroy the postgresql instance: best-effort stop (errors deliberately
// ignored so destroy works even when the service is broken), then wipe it.
pub fn (mut self Postgresql) destroy() ! {
	switch(self.name)
	self.stop() or {}
	destroy()!
}
// switch instance to be used for postgresql
// NOTE: mutates the module-level default (postgresql_default); every
// subsequent factory call operates on the named instance.
pub fn switch(name string) {
	postgresql_default = name
}

View File

@@ -0,0 +1,62 @@
module postgresql
import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.osal
import os
// Module version of this installer.
pub const version = '0.0.0'
// Only one postgresql instance is managed per host by this module.
const singleton = true
// A default instance is created automatically when none is configured.
const default = true
// Default heroscript used to configure a postgresql instance when the user
// provides none. NOTE(review): the literal's internal whitespace is
// significant to the heroscript parser — do not reformat it.
pub fn heroscript_default() !string {
	heroscript := "
!!postgresql.configure
name:'default'
passwd:'mysecret'
path:''
"
	return heroscript
}
// Configuration/state of one managed postgresql instance.
pub struct Postgresql {
pub mut:
	// instance name, used as the configuration key
	name string = 'default'
	// data directory; filled in by obj_init when left empty
	path string
	// superuser password
	passwd string
	// id of the container running the server, when containerized
	container_id string
}
// Build a Postgresql config object from parsed heroscript params.
// passwd is required; name defaults to 'default', path to '' (resolved
// later by obj_init).
fn cfg_play(p paramsparser.Params) !Postgresql {
	mut mycfg := Postgresql{
		name: p.get_default('name', 'default')!
		passwd: p.get('passwd')!
		path: p.get_default('path', '')!
	}
	return mycfg
}
// Finalize a Postgresql object after loading: pick a platform-specific
// default data directory when none is set, and make sure it exists on disk.
fn obj_init(obj_ Postgresql) !Postgresql {
	mut obj := obj_
	if obj.path == '' {
		// Linux gets a system-wide path; other platforms use the user's
		// hero var directory.
		obj.path = if osal.is_linux() {
			'/data/postgresql/${obj.name}'
		} else {
			'${os.home_dir()}/hero/var/postgresql/${obj.name}'
		}
	}
	osal.dir_ensure(obj.path)!
	return obj
}
// called before start if done
// Currently a no-op: the template-based generation of pg_hba.conf and
// postgresql.conf below is kept as a reference but disabled.
fn configure() ! {
	// t2 := $tmpl('templates/pg_hba.conf')
	// mut p2 := server.path_config.file_get_new('pg_hba.conf')!
	// p2.write(t2)!
	// mut t3 := $tmpl('templates/postgresql.conf')
	// t3 = t3.replace('@@', '$') // to fix templating issues
	// mut p3 := server.path_config.file_get_new('postgresql.conf')!
	// p3.write(t3)!
}

View File

@@ -0,0 +1,56 @@
# postgresql
To get started
```vlang
import freeflowuniverse.herolib.installers.db.postgresql
mut installer:= postgresql.get()!
installer.start()!
```
## example heroscript
```hero
!!postgresql.install
path: ''
passwd: 'asecret'
```
## use psql
Uses the `hero configure` output together with a `jq` command-line trick to fetch the password:
```bash
#default is the instance name
export PGPASSWORD=`hero configure -c postgres -i default -s | jq -r '.passwd'`
psql -U "root" -h localhost
```
## to use in other installer
```v
//e.g. in server configure function
import freeflowuniverse.herolib.installers.db.postgresql
mut mydbinstaller:=postgresql.get()!
mydbinstaller.start()!
// now create the DB
mydbinstaller.db_create('gitea')!
```

View File

@@ -0,0 +1,100 @@
# PostgreSQL Client Authentication Configuration File
# ===================================================
#
# Refer to the "Client Authentication" section in the PostgreSQL
# documentation for a complete description of this file. A short
# synopsis follows.
#
# This file controls: which hosts are allowed to connect, how clients
# are authenticated, which PostgreSQL user names they can use, which
# databases they can access. Records take one of these forms:
#
# local DATABASE USER METHOD [OPTIONS]
# host DATABASE USER ADDRESS METHOD [OPTIONS]
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS]
#
# (The uppercase items must be replaced by actual values.)
#
# The first field is the connection type:
# - "local" is a Unix-domain socket
# - "host" is a TCP/IP socket (encrypted or not)
# - "hostssl" is a TCP/IP socket that is SSL-encrypted
# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted
# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted
# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted
#
# DATABASE can be "all", "sameuser", "samerole", "replication", a
# database name, or a comma-separated list thereof. The "all"
# keyword does not match "replication". Access to replication
# must be enabled in a separate record (see example below).
#
# USER can be "all", a user name, a group name prefixed with "+", or a
# comma-separated list thereof. In both the DATABASE and USER fields
# you can also write a file name prefixed with "@" to include names
# from a separate file.
#
# ADDRESS specifies the set of hosts the record matches. It can be a
# host name, or it is made up of an IP address and a CIDR mask that is
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
# specifies the number of significant bits in the mask. A host name
# that starts with a dot (.) matches a suffix of the actual host name.
# Alternatively, you can write an IP address and netmask in separate
# columns to specify the set of hosts. Instead of a CIDR-address, you
# can write "samehost" to match any of the server's own IP addresses,
# or "samenet" to match any address in any subnet that the server is
# directly connected to.
#
# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
# Note that "password" sends passwords in clear text; "md5" or
# "scram-sha-256" are preferred since they send encrypted passwords.
#
# OPTIONS are a set of options for the authentication in the format
# NAME=VALUE. The available options depend on the different
# authentication methods -- refer to the "Client Authentication"
# section in the documentation for a list of which options are
# available for which authentication methods.
#
# Database and user names containing spaces, commas, quotes and other
# special characters must be quoted. Quoting one of the keywords
# "all", "sameuser", "samerole" or "replication" makes the name lose
# its special character, and just match a database or username with
# that name.
#
# This file is read on server startup and when the server receives a
# SIGHUP signal. If you edit the file on a running system, you have to
# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
# or execute "SELECT pg_reload_conf()".
#
# Put your actual configuration here
# ----------------------------------
#
# If you want to allow non-local connections, you need to add more
# "host" records. In that case you will also need to make PostgreSQL
# listen on a non-local interface via the listen_addresses
# configuration parameter, or via the -i or -h command line switches.
# CAUTION: Configuring the system for local "trust" authentication
# allows any local user to connect as any PostgreSQL user, including
# the database superuser. If you do not trust all your local users,
# use another authentication method.
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all trust
# IPv4 connections — CAUTION: 0.0.0.0/0 matches ALL IPv4 addresses (not just
# local ones) and "trust" requires no password; restrict this in production:
host all all 0.0.0.0/0 trust
# IPv6 local connections:
host all all ::1/128 trust
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all trust
host replication all 127.0.0.1/32 trust
host replication all ::1/128 trust
host all all all scram-sha-256

View File

@@ -0,0 +1,815 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = 'localhost'
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:@@{sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_crl_dir = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is usually the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enables compression of full-page writes;
# off, pglz, lz4, zstd, or on
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Prefetching during recovery -
#recovery_prefetch = try # prefetch pages referenced in the WAL?
#wal_decode_buffer_size = 512kB # lookahead window used for prefetching
# (change requires restart)
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_library = '' # library to use to archive a logfile segment
# (empty string indicates archive_command should
# be used)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#recursive_worktable_factor = 10.0 # range 0.001-1000000
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, jsonlog, syslog, and
# eventlog, depending on platform.
# csvlog and jsonlog require
# logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr, jsonlog,
# and csvlog into log files. Required
# to be on for csvlogs and jsonlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
#log_startup_progress_interval = 10s # Time between progress updates for
# long-running startup operations.
# 0 disables the feature, > 0 indicates
# the interval in milliseconds.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = 10min # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = on
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = '%m [%p] ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Cumulative Query and Index Statistics -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#stats_fetch_consistency = cache
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"@@user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.utf8' # locale for system error message
# strings
lc_monetary = 'en_US.utf8' # locale for monetary formatting
lc_numeric = 'en_US.utf8' # locale for number formatting
lc_time = 'en_US.utf8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '@@libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
#include_dir = '...' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -0,0 +1,108 @@
module redis
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.sysadmin.startupmanager
import time
import os
// InstallArgs are the parameters to install/start a local redis server.
@[params]
pub struct InstallArgs {
pub mut:
	port    int    = 6379 // TCP port redis will listen on
	datadir string = '${os.home_dir()}/hero/var/redis' // where redis keeps its data files
	ipaddr  string = 'localhost' // can be more than 1, space separated
	reset   bool // force reinstall even when redis already answers
	start   bool
	restart bool // do not put on true
}
// ```
// struct InstallArgs {
// port int = 6379
// datadir string = '${os.home_dir()}/hero/var/redis'
// ipaddr string = "localhost" //can be more than 1, space separated
// reset bool
// start bool
// restart bool = true
// }
// ```
// install installs the redis package when needed, prepares the datadir
// and starts (or restarts) the server.
// Returns early when redis already answers on args.port (unless args.reset).
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	if !args.reset {
		// pass args through so we ping the configured port,
		// not the struct's default one
		if check(args) {
			return
		}
	}
	console.print_header('install redis.')
	if !(osal.cmd_exists_profile('redis-server')) {
		if osal.is_linux() {
			// debian/ubuntu name the package redis-server
			osal.package_install('redis-server')!
		} else {
			// osx (brew) and others use plain 'redis'
			osal.package_install('redis')!
		}
	}
	osal.execute_silent('mkdir -p ${args.datadir}')!
	if args.restart {
		stop()!
	}
	start(args)!
}
// configfilepath returns where the redis config file lives:
// the system location on linux, inside the datadir elsewhere.
fn configfilepath(args InstallArgs) string {
	return if osal.is_linux() {
		'/etc/redis/redis.conf'
	} else {
		'${args.datadir}/redis.conf'
	}
}
// configure renders the redis config template and writes it to the
// platform-specific config file location.
fn configure(args InstallArgs) ! {
	c := $tmpl('template/redis_config.conf')
	// forward args: the original called configfilepath() without args,
	// so a custom datadir produced a config at the wrong path
	pathlib.template_write(c, configfilepath(args), true)!
}
// check returns true when a redis server answers PING on args.port.
pub fn check(args InstallArgs) bool {
	ping := os.execute('redis-cli -c -p ${args.port} ping > /dev/null 2>&1')
	return ping.exit_code == 0
}
// start configures and launches redis, then waits (up to ~10s) until it
// answers PING. On osx redis daemonizes itself; on linux a startup
// manager supervises it.
pub fn start(args InstallArgs) ! {
	if check(args) {
		return
	}
	configure(args)!
	// remove all redis in memory
	osal.process_kill_recursive(name: 'redis-server')!
	if osal.platform() == .osx {
		osal.exec(cmd: 'redis-server ${configfilepath(args)} --daemonize yes')!
		// osal.exec(cmd:"brew services start redis") or {
		// 	osal.exec(cmd:"redis-server ${configfilepath(args)} --daemonize yes")!
		// }
	} else {
		mut sm := startupmanager.get()!
		sm.new(name: 'redis', cmd: 'redis-server ${configfilepath(args)}', start: true)!
	}
	for _ in 0 .. 100 {
		if check(args) {
			console.print_debug('redis started.')
			return
		}
		// time.sleep takes a Duration in nanoseconds; the original slept
		// 100ns which effectively busy-looped — we want 100ms per retry
		time.sleep(100 * time.millisecond)
	}
	return error("Redis did not install properly could not do:'redis-cli -c ping'")
}
// stop asks the locally running redis server to shut down via redis-cli.
pub fn stop() ! {
	osal.execute_silent('redis-cli shutdown')!
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,4 @@
# rfs
For more info see https://github.com/threefoldtech/rfs

View File

@@ -0,0 +1,31 @@
module rfs
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.installers.zinit
import freeflowuniverse.herolib.ui.console
// install builds rfs from source (musl static, linux) and installs the
// binary to /usr/local/bin; skipped when already done and present.
pub fn install() ! {
	rust.install()!
	zinit.install()!
	console.print_header('install rfs')
	if !osal.done_exists('install_rfs') || !osal.cmd_exists('rfs') {
		osal.package_install('musl-dev,musl-tools')!
		mut gs := gittools.new()!
		mut repo := gs.get_repo(url: 'https://github.com/threefoldtech/rfs', reset: true)!
		path := repo.get_path()!
		// copy from ${path}: the original cp used a hardcoded
		// ~/code/github/... checkout location which can differ from
		// where gittools actually cloned the repo
		cmd := '
		cd ${path}
		rustup target add x86_64-unknown-linux-musl
		cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
		cp ${path}/target/x86_64-unknown-linux-musl/release/rfs /usr/local/bin/
		'
		console.print_header('build rfs')
		osal.execute_stdout(cmd)!
		osal.done_set('install_rfs', 'OK')!
		// return here so the 'already done' message is only printed
		// when we actually skipped the build
		return
	}
	console.print_header('rfs already done')
}

View File

@@ -0,0 +1,26 @@
## Vlang ZDB Client
to use:
- build zero db from source: https://github.com/threefoldtech/0-db
- run zero db from root of 0db folder:
`./zdbd/zdb --help || true` for more info
## to use test
```bash
#must set unix domain with --socket argument when running zdb
#run zdb as following:
mkdir -p ~/.zdb
zdb --socket ~/.zdb/socket --admin 1234
redis-cli -s ~/.zdb/socket
#or easier:
redis-cli -s ~/.zdb/socket --raw nsinfo default
```
then in the redis-cli can do e.g.
```
nsinfo default
```

View File

@@ -0,0 +1,29 @@
module zdb
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.ui.console
// build compiles zdb (0-db) from source and installs the binaries to
// /usr/local/bin; skipped when already marked done and the binary exists.
pub fn build() ! {
	base.install()!
	console.print_header('package_install install zdb')
	// only build when not marked done AND the binary is absent
	if !osal.done_exists('install_zdb') && !osal.cmd_exists('zdb') {
		mut gs := gittools.new()!
		mut repo := gs.get_repo(
			url:   'git@github.com:threefoldtech/0-db.git'
			reset: false
			pull:  true
		)!
		path := repo.get_path()!
		cmd := '
		set -ex
		cd ${path}
		make
		sudo rsync -rav ${path}/bin/zdb* /usr/local/bin/
		'
		osal.execute_silent(cmd) or { return error('Cannot install zdb.\n${err}') }
		osal.done_set('install_zdb', 'OK')!
	}
}

View File

@@ -0,0 +1,156 @@
module zdb
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.crypt.secrets
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.clients.zdb
import os
import time
// InstallArgs are the parameters to install and run a local zdb server.
@[params]
pub struct InstallArgs {
pub mut:
	reset        bool // force re-download of the binary
	secret       string // admin secret (default for the secret box)
	start        bool = true
	restart      bool
	sequential   bool // if sequential then we autoincrement the keys
	datadir      string = '${os.home_dir()}/var/zdb/data'
	indexdir     string = '${os.home_dir()}/var/zdb/index'
	rotateperiod int = 1200 // 20 min
}
// install downloads the static zdb binary (linux amd64 only) when it is
// missing or older than the wanted version, then starts or restarts it
// according to args.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	version := '2.0.7'
	res := os.execute('${osal.profile_path_source_and()} zdb --version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.trim_space().len > 0)
		if r.len != 3 {
			return error("couldn't parse zdb version.\n${res.output}")
		}
		// second line looks like: '... server, v2.0.7 (...)'
		myversion := r[1].all_after_first('server, v').all_before_last('(').trim_space()
		// reinstall when the installed version is older than wanted
		if texttools.version(version) > texttools.version(myversion) {
			args.reset = true
		}
	} else {
		args.reset = true
	}
	if args.reset {
		console.print_header('install zdb')
		mut url := ''
		if osal.is_linux_intel() {
			url = 'https://github.com/threefoldtech/0-db/releases/download/v${version}/zdb-${version}-linux-amd64-static'
		} else {
			// fixed typo in message: was 'unsported'
			return error('unsupported platform, only linux 64 for zdb for now')
		}
		mut dest := osal.download(
			url:        url
			minsize_kb: 1000
		)!
		osal.cmd_add(
			cmdname: 'zdb'
			source:  dest.path
		)!
	}
	if args.restart {
		restart(args)!
		return
	}
	if args.start {
		start(args)!
	}
}
// restart stops then starts zdb with the given args.
pub fn restart(args_ InstallArgs) ! {
	stop(args_)!
	start(args_)!
}

// stop stops the zdb process through the startup manager.
pub fn stop(args_ InstallArgs) ! {
	console.print_header('zdb stop')
	mut sm := startupmanager.get()!
	sm.stop('zdb')!
}
// start launches zdb through the startup manager and waits (up to ~0.5s)
// until it answers; errors when it never becomes reachable.
pub fn start(args_ InstallArgs) ! {
	mut args := args_
	console.print_header('zdb start')
	mut box := secrets.get()!
	// the admin secret comes from the secret box; args.secret is only the default
	secret := box.secret(key: 'ZDB.SECRET', default: args.secret)!
	mut sm := startupmanager.get()!
	// zdb listens on both a unix socket (under ~/hero/var) and TCP port 3355
	mut cmd := 'zdb --socket ${os.home_dir()}/hero/var/zdb.sock --port 3355 --admin ${secret} --data ${args.datadir} --index ${args.indexdir} --dualnet --protect --rotate ${args.rotateperiod}'
	if args.sequential {
		// sequential mode: keys are auto-incremented by zdb
		cmd += ' --mode seq'
	}
	pathlib.get_dir(path: '${os.home_dir()}/hero/var', create: true)!
	sm.start(
		name: 'zdb'
		cmd:  cmd
	)!
	console.print_debug(cmd)
	for _ in 0 .. 50 {
		if check()! {
			return
		}
		time.sleep(10 * time.millisecond)
	}
	return error('zdb not installed properly, check failed.')
}
// check pings zdb over its unix socket, then verifies the server info
// through the client; returns true when zdb answers, errors when the
// socket ping itself fails.
pub fn check() !bool {
	// use the current user's home dir — start() creates the socket under
	// ${os.home_dir()}/hero/var, the original hardcoded /root here
	cmd := 'redis-cli -s ${os.home_dir()}/hero/var/zdb.sock PING'
	result := os.execute(cmd)
	if result.exit_code > 0 {
		return error('${cmd} failed with exit code: ${result.exit_code} and error: ${result.output}')
	}
	if result.output.trim_space() == 'PONG' {
		console.print_debug('zdb is answering.')
		// return true
	}
	// TODO: need to work on socket version
	// mut db := zdb.get('${os.home_dir()}/hero/var/zdb.sock', secret()!, 'test')!
	mut db := client()!
	// check info returns info about zdb
	info := db.info()!
	// console.print_debug(info)
	assert info.contains('server_name: 0-db')
	console.print_debug('zdb is answering.')
	return true
}
// secret returns the zdb admin secret as stored in the secret box.
pub fn secret() !string {
	mut secretbox := secrets.get()!
	return secretbox.get('ZDB.SECRET')!
}
// client returns a zdb client connected to the local zdb on TCP port 3355.
pub fn client() !zdb.ZDB {
	return zdb.get('localhost:3355', secret()!, 'test')!
}

View File

@@ -0,0 +1,22 @@
module zdb
import freeflowuniverse.herolib.clients.zdb
// test_get installs and starts a local zdb, then exercises the client
// API: server info, namespace listing and namespace info.
fn test_get() {
	// must set unix domain with --socket argument when running zdb
	// run zdb as following:
	// mkdir -p ~/.zdb/ && zdb --socket ~/.zdb/socket --admin 1234
	install(secret: 'hamada', start: true) or { panic(err) }
	// NOTE(review): this hardcodes /root — only works when run as root; verify
	mut client := zdb.get('/root/hero/var/zdb.sock', 'hamada', 'test') or { panic(err) }
	// check info returns info about zdb
	info := client.info()!
	assert info.contains('server_name: 0-db')
	nslist := client.nslist()!
	assert nslist == ['default', 'test']
	nsinfo := client.nsinfo('default')!
	assert nsinfo['name'] == 'default'
}

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'zinit'
classname:'Zinit'
singleton:1
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:1
hasconfig:0
build:1

View File

@@ -0,0 +1,34 @@
# zinit
To get started
```vlang
import freeflowuniverse.herolib.installers.zinit
mut installer:= zinit.get()!
installer.start()!
```
## example heroscript
```hero
!!zinit.install
homedir: '/home/user/zinit'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```

View File

@@ -0,0 +1,132 @@
module zinit
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.installers.ulist
import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal.systemd
import os
// checks if a certain version or above is installed
fn installed() !bool {
	cmd := 'zinit --version'
	// console.print_debug(cmd)
	res := os.execute(cmd)
	if res.exit_code == 0 {
		// expect exactly one line of the form 'zinit v0.2.14'
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('zinit v'))
		if r.len != 1 {
			return error("couldn't parse zinit version.\n${res.output}")
		}
		// only the exact wanted version counts as installed
		if texttools.version(version) == texttools.version(r[0].all_after_first('zinit v')) {
			return true
		}
	}
	console.print_debug(res.str())
	return false
}
// install downloads the zinit release binary (linux only), installs it
// as a command and ensures /etc/zinit exists.
fn install() ! {
	console.print_header('install zinit')
	if !osal.is_linux() {
		return error('only support linux for now')
	}
	// derive the download url from the module's version const so an
	// upgrade only needs the const changed (was hardcoded to v0.2.14)
	release_url := 'https://github.com/threefoldtech/zinit/releases/download/v${version}/zinit'
	mut dest := osal.download(
		url:        release_url
		minsize_kb: 2000
		reset:      true
	)!
	osal.cmd_add(
		cmdname: 'zinit'
		source:  dest.path
	)!
	osal.dir_ensure('/etc/zinit')!
	console.print_header('install zinit done')
}
// build compiles zinit from source (linux only) using rust and installs
// the resulting musl-static binary as the 'zinit' command.
fn build() ! {
	if !osal.is_linux() {
		return error('only support linux for now')
	}
	rust.install()!
	// install zinit if it was already done will return true
	console.print_header('build zinit')
	mut gs := gittools.get(coderoot: '/tmp/builder')!
	mut repo := gs.get_repo(
		url:   'https://github.com/threefoldtech/zinit'
		reset: true
		pull:  true
	)!
	gitpath := repo.get_path()!
	// source ${osal.profile_path()}
	cmd := '
	source ~/.cargo/env
	cd ${gitpath}
	make release
	'
	osal.execute_stdout(cmd)!
	osal.cmd_add(
		cmdname: 'zinit'
		source:  '/tmp/builder/github/threefoldtech/zinit/target/x86_64-unknown-linux-musl/release/zinit'
	)!
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
	// no build artifacts tracked yet, return an empty list
	return ulist.UList{}
}

// uploads to S3 server if configured
fn upload() ! {
	// intentionally empty: nothing to upload yet
}
// startupcmd returns the process definition(s) used to run zinit itself,
// supervised by systemd.
fn startupcmd() ![]zinit.ZProcessNewArgs {
	zinit_process := zinit.ZProcessNewArgs{
		name:        'zinit'
		cmd:         '/usr/local/bin/zinit init'
		startuptype: .systemd
		start:       true
		restart:     true
	}
	return [zinit_process]
}
// running reports whether zinit responds to 'zinit list'.
fn running() !bool {
	return osal.execute_ok('zinit list')
}
// lifecycle hooks executed around start/stop; intentionally empty for zinit
fn start_pre() ! {
}

fn start_post() ! {
}

fn stop_pre() ! {
}

fn stop_post() ! {
}
// destroy removes the zinit systemd service, kills any running zinit
// processes and deletes the installed binary.
fn destroy() ! {
	mut systemdfactory := systemd.new()!
	systemdfactory.destroy('zinit')!
	osal.process_kill_recursive(name: 'zinit')!
	osal.cmd_delete('zinit')!
}

View File

@@ -0,0 +1,153 @@
module zinit
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
zinit_global map[string]&Zinit
zinit_default string
)
/////////FACTORY
// ArgsGet selects which named zinit instance to retrieve.
@[params]
pub struct ArgsGet {
pub mut:
	name string
}

// get returns the (singleton) zinit installer object.
pub fn get(args_ ArgsGet) !&Zinit {
	return &Zinit{}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a StartupManagerType onto the matching
// startupmanager implementation; any other type falls back to auto-detect.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	// unknown
	// screen
	// zinit
	// tmux
	// systemd
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
// start installs zinit when needed, configures it, launches all its
// processes, then waits (up to ~5s) until it reports running.
pub fn (mut self Zinit) start() ! {
	switch(self.name)
	if self.running()! {
		return
	}
	console.print_header('zinit start')
	if !installed()! {
		install()!
	}
	configure()!
	start_pre()!
	for zprocess in startupcmd()! {
		mut sm := startupmanager_get(zprocess.startuptype)!
		console.print_debug('starting zinit with ${zprocess.startuptype}...')
		sm.new(zprocess)!
		sm.start(zprocess.name)!
	}
	start_post()!
	for _ in 0 .. 50 {
		if self.running()! {
			return
		}
		time.sleep(100 * time.millisecond)
	}
	return error('zinit did not install properly.')
}
// install_start installs (honoring args) and then starts zinit.
pub fn (mut self Zinit) install_start(args InstallArgs) ! {
	switch(self.name)
	self.install(args)!
	self.start()!
}

// stop stops all zinit processes via their startup managers.
pub fn (mut self Zinit) stop() ! {
	switch(self.name)
	stop_pre()!
	for zprocess in startupcmd()! {
		mut sm := startupmanager_get(zprocess.startuptype)!
		sm.stop(zprocess.name)!
	}
	stop_post()!
}

// restart stops then starts zinit.
pub fn (mut self Zinit) restart() ! {
	switch(self.name)
	self.stop()!
	self.start()!
}
// running returns true only when every defined zinit process is running
// and zinit itself answers.
pub fn (mut self Zinit) running() !bool {
	switch(self.name)
	// walk over the generic processes, if not running return
	for zprocess in startupcmd()! {
		mut sm := startupmanager_get(zprocess.startuptype)!
		r := sm.running(zprocess.name)!
		if r == false {
			return false
		}
	}
	return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // when true: reinstall even if already present
}

// install installs zinit when missing or when reset is requested.
pub fn (mut self Zinit) install(args InstallArgs) ! {
	switch(self.name)
	if args.reset || (!installed()!) {
		install()!
	}
}

// build compiles zinit from source.
pub fn (mut self Zinit) build() ! {
	switch(self.name)
	build()!
}

// destroy stops zinit (best effort) and removes it from the system.
pub fn (mut self Zinit) destroy() ! {
	switch(self.name)
	self.stop() or {}
	destroy()!
}

// switch instance to be used for zinit
pub fn switch(name string) {
	zinit_default = name
}

View File

@@ -0,0 +1,26 @@
module zinit
import freeflowuniverse.herolib.data.paramsparser
import os
pub const version = '0.2.14' // zinit release this module installs
const singleton = true
const default = true

// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
pub struct Zinit {
pub mut:
	name string = 'default' // instance name, see switch()
}

// obj_init prepares a freshly created Zinit object.
fn obj_init(obj_ Zinit) !Zinit {
	// never call get here, only thing we can do here is work on object itself
	mut obj := obj_
	return obj
}

// called before start if done
fn configure() ! {
	// mut installer := get()!
}

View File

@@ -0,0 +1,128 @@
module herolib
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.installers.lang.vlang
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.develop.gittools
import os
// install herolib will return true if it was already installed
@[params]
pub struct InstallArgs {
pub mut:
	git_pull  bool // pull the herolib repos before linking
	git_reset bool // hard-reset the herolib repos before linking
	reset     bool // means reinstall
}
// install installs vlang + v-analyzer, clones herolib and webcomponents,
// and links both into ~/.vmodules so the v compiler resolves the
// freeflowuniverse modules.
pub fn install(args InstallArgs) ! {
	// install herolib if it was already done will return true
	console.print_header('install herolib (reset: ${args.reset})')
	// osal.package_refresh()!
	if args.reset {
		osal.done_reset()!
	}
	base.install(develop: true)!
	vlang.install(reset: args.reset)!
	vlang.v_analyzer_install(reset: args.reset)!
	mut gs := gittools.get()!
	gs.config.light = true // means we clone depth 1
	mut repo := gs.get_repo(
		pull:  args.git_pull
		reset: args.git_reset
		url:   'https://github.com/freeflowuniverse/herolib/tree/development/herolib'
	)!
	mut repo2 := gs.get_repo(
		pull:  args.git_pull
		reset: args.git_reset
		url:   'https://github.com/freeflowuniverse/webcomponents/tree/main/webcomponents'
	)!
	mut path1 := repo.get_path()!
	mut path2 := repo2.get_path()!
	mut path1p := pathlib.get_dir(path: path1, create: false)!
	mut path2p := pathlib.get_dir(path: path2, create: false)!
	// symlink checkouts into the v module search path
	path1p.link('${os.home_dir()}/.vmodules/freeflowuniverse/herolib', true)!
	path2p.link('${os.home_dir()}/.vmodules/freeflowuniverse/webcomponents', true)!
	// hero_compile()!
	osal.done_set('install_herolib', 'OK')!
	return
}
// check if herolib is installed; install it when the done-flag is absent.
pub fn check() ! {
	if !osal.done_exists('install_herolib') {
		install()!
	}
}
// remove hero, herolib and related temp/build artifacts
pub fn uninstall() ! {
	console.print_debug('uninstall hero & herolib')
	// removes the hero home, code checkouts, installed binary and temp build scripts
	cmd := '
	rm -rf ${os.home_dir()}/hero
	rm -rf ${os.home_dir()}/_code
	rm -f /usr/local/bin/hero
	rm -f /tmp/hero
	rm -f /tmp/install*
	rm -f /tmp/build_hero*
	rm -rf /tmp/execscripts
	'
	osal.execute_stdout(cmd) or { return error('Cannot uninstall herolib/hero.\n${err}') }
}
// hero_install downloads and runs the upstream hero install script;
// skipped when already marked done (unless args.reset).
pub fn hero_install(args InstallArgs) ! {
	if args.reset == false && osal.done_exists('install_hero') {
		console.print_debug('hero already installed')
		return
	}
	console.print_header('install hero')
	base.install()!
	cmd := '
	cd /tmp
	export TERM=xterm
	curl https://raw.githubusercontent.com/freeflowuniverse/herolib/development/scripts/install_hero.sh > /tmp/hero_install.sh
	bash /tmp/hero_install.sh
	'
	osal.execute_stdout(cmd) or { return error('Cannot install hero.\n${err}') }
	osal.done_set('install_hero', 'OK')!
	return
}
// hero_compile compiles the hero binary from source using the hero.sh
// template; skipped when already marked done (unless args.reset).
pub fn hero_compile(args InstallArgs) ! {
	if args.reset == false && osal.done_exists('compile_hero') {
		console.print_debug('hero already compiled')
		return
	}
	console.print_header('compile hero')
	home_dir := os.home_dir()
	cmd_hero := texttools.template_replace($tmpl('templates/hero.sh'))
	// run the compile script once with error context; the original
	// executed it twice (osal.exec followed by osal.execute_stdout),
	// doubling the compile time
	osal.execute_stdout(cmd_hero) or { return error('Cannot compile hero.\n${err}') }
	osal.done_set('compile_hero', 'OK')!
	return
}
// pub fn update() ! {
// console.print_header('package_install update herolib')
// if !(i.state == .reset) && osal.done_exists('install_herotools') {
// console.print_debug(' package_install was already done')
// return
// }
// osal.execute_silent('cd /tmp && export TERM=xterm && source /root/env.sh && ct_upgrade') or {
// return error('Cannot update hero tools.\n${err}')
// }
// osal.done_set('update_herotools', 'OK')!
// }

View File

@@ -0,0 +1,21 @@
# compile the hero cli and install the resulting binary
# NOTE(review): '${home_dir}' is filled in by the v $tmpl engine; '??' is
# presumably turned into '$' by texttools.template_replace — confirm.
export PATH=${home_dir}/hero/bin:??PATH
export TERM=xterm
cd ${home_dir}/code/github/freeflowuniverse/herolib/cli/hero
PRF="${home_dir}/.profile"
[ -f "??PRF" ] && source "??PRF"
if [[ "??OSTYPE" == "linux-gnu"* ]]; then
    #v -enable-globals -w -cflags -static -cc gcc hero.v
    v -enable-globals -w -n hero.v
    export HEROPATH='/usr/local/bin/hero'
elif [[ "??OSTYPE" == "darwin"* ]]; then
    v -enable-globals -w -n hero.v
    export HEROPATH=${home_dir}/hero/bin/hero
fi
chmod +x hero
cp hero ??HEROPATH
rm hero

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'golang'
classname:'GolangInstaller'
singleton:1
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:0
hasconfig:0
build:1

View File

@@ -0,0 +1,106 @@
module golang
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.installers.ulist
import os
// checks if a certain version or above is installed
fn installed() !bool {
	res := os.execute('${osal.profile_path_source_and()} go version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines()
			.filter(it.contains('go version'))
		if r.len != 1 {
			return error("couldn't parse go version, expected 'go version' on 1 row.\n${res.output}")
		}
		mut vstring := r[0] or { panic('bug') }
		// output looks like: 'go version go1.23.1 linux/amd64'
		vstring = vstring.all_after_first('version').all_after_first('go').all_before(' ').trim_space()
		v := texttools.version(vstring)
		// only the exact wanted version counts as installed
		if v == texttools.version(version) {
			return true
		}
	}
	return false
}
// install downloads the official go tarball for the current platform,
// unpacks it into /usr/local/go and adds go/bin to the profile path.
fn install() ! {
	console.print_header('install golang')
	base.install()!
	destroy()!
	mut url := ''
	if osal.is_linux_arm() {
		// fixed typo: was 'limux-arm64' which produced a dead download url
		url = 'https://go.dev/dl/go${version}.linux-arm64.tar.gz'
	} else if osal.is_linux_intel() {
		url = 'https://go.dev/dl/go${version}.linux-amd64.tar.gz'
	} else if osal.is_osx_arm() {
		url = 'https://go.dev/dl/go${version}.darwin-arm64.tar.gz'
	} else if osal.is_osx_intel() {
		url = 'https://go.dev/dl/go${version}.darwin-amd64.tar.gz'
	} else {
		return error('unsupported platform')
	}
	expand_dir := '/tmp/golang'
	// the downloader is cool, it will check the download succeeds and also check the minimum size
	_ = osal.download(
		url:        url
		minsize_kb: 40000
		expand_dir: expand_dir
	)!
	go_dest := '${osal.usr_local_path()!}/go'
	os.mv('${expand_dir}/go', go_dest)!
	os.rmdir_all(expand_dir)!
	osal.profile_path_add_remove(paths2add: '${go_dest}/bin')!
	paths := osal.profile_paths_preferred()!
	for p in paths {
		// NOTE(review): 'source' is a shell builtin; os.execute may not run
		// this through a shell — confirm it actually refreshes the env
		res := os.execute('source ${p}')
		if res.exit_code != 0 {
			return error(res.output)
		}
	}
}
// build: nothing to build, golang is installed from upstream binaries
fn build() ! {
}

// get the Upload List of the files
fn ulist_get() !ulist.UList {
	// mut installer := get()!
	// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
	return ulist.UList{}
}
// destroy removes golang completely: the distro package, profile path
// entries and all known go install locations.
fn destroy() ! {
	console.print_debug('golang destroy')
	osal.package_remove('golang')!
	// will remove all paths where go/bin is found
	osal.profile_path_add_remove(paths2delete: 'go/bin')!
	osal.rm('
	#next will find go as a binary and remove, is like cmd delete
	go
	/usr/local/go
	/root/hero/bin/go
	~/.go
	~/go
	')!
}
// install_reset (re)installs golang through the installer object.
pub fn install_reset() ! {
	mut installer := get()!
	// will automatically do a destroy if the version changes, to make sure there are no left overs
	installer.install()!
}

View File

@@ -0,0 +1,79 @@
module golang
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
golang_global map[string]&GolangInstaller
golang_default string
)
/////////FACTORY
// ArgsGet selects which named golang installer instance to retrieve.
@[params]
pub struct ArgsGet {
pub mut:
	name string
}

// get returns the (singleton) golang installer object.
pub fn get(args_ ArgsGet) !&GolangInstaller {
	return &GolangInstaller{}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a StartupManagerType onto the matching
// startupmanager implementation; any other type falls back to auto-detect.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	// unknown
	// screen
	// zinit
	// tmux
	// systemd
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // when true: reinstall even if already present
}

// install installs golang when missing or when reset is requested.
pub fn (mut self GolangInstaller) install(args InstallArgs) ! {
	switch(self.name)
	if args.reset || (!installed()!) {
		install()!
	}
}

// build delegates to the module-level build (a no-op for golang).
pub fn (mut self GolangInstaller) build() ! {
	switch(self.name)
	build()!
}

// destroy removes golang from the system.
pub fn (mut self GolangInstaller) destroy() ! {
	switch(self.name)
	destroy()!
}

// switch instance to be used for golang
pub fn switch(name string) {
	golang_default = name
}

View File

@@ -0,0 +1,25 @@
module golang
import freeflowuniverse.herolib.data.paramsparser
import os
pub const version = '1.23.1' // go toolchain version this module installs
const singleton = true
const default = true

// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
pub struct GolangInstaller {
pub mut:
	name string = 'default' // instance name, see switch()
}

// obj_init prepares a freshly created GolangInstaller object.
fn obj_init(obj_ GolangInstaller) !GolangInstaller {
	// never call get here, only thing we can do here is work on object itself
	mut obj := obj_
	return obj
}

// called before start if done
fn configure() ! {
	// mut installer := get()!
}

View File

@@ -0,0 +1,18 @@
# golang
To get started
```vlang
import freeflowuniverse.herolib.installers.lang.golang
mut installer:= golang.get()!
//will automatically do a destroy if the version changes, to make sure there are no left overs
installer.install()!
```

View File

@@ -0,0 +1,22 @@
module nodejs
import freeflowuniverse.herolib.osal
// import freeflowuniverse.herolib.ui.console
// import freeflowuniverse.herolib.core.texttools
// import freeflowuniverse.herolib.installers.base
// InstallArgs are the parameters for installing nodejs.
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // currently unused by install()
}
// install installs nodejs (via npm package) through the system package
// manager; currently only arch linux is supported.
pub fn install(args_ InstallArgs) ! {
	_ := args_
	platform := osal.platform()
	if platform != .arch {
		return error('only support arch for now')
	}
	osal.package_install('npm')!
}

View File

@@ -0,0 +1,33 @@
module python
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.ui.console
// install installs python3 plus pipx/pip/sqlite when no python binary is
// found and the install was not already marked done.
pub fn install() ! {
	if !osal.done_exists('install_python')
		&& (!osal.cmd_exists('python') && !osal.cmd_exists('python3')) {
		base.install()!
		console.print_header('package install python')
		osal.package_install('python3')!
		pl := osal.platform()
		// arch and ubuntu need exactly the same extra packages;
		// merged the two previously duplicated branches
		if pl == .arch || pl == .ubuntu {
			osal.package_install('python-pipx,python-pip,sqlite')!
		} else {
			return error('only support arch & ubuntu.')
		}
	}
	// console.print_header('python already done')
}
// check should verify python works; the monitoring check is not implemented yet
pub fn check() ! {
	// todo: do a monitoring check to see if it works
	// cmd := '
	// '
	// r := osal.execute_silent(cmd)!
	// console.print_debug(r)
}

View File

@@ -0,0 +1,58 @@
module rust
import os
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.installers.base
// InstallArgs are the parameters for installing the rust toolchain.
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // force reinstall even when a recent rustc is found
}
// install installs the rust toolchain when missing or older than the
// wanted version. Arch uses distro packages; other platforms use rustup.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	version := '1.78.0'
	res := os.execute('rustc -V')
	if res.exit_code == 0 {
		r := res.output.split_into_lines()
			.filter(it.contains('rustc'))
		if r.len != 1 {
			return error("couldn't parse rust version, expected 'rustc 1.' on 1 row.\n${res.output}")
		}
		mut vstring := r[0] or { panic('bug') }
		// output looks like: 'rustc 1.78.0 (commit date)'
		vstring = vstring.all_after_first(' ').all_before('(').trim_space()
		// reinstall when the installed rustc is older than wanted
		if texttools.version(version) > texttools.version(vstring) {
			args.reset = true
		}
	} else {
		args.reset = true
	}
	if args.reset == false {
		return
	}
	base.install()!
	pl := osal.platform()
	console.print_header('start install rust')
	if pl == .ubuntu {
		// build deps needed before rustup on ubuntu
		osal.package_install('build-essential,openssl,pkg-config,libssl-dev,gcc')!
	}
	if pl == .arch {
		// distro packages already put cargo on the path, no profile change needed
		osal.package_install('rust, cargo, pkg-config, openssl')!
		return
	} else {
		osal.execute_stdout("curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y")!
	}
	osal.profile_path_add_remove(paths2add: '${os.home_dir()}/.cargo/bin')!
	return
}

View File

@@ -0,0 +1 @@
https://github.com/v-analyzer/v-analyzer

View File

@@ -0,0 +1,97 @@
module vlang
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import os
// import freeflowuniverse.herolib.sysadmin.downloader
// v_analyzer_install installs (or upgrades) v-analyzer by building it
// from source; a reinstall happens when the installed version is older
// than the wanted one or when args.reset is set.
pub fn v_analyzer_install(args_ InstallArgs) ! {
	mut args := args_
	console.print_header('install v-analyzer (reset: ${args.reset})')
	version := '0.0.4'
	_ := osal.platform()
	res := os.execute('${osal.profile_path_source_and()} v-analyzer version')
	if res.exit_code == 0 {
		// expect exactly one line starting with 'v-analyzer'
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('v-analyzer'))
		if r.len != 1 {
			return error("couldn't parse v-analyzer version.\n${res.output}")
		}
		mut myversion := r[0].all_after_first('version').trim_space()
		if texttools.version(version) > texttools.version(myversion) {
			args.reset = true
		}
	} else {
		args.reset = true
	}
	if args.reset == false {
		console.print_debug('v-analyzer already installed')
		return
	}
	// make sure vlang itself is installed before building v-analyzer
	install()!
	// note: args.reset is always true at this point (we returned above otherwise)
	if args.reset {
		console.print_header('install v-analyzer')
		cmd := '
		export TERM=xterm
		mkdir -p ${os.home_dir()}/_code
		cd ${os.home_dir()}/_code
		rm -rf ${os.home_dir()}/_code/v-analyzer
		git clone --filter=blob:none --recursive --shallow-submodules https://github.com/vlang/v-analyzer
		cd v-analyzer
		v build.vsh debug
		'
		osal.execute_stdout(cmd) or { return error('Cannot install hero.\n${err}') }
		osal.cmd_add(
			cmdname: 'v-analyzer'
			source:  '${os.home_dir()}/_code/v-analyzer/bin/v-analyzer'
		)!
	}
	// if pl == .ubuntu {
	// }else{
	// 	mut url := ''
	// 	if osal.is_linux_intel() {
	// 		url = 'https://github.com/vlang/v-analyzer/releases/download/nightly/v-analyzer-linux-x86_64.zip'
	// 	} else if osal.is_osx_arm() {
	// 		url = 'https://github.com/vlang/v-analyzer/releases/download/nightly/v-analyzer-darwin-arm64.zip'
	// 	} else if osal.is_osx_intel() {
	// 		url = 'https://github.com/vlang/v-analyzer/releases/download/nightly/v-analyzer-darwin-x86_64.zip'
	// 	} else {
	// 		return error('unsported platform for installing v-analyzer')
	// 	}
	// 	mut dest := osal.download(
	// 		url: url
	// 		minsize_kb: 1000
	// 		expand_dir: '/tmp/v-analyzer'
	// 	)!
	// 	mut binpath := dest.file_get('v-analyzer')!
	// 	osal.cmd_add(
	// 		cmdname: 'v-analyzer'
	// 		source: binpath.path
	// 	)!
	// }
	// if args.reset == false && osal.done_exists('install_v_analyzer') {
	// 	console.print_debug(' v analyzer already installed')
	// 	return
	// }
	// console.print_header('install v analyzer')
	// cmd := '
	// cd /tmp
	// export TERM=xterm
	// source ~/.profile
	// rm -f install.sh
	// curl -fksSL https://raw.githubusercontent.com/v-lang/v-analyzer/master/install.vsh > install.vsh
	// v run install.vsh --no-interaction
	// '
	// osal.execute_stdout(cmd) or { return error('Cannot install hero.\n${err}') }
	osal.done_set('install_v_analyzer', 'OK')!
	return
}

View File

@@ -0,0 +1,72 @@
module vlang
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.ui.console
import os
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.develop.gittools
// import freeflowuniverse.herolib.sysadmin.downloader
// install installs or upgrades the V compiler by building it from source.
//
// Flow:
// - probe `v --version` (profile sourced) and compare against the pinned version
// - when missing or outdated: prepare the dev environment (base.develop),
//   clone/pull the V repo into ~/_code, run `make`, and link/copy the binary
//
// Returns an error when the version output cannot be parsed or the build fails.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	// pinned minimum V version; older installs trigger a rebuild
	version := '0.4.8'
	console.print_header('install vlang (reset: ${args.reset})')
	res := os.execute('${osal.profile_path_source_and()} v --version')
	if res.exit_code == 0 {
		// expect exactly one line starting with 'V' (e.g. 'V 0.4.8 abc123')
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('V'))
		if r.len != 1 {
			return error("couldn't parse v version.\n${res.output}")
		}
		myversion := r[0].all_after_first('V ').all_before(' ').trim_space()
		console.print_debug("V version: '${myversion}'")
		if texttools.version(version) > texttools.version(myversion) {
			// println(texttools.version(version))
			// println(texttools.version(myversion))
			// if true{panic("s")}
			args.reset = true
		}
	} else {
		// v not found on PATH -> install from scratch
		args.reset = true
	}
	// install vlang if it was already done will return true
	if args.reset == false {
		return
	}
	// ensure build prerequisites (compilers, git, ...) are present
	base.develop()!
	mut gs := gittools.get(coderoot: '${os.home_dir()}/_code')!
	// NOTE(review): URL contains '/tree/master'; assumes gittools normalizes
	// this to the plain repo clone URL — confirm
	mut repo := gs.get_repo(
		pull: true
		reset: true
		url: 'https://github.com/vlang/v/tree/master'
	)!
	mut path1 := repo.get_path()!
	// on linux `v symlink` registers the binary; elsewhere copy into ~/bin
	mut extra := ''
	if osal.is_linux() {
		extra = './v symlink'
	} else {
		extra = 'cp v ${os.home_dir()}/bin/'
	}
	cmd := '
	cd ${path1}
	make
	${extra}
	'
	console.print_header('compile')
	osal.exec(cmd: cmd, stdout: true)!
	console.print_header('compile done')
	osal.done_set('install_vlang', 'OK')!
	return
}
// InstallArgs holds the options for the vlang installer functions.
@[params]
pub struct InstallArgs {
pub mut:
	// reset forces a reinstall/rebuild even when V is already present
	reset bool
}

View File

@@ -0,0 +1,233 @@
module mycelium
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.osal.screen
import freeflowuniverse.herolib.ui
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
import json
// InstallArgs holds the options for the mycelium installer functions.
@[params]
pub struct InstallArgs {
pub mut:
	// reset forces a reinstall even when mycelium is already present
	reset   bool
	// restart stops a running daemon before starting it again (default true)
	restart bool = true
}
// install mycelium will return true if it was already installed
// install installs or upgrades the mycelium overlay-network daemon.
//
// Flow:
// - probe `mycelium -V` (profile sourced) and compare against the pinned version
// - on reset: download the platform-specific release tarball, extract it and
//   register the binary on the PATH
// - optionally stop a running daemon (args.restart), then start it
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	console.print_header('install mycelium.')
	// pinned minimum version; older installs are replaced
	version := '0.5.6'
	res := os.execute('${osal.profile_path_source_and()} mycelium -V')
	if res.exit_code == 0 {
		// expect exactly one output line starting with 'mycelium'
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('mycelium'))
		if r.len != 1 {
			return error("couldn't parse mycelium version.\n${res.output}")
		}
		// NOTE(review): all_after_first('mycelium') leaves leading whitespace;
		// assumes texttools.version tolerates that — confirm
		if texttools.version(version) > texttools.version(r[0].all_after_first('mycelium')) {
			args.reset = true
		}
	} else {
		// binary missing or not runnable -> install
		args.reset = true
	}
	if args.reset {
		console.print_header('install mycelium')
		// choose the release artifact for the current platform
		mut url := ''
		if osal.is_linux_arm() {
			url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-aarch64-unknown-linux-musl.tar.gz'
		} else if osal.is_linux_intel() {
			url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-x86_64-unknown-linux-musl.tar.gz'
		} else if osal.is_osx_arm() {
			url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-aarch64-apple-darwin.tar.gz'
		} else if osal.is_osx_intel() {
			url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-x86_64-apple-darwin.tar.gz'
		} else {
			return error('unsported platform')
		}
		// console.print_debug(url)
		mut dest := osal.download(
			url: url
			minsize_kb: 1000
			reset: true
			expand_dir: '/tmp/myceliumnet'
		)!
		mut myceliumfile := dest.file_get('mycelium')! // file in the dest
		// console.print_debug(myceliumfile.str())
		// register the binary so it is available on the PATH
		osal.cmd_add(
			source: myceliumfile.path
		)!
	}
	if args.restart {
		stop()!
	}
	start()!
	console.print_debug('install mycelium ok')
}
// restart stops the mycelium daemon (if running) and starts it again.
pub fn restart() ! {
	stop()!
	start()!
}
// stop stops the mycelium daemon.
// On macOS the daemon runs in a screen session, which is killed; elsewhere
// the startup manager is asked to stop it.
pub fn stop() ! {
	name := 'mycelium'
	console.print_debug('stop ${name}')
	if osal.is_osx() {
		mut scr := screen.new(reset: false)!
		scr.kill(name)!
	} else {
		mut sm := startupmanager.get()!
		sm.stop(name)!
	}
}
// start launches the mycelium daemon when it is not already reachable.
// On macOS it runs inside a screen session under sudo (the user is asked to
// enter a password interactively); on other platforms it is registered with
// the startup manager. Returns an error when the daemon does not come up.
pub fn start(args InstallArgs) ! {
	if check() {
		console.print_header('mycelium was already running')
		return
	}
	myinitname := osal.initname()!
	name := 'mycelium'
	console.print_debug('start ${name} (startupmanger:${myinitname})')
	mut cmd := ''
	if osal.is_osx() {
		// creating the tun device requires root on macOS
		cmd = 'sudo -s '
	}
	cmd += 'mycelium --key-file ${osal.hero_path()!}/cfg/priv_key.bin --peers tcp://188.40.132.242:9651 quic://185.69.166.7:9651 tcp://65.21.231.58:9651 --tun-name utun9'
	console.print_debug(cmd)
	if osal.is_osx() {
		// do not change, because we need this on osx at least
		mut scr := screen.new(reset: false)!
		if scr.exists(name) {
			console.print_header('mycelium was already running')
			return
		}
		mut s := scr.add(name: name, start: true, reset: args.reset)!
		s.cmd_send(cmd)!
		mut myui := ui.new()!
		console.clear()
		console.print_stderr("
	On the next screen you will be able to fill in your password.
	Once done and the server is started: do 'control a + d'
	")
		_ = myui.ask_yesno(question: 'Please confirm you understand?')!
		s.attach()! // to allow filling in passwd
	} else {
		mut sm := startupmanager.get()!
		sm.new(
			name:  name
			cmd:   cmd
			start: true
		)!
	}
	console.print_debug('startup manager started')
	// give the daemon a moment to come up before probing reachability
	time.sleep(100 * time.millisecond)
	if !check() {
		return error('cound not start mycelium')
	}
	console.print_header('mycelium is running')
}
// check reports whether the mycelium overlay network is reachable from this
// node, by pinging one well-known overlay address. Returns false on any
// failure (ping error or unreachable).
pub fn check() bool {
	// if osal.is_osx() {
	// mut scr := screen.new(reset: false) or {return False}
	// name := 'mycelium'
	// if !scr.exists(name) {
	// return false
	// }
	// }
	// if !(osal.process_exists_byname('mycelium') or {return False}) {
	// return false
	// }
	// TODO: might be dangerous if that one goes out
	ping_result := osal.ping(address: '40a:152c:b85b:9646:5b71:d03a:eb27:2462', retry: 2) or {
		return false
	}
	if ping_result == .ok {
		console.print_debug('could reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
		return true
	}
	console.print_stderr('could not reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
	return false
}
// install mycelium will return true if it was already installed
// build compiles mycelium from source using the rust toolchain.
// NOTE(review): this is an unfinished placeholder — the panic('implement')
// below makes the build commands after it unreachable, so taking the build
// path currently aborts the program. Confirm before relying on this function.
pub fn build() ! {
	rust.install()!
	console.print_header('build mycelium')
	if !osal.done_exists('build_mycelium') && !osal.cmd_exists('mycelium') {
		panic('implement')
		// USE OUR PRIMITIVES (TODO, needs to change, was from zola)
		cmd := '
		source ~/.cargo/env
		cd /tmp
		rm -rf mycelium
		git clone https://github.com/getmycelium/mycelium.git
		cd mycelium
		cargo install --path . --locked
		mycelium --version
		cargo build --release --locked --no-default-features --features=native-tls
		cp target/release/mycelium ~/.cargo/bin/mycelium
		'
		osal.execute_stdout(cmd)!
		osal.done_set('build_mycelium', 'OK')!
		console.print_header('mycelium installed')
	} else {
		console.print_header('mycelium already installed')
	}
}
// MyceliumInspectResult is the JSON payload produced by `mycelium inspect --json`.
struct MyceliumInspectResult {
	// node public key (JSON field 'publicKey')
	public_key string @[json: publicKey]
	// overlay (IPv6) address of the node
	address    string
}
// inspect runs `mycelium inspect --json` against the node key file and returns
// the parsed public key and overlay address.
//
// Consistency fix: use the same key-file location as start()
// (`${osal.hero_path()!}/cfg/priv_key.bin`) instead of a hard-coded /root path,
// so inspect and start always operate on the same key, including for non-root
// users.
//
// Returns an error when the command fails or its output is not valid JSON.
pub fn inspect() !MyceliumInspectResult {
	command := 'mycelium inspect --key-file ${osal.hero_path()!}/cfg/priv_key.bin --json'
	result := os.execute(command)
	if result.exit_code != 0 {
		return error('Command failed: ${result.output}')
	}
	inspect_result := json.decode(MyceliumInspectResult, result.output) or {
		return error('Failed to parse JSON: ${err}')
	}
	return inspect_result
}
// if returns empty then probably mycelium is not installed
pub fn ipaddr() string {
r := inspect() or { MyceliumInspectResult{} }
return r.address
}

View File

@@ -0,0 +1,7 @@
see https://github.com/threefoldtech/mycelium/tree/master/docs
```bash
mycelium --peers tcp://188.40.132.242:9651 quic://185.69.166.7:9651 tcp://65.21.231.58:9651 --tun-name utun9
```

View File

@@ -0,0 +1,149 @@
module yggdrasil
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.lang.golang
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.screen
import freeflowuniverse.herolib.ui
import freeflowuniverse.herolib.develop.gittools
import os
import time
// InstallArgs holds the options for the yggdrasil installer functions.
@[params]
pub struct InstallArgs {
pub mut:
	// reset forces a rebuild/reinstall even when yggdrasil is already present
	reset bool
}
// install yggdrasil will return true if it was already installed
pub fn install(args_ InstallArgs) ! {
peers := '
Peers:
[
tcp://45.138.172.192:5001
tcp://94.130.203.208:5999
tcp://185.69.166.140:9943
tcp://185.69.166.141:9943
tcp://185.69.167.141:9943
tcp://185.69.167.142:9943
tcp://185.206.122.31:9943
tcp://185.206.122.32:9943
tcp://185.206.122.131:9943
tcp://185.206.122.132:9943
tls://140.238.168.104:17121
tls://s2.i2pd.xyz:39575
tcp://62.210.85.80:39565
]
'
config_path := '/etc/yggdrasil.conf'
mut args := args_
res := os.execute('${osal.profile_path_source_and()} yggdrasil -version')
if res.exit_code != 0 {
args.reset = true
}
if args.reset {
golang.install()!
console.print_header('install yggdrasil')
mut gs := gittools.get(coderoot: '${os.home_dir()}/_code')!
mut repo := gs.get_repo(
url: 'https://github.com/yggdrasil-network/yggdrasil-go.git'
reset: false
)!
mut path := repo.get_path()!
osal.exec(cmd: 'cd ${path} && PATH=\$PATH:/usr/local/go/bin ./build')!
osal.cmd_add(
source: '${path}/yggdrasil'
)!
osal.cmd_add(
source: '${path}/yggdrasilctl'
)!
osal.cmd_add(
source: '${path}/contrib/docker/entrypoint.sh'
)!
if !os.exists(config_path) {
osal.exec(cmd: 'yggdrasil -genconf > /etc/yggdrasil.conf')!
config := os.read_file(config_path)!
config.replace('Peers: []', peers)
}
}
}
// restart kills the yggdrasil screen session (if any) and starts it again.
pub fn restart() ! {
	name := 'yggdrasil'
	mut scr := screen.new(reset: false)!
	scr.kill(name)!
	start()!
}
// start launches the yggdrasil daemon inside a screen session, feeding it
// /etc/yggdrasil.conf. On macOS it runs under sudo, so the user is asked to
// attach and enter a password interactively. Returns an error when the
// process is not running afterwards.
pub fn start() ! {
	// console.print_debug("start")
	mut scr := screen.new(reset: false)!
	name := 'yggdrasil'
	if scr.exists(name) {
		console.print_header('yggdrasil was already running')
		return
	}
	mut s := scr.add(name: name, start: true)!
	mut cmd2 := ''
	if osal.is_osx() {
		// tun device creation needs root on macOS
		cmd2 = 'sudo -s '
	}
	cmd2 += 'yggdrasil --useconf < "/etc/yggdrasil.conf"'
	s.cmd_send(cmd2)!
	console.print_debug(s)
	console.print_debug('send done')
	if osal.is_osx() {
		mut myui := ui.new()!
		console.clear()
		console.print_stderr("
	On the next screen you will be able to fill in your password.
	Once done and the server is started: do 'control a + control d'
	")
		_ = myui.ask_yesno(question: 'Please confirm you understand?')!
		s.attach()! // to allow filling in passwd
	}
	console.print_header('yggdrasil is running')
	// give the daemon a moment before verifying it is alive
	time.sleep(100 * time.millisecond)
	if !running()! {
		return error('cound not start yggdrasil')
	}
}
// running reports whether the yggdrasil daemon is alive: the screen session
// must exist AND a matching process must be found.
pub fn running() !bool {
	session_name := 'yggdrasil'
	mut session_mgr := screen.new(reset: false)!
	if !session_mgr.exists(session_name) {
		return false
	}
	return osal.process_exists_byname('yggdrasil')!
}

View File

@@ -0,0 +1,65 @@
module actrunner
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import os
// InstallArgs holds the options for the actrunner installer.
@[params]
pub struct InstallArgs {
pub mut:
	// reset forces a re-download even when actrunner is already present
	reset bool
}
// install downloads and registers the Gitea act_runner binary for the current
// platform when it is missing or older than the pinned version.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	// pinned minimum version; older installs are replaced
	target_version := '0.2.10'
	probe := os.execute('${osal.profile_path_source_and()} actrunner -v')
	if probe.exit_code != 0 {
		// not installed or not on PATH -> install
		args.reset = true
	} else {
		version_lines := probe.output.split_into_lines().filter(it.contains('act_runner version'))
		if version_lines.len != 1 {
			return error("couldn't parse actrunner version, expected 'actrunner 0' on 1 row.\n${probe.output}")
		}
		installed := texttools.version(version_lines[0].all_after('act_runner version'))
		if installed < texttools.version(target_version) {
			args.reset = true
		}
	}
	if !args.reset {
		return
	}
	console.print_header('install actrunner')
	// pick the release artifact matching the current platform
	mut url := ''
	if osal.is_linux_arm() {
		url = 'https://dl.gitea.com/act_runner/${target_version}/act_runner-${target_version}-linux-arm64'
	} else if osal.is_linux_intel() {
		url = 'https://dl.gitea.com/act_runner/${target_version}/act_runner-${target_version}-linux-amd64'
	} else if osal.is_osx_arm() {
		url = 'https://dl.gitea.com/act_runner/${target_version}/act_runner-${target_version}-darwin-arm64'
	} else if osal.is_osx_intel() {
		url = 'https://dl.gitea.com/act_runner/${target_version}/act_runner-${target_version}-darwin-amd64'
	} else {
		return error('unsported platform')
	}
	mut downloaded := osal.download(
		url:        url
		minsize_kb: 15000
	)!
	// register the binary on the PATH under the 'actrunner' name
	osal.cmd_add(
		cmdname: 'actrunner'
		source:  downloaded.path
	)!
}

View File

@@ -0,0 +1,23 @@
## OSX
to use with herocontainers
- https://herocontainers-desktop.io/docs/migrating-from-docker/using-the-docker_host-environment-variable
You can export the Docker host path:
```bash
export DOCKER_HOST=unix:///Users/despiegk1/.local/share/containers/herocontainers/machine/qemu/herocontainers.sock
#had to install docker to get some docker tools, but was not running it
brew install --cask docker
cd /tmp
git clone git@git.ourworld.tf:despiegk/test.git
cd test
actrunner exec
```
will run the runner locally

View File

@@ -0,0 +1,89 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 4
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
# env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if its timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when executing `daemon`, the labels in the `.runner` file will be used.
labels: []
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:

View File

@@ -0,0 +1,30 @@
module b2
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.lang.python
// import os
// InstallArgs holds the options for the b2 installer.
@[params]
pub struct InstallArgs {
pub mut:
	// reset forces a reinstall even when b2 was already installed
	reset bool
}
// install installs the Backblaze b2 CLI into the default python environment
// via pip; skipped when already marked done and no reset is requested.
pub fn install(args_ InstallArgs) ! {
	args := args_
	if !args.reset && osal.done_exists('install_b2') {
		return
	}
	console.print_header('install b2')
	mut py_env := python.new(name: 'default')! // a python env with name test
	py_env.update()!
	py_env.pip('b2')!
	// remember completion so later calls can short-circuit
	osal.done_set('install_b2', 'OK')!
}

View File

@@ -0,0 +1,56 @@
module fungistor
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import os
// InstallArgs holds the options for the fungistor (rfs) installer.
@[params]
pub struct InstallArgs {
pub mut:
	// reset forces a re-download even when rfs is already present
	reset bool
}
// install downloads and registers the rfs binary (linux/amd64 only) when it
// is missing or older than the pinned version.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	// pinned minimum version; older installs are replaced
	wanted := '2.0.6'
	probe := os.execute('rfs --version')
	if probe.exit_code != 0 {
		// not installed -> install
		args.reset = true
	} else {
		parts := probe.output.trim_space().split(' ')
		if parts.len != 2 {
			return error("couldn't parse rfs version.\n${probe.output}")
		}
		if texttools.version(wanted) > texttools.version(parts[1]) {
			args.reset = true
		}
	}
	if !args.reset {
		return
	}
	console.print_header('install rfs')
	// only a linux/amd64 release binary is published
	if !osal.is_linux_intel() {
		return error('unsported platform')
	}
	release_url := 'https://github.com/threefoldtech/rfs/releases/download/v${wanted}/rfs'
	mut downloaded := osal.download(
		url:        release_url
		minsize_kb: 9000
		dest:       '/tmp/rfs'
		reset:      true
	)!
	osal.cmd_add(
		cmdname: 'rfs'
		source:  '${downloaded.path}'
	)!
}

View File

@@ -0,0 +1,121 @@
module garage_s3
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.crypt.secrets
// import freeflowuniverse.herolib.core.texttools
// import freeflowuniverse.herolib.clients.httpconnection
import os
import time
// S3Config holds the configuration for a garage S3 server instance.
// It is rendered into /etc/garage.toml by configure(); empty secrets are
// generated on demand.
@[params]
pub struct S3Config {
pub mut:
	// number of replicas for each object
	replication_mode    string = '3'
	// where garage stores its metadata
	metadata_dir        string = '/var/garage/meta'
	// where garage stores the object data
	data_dir            string = '/var/garage/data'
	sled_cache_capacity u32    = 128 // in MB
	// zstd compression level used for stored blocks
	compression_level   u8     = 1
	rpc_secret          string //{GARAGE_RPCSECRET}
	rpc_bind_addr       string = '[::]:3901'
	rpc_bind_outgoing   bool
	rpc_public_addr     string = '127.0.0.1:3901'
	// other cluster nodes to connect to at startup
	bootstrap_peers     []string
	api_bind_addr       string = '[::]:3900'
	s3_region           string = 'garage'
	root_domain         string = '.s3.garage'
	web_bind_addr       string = '[::]:3902'
	web_root_domain     string = '.web.garage'
	admin_api_bind_addr string = '[::]:3903'
	admin_metrics_token string //{GARAGE_METRICSTOKEN}
	admin_token         string //{GARAGE_ADMINTOKEN}
	admin_trace_sink    string = 'http://localhost:4317'
	// reset forces a reinstall of the garage binary
	reset               bool
	// config_reset regenerates /etc/garage.toml even when it exists
	config_reset        bool
	// start launches the server after install
	start               bool = true
	// restart stops a running server before starting it again
	restart             bool = true
}
// configure generates any missing secrets (rpc secret, metrics token, admin
// token), renders the garage.toml template and writes it to /etc/garage.toml.
// Newly generated secrets are printed as `export ...` lines so the caller can
// persist them. Returns the (possibly updated) config.
pub fn configure(args_ S3Config) !S3Config {
	mut args := args_
	if args.rpc_secret == '' {
		args.rpc_secret = secrets.openssl_hex_secret()!
		println('export GARAGE_RPCSECRET=${args.rpc_secret}')
	}
	if args.admin_metrics_token == '' {
		args.admin_metrics_token = secrets.openssl_base64_secret()!
		println('export GARAGE_METRICSTOKEN=${args.admin_metrics_token}')
	}
	if args.admin_token == '' {
		args.admin_token = secrets.openssl_base64_secret()!
		println('export GARAGE_ADMINTOKEN=${args.admin_token}')
	}
	// render the compile-time template (reads `args` fields)
	mut config_file := $tmpl('templates/garage.toml')
	myconfigpath_ := '/etc/garage.toml'
	mut myconfigpath := pathlib.get_file(path: myconfigpath_, create: true)!
	myconfigpath.write(config_file)!
	console.print_header('garage start')
	return args
}
// start ensures a config file exists (regenerating it when args.config_reset
// is set), optionally stops a running instance, launches `garage server`
// through the startup manager, and polls up to ~5s (50 x 100ms) until
// `garage status` succeeds. Returns an error when the server does not come up.
pub fn start(args_ S3Config) !S3Config {
	mut args := args_
	myconfigpath_ := '/etc/garage.toml'
	if args.config_reset || !os.exists(myconfigpath_) {
		args = configure(args)!
	}
	if args.restart {
		stop()!
	}
	mut sm := startupmanager.get()!
	sm.new(
		name:  'garage'
		cmd:   'garage -c ${myconfigpath_} server'
		start: true
	)!
	console.print_debug('garage -c ${myconfigpath_} server')
	// readiness poll: 50 attempts, 100ms apart
	for _ in 0 .. 50 {
		if check(args)! {
			return args
		}
		time.sleep(100 * time.millisecond)
	}
	return error('garage server did not start properly.')
}
// stop stops the garage server through the startup manager.
pub fn stop() ! {
	console.print_header('garage stop')
	mut sm := startupmanager.get()!
	sm.stop('garage')!
}
// check reports whether a local garage server answers `garage status`.
// `args` is accepted for signature compatibility but is not consulted.
// Fix: removed the dead `_ := 'garage status'` assignment and collapsed the
// exit-code branch into a single boolean expression.
fn check(args S3Config) !bool {
	res := os.execute('garage status')
	return res.exit_code == 0
}

View File

@@ -0,0 +1,55 @@
module garage_s3
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import os
// install downloads and registers the garage binary (linux only) when it is
// missing or older than the pinned version, then optionally starts the server.
pub fn install(args_ S3Config) ! {
	mut args := args_
	// pinned minimum version; older installs are replaced
	wanted := '1.0.0'
	probe := os.execute('garage --version')
	if probe.exit_code != 0 {
		// not installed -> install
		args.reset = true
	} else {
		parts := probe.output.split(' ')
		if parts.len < 2 {
			return error("couldn't parse garage version, expected 'garage v*'.\n${probe.output}")
		}
		if texttools.version(parts[1]) < texttools.version(wanted) {
			args.reset = true
		}
	}
	if args.reset {
		console.print_header('install garage')
		// pick the musl release binary for the current linux architecture
		mut url := ''
		if osal.is_linux_arm() {
			url = 'https://garagehq.deuxfleurs.fr/_releases/v${wanted}/aarch64-unknown-linux-musl/garage'
		} else if osal.is_linux_intel() {
			url = 'https://garagehq.deuxfleurs.fr/_releases/v${wanted}/x86_64-unknown-linux-musl/garage'
		} else {
			return error('unsported platform')
		}
		mut downloaded := osal.download(
			url:        url
			minsize_kb: 15 * 1024
			dest:       '/tmp/garage'
			reset:      true
		)!
		console.print_debug('download garage done')
		osal.cmd_add(
			cmdname: 'garage'
			source:  '${downloaded.path}'
		)!
	}
	if args.start {
		start(args)!
	}
}

View File

@@ -0,0 +1,9 @@
## garage S3 server
see [https://garagehq.deuxfleurs.fr](https://garagehq.deuxfleurs.fr)
https://garagehq.deuxfleurs.fr/documentation/quick-start/

View File

@@ -0,0 +1,59 @@
replication_mode = "${args.replication_mode}"
metadata_dir = "${args.metadata_dir}"
data_dir = "${args.data_dir}"
metadata_fsync = false
data_fsync = false
db_engine = "sqlite"
block_size = "1M"
sled_cache_capacity = "${args.sled_cache_capacity}MiB"
sled_flush_every_ms = 2000
lmdb_map_size = "1T"
compression_level = ${args.compression_level}
rpc_secret = "${args.rpc_secret}"
rpc_bind_addr = "${args.rpc_bind_addr}"
rpc_bind_outgoing = ${args.rpc_bind_outgoing}
rpc_public_addr = "${args.rpc_public_addr}"
bootstrap_peers = ${args.bootstrap_peers}
# [consul_discovery]
# api = "catalog"
# consul_http_addr = "http://127.0.0.1:8500"
# service_name = "garage-daemon"
# ca_cert = "/etc/consul/consul-ca.crt"
# client_cert = "/etc/consul/consul-client.crt"
# client_key = "/etc/consul/consul-key.crt"
# # for `agent` API mode, unset client_cert and client_key, and optionally enable `token`
# # token = "abcdef-01234-56789"
# tls_skip_verify = false
# tags = [ "dns-enabled" ]
# meta = { dns-acl = "allow trusted" }
# [kubernetes_discovery]
# namespace = "garage"
# service_name = "garage-daemon"
# skip_crd = false
[s3_api]
api_bind_addr = "${args.api_bind_addr}"
s3_region = "${args.s3_region}"
root_domain = "${args.root_domain}"
[s3_web]
bind_addr = "${args.web_bind_addr}"
root_domain = "${args.web_root_domain}"
[admin]
api_bind_addr = "${args.admin_api_bind_addr}"
metrics_token = "${args.admin_metrics_token}"
admin_token = "${args.admin_token}"
trace_sink = "${args.admin_trace_sink}"

View File

@@ -0,0 +1,200 @@
module grafana
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
// InstallArgs holds the options for the grafana installer.
// Most fields are commented out until start/stop/configure are implemented.
@[params]
pub struct InstallArgs {
pub mut:
	// homedir string
	// configpath string
	// username string = "admin"
	// password string @[secret]
	// secret string @[secret]
	// title string = 'My Hero DAG'
	// reset forces a reinstall even when grafana is already present
	reset     bool
	start     bool = true
	stop      bool
	restart   bool
	uninstall bool
	// host string = 'localhost' // server host (default is localhost)
	// port int = 8888
}
// install installs or upgrades grafana (linux/amd64 only) by downloading the
// release tarball into /root/hero/grafana and adding its bin dir to the PATH.
//
// Bugfix: the original version-parse branch set args.reset when the filtered
// output did not contain exactly one 'grafana' line, but then STILL indexed
// r[0] — an index-out-of-range panic whenever the filter matched nothing.
// The parse now only runs when a line was actually found, and the
// 'version' split is guarded as well.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	// pinned minimum version; older installs are replaced
	version := '11.1.4'
	res := os.execute('${osal.profile_path_source_and()} grafana --version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('grafana'))
		if r.len != 1 {
			// unparseable output -> reinstall (previously this fell through and
			// panicked on r[0])
			args.reset = true
		} else {
			parts := r[0].split('version')
			if parts.len < 2 {
				args.reset = true
			} else if texttools.version(version) > texttools.version(parts[1]) {
				args.reset = true
			}
		}
	} else {
		// grafana not found on PATH -> install
		args.reset = true
	}
	if args.reset {
		console.print_header('install grafana')
		mut url := ''
		if osal.is_linux_intel() {
			url = 'https://dl.grafana.com/oss/release/grafana-${version}.linux-amd64.tar.gz'
		} else {
			return error('unsuported platform, only linux amd64 for now')
		}
		_ := osal.download(
			url:        url
			minsize_kb: 15000
			expand_dir: '/tmp/grafana'
		)!
		mut mypath := pathlib.get_dir(path: '/tmp/grafana/grafana-v${version}')!
		mypath.copy(dest: '/root/hero/grafana', delete: false, rsync: true)!
		osal.profile_path_add_remove(paths2add: '/root/hero/grafana/bin')!
	}
	// start/stop/restart handling not implemented yet, kept for reference
	// if args.restart {
	// restart(args)!
	// return
	// }
	// if args.start {
	// start(args)!
	// return
	// }
	// if args.stop {
	// stop()!
	// }
}
// pub fn start(args_ InstallArgs) ! {
// mut args := args_
// if args.title == '' {
// args.title = 'HERO DAG'
// }
// if args.homedir == '' {
// args.homedir = '${os.home_dir()}/hero/var/grafana'
// }
// if args.configpath == '' {
// args.configpath = '${os.home_dir()}/hero/cfg/grafana.yaml'
// }
// if check(args)! {
// return
// }
// console.print_header('grafana start')
// //println(args)
// configure(args)!
// cmd := 'grafana server --host 0.0.0.0 --config ${args.configpath}'
// // TODO: we are not taking host & port into consideration
// // dags string // location of DAG files (default is /Users/<user>/.grafana/dags)
// // host string // server host (default is localhost)
// // port string // server port (default is 8080)
// // result := os.execute_opt('grafana start-all ${flags}')!
// mut sm := startupmanager.get()!
// sm.start(
// name: 'grafana'
// cmd: cmd
// env: {
// 'HOME': '/root'
// }
// )!
// //cmd2 := 'grafana scheduler' // TODO: do we need this
// console.print_debug(cmd)
// // if true{
// // panic("sdsdsds grafana install")
// // }
// // time.sleep(100000000000)
// for _ in 0 .. 50 {
// if check(args)! {
// return
// }
// time.sleep(100 * time.millisecond)
// }
// return error('grafana did not install propertly, could not call api.')
// }
// pub fn configure(args_ InstallArgs) ! {
// mut cfg := args_
// if cfg.password == "" || cfg.secret == ""{
// return error("password and secret needs to be filled in for grafana")
// }
// mut mycode := $tmpl('templates/admin.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
// }
// pub fn check(args InstallArgs) !bool {
// // this checks health of grafana
// // curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
// mut conn := httpconnection.new(name: 'grafana', url: 'http://127.0.0.1:${args.port}/api/v1/')!
// // console.print_debug("curl http://localhost:3333/api/v1/dags --oauth2-bearer ${secret}")
// if args.secret.len > 0 {
// conn.default_header.add(.authorization, 'Bearer ${args.secret}')
// }
// conn.default_header.add(.content_type, 'application/json')
// console.print_debug('check connection to grafana')
// r0 := conn.get(prefix: 'dags') or { return false }
// // if it gets here then is empty but server answers, the below might not work if no dags loaded
// // println(r0)
// // if true{panic("ssss")}
// // r := conn.get_json_dict(prefix: 'dags', debug: false) or {return false}
// // println(r)
// // dags := r['DAGs'] or { return false }
// // // console.print_debug(dags)
// console.print_debug('Dagu is answering.')
// return true
// }
// pub fn stop() ! {
// console.print_header('Dagu Stop')
// mut sm := startupmanager.get()!
// sm.stop('grafana')!
// }
// pub fn restart(args InstallArgs) ! {
// stop()!
// start(args)!
// }
// pub fn installargs(args InstallArgs) InstallArgs {
// return args
// }

View File

@@ -0,0 +1,185 @@
module prometheus
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
// install_alertmanager installs or upgrades prometheus alertmanager
// (linux/amd64 only): downloads the release tarball, copies the
// `alertmanager` and `amtool` binaries into /root/hero/prometheus and adds
// that directory to the PATH.
//
// Bugfix: the original version-parse branch set args.reset when the filtered
// output did not contain exactly one 'alertmanager' line, but then STILL
// indexed r[0] — an index-out-of-range panic whenever the filter matched
// nothing. The parse now only runs when a line was actually found, and the
// 'version' split is guarded as well.
pub fn install_alertmanager(args_ InstallArgs) ! {
	mut args := args_
	// pinned minimum version; older installs are replaced
	version := '0.27.0'
	res := os.execute('${osal.profile_path_source_and()} alertmanager --version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('alertmanager'))
		if r.len != 1 {
			// unparseable output -> reinstall (previously this fell through and
			// panicked on r[0])
			args.reset = true
		} else {
			parts := r[0].split('version')
			if parts.len < 2 {
				args.reset = true
			} else {
				version2 := parts[1].split('(')[0]
				if texttools.version(version) > texttools.version(version2) {
					args.reset = true
				}
			}
		}
	} else {
		// alertmanager not found on PATH -> install
		args.reset = true
	}
	if args.reset {
		console.print_header('install alertmanager')
		mut url := ''
		if osal.is_linux_intel() {
			url = 'https://github.com/prometheus/alertmanager/releases/download/v${version}/alertmanager-${version}.linux-amd64.tar.gz'
		} else {
			return error('unsported platform, only linux amd64 for now')
		}
		_ := osal.download(
			url:        url
			minsize_kb: 28000
			expand_dir: '/tmp/prometheus'
		)!
		mut dest2 := pathlib.get_dir(path: '/tmp/prometheus/alertmanager-${version}.linux-amd64')!
		for abin in ['alertmanager', 'amtool'] {
			mut binpath := dest2.file_get(abin)!
			binpath.copy(dest: '/root/hero/prometheus/${abin}', delete: true, rsync: false)!
		}
		osal.profile_path_add_remove(paths2add: '/root/hero/prometheus')!
	}
	// start/stop/restart handling not implemented yet, kept for reference
	// if args.restart {
	// restart(args)!
	// return
	// }
	// if args.start {
	// start(args)!
	// return
	// }
	// if args.stop {
	// stop()!
	// }
}
// pub fn start(args_ InstallArgs) ! {
// mut args := args_
// if args.title == '' {
// args.title = 'HERO DAG'
// }
// if args.homedir == '' {
// args.homedir = '${os.home_dir()}/hero/var/prometheus'
// }
// if args.configpath == '' {
// args.configpath = '${os.home_dir()}/hero/cfg/prometheus.yaml'
// }
// if check(args)! {
// return
// }
// console.print_header('prometheus start')
// //println(args)
// configure(args)!
// cmd := 'prometheus server --host 0.0.0.0 --config ${args.configpath}'
// // TODO: we are not taking host & port into consideration
// // dags string // location of DAG files (default is /Users/<user>/.prometheus/dags)
// // host string // server host (default is localhost)
// // port string // server port (default is 8080)
// // result := os.execute_opt('prometheus start-all ${flags}')!
// mut sm := startupmanager.get()!
// sm.start(
// name: 'prometheus'
// cmd: cmd
// env: {
// 'HOME': '/root'
// }
// )!
// //cmd2 := 'prometheus scheduler' // TODO: do we need this
// console.print_debug(cmd)
// // if true{
// // panic("sdsdsds prometheus install")
// // }
// // time.sleep(100000000000)
// for _ in 0 .. 50 {
// if check(args)! {
// return
// }
// time.sleep(100 * time.millisecond)
// }
// return error('prometheus did not install propertly, could not call api.')
// }
// pub fn configure(args_ InstallArgs) ! {
// mut cfg := args_
// if cfg.password == "" || cfg.secret == ""{
// return error("password and secret needs to be filled in for prometheus")
// }
// mut mycode := $tmpl('templates/admin.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
// }
// pub fn check(args InstallArgs) !bool {
// // this checks health of prometheus
// // curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
// mut conn := httpconnection.new(name: 'prometheus', url: 'http://127.0.0.1:${args.port}/api/v1/')!
// // console.print_debug("curl http://localhost:3333/api/v1/dags --oauth2-bearer ${secret}")
// if args.secret.len > 0 {
// conn.default_header.add(.authorization, 'Bearer ${args.secret}')
// }
// conn.default_header.add(.content_type, 'application/json')
// console.print_debug('check connection to prometheus')
// r0 := conn.get(prefix: 'dags') or { return false }
// // if it gets here then is empty but server answers, the below might not work if no dags loaded
// // println(r0)
// // if true{panic("ssss")}
// // r := conn.get_json_dict(prefix: 'dags', debug: false) or {return false}
// // println(r)
// // dags := r['DAGs'] or { return false }
// // // console.print_debug(dags)
// console.print_debug('Dagu is answering.')
// return true
// }
// pub fn stop() ! {
// console.print_header('Dagu Stop')
// mut sm := startupmanager.get()!
// sm.stop('prometheus')!
// }
// pub fn restart(args InstallArgs) ! {
// stop()!
// start(args)!
// }
// pub fn installargs(args InstallArgs) InstallArgs {
// return args
// }

View File

@@ -0,0 +1,186 @@
module prometheus
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
// install_blackbox_exporter installs (or upgrades to) blackbox_exporter v0.25.0
// into /root/hero/prometheus and adds that directory to the shell profile PATH.
// Only linux amd64 is supported; errors on download/copy failure.
pub fn install_blackbox_exporter(args_ InstallArgs) ! {
	mut args := args_
	version := '0.25.0'
	res := os.execute('${osal.profile_path_source_and()} blackbox_exporter --version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('blackbox_exporter'))
		if r.len != 1 {
			// unexpected --version output: force reinstall
			// (previously fell through to r[0] and panicked when r was empty)
			args.reset = true
		} else {
			// line looks like: 'blackbox_exporter, version 0.25.0 (branch: ...)'
			version2 := r[0].split('version')[1].split('(')[0]
			if texttools.version(version) > texttools.version(version2) {
				// installed version is older than required: force reinstall
				args.reset = true
			}
		}
	} else {
		// binary missing or not runnable
		args.reset = true
	}
	if args.reset {
		console.print_header('install blackbox_exporter')
		mut url := ''
		if osal.is_linux_intel() {
			url = 'https://github.com/prometheus/blackbox_exporter/releases/download/v${version}/blackbox_exporter-${version}.linux-amd64.tar.gz'
		} else {
			return error('unsupported platform, only linux amd64 for now')
		}
		_ := osal.download(
			url:        url
			minsize_kb: 9000
			expand_dir: '/tmp/prometheus'
		)!
		mut dest2 := pathlib.get_dir(
			path: '/tmp/prometheus/blackbox_exporter-${version}.linux-amd64'
		)!
		for abin in ['blackbox_exporter'] {
			mut binpath := dest2.file_get(abin)!
			binpath.copy(dest: '/root/hero/prometheus/${abin}', delete: true, rsync: false)!
		}
		osal.profile_path_add_remove(paths2add: '/root/hero/prometheus')!
	}
}
// pub fn start(args_ InstallArgs) ! {
// mut args := args_
// if args.title == '' {
// args.title = 'HERO DAG'
// }
// if args.homedir == '' {
// args.homedir = '${os.home_dir()}/hero/var/prometheus'
// }
// if args.configpath == '' {
// args.configpath = '${os.home_dir()}/hero/cfg/prometheus.yaml'
// }
// if check(args)! {
// return
// }
// console.print_header('prometheus start')
// //println(args)
// configure(args)!
// cmd := 'prometheus server --host 0.0.0.0 --config ${args.configpath}'
// // TODO: we are not taking host & port into consideration
// // dags string // location of DAG files (default is /Users/<user>/.prometheus/dags)
// // host string // server host (default is localhost)
// // port string // server port (default is 8080)
// // result := os.execute_opt('prometheus start-all ${flags}')!
// mut sm := startupmanager.get()!
// sm.start(
// name: 'prometheus'
// cmd: cmd
// env: {
// 'HOME': '/root'
// }
// )!
// //cmd2 := 'prometheus scheduler' // TODO: do we need this
// console.print_debug(cmd)
// // if true{
// // panic("sdsdsds prometheus install")
// // }
// // time.sleep(100000000000)
// for _ in 0 .. 50 {
// if check(args)! {
// return
// }
// time.sleep(100 * time.millisecond)
// }
// return error('prometheus did not install propertly, could not call api.')
// }
// pub fn configure(args_ InstallArgs) ! {
// mut cfg := args_
// if cfg.password == "" || cfg.secret == ""{
// return error("password and secret needs to be filled in for prometheus")
// }
// mut mycode := $tmpl('templates/admin.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
// }
// pub fn check(args InstallArgs) !bool {
// // this checks health of prometheus
// // curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
// mut conn := httpconnection.new(name: 'prometheus', url: 'http://127.0.0.1:${args.port}/api/v1/')!
// // console.print_debug("curl http://localhost:3333/api/v1/dags --oauth2-bearer ${secret}")
// if args.secret.len > 0 {
// conn.default_header.add(.authorization, 'Bearer ${args.secret}')
// }
// conn.default_header.add(.content_type, 'application/json')
// console.print_debug('check connection to prometheus')
// r0 := conn.get(prefix: 'dags') or { return false }
// // if it gets here then is empty but server answers, the below might not work if no dags loaded
// // println(r0)
// // if true{panic("ssss")}
// // r := conn.get_json_dict(prefix: 'dags', debug: false) or {return false}
// // println(r)
// // dags := r['DAGs'] or { return false }
// // // console.print_debug(dags)
// console.print_debug('Dagu is answering.')
// return true
// }
// pub fn stop() ! {
// console.print_header('Dagu Stop')
// mut sm := startupmanager.get()!
// sm.stop('prometheus')!
// }
// pub fn restart(args InstallArgs) ! {
// stop()!
// start(args)!
// }
// pub fn installargs(args InstallArgs) InstallArgs {
// return args
// }

View File

@@ -0,0 +1,184 @@
module prometheus
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
// install_node_exporter installs (or upgrades to) node_exporter v1.8.2
// into /root/hero/prometheus and adds that directory to the shell profile PATH.
// Only linux amd64 is supported; errors on download/copy failure.
pub fn install_node_exporter(args_ InstallArgs) ! {
	mut args := args_
	version := '1.8.2'
	res := os.execute('${osal.profile_path_source_and()} node_exporter --version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('node_exporter'))
		if r.len != 1 {
			// unexpected --version output: force reinstall
			// (previously fell through to r[0] and panicked when r was empty)
			args.reset = true
		} else {
			// line looks like: 'node_exporter, version 1.8.2 (branch: ...)'
			version2 := r[0].split('version')[1].split('(')[0]
			if texttools.version(version) > texttools.version(version2) {
				// installed version is older than required: force reinstall
				args.reset = true
			}
		}
	} else {
		// binary missing or not runnable
		args.reset = true
	}
	if args.reset {
		console.print_header('install node_exporter')
		mut url := ''
		if osal.is_linux_intel() {
			url = 'https://github.com/prometheus/node_exporter/releases/download/v${version}/node_exporter-${version}.linux-amd64.tar.gz'
		} else {
			return error('unsupported platform, only linux amd64 for now')
		}
		_ := osal.download(
			url:        url
			minsize_kb: 9000
			expand_dir: '/tmp/prometheus'
		)!
		mut dest2 := pathlib.get_dir(path: '/tmp/prometheus/node_exporter-${version}.linux-amd64')!
		for abin in ['node_exporter'] {
			mut binpath := dest2.file_get(abin)!
			binpath.copy(dest: '/root/hero/prometheus/${abin}', delete: true, rsync: false)!
		}
		osal.profile_path_add_remove(paths2add: '/root/hero/prometheus')!
	}
}
// pub fn start(args_ InstallArgs) ! {
// mut args := args_
// if args.title == '' {
// args.title = 'HERO DAG'
// }
// if args.homedir == '' {
// args.homedir = '${os.home_dir()}/hero/var/prometheus'
// }
// if args.configpath == '' {
// args.configpath = '${os.home_dir()}/hero/cfg/prometheus.yaml'
// }
// if check(args)! {
// return
// }
// console.print_header('prometheus start')
// //println(args)
// configure(args)!
// cmd := 'prometheus server --host 0.0.0.0 --config ${args.configpath}'
// // TODO: we are not taking host & port into consideration
// // dags string // location of DAG files (default is /Users/<user>/.prometheus/dags)
// // host string // server host (default is localhost)
// // port string // server port (default is 8080)
// // result := os.execute_opt('prometheus start-all ${flags}')!
// mut sm := startupmanager.get()!
// sm.start(
// name: 'prometheus'
// cmd: cmd
// env: {
// 'HOME': '/root'
// }
// )!
// //cmd2 := 'prometheus scheduler' // TODO: do we need this
// console.print_debug(cmd)
// // if true{
// // panic("sdsdsds prometheus install")
// // }
// // time.sleep(100000000000)
// for _ in 0 .. 50 {
// if check(args)! {
// return
// }
// time.sleep(100 * time.millisecond)
// }
// return error('prometheus did not install propertly, could not call api.')
// }
// pub fn configure(args_ InstallArgs) ! {
// mut cfg := args_
// if cfg.password == "" || cfg.secret == ""{
// return error("password and secret needs to be filled in for prometheus")
// }
// mut mycode := $tmpl('templates/admin.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
// }
// pub fn check(args InstallArgs) !bool {
// // this checks health of prometheus
// // curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
// mut conn := httpconnection.new(name: 'prometheus', url: 'http://127.0.0.1:${args.port}/api/v1/')!
// // console.print_debug("curl http://localhost:3333/api/v1/dags --oauth2-bearer ${secret}")
// if args.secret.len > 0 {
// conn.default_header.add(.authorization, 'Bearer ${args.secret}')
// }
// conn.default_header.add(.content_type, 'application/json')
// console.print_debug('check connection to prometheus')
// r0 := conn.get(prefix: 'dags') or { return false }
// // if it gets here then is empty but server answers, the below might not work if no dags loaded
// // println(r0)
// // if true{panic("ssss")}
// // r := conn.get_json_dict(prefix: 'dags', debug: false) or {return false}
// // println(r)
// // dags := r['DAGs'] or { return false }
// // // console.print_debug(dags)
// console.print_debug('Dagu is answering.')
// return true
// }
// pub fn stop() ! {
// console.print_header('Dagu Stop')
// mut sm := startupmanager.get()!
// sm.stop('prometheus')!
// }
// pub fn restart(args InstallArgs) ! {
// stop()!
// start(args)!
// }
// pub fn installargs(args InstallArgs) InstallArgs {
// return args
// }

View File

@@ -0,0 +1,175 @@
module prometheus
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
// install_prom2json installs prom2json v1.4.0 into /root/hero/prometheus
// and adds that directory to the shell profile PATH.
// Only linux amd64 is supported; errors on download/copy failure.
pub fn install_prom2json(args_ InstallArgs) ! {
	mut args := args_
	version := '1.4.0'
	// probe the shell for an existing prom2json binary; a non-zero exit
	// code means it is absent and a (re)install is needed
	probe := os.execute('${osal.profile_path_source_and()} prom2json --help')
	if probe.exit_code != 0 {
		args.reset = true
	}
	if !args.reset {
		// already present and no forced reinstall requested
		return
	}
	console.print_header('install prom2json')
	if !osal.is_linux_intel() {
		return error('unsuported platform, only linux amd64 for now')
	}
	url := 'https://github.com/prometheus/prom2json/releases/download/v${version}/prom2json-${version}.linux-amd64.tar.gz'
	_ := osal.download(
		url:        url
		minsize_kb: 3000
		expand_dir: '/tmp/prometheus'
	)!
	// the tarball expands into a versioned subdirectory
	mut extracted := pathlib.get_dir(path: '/tmp/prometheus/prom2json-${version}.linux-amd64')!
	mut binfile := extracted.file_get('prom2json')!
	binfile.copy(dest: '/root/hero/prometheus/prom2json', delete: true, rsync: false)!
	osal.profile_path_add_remove(paths2add: '/root/hero/prometheus')!
}
// pub fn start(args_ InstallArgs) ! {
// mut args := args_
// if args.title == '' {
// args.title = 'HERO DAG'
// }
// if args.homedir == '' {
// args.homedir = '${os.home_dir()}/hero/var/prometheus'
// }
// if args.configpath == '' {
// args.configpath = '${os.home_dir()}/hero/cfg/prometheus.yaml'
// }
// if check(args)! {
// return
// }
// console.print_header('prometheus start')
// //println(args)
// configure(args)!
// cmd := 'prometheus server --host 0.0.0.0 --config ${args.configpath}'
// // TODO: we are not taking host & port into consideration
// // dags string // location of DAG files (default is /Users/<user>/.prometheus/dags)
// // host string // server host (default is localhost)
// // port string // server port (default is 8080)
// // result := os.execute_opt('prometheus start-all ${flags}')!
// mut sm := startupmanager.get()!
// sm.start(
// name: 'prometheus'
// cmd: cmd
// env: {
// 'HOME': '/root'
// }
// )!
// //cmd2 := 'prometheus scheduler' // TODO: do we need this
// console.print_debug(cmd)
// // if true{
// // panic("sdsdsds prometheus install")
// // }
// // time.sleep(100000000000)
// for _ in 0 .. 50 {
// if check(args)! {
// return
// }
// time.sleep(100 * time.millisecond)
// }
// return error('prometheus did not install propertly, could not call api.')
// }
// pub fn configure(args_ InstallArgs) ! {
// mut cfg := args_
// if cfg.password == "" || cfg.secret == ""{
// return error("password and secret needs to be filled in for prometheus")
// }
// mut mycode := $tmpl('templates/admin.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
// }
// pub fn check(args InstallArgs) !bool {
// // this checks health of prometheus
// // curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
// mut conn := httpconnection.new(name: 'prometheus', url: 'http://127.0.0.1:${args.port}/api/v1/')!
// // console.print_debug("curl http://localhost:3333/api/v1/dags --oauth2-bearer ${secret}")
// if args.secret.len > 0 {
// conn.default_header.add(.authorization, 'Bearer ${args.secret}')
// }
// conn.default_header.add(.content_type, 'application/json')
// console.print_debug('check connection to prometheus')
// r0 := conn.get(prefix: 'dags') or { return false }
// // if it gets here then is empty but server answers, the below might not work if no dags loaded
// // println(r0)
// // if true{panic("ssss")}
// // r := conn.get_json_dict(prefix: 'dags', debug: false) or {return false}
// // println(r)
// // dags := r['DAGs'] or { return false }
// // // console.print_debug(dags)
// console.print_debug('Dagu is answering.')
// return true
// }
// pub fn stop() ! {
// console.print_header('Dagu Stop')
// mut sm := startupmanager.get()!
// sm.stop('prometheus')!
// }
// pub fn restart(args InstallArgs) ! {
// stop()!
// start(args)!
// }
// pub fn installargs(args InstallArgs) InstallArgs {
// return args
// }

View File

@@ -0,0 +1,190 @@
module prometheus
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
// install_prometheus installs (or upgrades to) prometheus v2.54.0, including
// promtool and the console assets, into /root/hero/prometheus and adds that
// directory to the shell profile PATH. Only linux amd64 is supported.
pub fn install_prometheus(args_ InstallArgs) ! {
	mut args := args_
	version := '2.54.0'
	res := os.execute('${osal.profile_path_source_and()} prometheus --version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.trim_space().starts_with('prometheus'))
		if r.len != 1 {
			// unexpected --version output: force reinstall
			// (previously fell through to r[0] and panicked when r was empty)
			args.reset = true
		} else {
			// line looks like: 'prometheus, version 2.54.0 (branch: ...)'
			version2 := r[0].split('version')[1].split('(')[0]
			if texttools.version(version) > texttools.version(version2) {
				// installed version is older than required: force reinstall
				args.reset = true
			}
		}
	} else {
		// binary missing or not runnable
		args.reset = true
	}
	if args.reset {
		console.print_header('install prometheus')
		mut url := ''
		if osal.is_linux_intel() {
			url = 'https://github.com/prometheus/prometheus/releases/download/v${version}/prometheus-${version}.linux-amd64.tar.gz'
		} else {
			return error('unsupported platform, only linux amd64 for now')
		}
		_ := osal.download(
			url:        url
			minsize_kb: 100000
			expand_dir: '/tmp/prometheus'
		)!
		mut dest2 := pathlib.get_dir(path: '/tmp/prometheus/prometheus-${version}.linux-amd64')!
		for abin in ['prometheus', 'promtool'] {
			mut binpath := dest2.file_get(abin)!
			binpath.copy(dest: '/root/hero/prometheus/${abin}', delete: true, rsync: false)!
		}
		// console assets shipped alongside the binaries
		for adir in ['console_libraries', 'consoles'] {
			mut binpath := dest2.dir_get(adir)!
			binpath.copy(dest: '/root/hero/prometheus/${adir}', delete: true, rsync: false)!
		}
		osal.profile_path_add_remove(paths2add: '/root/hero/prometheus')!
	}
}
// pub fn start(args_ InstallArgs) ! {
// mut args := args_
// if args.title == '' {
// args.title = 'HERO DAG'
// }
// if args.homedir == '' {
// args.homedir = '${os.home_dir()}/hero/var/prometheus'
// }
// if args.configpath == '' {
// args.configpath = '${os.home_dir()}/hero/cfg/prometheus.yaml'
// }
// if check(args)! {
// return
// }
// console.print_header('prometheus start')
// //println(args)
// configure(args)!
// cmd := 'prometheus server --host 0.0.0.0 --config ${args.configpath}'
// // TODO: we are not taking host & port into consideration
// // dags string // location of DAG files (default is /Users/<user>/.prometheus/dags)
// // host string // server host (default is localhost)
// // port string // server port (default is 8080)
// // result := os.execute_opt('prometheus start-all ${flags}')!
// mut sm := startupmanager.get()!
// sm.start(
// name: 'prometheus'
// cmd: cmd
// env: {
// 'HOME': '/root'
// }
// )!
// //cmd2 := 'prometheus scheduler' // TODO: do we need this
// console.print_debug(cmd)
// // if true{
// // panic("sdsdsds prometheus install")
// // }
// // time.sleep(100000000000)
// for _ in 0 .. 50 {
// if check(args)! {
// return
// }
// time.sleep(100 * time.millisecond)
// }
// return error('prometheus did not install propertly, could not call api.')
// }
// pub fn configure(args_ InstallArgs) ! {
// mut cfg := args_
// if cfg.password == "" || cfg.secret == ""{
// return error("password and secret needs to be filled in for prometheus")
// }
// mut mycode := $tmpl('templates/admin.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
// }
// pub fn check(args InstallArgs) !bool {
// // this checks health of prometheus
// // curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
// mut conn := httpconnection.new(name: 'prometheus', url: 'http://127.0.0.1:${args.port}/api/v1/')!
// // console.print_debug("curl http://localhost:3333/api/v1/dags --oauth2-bearer ${secret}")
// if args.secret.len > 0 {
// conn.default_header.add(.authorization, 'Bearer ${args.secret}')
// }
// conn.default_header.add(.content_type, 'application/json')
// console.print_debug('check connection to prometheus')
// r0 := conn.get(prefix: 'dags') or { return false }
// // if it gets here then is empty but server answers, the below might not work if no dags loaded
// // println(r0)
// // if true{panic("ssss")}
// // r := conn.get_json_dict(prefix: 'dags', debug: false) or {return false}
// // println(r)
// // dags := r['DAGs'] or { return false }
// // // console.print_debug(dags)
// console.print_debug('Dagu is answering.')
// return true
// }
// pub fn stop() ! {
// console.print_header('Dagu Stop')
// mut sm := startupmanager.get()!
// sm.stop('prometheus')!
// }
// pub fn restart(args InstallArgs) ! {
// stop()!
// start(args)!
// }
// pub fn installargs(args InstallArgs) InstallArgs {
// return args
// }

View File

@@ -0,0 +1,36 @@
module prometheus
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
@[params]
pub struct InstallArgs {
pub mut:
	// homedir string
	// configpath string
	// username string = "admin"
	// password string @[secret]
	// secret string @[secret]
	// title string = 'My Hero DAG'
	reset     bool // force re-download/re-install even when already up to date
	start     bool = true // NOTE(review): start/stop/restart/uninstall are not acted on by install() yet — confirm
	stop      bool
	restart   bool
	uninstall bool
	// host string = 'localhost' // server host (default is localhost)
	// port int = 8888
}
// install installs the full prometheus toolchain in order: server (with
// promtool), alertmanager, node_exporter, blackbox_exporter and prom2json.
// Each sub-installer errors on non linux-amd64 platforms.
pub fn install(args_ InstallArgs) ! {
	install_prometheus(args_)!
	install_alertmanager(args_)!
	install_node_exporter(args_)!
	install_blackbox_exporter(args_)!
	install_prom2json(args_)!
}

View File

@@ -0,0 +1,8 @@
# Prometheus monitoring system
```bash
```

View File

@@ -0,0 +1,16 @@
route:
group_by: ['alertname']
group_wait: 30s
group_interval: 5m
repeat_interval: 1h
receiver: 'web.hook'
receivers:
- name: 'web.hook'
webhook_configs:
- url: 'http://127.0.0.1:5001/'
inhibit_rules:
- source_match:
severity: 'critical'
target_match:
severity: 'warning'
equal: ['alertname', 'dev', 'instance']

View File

@@ -0,0 +1,51 @@
modules:
http_2xx:
prober: http
http:
preferred_ip_protocol: "ip4"
http_post_2xx:
prober: http
http:
method: POST
tcp_connect:
prober: tcp
pop3s_banner:
prober: tcp
tcp:
query_response:
- expect: "^+OK"
tls: true
tls_config:
insecure_skip_verify: false
grpc:
prober: grpc
grpc:
tls: true
preferred_ip_protocol: "ip4"
grpc_plain:
prober: grpc
grpc:
tls: false
service: "service1"
ssh_banner:
prober: tcp
tcp:
query_response:
- expect: "^SSH-2.0-"
- send: "SSH-2.0-blackbox-ssh-check"
irc_banner:
prober: tcp
tcp:
query_response:
- send: "NICK prober"
- send: "USER prober prober prober :prober"
- expect: "PING :([^ ]+)"
send: "PONG ${1}"
- expect: "^:[^ ]+ 001"
icmp:
prober: icmp
icmp_ttl5:
prober: icmp
timeout: 5s
icmp:
ttl: 5

View File

@@ -0,0 +1,29 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "prometheus"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ["localhost:9090"]

View File

@@ -0,0 +1,15 @@
!!hero_code.generate_installer
name:'rclone'
classname:'RClone'
singleton:0 //there can only be 1 object in the globals, is called 'default'
templates:1 //are there templates for the installer
default:0 //can we create a default when the factory is used
title:''
supported_platforms:'' //osx, ... (empty means all)
reset: 0 // regenerate all, dangerous !!!
startupmanager:0 //managed by a startup manager, default true
build:0 //will we also build the component

View File

@@ -0,0 +1,80 @@
module rclone
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.clients.httpconnection
import os
// installed reports whether rclone at version `version` or newer is on PATH.
// Returns false when the binary is missing or too old; errors only when the
// 'rclone version' output cannot be parsed.
fn installed() !bool {
	res := os.execute('${osal.profile_path_source_and()} rclone version')
	if res.exit_code != 0 {
		// binary not found / not runnable
		return false
	}
	// first line of output looks like: 'rclone v1.67.0'
	r := res.output.split_into_lines().filter(it.contains('rclone v'))
	if r.len != 1 {
		// message now matches the filter above (was: "expected 'rclone 0'")
		return error("couldn't parse rclone version, expected 'rclone v' on 1 row.\n${res.output}")
	}
	v := texttools.version(r[0].all_after('rclone'))
	if texttools.version(version) > v {
		// installed but older than required
		return false
	}
	return true
}
// install downloads the rclone release archive for the current platform
// and registers the binary via osal.cmd_add. Errors on unsupported
// platforms or on download/extract failure.
fn install() ! {
	console.print_header('install rclone')
	mut url := ''
	if osal.is_linux_arm() {
		url = 'https://github.com/rclone/rclone/releases/download/v${version}/rclone-v${version}-linux-arm64.zip'
	} else if osal.is_linux_intel() {
		url = 'https://github.com/rclone/rclone/releases/download/v${version}/rclone-v${version}-linux-amd64.zip'
	} else if osal.is_osx_arm() {
		// pin the same release as the other platforms; previously this
		// fetched the unpinned 'current' build for the wrong arch (amd64)
		url = 'https://github.com/rclone/rclone/releases/download/v${version}/rclone-v${version}-osx-arm64.zip'
	} else if osal.is_osx_intel() {
		url = 'https://github.com/rclone/rclone/releases/download/v${version}/rclone-v${version}-osx-amd64.zip'
	} else {
		return error('unsupported platform')
	}
	mut dest := osal.download(
		url:        url
		minsize_kb: 9000
		expand_dir: '/tmp/rclone'
	)!
	// dest.moveup_single_subdir()!
	mut binpath := dest.file_get('rclone')!
	osal.cmd_add(
		cmdname: 'rclone'
		source:  binpath.path
	)!
}
// configure renders the rclone config template for the active instance.
// NOTE(review): the rendered template is currently discarded — the write to
// disk is still commented out below; confirm whether this is intentional.
fn configure() ! {
	_ := get()!
	// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
	_ := $tmpl('templates/rclone.yaml')
	// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
	// path.write(mycode)!
	// console.print_debug(mycode)
	// implement if steps need to be done for configuration
}
// destroy removes the installed component; currently a no-op.
fn destroy() ! {
}

// start_pre runs before the service is started; currently a no-op.
fn start_pre() ! {
}

// start_post runs after the service is started; currently a no-op.
fn start_post() ! {
}

// stop_pre runs before the service is stopped; currently a no-op.
fn stop_pre() ! {
}

// stop_post runs after the service is stopped; currently a no-op.
fn stop_post() ! {
}

View File

@@ -0,0 +1,173 @@
module rclone
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
rclone_global map[string]&RClone
rclone_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
	name string // instance name; empty means "use the module default"
}
// args_get resolves the instance name: an explicit name wins, then the
// module-level default (rclone_default), and finally the literal 'default'.
fn args_get(args_ ArgsGet) ArgsGet {
	mut resolved := args_
	if resolved.name.len == 0 {
		resolved.name = rclone_default
	}
	if resolved.name.len == 0 {
		resolved.name = 'default'
	}
	return resolved
}
// get returns the cached RClone instance for args.name.
// For the 'default' name, a config is bootstrapped on first use (saved when
// the module `default` flag allows it) and then loaded from the hero config
// store via play().
// NOTE(review): non-default names that were never set() skip config_load
// entirely and hit the panic below — confirm whether loading should also
// happen for named instances.
pub fn get(args_ ArgsGet) !&RClone {
	mut args := args_get(args_)
	if args.name !in rclone_global {
		if args.name == 'default' {
			if !config_exists(args) {
				if default {
					config_save(args)!
				}
			}
			config_load(args)!
		}
	}
	return rclone_global[args.name] or {
		println(rclone_global)
		panic('could not get config for rclone with name:${args.name}')
	}
}
// config_exists reports whether a saved hero config exists for the named
// rclone instance in the current context.
fn config_exists(args_ ArgsGet) bool {
	mut args := args_get(args_)
	mut context := base.context() or { panic('bug') }
	return context.hero_config_exists('rclone', args.name)
}
// config_load reads the named instance's heroscript from the context's
// hero config store and replays it through play(), which caches the
// resulting object in rclone_global.
fn config_load(args_ ArgsGet) ! {
	mut args := args_get(args_)
	mut context := base.context()!
	mut heroscript := context.hero_config_get('rclone', args.name)!
	play(heroscript: heroscript)!
}
// config_save writes the module's default heroscript as the saved config
// for the named instance (used to bootstrap the 'default' instance).
fn config_save(args_ ArgsGet) ! {
	mut args := args_get(args_)
	mut context := base.context()!
	context.hero_config_set('rclone', args.name, heroscript_default()!)!
}
// set initializes the object via obj_init and caches it in the module-global
// map; the most recently set instance also becomes the module default.
fn set(o RClone) ! {
	mut o2 := obj_init(o)!
	rclone_global[o.name] = &o2
	rclone_default = o.name
}
@[params]
pub struct PlayArgs {
pub mut:
	heroscript string // if filled in then plbook will be made out of it
	plbook     ?playbook.PlayBook // pre-parsed playbook; takes precedence over heroscript
	reset      bool // NOTE(review): not read by play() — confirm whether it should force a destroy
}
// play executes rclone heroscript actions. When neither heroscript nor
// plbook is supplied, the module default heroscript is used.
// Handled actions:
//   rclone.configure — builds a config object (cfg_play) and caches it (set)
//   rclone.destroy   — runs destroy(); a 'reset' param on install also triggers it
//   rclone.install   — runs install()
// NOTE(review): 'build' passes the name filter below but has no handler.
pub fn play(args_ PlayArgs) ! {
	mut args := args_
	if args.heroscript == '' {
		args.heroscript = heroscript_default()!
	}
	mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
	// process all configure actions first so later actions see the config
	mut install_actions := plbook.find(filter: 'rclone.configure')!
	if install_actions.len > 0 {
		for install_action in install_actions {
			mut p := install_action.params
			mycfg := cfg_play(p)!
			console.print_debug('install action rclone.configure\n${mycfg}')
			set(mycfg)!
		}
	}
	mut other_actions := plbook.find(filter: 'rclone.')!
	for other_action in other_actions {
		if other_action.name in ['destroy', 'install', 'build'] {
			mut p := other_action.params
			reset := p.get_default_false('reset')
			if other_action.name == 'destroy' || reset {
				console.print_debug('install action rclone.destroy')
				destroy()!
			}
			if other_action.name == 'install' {
				console.print_debug('install action rclone.install')
				install()!
			}
		}
	}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a startup-manager category to a concrete
// StartupManager. Explicitly handled: zinit and systemd; every other
// category (unknown, screen, tmux, ...) falls back to auto-detection.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
// reload makes this instance the module default and re-runs obj_init on it
// (load from disk and make sure it is properly initialized).
pub fn (mut self RClone) reload() ! {
	switch(self.name)
	self = obj_init(self)!
}
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // force a reinstall even when the right version is already present
}
// install makes this instance the default and installs rclone; it is a
// no-op when the required version is already present, unless args.reset.
pub fn (mut self RClone) install(args InstallArgs) ! {
	switch(self.name)
	if args.reset || (!installed()!) {
		install()!
	}
}
// destroy makes this instance the default and removes the installed component.
pub fn (mut self RClone) destroy() ! {
	switch(self.name)
	destroy()!
}
// switch selects the named instance as the module-wide default for rclone.
pub fn switch(name string) {
	rclone_default = name
}

View File

@@ -0,0 +1,63 @@
module rclone
import freeflowuniverse.herolib.data.paramsparser
pub const version = '1.67.0'
const singleton = false
const default = false
// heroscript_default returns the heroscript used to seed a default rclone
// configuration: category 'b2' with empty credentials/endpoint.
pub fn heroscript_default() !string {
	heroscript := "
!!rclone.configure
name: 'default'
cat: 'b2'
s3_account: ''
s3_key: ''
s3_secret: ''
hard_delete: false
endpoint: ''
"
	return heroscript
}
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE: the config object, modelled.
pub struct RClone {
pub mut:
	name        string = 'default' // instance name used as key in the global registry
	cat         RCloneCat // backend category (b2 / s3 / ftp)
	s3_account  string
	s3_key      string
	s3_secret   string
	hard_delete bool // hard delete a file when delete on server, not just hide
	endpoint    string // backend endpoint; empty presumably means provider default — TODO confirm
}
// RCloneCat enumerates the supported rclone backend categories.
pub enum RCloneCat {
	b2
	s3
	ftp
}
// cfg_play builds an RClone config object from parsed heroscript params;
// an unrecognized 'cat' value yields an error.
fn cfg_play(p paramsparser.Params) !RClone {
	mut result := RClone{
		name: p.get_default('name', 'default')!
		cat: match p.get_default('cat', 'b2')! {
			'b2' { RCloneCat.b2 }
			's3' { RCloneCat.s3 }
			'ftp' { RCloneCat.ftp }
			else { return error('Invalid RCloneCat') }
		}
		s3_account: p.get_default('s3_account', '')!
		s3_key: p.get_default('s3_key', '')!
		s3_secret: p.get_default('s3_secret', '')!
		hard_delete: p.get_default_false('hard_delete')
		endpoint: p.get_default('endpoint', '')!
	}
	return result
}
// obj_init post-processes a freshly decoded RClone object before it is
// cached; currently a pass-through hook.
fn obj_init(obj_ RClone) !RClone {
	mut obj := obj_
	return obj
}

View File

@@ -0,0 +1,36 @@
# rclone
To get started
```vlang
import freeflowuniverse.herolib.installers.something.rclone
mut installer:= rclone.get()!
installer.start()!
```
## example heroscript
```hero
!!rclone.install
homedir: '/home/user/rclone'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```

View File

@@ -0,0 +1,51 @@
module restic
// import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.installers.lang.golang
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.ui.console
const url = 'https://github.com/restic/restic'
@[params]
pub struct BuildArgs {
pub mut:
	reset    bool // force a rebuild even if already built
	bin_push bool = true // push the built binary afterwards; NOTE(review): not used by build() yet
}
// build compiles restic from source on ubuntu (clones the repo, then runs
// a build script).
// NOTE(review): the build command is a stub — it exits 1 ('#todo') and
// sources the cargo env although restic is built with Go (golang.install()
// above); confirm the intended build steps before relying on this.
pub fn build(args BuildArgs) ! {
	// make sure we install base on the node
	if osal.platform() != .ubuntu {
		return error('only support ubuntu for now')
	}
	golang.install()!
	// install restic if it was already done will return true
	console.print_header('build restic')
	mut gs := gittools.get(coderoot: '/tmp/builder')!
	mut repo := gs.get_repo(
		url:   url
		reset: true
		pull:  true
	)!
	mut gitpath := repo.get_path()!
	cmd := '
source ~/.cargo/env
cd ${gitpath}
exit 1 #todo
'
	osal.execute_stdout(cmd)!
	// if args.bin_push {
	// installers.bin_push(
	// cmdname: 'restic'
	// source: '/tmp/builder/github/threefoldtech/restic/target/x86_64-unknown-linux-musl/release/restic'
	// )!
	// }
}

View File

@@ -0,0 +1,64 @@
module restic
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import os
// Parameters for installing restic.
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // force re-install even when an up-to-date restic is present
}
// install restic when it is missing or older than the pinned version.
// Downloads the official release binary for the current platform and
// registers it as the `restic` command.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	version := '0.16.2'
	// probe the currently installed version; any exec/parse failure triggers a reinstall
	res := os.execute('${osal.profile_path_source_and()} restic version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.contains('restic 0'))
		if r.len != 1 {
			return error("couldn't parse restic version, expected 'restic 0' on 1 row.\n${res.output}")
		}
		v := texttools.version(r[0].all_before('compiled').all_after('restic'))
		if v < texttools.version(version) {
			args.reset = true
		}
	} else {
		args.reset = true
	}
	if args.reset == false {
		// already installed and recent enough, nothing to do
		return
	}
	console.print_header('install restic')
	mut url := ''
	if osal.is_linux() {
		url = 'https://github.com/restic/restic/releases/download/v${version}/restic_${version}_linux_amd64.bz2'
	} else if osal.is_osx_arm() {
		url = 'https://github.com/restic/restic/releases/download/v${version}/restic_${version}_darwin_arm64.bz2'
	} else if osal.is_osx_intel() {
		url = 'https://github.com/restic/restic/releases/download/v${version}/restic_${version}_darwin_amd64.bz2'
	} else {
		// fixed typo in the error message ('unsported')
		return error('unsupported platform')
	}
	mut dest := osal.download(
		url: url
		minsize_kb: 7000
		expand_file: '/tmp/restic'
	)!
	osal.cmd_add(
		cmdname: 'restic'
		source: dest.path
	)!
}

View File

@@ -0,0 +1,48 @@
module s3
import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.ui.console
// Parameters for building the s3cas server from source.
@[params]
pub struct BuildArgs {
pub mut:
	reset bool // force rebuild
	// bin_push bool = true
}

// build compiles s3cas (https://github.com/leesmet/s3-cas) with cargo and
// registers the resulting binary as a system command. Ubuntu only.
pub fn build(args BuildArgs) ! {
	// make sure we install base on the node
	if osal.platform() != .ubuntu {
		return error('only support ubuntu for now')
	}
	rust.install()! // s3-cas is a rust project
	console.print_header('build s3cas')
	osal.package_install('libssl-dev,pkg-config')! // build deps for the openssl bindings
	mut gs := gittools.get()!
	mut repo := gs.get_repo(
		url: 'https://github.com/leesmet/s3-cas'
		reset: false
		pull: true
	)!
	mut path := repo.get_path()!
	cmd := '
set -ex
cd ${path}
cargo build --all-features
'
	osal.execute_stdout(cmd) or { return error('Cannot install s3.\n${err}') }
	// NOTE(review): the debug build is installed, not --release — confirm intended
	osal.cmd_add(
		// cmdname: ''
		source: '${path}/target/debug/s3-cas'
	)!
}

View File

@@ -0,0 +1 @@
## S3

View File

@@ -0,0 +1,24 @@
module s3
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.installers.zinit as zinitinstaller
import freeflowuniverse.herolib.installers.rclone
import freeflowuniverse.herolib.ui.console
// install s3 will return true if it was already installed
// install makes sure base, zinit, rclone and the s3cas binary are present.
// Idempotent: once the 'install_s3' done-flag is set it returns immediately.
pub fn install() ! {
	base.install()!
	zinitinstaller.install()!
	rclone.install()!
	if osal.done_exists('install_s3') {
		return
	}
	build()! // compile s3-cas from source
	console.print_header('install s3')
	osal.done_set('install_s3', 'OK')!
}

View File

@@ -0,0 +1,145 @@
module s3
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.data.dbfs
import freeflowuniverse.herolib.core.texttools
import json
import rand
import freeflowuniverse.herolib.ui.console
// --fs-root <fs-root> [default: .]
// --host <host> [default: localhost]
// --meta-root <meta-root> [default: .]
// --metric-host <metric-host> [default: localhost]
// --metric-port <metric-port> [default: 9100]
// --port <port> [default: 8014]
// --access-key <access-key>
// --secret-key <secret-key>
// Config holds the command line options for one s3-cas server instance,
// mirroring the s3-cas CLI flags listed above.
@[params]
pub struct Config {
pub mut:
	name        string = 'default' // instance name, part of the config DB key
	fs_root     string = '/var/data/s3' // where object data is stored
	host        string = 'localhost'
	meta_root   string = '/var/data/s3_meta' // where metadata is stored
	metric_host string = 'localhost'
	metric_port int = 9100
	port        int = 8014
	access_key  string // generated randomly when left empty (see new())
	secret_key  string // generated randomly when left empty (see new())
}

// Server is a handle on a (possibly running) s3-cas instance managed by zinit.
pub struct Server {
pub mut:
	name    string
	config  Config
	process ?zinit.ZProcess // only set when the zinit process already exists
}
// get the s3 server
//```js
// fs_root string = "/var/data/s3"
// host string = "localhost"
// meta_root string = "/var/data/s3_meta"
// metric_host string
// metric_port int //9100
// port int = 8014
// access_key string @[required]
// secret_key string
//```
// if name exists already in the config DB, it will load for that name
pub fn new(args_ Config) !Server {
	install()! // make sure it has been built & is ready to be used
	mut args := args_
	args.name = texttools.name_fix(args.name)
	key := 's3_config_${args.name}'
	mut kvs := dbfs.new(name: 'config')!
	if !kvs.exists(key) {
		// first time for this name: fill in missing credentials and persist the config
		if args.access_key == '' {
			args.access_key = rand.string(12)
		}
		if args.secret_key == '' {
			args.secret_key = rand.string(12)
		}
		data := json.encode(args)
		console.print_debug('set config s3')
		kvs.set(key, data)!
	}
	return get(args.name)!
}
// get loads a previously configured s3 server by name from the config DB
// and attaches the zinit process when one exists for it.
pub fn get(name_ string) !Server {
	name := texttools.name_fix(name_)
	key := 's3_config_${name}'
	mut kvs := dbfs.new(name: 'config')!
	if kvs.exists(key) {
		data := kvs.get(key)!
		args := json.decode(Config, data)!
		mut server := Server{
			name: name
			config: args
		}
		mut z := zinit.new()!
		processname := 's3_${args.name}'
		if z.process_exists(processname) {
			server.process = z.process_get(processname)!
		}
		return server
	}
	return error("can't find S3 server with name:'${name}'")
}
// start launches the s3-cas process through zinit and waits (max 10s)
// until the server reports it is running.
pub fn (mut server Server) start() ! {
	mut args := server.config
	mut cmd := 's3-cas --fs-root ${args.fs_root} --host ${args.host} --meta-root ${args.meta_root} --port ${args.port}'
	if args.metric_host.len > 0 {
		cmd += ' --metric-host ${args.metric_host}'
	}
	if args.metric_port > 0 {
		cmd += ' --metric-port ${args.metric_port}'
	}
	if args.secret_key == '' {
		// fall back to the access key so the server always receives both flags
		args.secret_key = args.access_key
	}
	cmd += ' --access-key ${args.access_key}'
	cmd += ' --secret-key ${args.secret_key}'
	mut z := zinit.new()!
	mut p := z.process_new(
		name: 's3_${args.name}'
		cmd: cmd
	)!
	p.status()!
	// NOTE(review): no explicit p.start() is visible — assumes process_new
	// schedules the process in zinit; confirm against the zinit module.
	p.output_wait('server is running at', 10)!
}
// return status
// ```
// pub enum ZProcessStatus {
// unknown
// init
// ok
// error
// blocked
// spawned
// }
// ```
// status returns the zinit process status; errors when the process was never created.
pub fn (mut server Server) status() !zinit.ZProcessStatus {
	mut process := server.process or { return error("can't find process yet.") }
	return process.status()!
}

// check verifies the process is healthy; errors when the process was never created.
pub fn (mut server Server) check() ! {
	mut process := server.process or { return error("can't find process yet.") }
	process.check()!
	// TODO: need to do more checks S3 checks
}

// stop terminates the zinit managed process; errors when it was never started.
pub fn (mut server Server) stop() ! {
	mut process := server.process or { return error("can't find process yet.") }
	return process.stop()
}

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'griddriver'
classname:'GridDriverInstaller'
singleton:1
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:0
build:1
hasconfig:0

View File

@@ -0,0 +1,97 @@
module griddriver
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.installers.ulist
import freeflowuniverse.herolib.installers.lang.golang
import freeflowuniverse.herolib.core.texttools
import os
// checks if a certain version or above is installed
fn installed() !bool {
	res := os.execute('${osal.profile_path_source_and()} griddriver --version')
	if res.exit_code != 0 {
		return false
	}
	// expected output shape: 3 space separated parts with the semver last
	r := res.output.split(' ')
	if r.len != 3 {
		return error("couldn't parse griddriver version.\n${res.output}")
	}
	if texttools.version(version) > texttools.version(r[2]) {
		return false
	}
	return true
}

// install griddriver; there is no prebuilt binary so this always builds from source.
fn install() ! {
	// console.print_header('install griddriver')
	build()!
}
// build clones the web3gw repo and compiles the griddriver binary with Go,
// then registers /tmp/griddriver as a system command.
fn build() ! {
	console.print_header('build griddriver')
	mut installer := golang.get()!
	installer.install()!
	mut gs := gittools.get()!
	mut repo := gs.get_repo(
		// NOTE(review): this is a /tree/<branch>/<subdir> browse URL, not a plain
		// clone URL — assumes gittools understands this form; confirm.
		url: 'https://github.com/threefoldtech/web3gw/tree/development_integration/griddriver'
		reset: true
		pull: true
	)!
	mut path := repo.get_path()!
	cmd := '
set -ex
cd ${path}
go env -w CGO_ENABLED="0"
go build -ldflags="-X \'main.version=$(git describe --tags --abbrev=0)\'" -o /tmp/griddriver .
echo build ok
'
	osal.execute_stdout(cmd)!
	osal.cmd_add(
		cmdname: 'griddriver'
		source: '/tmp/griddriver'
	)!
	console.print_header('build griddriver OK')
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
	// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
	return ulist.UList{}
}

// uploads to S3 server if configured (placeholder, not implemented yet)
fn upload() ! {
	// installers.upload(
	// cmdname: 'griddriver'
	// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/griddriver'
	// )!
}

// destroy removes griddriver related services/processes (placeholder, all commented out)
fn destroy() ! {
	// cmd:="
	// systemctl disable griddriver_scheduler.service
	// systemctl disable griddriver.service
	// systemctl stop griddriver_scheduler.service
	// systemctl stop griddriver.service
	// systemctl list-unit-files | grep griddriver
	// pkill -9 -f griddriver
	// ps aux | grep griddriver
	// "
	// osal.exec(cmd: cmd, stdout:true, debug: false)!
}

View File

@@ -0,0 +1,79 @@
module griddriver
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
griddriver_global map[string]&GridDriverInstaller
griddriver_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
	name string // instance name; currently ignored, a fresh installer is returned
}

// get returns a GridDriverInstaller handle.
// NOTE(review): args are ignored and the global registry is not consulted.
pub fn get(args_ ArgsGet) !&GridDriverInstaller {
	return &GridDriverInstaller{}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a zinit StartupManagerType onto a concrete
// startupmanager instance; anything other than zinit/systemd falls back
// to auto detection.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	// possible values: unknown, screen, zinit, tmux, systemd
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // force reinstall even when the pinned version is already present
}

// install builds griddriver unless the pinned version is already installed.
pub fn (mut self GridDriverInstaller) install(args InstallArgs) ! {
	switch(self.name) // make this instance the module default
	if args.reset || (!installed()!) {
		install()!
	}
}

// build always compiles griddriver from source.
pub fn (mut self GridDriverInstaller) build() ! {
	switch(self.name)
	build()!
}

// destroy delegates to the module-level destroy (currently a stub).
pub fn (mut self GridDriverInstaller) destroy() ! {
	switch(self.name)
	destroy()!
}

// switch instance to be used for griddriver
pub fn switch(name string) {
	griddriver_default = name
}

View File

@@ -0,0 +1,19 @@
module griddriver
pub const version = 'v0.1.0' // pinned griddriver version used by installed()

const singleton = true
const default = true

// GridDriverInstaller is the (stateless) handle used by the generated factory.
pub struct GridDriverInstaller {
pub mut:
	name string = 'default'
}

fn obj_init(obj_ GridDriverInstaller) !GridDriverInstaller {
	// never call get here, only thing we can do here is work on object itself
	mut obj := obj_
	return obj
}

// configure is called by the generated factory; nothing to configure here.
fn configure() ! {
}

View File

@@ -0,0 +1,36 @@
# griddriver
To get started
```vlang
import freeflowuniverse.herolib.installers.something.griddriver
mut installer:= griddriver.get()!
installer.start()!
```
## example heroscript
```hero
!!griddriver.install
homedir: '/home/user/griddriver'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```

View File

@@ -0,0 +1,68 @@
module tfrobot
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.installers.lang.golang
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.core.texttools
import os
// Parameters for installing tfrobot.
@[params]
pub struct InstallArgs {
pub mut:
	reset     bool // force rebuild/reinstall
	uninstall bool
}

// install builds tfrobot when it is missing or older than the pinned version.
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	version := '0.14.0'
	// probe the installed version; any exec/parse failure triggers a rebuild
	res := os.execute('${osal.profile_path_source_and()} tfrobot version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.trim_space().contains('v0.'))
		if r.len != 1 {
			console.print_debug(r)
			return error("couldn't parse tfrobot version.\n${res.output}")
		}
		if texttools.version(version) > texttools.version(r[0].replace('v', '')) {
			args.reset = true
		}
	} else {
		args.reset = true
	}
	if args.reset {
		console.print_header('install tfrobot')
		build()!
	}
}
// build compiles tfrobot from the tfgrid-sdk-go repo with Go and copies the
// binary into the platform bin dir (/usr/local/bin on linux, ~/hero/bin otherwise).
pub fn build() ! {
	mut g := golang.get()!
	g.install()!
	// fixed: the original printed this header twice (before and after cloning)
	console.print_header('build tfrobot')
	mut dest_on_os := '${os.home_dir()}/hero/bin'
	if osal.is_linux() {
		dest_on_os = '/usr/local/bin'
	}
	mut gs := gittools.get()!
	mut repo := gs.get_repo(
		url: 'https://github.com/threefoldtech/tfgrid-sdk-go'
		reset: true
		pull: true
	)!
	mut path := repo.get_path()!
	cmd := '
cd ${path}
cd tfrobot
make build
cp ${path}/tfrobot/bin/tfrobot ${dest_on_os}/
'
	osal.execute_stdout(cmd)!
	console.print_header('build tfrobot OK')
}

17
lib/installers/upload.v Normal file
View File

@@ -0,0 +1,17 @@
module installers
// import freeflowuniverse.herolib.core.pathlib
// import freeflowuniverse.herolib.develop.gittools
// Arguments for uploading a built binary to the configured store.
@[params]
pub struct UploadArgs {
pub mut:
	cmdname string // name of the command/binary to upload
	source  string // local path of the file to upload
	reset   bool
}

// upload is a placeholder; uploading binaries is not implemented yet.
pub fn upload(args_ UploadArgs) ! {
	//_ := args_
	panic('to implement')
}

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'buildah'
classname:'BuildahInstaller'
singleton:1
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:0
hasconfig:0
build:1

View File

@@ -0,0 +1,80 @@
module buildah
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.installers.ulist
import freeflowuniverse.herolib.installers.lang.golang
import os
// checks if exactly the pinned buildah version is installed
fn installed() !bool {
	res := os.execute('${osal.profile_path_source_and()} buildah -v')
	if res.exit_code != 0 {
		return false
	}
	r := res.output.split_into_lines().filter(it.trim_space().len > 0)
	if r.len != 1 {
		// fixed: the message wrongly referred to 'herocontainers'
		return error("couldn't parse buildah version, expected 'buildah -v' on 1 row.\n${res.output}")
	}
	v := texttools.version(r[0].all_after('version').all_before('(').replace('-dev', ''))
	// note: an exact version match is required, newer versions also trigger a reinstall
	if texttools.version(version) == v {
		return true
	}
	return false
}

// install buildah; there is no prebuilt binary path, so this builds from source.
fn install() ! {
	console.print_header('install buildah')
	build()!
}
// build installs the apt build dependencies, compiles buildah from git with Go,
// registers the binary as a system command and cleans up the build tree.
fn build() ! {
	console.print_header('build buildah')
	osal.package_install('runc,bats,btrfs-progs,git,go-md2man,libapparmor-dev,libglib2.0-dev,libgpgme11-dev,libseccomp-dev,libselinux1-dev,make,skopeo,libbtrfs-dev')!
	mut g := golang.get()!
	g.install()!
	cmd := '
cd /tmp
rm -rf buildah
git clone https://github.com/containers/buildah
cd buildah
make SECURITYTAGS="apparmor seccomp"
'
	osal.execute_stdout(cmd)!
	// now copy to the default bin path
	osal.cmd_add(
		cmdname: 'buildah'
		source: '/tmp/buildah/bin/buildah'
	)!
	osal.rm('
/tmp/buildah
')!
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
	// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
	return ulist.UList{}
}

// destroy removes the buildah package, its data dirs and related profile paths.
fn destroy() ! {
	osal.package_remove('
buildah
')!
	// will remove all paths where go/bin is found
	osal.profile_path_add_remove(paths2delete: 'go/bin')!
	// NOTE(review): the first entry 'buildah' is a relative path — assumes
	// osal.rm resolves command names/paths; confirm.
	osal.rm('
buildah
/var/lib/buildah
/tmp/buildah
')!
}

View File

@@ -0,0 +1,79 @@
module buildah
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
buildah_global map[string]&BuildahInstaller
buildah_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
	name string // instance name; currently ignored, a fresh installer is returned
}

// get returns a BuildahInstaller handle.
// NOTE(review): args are ignored and the global registry is not consulted.
pub fn get(args_ ArgsGet) !&BuildahInstaller {
	return &BuildahInstaller{}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a zinit StartupManagerType onto a concrete
// startupmanager instance; anything other than zinit/systemd falls back
// to auto detection.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	// possible values: unknown, screen, zinit, tmux, systemd
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // force reinstall even when the pinned version is already present
}

// install builds buildah unless the pinned version is already installed.
pub fn (mut self BuildahInstaller) install(args InstallArgs) ! {
	switch(self.name) // make this instance the module default
	if args.reset || (!installed()!) {
		install()!
	}
}

// build always compiles buildah from source.
pub fn (mut self BuildahInstaller) build() ! {
	switch(self.name)
	build()!
}

// destroy removes buildah packages and data (see module-level destroy()).
pub fn (mut self BuildahInstaller) destroy() ! {
	switch(self.name)
	destroy()!
}

// switch instance to be used for buildah
pub fn switch(name string) {
	buildah_default = name
}

View File

@@ -0,0 +1,18 @@
module buildah
pub const version = '1.38.0' // pinned buildah version, compared exactly in installed()

const singleton = true
const default = true

// BuildahInstaller is the (stateless) handle used by the generated factory.
pub struct BuildahInstaller {
pub mut:
	name string = 'default'
}

fn obj_init(obj_ BuildahInstaller) !BuildahInstaller {
	// never call get here, only thing we can do here is work on object itself
	mut obj := obj_
	return obj
}

// configure is called by the generated factory; nothing to configure here.
fn configure() ! {
}

View File

@@ -0,0 +1,34 @@
# buildah
To get started
```vlang
import freeflowuniverse.herolib.installers.something.buildah
mut installer:= buildah.get()!
installer.start()!
```
## example heroscript
```hero
!!buildah.install
homedir: '/home/user/buildah'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'cloudhypervisor'
classname:'CloudHypervisor'
singleton:1
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:0
hasconfig:0
build:1

View File

@@ -0,0 +1,109 @@
module cloudhypervisor
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
// import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.installers.ulist
// import freeflowuniverse.herolib.installers.lang.rust
import os
// installed reports whether exactly the pinned cloud-hypervisor version is present.
fn installed() !bool {
	res := os.execute('${osal.profile_path_source_and()} cloud-hypervisor --version')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.contains('cloud-hypervisor'))
		if r.len != 1 {
			return error("couldn't parse cloud-hypervisor version, expected 'cloud hypervisor version' on 1 row.\n${res.output}")
		}
		// output looks like 'cloud-hypervisor v<semver>'; take everything after 'ypervisor v'
		v := texttools.version(r[0].all_after('ypervisor v'))
		if v != texttools.version(version) {
			return false
		}
	} else {
		return false
	}
	return true
}
// install downloads the prebuilt cloud-hypervisor static binary for the
// current linux architecture, installs the qemu/bridge/tpm companion
// packages and registers the binary as the `cloud-hypervisor` command.
fn install() ! {
	console.print_header('install cloudhypervisor')
	mut url := ''
	if osal.is_linux_arm() {
		url = 'https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/v${version0}/cloud-hypervisor-static-aarch64'
	} else if osal.is_linux_intel() {
		url = 'https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/v${version0}/cloud-hypervisor-static'
	} else {
		// fixed typo in the error message ('unsuported')
		return error('unsupported platform for cloudhypervisor')
	}
	osal.package_install('
qemu-kvm
bridge-utils
ovmf
swtpm
')!
	console.print_header('download ${url}')
	dest := osal.download(
		url: url
		minsize_kb: 1000
		dest: '/tmp/cloud-hypervisor'
	)!
	console.print_debug('download cloudhypervisor done')
	osal.cmd_add(
		cmdname: 'cloud-hypervisor'
		source: '${dest.path}'
	)!
}
// build is a no-op: cloud-hypervisor is installed from prebuilt release binaries.
fn build() ! {
}

// get the Upload List of the files
fn ulist_get() !ulist.UList {
	// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
	return ulist.UList{}
}

// uploads to S3 server if configured (placeholder, not implemented yet)
fn upload() ! {
	// installers.upload(
	// cmdname: 'cloudhypervisor'
	// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/cloudhypervisor'
	// )!
}
// destroy kills running cloud-hypervisor processes, removes packages,
// images, sockets, logs and tap devices created for VMs.
fn destroy() ! {
	osal.process_kill_recursive(name: 'cloud-hypervisor')!
	osal.package_remove('
cloudhypervisor
cloud-hypervisor
')!
	// will remove all paths where go/bin is found
	osal.profile_path_add_remove(paths2delete: 'go/bin')!
	// NOTE(review): the find below deletes EVERY *.img file on the whole
	// filesystem — this is very aggressive; confirm intended.
	cmd := '
set +e
find / -name "*.img" -type f -exec rm -f {} \\;
rm -rf /tmp/cloud-hypervisor*
rm -f /tmp/cloud-hypervisor.sock
rm -f /var/log/cloud-hypervisor.log
umount /mnt/virtiofs
ip link delete tap0 2>/dev/null
ip link delete tap1 2>/dev/null
'
	osal.execute_silent(cmd)!
	osal.rm('
cloud-hypervisor
/var/lib/cloud-hypervisor/
')!
}

View File

@@ -0,0 +1,79 @@
module cloudhypervisor
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
cloudhypervisor_global map[string]&CloudHypervisor
cloudhypervisor_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
	name string // instance name; currently ignored, a fresh installer is returned
}

// get returns a CloudHypervisor handle.
// NOTE(review): args are ignored and the global registry is not consulted.
pub fn get(args_ ArgsGet) !&CloudHypervisor {
	return &CloudHypervisor{}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// startupmanager_get maps a zinit StartupManagerType onto a concrete
// startupmanager instance; anything other than zinit/systemd falls back
// to auto detection.
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
	// possible values: unknown, screen, zinit, tmux, systemd
	match cat {
		.zinit {
			console.print_debug('startupmanager: zinit')
			return startupmanager.get(cat: .zinit)!
		}
		.systemd {
			console.print_debug('startupmanager: systemd')
			return startupmanager.get(cat: .systemd)!
		}
		else {
			console.print_debug('startupmanager: auto')
			return startupmanager.get()!
		}
	}
}
@[params]
pub struct InstallArgs {
pub mut:
	reset bool // force reinstall even when the pinned version is already present
}

// install downloads/installs cloud-hypervisor unless already at the pinned version.
pub fn (mut self CloudHypervisor) install(args InstallArgs) ! {
	switch(self.name) // make this instance the module default
	if args.reset || (!installed()!) {
		install()!
	}
}

// build delegates to the module-level build (a no-op for cloud-hypervisor).
pub fn (mut self CloudHypervisor) build() ! {
	switch(self.name)
	build()!
}

// destroy removes cloud-hypervisor and related state (see module-level destroy()).
pub fn (mut self CloudHypervisor) destroy() ! {
	switch(self.name)
	destroy()!
}

// switch instance to be used for cloudhypervisor
pub fn switch(name string) {
	cloudhypervisor_default = name
}

View File

@@ -0,0 +1,26 @@
module cloudhypervisor
import freeflowuniverse.herolib.data.paramsparser
import os
pub const version0 = '41.0' // release tag component used in download URLs
pub const version = '${version0}.0' // full semver reported by the binary

const singleton = true
const default = true

// CloudHypervisor is the (stateless) handle used by the generated factory.
pub struct CloudHypervisor {
pub mut:
	name string = 'default'
}

fn obj_init(obj_ CloudHypervisor) !CloudHypervisor {
	// never call get here, only thing we can do here is work on object itself
	mut obj := obj_
	return obj
}

// called before start if done
fn configure() ! {
	// mut installer := get()!
}

View File

@@ -0,0 +1,34 @@
# cloudhypervisor
To get started
```vlang
import freeflowuniverse.herolib.installers.something.cloudhypervisor
mut installer:= cloudhypervisor.get()!
installer.start()!
```
## example heroscript
```hero
!!cloudhypervisor.install
homedir: '/home/user/cloudhypervisor'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
```

View File

@@ -0,0 +1,64 @@
module docker
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.ui.console
// install docker on ubuntu (the only supported platform).
// Idempotent: skipped when the 'install_docker' done-flag is set or the
// docker command already exists.
pub fn install() ! {
	// fixed doubled word in the header ('package install install docker')
	console.print_header('package install docker')
	if osal.platform() != .ubuntu {
		return error('only support ubuntu for now')
	}
	base.install()!
	if !osal.done_exists('install_docker') && !osal.cmd_exists('docker') {
		// osal.upgrade()!
		osal.package_install('mc,wget,htop,apt-transport-https,ca-certificates,curl,software-properties-common')!
		// register docker's apt repository with its signing key
		cmd := '
rm -f /usr/share/keyrings/docker-archive-keyring.gpg
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
apt update
apt-cache policy docker-ce
#systemctl status docker
'
		osal.execute_silent(cmd)!
		osal.package_install('docker-ce')!
		check()!
		osal.done_set('install_docker', 'OK')!
		return
	}
	// fixed: previously this message was printed even right after a fresh install
	console.print_header('docker already done')
}
// check verifies docker is installed, the daemon is running and a
// hello-world container can be executed; errors otherwise.
pub fn check() ! {
	// todo: do a monitoring check to see if it works
	cmd := '
# Check if docker command exists
if ! command -v docker &> /dev/null; then
echo "Error: Docker command-line tool is not installed."
exit 1
fi
# Check if Docker daemon is running
if ! pgrep -f "dockerd" &> /dev/null; then
echo "Error: Docker daemon is not running."
exit 1
fi
# Run the hello-world Docker container
output=$(docker run hello-world 2>&1)
if [[ "\$output" == *"Hello from Docker!"* ]]; then
echo "Docker is installed and running properly."
else
echo "Error: Failed to run the Docker hello-world container."
echo "Output: \$output"
exit 1
fi
'
	r := osal.execute_silent(cmd)!
	console.print_debug(r)
}

View File

@@ -0,0 +1,118 @@
module lima
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.installers.virt.qemu
import os
// Parameters for installing lima.
@[params]
pub struct InstallArgs {
pub mut:
	reset     bool // force reinstall even when an up-to-date lima is present
	uninstall bool // only remove, do not reinstall
}
// install lima when it is missing or older than the pinned version.
// Linux gets the release tarball; macOS is delegated to brew via osx_install().
pub fn install(args_ InstallArgs) ! {
	mut args := args_
	version := '0.22.0'
	if args.reset || args.uninstall {
		console.print_header('uninstall lima')
		uninstall()!
		console.print_debug(' - ok')
		if args.uninstall {
			return
		}
	}
	console.print_header('install lima')
	base.install()!
	// probe the installed version; any exec/parse failure triggers a reinstall
	res := os.execute('${osal.profile_path_source_and()} lima -v')
	if res.exit_code == 0 {
		r := res.output.split_into_lines().filter(it.contains('limactl version'))
		if r.len != 1 {
			return error("couldn't parse lima version, expected 'lima version' on 1 row.\n${res.output}")
		}
		v := texttools.version(r[0].all_after('version'))
		if v < texttools.version(version) {
			args.reset = true
		}
	} else {
		args.reset = true
	}
	if args.reset == false {
		// already installed and recent enough
		return
	}
	console.print_header('install lima')
	qemu.install()!
	mut url := ''
	mut dest_on_os := '${os.home_dir()}/hero'
	if osal.is_linux_arm() {
		dest_on_os = '/usr/local'
		url = 'https://github.com/lima-vm/lima/releases/download/v${version}/lima-${version}-Linux-aarch64.tar.gz'
	} else if osal.is_linux_intel() {
		dest_on_os = '/usr/local'
		url = 'https://github.com/lima-vm/lima/releases/download/v${version}/lima-${version}-Linux-x86_64.tar.gz'
	} else if osal.is_osx() {
		// fixed: brew handles the complete macOS install; previously execution
		// fell through and osal.download was called with an empty url
		osx_install()!
		return
	} else {
		// fixed typo in the error message ('unsported')
		return error('unsupported platform')
	}
	console.print_header('download ${url}')
	osal.download(
		url: url
		minsize_kb: 45000
		reset: args.reset
		dest: '/tmp/lima.tar.gz'
		expand_file: '/tmp/download/lima'
	)!
	cmd := '
rsync -rv /tmp/download/lima/ ${dest_on_os}
'
	osal.exec(cmd: cmd)!
}
// Parameters for installing lima guest extensions.
@[params]
pub struct ExtensionsInstallArgs {
pub mut:
	extensions string // comma separated extension names
	default    bool = true
}

// exists reports whether the limactl binary is available in the profile path.
pub fn exists() !bool {
	e := osal.cmd_exists_profile('limactl')
	if e {
		console.print_header('lima already installed')
	}
	return e
}

// uninstall removes lima.
// NOTE(review): the shell script below is entirely commented out (leftover
// template about quitting Chrome), so this currently does nothing — confirm.
pub fn uninstall() ! {
	cmd := '
// # Quit Google Chrome
// osascript -e \'quit app "Google Chrome"\'
// # Wait a bit to ensure Chrome has completely quit
// sleep 2
'
	osal.exec(cmd: cmd)!
}

// osx_install installs lima via homebrew on macOS.
pub fn osx_install() ! {
	osal.package_install('lima')!
}

Some files were not shown because too many files have changed in this diff Show More