2024-12-25 10:11:52 +01:00
parent 38aaba018e
commit 37d2501067
145 changed files with 12629 additions and 0 deletions

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'meilisearchinstaller'
classname:'MeilisearchServer'
singleton:0
templates:0
default:1
title:''
supported_platforms:''
reset:0
startupmanager:1
hasconfig:1
build:1

View File

@@ -0,0 +1,168 @@
module meilisearchinstaller
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.installers.ulist
// import freeflowuniverse.herolib.installers.lang.rust
import os
fn installed() !bool {
res := os.execute('${osal.profile_path_source_and()} meilisearch -V')
if res.exit_code != 0 {
return false
}
r := res.output.split_into_lines().filter(it.trim_space().len > 0)
if r.len != 1 {
return error("couldn't parse meilisearch version.\n${res.output}")
}
r2 := r[0].all_after('meilisearch').trim(' ')
if texttools.version(version) != texttools.version(r2) {
return false
}
return true
}
fn install() ! {
console.print_header('install meilisearch')
mut url := ''
if osal.is_linux_arm() {
url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-linux-aarch64'
} else if osal.is_linux_intel() {
url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-linux-amd64'
} else if osal.is_osx_arm() {
url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-macos-apple-silicon'
} else if osal.is_osx_intel() {
url = 'https://github.com/meilisearch/meilisearch/releases/download/v${version}/meilisearch-macos-amd64'
} else {
return error('unsupported platform')
}
mut dest := osal.download(
url: url
minsize_kb: 100000
expand_dir: '/tmp/meilisearch'
)!
// dest.moveup_single_subdir()!
mut binpath := dest.file_get('meilisearch')!
osal.cmd_add(
cmdname: 'meilisearch'
source: binpath.path
)!
}
fn build() ! {
// mut installer := get()!
// url := 'https://github.com/threefoldtech/meilisearch'
// console.print_header('compile meilisearch')
// rust.install()!
// mut dest_on_os := '${os.home_dir()}/hero/bin'
// if osal.is_linux() {
// dest_on_os = '/usr/local/bin'
// }
// console.print_debug(' - dest path for meilisearchs is on: ${dest_on_os}')
// //osal.package_install('pkg-config,openssl')!
// cmd := '
// echo "start meilisearch installer"
// set +ex
// source ~/.cargo/env > /dev/null 2>&1
// //TODO
// cargo install meilisearch
// cp ${os.home_dir()}/.cargo/bin/mdb* ${dest_on_os}/
// '
// defer {
// destroy()!
// }
// osal.execute_stdout(cmd)!
// osal.done_set('install_meilisearch', 'OK')!
// console.print_header('meilisearch installed')
}
// get the Upload List of the files
fn ulist_get() !ulist.UList {
// mut installer := get()!
// optionally build a UList, which is all paths that result from building; it is then used e.g. in upload
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
// mut installer := get()!
// installers.upload(
// cmdname: 'meilisearch'
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/meilisearch'
// )!
}
fn startupcmd() ![]zinit.ZProcessNewArgs {
mut res := []zinit.ZProcessNewArgs{}
mut installer := get()!
mut env := 'development'
if installer.production {
env = 'production'
}
res << zinit.ZProcessNewArgs{
name: 'meilisearch'
cmd: 'meilisearch --no-analytics --http-addr ${installer.host}:${installer.port} --env ${env} --db-path ${installer.path} --master-key ${installer.masterkey}'
}
return res
}
fn running() !bool {
mut installer := get()!
// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
// this checks health of meilisearch
// curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
// url:='http://127.0.0.1:${cfg.port}/api/v1'
// mut conn := httpconnection.new(name: 'meilisearch', url: url)!
// if cfg.secret.len > 0 {
// conn.default_header.add(.authorization, 'Bearer ${cfg.secret}')
// }
// conn.default_header.add(.content_type, 'application/json')
// console.print_debug("curl -X 'GET' '${url}'/tags --oauth2-bearer ${cfg.secret}")
// r := conn.get_json_dict(prefix: 'tags', debug: false) or {return false}
// println(r)
// if true{panic("ssss")}
// tags := r['Tags'] or { return false }
// console.print_debug(tags)
// console.print_debug('meilisearch is answering.')
return false
}
fn start_pre() ! {
}
fn start_post() ! {
}
fn stop_pre() ! {
}
fn stop_post() ! {
}
fn destroy() ! {
// mut systemdfactory := systemd.new()!
// systemdfactory.destroy("meilisearch")!
osal.process_kill_recursive(name: 'meilisearch')!
osal.cmd_delete('meilisearch')!
osal.package_remove('
meilisearch
') or { println('') }
// osal.rm("
// ")!
}

View File

@@ -0,0 +1,271 @@
module meilisearchinstaller
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
meilisearchinstaller_global map[string]&MeilisearchServer
meilisearchinstaller_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string
}
fn args_get(args_ ArgsGet) ArgsGet {
mut args := args_
if args.name == '' {
args.name = meilisearchinstaller_default
}
if args.name == '' {
args.name = 'default'
}
return args
}
pub fn get(args_ ArgsGet) !&MeilisearchServer {
mut args := args_get(args_)
if args.name !in meilisearchinstaller_global {
if args.name == 'default' {
if !config_exists(args) {
if default {
config_save(args)!
}
}
config_load(args)!
}
}
return meilisearchinstaller_global[args.name] or {
println(meilisearchinstaller_global)
panic('could not get config for meilisearchinstaller with name:${args.name}')
}
}
fn config_exists(args_ ArgsGet) bool {
mut args := args_get(args_)
mut context := base.context() or { panic('bug') }
return context.hero_config_exists('meilisearchinstaller', args.name)
}
fn config_load(args_ ArgsGet) ! {
mut args := args_get(args_)
mut context := base.context()!
mut heroscript := context.hero_config_get('meilisearchinstaller', args.name)!
play(heroscript: heroscript)!
}
fn config_save(args_ ArgsGet) ! {
mut args := args_get(args_)
mut context := base.context()!
context.hero_config_set('meilisearchinstaller', args.name, heroscript_default()!)!
}
fn set(o MeilisearchServer) ! {
mut o2 := obj_init(o)!
meilisearchinstaller_global[o.name] = &o2
meilisearchinstaller_default = o.name
}
@[params]
pub struct PlayArgs {
pub mut:
heroscript string // if filled in then plbook will be made out of it
plbook ?playbook.PlayBook
reset bool
}
pub fn play(args_ PlayArgs) ! {
mut args := args_
if args.heroscript == '' {
args.heroscript = heroscript_default()!
}
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
mut install_actions := plbook.find(filter: 'meilisearchinstaller.configure')!
if install_actions.len > 0 {
for install_action in install_actions {
mut p := install_action.params
mycfg := cfg_play(p)!
console.print_debug('install action meilisearchinstaller.configure\n${mycfg}')
set(mycfg)!
}
}
mut other_actions := plbook.find(filter: 'meilisearchinstaller.')!
for other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build'] {
mut p := other_action.params
reset := p.get_default_false('reset')
if other_action.name == 'destroy' || reset {
console.print_debug('install action meilisearchinstaller.destroy')
destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action meilisearchinstaller.install')
install()!
}
}
if other_action.name in ['start', 'stop', 'restart'] {
mut p := other_action.params
name := p.get('name')!
mut meilisearchinstaller_obj := get(name: name)!
console.print_debug('action object:\n${meilisearchinstaller_obj}')
if other_action.name == 'start' {
console.print_debug('install action meilisearchinstaller.${other_action.name}')
meilisearchinstaller_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action meilisearchinstaller.${other_action.name}')
meilisearchinstaller_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action meilisearchinstaller.${other_action.name}')
meilisearchinstaller_obj.restart()!
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.zinit {
console.print_debug('startupmanager: zinit')
return startupmanager.get(cat: .zinit)!
}
.systemd {
console.print_debug('startupmanager: systemd')
return startupmanager.get(cat: .systemd)!
}
else {
console.print_debug('startupmanager: auto')
return startupmanager.get()!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self MeilisearchServer) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self MeilisearchServer) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('meilisearchinstaller start')
if !installed()! {
install()!
}
configure()!
start_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('starting meilisearchinstaller with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
return error('meilisearchinstaller did not start properly.')
}
pub fn (mut self MeilisearchServer) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self MeilisearchServer) stop() ! {
switch(self.name)
stop_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
stop_post()!
}
pub fn (mut self MeilisearchServer) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self MeilisearchServer) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self MeilisearchServer) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn (mut self MeilisearchServer) build() ! {
switch(self.name)
build()!
}
pub fn (mut self MeilisearchServer) destroy() ! {
switch(self.name)
self.stop() or {}
destroy()!
}
// switch instance to be used for meilisearchinstaller
pub fn switch(name string) {
meilisearchinstaller_default = name
}

View File

@@ -0,0 +1,59 @@
module meilisearchinstaller
import freeflowuniverse.herolib.data.paramsparser
pub const version = '1.11.3'
const singleton = false
const default = true
pub fn heroscript_default() !string {
heroscript := "
!!meilisearchinstaller.configure
name:'default'
masterkey: '1234'
host: 'localhost'
port: 7700
production: 0
"
return heroscript
}
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
pub struct MeilisearchServer {
pub mut:
name string = 'default'
path string
masterkey string @[secret]
host string
port int
production bool
}
fn cfg_play(p paramsparser.Params) !MeilisearchServer {
name := p.get_default('name', 'default')!
mut mycfg := MeilisearchServer{
name: name
path: p.get_default('path', '{HOME}/hero/var/meilisearch/${name}')!
host: p.get_default('host', 'localhost')!
masterkey: p.get_default('masterkey', '1234')!
port: p.get_int_default('port', 7700)!
production: p.get_default_false('production')
}
return mycfg
}
fn obj_init(obj_ MeilisearchServer) !MeilisearchServer {
// never call get here, only thing we can do here is work on object itself
mut obj := obj_
return obj
}
// called before start, if needed
fn configure() ! {
// mut installer := get()!
// mut mycode := $tmpl('templates/atemplate.yaml')
// mut path := pathlib.get_file(path: cfg.configpath, create: true)!
// path.write(mycode)!
// console.print_debug(mycode)
}

View File

@@ -0,0 +1,44 @@
# meilisearch
To get started:
```vlang
import freeflowuniverse.herolib.installers.db.meilisearch as meilisearchinstaller
heroscript:="
!!meilisearchinstaller.configure name:'test'
masterkey: '1234'
port: 7701
!!meilisearchinstaller.start name:'test' reset:1
"
meilisearchinstaller.play(heroscript: heroscript)!
//or we can call the default and do an install + start with reset
//mut installer := meilisearchinstaller.get()!
//installer.install_start(reset: true)!
```
## example heroscript
```hero
!!meilisearchinstaller.configure
name:'default'
path: '{HOME}/hero/var/meilisearch/default'
masterkey: ''
host: 'localhost'
port: 7700
production: 0
```
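The generated `running()` hook above is still a stub. Meilisearch itself exposes an unauthenticated `GET /health` endpoint, so a minimal probe could look like the sketch below (a sketch only, using V's `net.http`; the helper name is hypothetical):
```vlang
import net.http

// hypothetical helper: true when the meilisearch instance answers on /health
fn meilisearch_healthy(host string, port int) bool {
	resp := http.get('http://${host}:${port}/health') or { return false }
	return resp.status_code == 200
}
```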

View File

@@ -0,0 +1,13 @@
!!hero_code.generate_installer
name:'postgresql'
classname:'Postgresql'
singleton:1
templates:1
default:1
title:''
supported_platforms:'linux'
reset:0
startupmanager:1
hasconfig:1
build:0

View File

@@ -0,0 +1,16 @@
module postgresql
// import freeflowuniverse.herolib.osal
// import freeflowuniverse.herolib.ui.console
// import freeflowuniverse.herolib.installers.virt.docker
// pub fn requirements() ! {
// if !osal.done_exists('postgres_install') {
// panic('to implement, check is ubuntu and then install, for now only ubuntu')
// osal.package_install('libpq-dev,postgresql-client')!
// osal.done_set('postgres_install', 'OK')!
// console.print_header('postgresql installed')
// } else {
// console.print_header('postgresql already installed')
// }
// }

View File

@@ -0,0 +1,8 @@
```bash
brew install postgresql
brew services start postgresql
psql postgres -c "CREATE ROLE postgres WITH LOGIN SUPERUSER PASSWORD 'mypasswd';"
psql -U postgres
```

View File

@@ -0,0 +1,88 @@
module postgresql
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.osal.zinit
fn installed() !bool {
return true
}
fn install() ! {
osal.execute_silent('podman pull docker.io/library/postgres:latest')!
}
fn startupcmd() ![]zinit.ZProcessNewArgs {
mut cfg := get()!
mut res := []zinit.ZProcessNewArgs{}
db_user := 'root'
cmd := "
mkdir -p ${cfg.path}
podman run --name ${cfg.name} -e POSTGRES_USER=${db_user} -e POSTGRES_PASSWORD=\"${cfg.passwd}\" -v ${cfg.path}:/var/lib/postgresql/data -p 5432:5432 --health-cmd=\"pg_isready -U ${db_user}\" postgres:latest
"
res << zinit.ZProcessNewArgs{
name: 'postgresql'
cmd: cmd
workdir: cfg.path
startuptype: .zinit
}
return res
}
fn running() !bool {
mut mydb := get()!
mydb.check() or { return false }
return true
}
fn start_pre() ! {
}
fn start_post() ! {
}
fn stop_pre() ! {
}
fn stop_post() ! {
}
fn destroy() ! {
mut mydb := get()!
mydb.destroy()!
// mut cfg := get()!
// osal.rm("
// ${cfg.path}
// /etc/postgresql/
// /etc/postgresql-common/
// /var/lib/postgresql/
// /etc/systemd/system/multi-user.target.wants/postgresql
// /lib/systemd/system/postgresql.service
// /lib/systemd/system/postgresql@.service
// ")!
// c := '
// #dont die
// set +e
// # Stop the PostgreSQL service
// sudo systemctl stop postgresql
// # Purge PostgreSQL packages
// sudo apt-get purge -y postgresql* pgdg-keyring
// # Remove all data and configurations
// sudo userdel -r postgres
// sudo groupdel postgres
// # Reload systemd configurations and reset failed systemd entries
// sudo systemctl daemon-reload
// sudo systemctl reset-failed
// echo "PostgreSQL has been removed completely"
// '
// osal.exec(cmd: c)!
}

View File

@@ -0,0 +1,103 @@
module postgresql
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import db.pg
import os
import net
pub fn (mut server Postgresql) path_config() !pathlib.Path {
return pathlib.get_dir(path: '${server.path}/config', create: true)!
}
pub fn (mut server Postgresql) path_data() !pathlib.Path {
return pathlib.get_dir(path: '${server.path}/data', create: true)!
}
pub fn (mut server Postgresql) path_export() !pathlib.Path {
return pathlib.get_dir(path: '${server.path}/exports', create: true)!
}
fn is_port_open(host string, port int) bool {
mut socket := net.dial_tcp('${host}:${port}') or { return false }
socket.close() or { return false }
return true
}
pub fn (mut server Postgresql) db() !pg.DB {
if is_port_open('localhost', 5432) == false {
return error('PostgreSQL is not listening on port 5432')
}
conn_string := 'postgresql://root:${server.passwd}@localhost:5432/postgres?connect_timeout=5'
mut db := pg.connect_with_conninfo(conn_string)!
// console.print_header("Database connected: ${db}")
return db
}
pub fn (mut server Postgresql) check() ! {
mut db := server.db() or { return error('failed to check server: ${err}') }
db.exec('SELECT version();') or { return error('postgresql could not do select version') }
cmd := 'podman healthcheck run ${server.name}'
result := os.execute(cmd)
if result.exit_code != 0 {
return error("Postgresql container isn't healthy: ${result.output}")
}
container_id_cmd := 'podman container inspect ${server.name} --format {{.Id}}'
container_id_result := os.execute(container_id_cmd)
if container_id_result.exit_code != 0 {
return error('Cannot get the container ID: ${container_id_result.output}')
}
server.container_id = container_id_result.output.trim_space()
console.print_header('Container ID: ${server.container_id}')
}
pub fn (mut server Postgresql) db_exists(name_ string) !bool {
mut db := server.db()!
// SELECT datname FROM pg_database WHERE datname='gitea';
r := db.exec("SELECT datname FROM pg_database WHERE datname='${name_}';")!
if r.len == 1 {
console.print_header('db exists: ${name_}')
return true
}
if r.len > 1 {
return error('should not have more than 1 db with name ${name_}')
}
return false
}
pub fn (mut server Postgresql) db_create(name_ string) ! {
name := texttools.name_fix(name_)
server.check()!
mut db := server.db()!
db_exists := server.db_exists(name_)!
if !db_exists {
console.print_header('db create: ${name_}')
db.exec('CREATE DATABASE ${name};')!
}
db_exists2 := server.db_exists(name_)!
if !db_exists2 {
return error('Could not create db: ${name_}, could not find in DB.')
}
}
pub fn (mut server Postgresql) db_delete(name_ string) ! {
name := texttools.name_fix(name_)
server.check()!
mut db := server.db()!
db_exists := server.db_exists(name_)!
if db_exists {
console.print_header('db delete: ${name_}')
db.exec('DROP DATABASE ${name};')!
}
db_exists2 := server.db_exists(name_)!
if db_exists2 {
return error('Could not delete db: ${name_}, could not find in DB.')
}
}

View File

@@ -0,0 +1,266 @@
module postgresql
import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.zinit
import freeflowuniverse.herolib.ui.console
import time
__global (
postgresql_global map[string]&Postgresql
postgresql_default string
)
/////////FACTORY
@[params]
pub struct ArgsGet {
pub mut:
name string
}
fn args_get(args_ ArgsGet) ArgsGet {
mut args := args_
if args.name == '' {
args.name = postgresql_default
}
if args.name == '' {
args.name = 'default'
}
return args
}
pub fn get(args_ ArgsGet) !&Postgresql {
mut args := args_get(args_)
if args.name !in postgresql_global {
if args.name == 'default' {
if !config_exists(args) {
if default {
config_save(args)!
}
}
config_load(args)!
}
}
return postgresql_global[args.name] or {
println(postgresql_global)
panic('could not get config for postgresql with name:${args.name}')
}
}
fn config_exists(args_ ArgsGet) bool {
mut args := args_get(args_)
mut context := base.context() or { panic('bug') }
return context.hero_config_exists('postgresql', args.name)
}
fn config_load(args_ ArgsGet) ! {
mut args := args_get(args_)
mut context := base.context()!
mut heroscript := context.hero_config_get('postgresql', args.name)!
play(heroscript: heroscript)!
}
fn config_save(args_ ArgsGet) ! {
mut args := args_get(args_)
mut context := base.context()!
context.hero_config_set('postgresql', args.name, heroscript_default()!)!
}
fn set(o Postgresql) ! {
mut o2 := obj_init(o)!
postgresql_global[o.name] = &o2
postgresql_default = o.name
}
@[params]
pub struct PlayArgs {
pub mut:
heroscript string // if filled in then plbook will be made out of it
plbook ?playbook.PlayBook
reset bool
}
pub fn play(args_ PlayArgs) ! {
mut args := args_
if args.heroscript == '' {
args.heroscript = heroscript_default()!
}
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
mut install_actions := plbook.find(filter: 'postgresql.configure')!
if install_actions.len > 0 {
for install_action in install_actions {
mut p := install_action.params
mycfg := cfg_play(p)!
console.print_debug('install action postgresql.configure\n${mycfg}')
set(mycfg)!
}
}
mut other_actions := plbook.find(filter: 'postgresql.')!
for other_action in other_actions {
if other_action.name in ['destroy', 'install', 'build'] {
mut p := other_action.params
reset := p.get_default_false('reset')
if other_action.name == 'destroy' || reset {
console.print_debug('install action postgresql.destroy')
destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action postgresql.install')
install()!
}
}
if other_action.name in ['start', 'stop', 'restart'] {
mut p := other_action.params
name := p.get('name')!
mut postgresql_obj := get(name: name)!
console.print_debug('action object:\n${postgresql_obj}')
if other_action.name == 'start' {
console.print_debug('install action postgresql.${other_action.name}')
postgresql_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action postgresql.${other_action.name}')
postgresql_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action postgresql.${other_action.name}')
postgresql_obj.restart()!
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
// unknown
// screen
// zinit
// tmux
// systemd
match cat {
.zinit {
console.print_debug('startupmanager: zinit')
return startupmanager.get(cat: .zinit)!
}
.systemd {
console.print_debug('startupmanager: systemd')
return startupmanager.get(cat: .systemd)!
}
else {
console.print_debug('startupmanager: auto')
return startupmanager.get()!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self Postgresql) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self Postgresql) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('postgresql start')
if !installed()! {
install()!
}
configure()!
start_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('starting postgresql with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
start_post()!
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
return error('postgresql did not start properly.')
}
pub fn (mut self Postgresql) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self Postgresql) stop() ! {
switch(self.name)
stop_pre()!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
stop_post()!
}
pub fn (mut self Postgresql) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self Postgresql) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
reset bool
}
pub fn (mut self Postgresql) install(args InstallArgs) ! {
switch(self.name)
if args.reset || (!installed()!) {
install()!
}
}
pub fn (mut self Postgresql) destroy() ! {
switch(self.name)
self.stop() or {}
destroy()!
}
// switch instance to be used for postgresql
pub fn switch(name string) {
postgresql_default = name
}

View File

@@ -0,0 +1,62 @@
module postgresql
import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.osal
import os
pub const version = '0.0.0'
const singleton = true
const default = true
pub fn heroscript_default() !string {
heroscript := "
!!postgresql.configure
name:'default'
passwd:'mysecret'
path:''
"
return heroscript
}
pub struct Postgresql {
pub mut:
name string = 'default'
path string
passwd string
container_id string
}
fn cfg_play(p paramsparser.Params) !Postgresql {
mut mycfg := Postgresql{
name: p.get_default('name', 'default')!
passwd: p.get('passwd')!
path: p.get_default('path', '')!
}
return mycfg
}
fn obj_init(obj_ Postgresql) !Postgresql {
mut obj := obj_
if obj.path == '' {
if osal.is_linux() {
obj.path = '/data/postgresql/${obj.name}'
} else {
obj.path = '${os.home_dir()}/hero/var/postgresql/${obj.name}'
}
}
osal.dir_ensure(obj.path)!
return obj
}
// called before start, if needed
fn configure() ! {
// t2 := $tmpl('templates/pg_hba.conf')
// mut p2 := server.path_config.file_get_new('pg_hba.conf')!
// p2.write(t2)!
// mut t3 := $tmpl('templates/postgresql.conf')
// t3 = t3.replace('@@', '$') // to fix templating issues
// mut p3 := server.path_config.file_get_new('postgresql.conf')!
// p3.write(t3)!
}

View File

@@ -0,0 +1,56 @@
# postgresql
To get started:
```vlang
import freeflowuniverse.herolib.installers.db.postgresql
mut installer:= postgresql.get()!
installer.start()!
```
## example heroscript
```hero
!!postgresql.configure
path: ''
passwd: 'asecret'
```
## use psql
This uses our hero configure output together with a jq command-line trick:
```bash
#default is the instance name
export PGPASSWORD=`hero configure -c postgresql -i default -s | jq -r '.passwd'`
psql -U "root" -h localhost
```
## to use in another installer
```v
//e.g. in server configure function
import freeflowuniverse.herolib.installers.db.postgresql
mut mydbinstaller:=postgresql.get()!
mydbinstaller.start()!
// now create the DB
mydbinstaller.db_create('gitea')!
```
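The same handle also exposes `db_exists` and `db_delete` (defined in the client file above); a short sketch of a guarded teardown:
```v
import freeflowuniverse.herolib.installers.db.postgresql

mut mydb := postgresql.get()!
// only drop the database when it actually exists
if mydb.db_exists('gitea')! {
	mydb.db_delete('gitea')!
}
```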

View File

@@ -0,0 +1,100 @@
# PostgreSQL Client Authentication Configuration File
# ===================================================
#
# Refer to the "Client Authentication" section in the PostgreSQL
# documentation for a complete description of this file. A short
# synopsis follows.
#
# This file controls: which hosts are allowed to connect, how clients
# are authenticated, which PostgreSQL user names they can use, which
# databases they can access. Records take one of these forms:
#
# local DATABASE USER METHOD [OPTIONS]
# host DATABASE USER ADDRESS METHOD [OPTIONS]
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS]
#
# (The uppercase items must be replaced by actual values.)
#
# The first field is the connection type:
# - "local" is a Unix-domain socket
# - "host" is a TCP/IP socket (encrypted or not)
# - "hostssl" is a TCP/IP socket that is SSL-encrypted
# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted
# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted
# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted
#
# DATABASE can be "all", "sameuser", "samerole", "replication", a
# database name, or a comma-separated list thereof. The "all"
# keyword does not match "replication". Access to replication
# must be enabled in a separate record (see example below).
#
# USER can be "all", a user name, a group name prefixed with "+", or a
# comma-separated list thereof. In both the DATABASE and USER fields
# you can also write a file name prefixed with "@" to include names
# from a separate file.
#
# ADDRESS specifies the set of hosts the record matches. It can be a
# host name, or it is made up of an IP address and a CIDR mask that is
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
# specifies the number of significant bits in the mask. A host name
# that starts with a dot (.) matches a suffix of the actual host name.
# Alternatively, you can write an IP address and netmask in separate
# columns to specify the set of hosts. Instead of a CIDR-address, you
# can write "samehost" to match any of the server's own IP addresses,
# or "samenet" to match any address in any subnet that the server is
# directly connected to.
#
# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
# Note that "password" sends passwords in clear text; "md5" or
# "scram-sha-256" are preferred since they send encrypted passwords.
#
# OPTIONS are a set of options for the authentication in the format
# NAME=VALUE. The available options depend on the different
# authentication methods -- refer to the "Client Authentication"
# section in the documentation for a list of which options are
# available for which authentication methods.
#
# Database and user names containing spaces, commas, quotes and other
# special characters must be quoted. Quoting one of the keywords
# "all", "sameuser", "samerole" or "replication" makes the name lose
# its special character, and just match a database or username with
# that name.
#
# This file is read on server startup and when the server receives a
# SIGHUP signal. If you edit the file on a running system, you have to
# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
# or execute "SELECT pg_reload_conf()".
#
# Put your actual configuration here
# ----------------------------------
#
# If you want to allow non-local connections, you need to add more
# "host" records. In that case you will also need to make PostgreSQL
# listen on a non-local interface via the listen_addresses
# configuration parameter, or via the -i or -h command line switches.
# CAUTION: Configuring the system for local "trust" authentication
# allows any local user to connect as any PostgreSQL user, including
# the database superuser. If you do not trust all your local users,
# use another authentication method.
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all trust
# IPv4 connections (all addresses):
host all all 0.0.0.0/0 trust
# IPv6 local connections:
host all all ::1/128 trust
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all trust
host replication all 127.0.0.1/32 trust
host replication all ::1/128 trust
host all all all scram-sha-256

View File

@@ -0,0 +1,815 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = 'localhost'
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:@@{sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_crl_dir = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is usually the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enables compression of full-page writes;
# off, pglz, lz4, zstd, or on
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Prefetching during recovery -
#recovery_prefetch = try # prefetch pages referenced in the WAL?
#wal_decode_buffer_size = 512kB # lookahead window used for prefetching
# (change requires restart)
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_library = '' # library to use to archive a logfile segment
# (empty string indicates archive_command should
# be used)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#recursive_worktable_factor = 10.0 # range 0.001-1000000
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, jsonlog, syslog, and
# eventlog, depending on platform.
# csvlog and jsonlog require
# logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr, jsonlog,
# and csvlog into log files. Required
# to be on for csvlogs and jsonlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
#log_startup_progress_interval = 10s # Time between progress updates for
# long-running startup operations.
# 0 disables the feature, > 0 indicates
# the interval in milliseconds.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = 10min # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = on
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = '%m [%p] ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
#cluster_name = '' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Cumulative Query and Index Statistics -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#stats_fetch_consistency = cache
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"@@user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.utf8' # locale for system error message
# strings
lc_monetary = 'en_US.utf8' # locale for monetary formatting
lc_numeric = 'en_US.utf8' # locale for number formatting
lc_time = 'en_US.utf8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '@@libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
#include_dir = '...' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
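# For example (hypothetical file names, shown as a sketch only):
#include_dir = 'conf.d'			# reads every *.conf file in conf.d
#include_if_exists = 'local.conf'	# silently skipped when the file is absent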
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -0,0 +1,108 @@
module redis
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.sysadmin.startupmanager
import time
import os
@[params]
pub struct InstallArgs {
pub mut:
port int = 6379
datadir string = '${os.home_dir()}/hero/var/redis'
ipaddr string = 'localhost' // can be more than 1, space separated
reset bool
start bool
restart bool // do not set to true
}
// ```
// struct InstallArgs {
// port int = 6379
// datadir string = '${os.home_dir()}/hero/var/redis'
// ipaddr string = "localhost" //can be more than 1, space separated
// reset bool
// start bool
// restart bool
// }
// ```
pub fn install(args_ InstallArgs) ! {
mut args := args_
if !args.reset {
if check(args) {
return
}
}
console.print_header('install redis.')
if !(osal.cmd_exists_profile('redis-server')) {
if osal.is_linux() {
osal.package_install('redis-server')!
} else {
osal.package_install('redis')!
}
}
osal.execute_silent('mkdir -p ${args.datadir}')!
if args.restart {
stop()!
}
start(args)!
}
fn configfilepath(args InstallArgs) string {
if osal.is_linux() {
return '/etc/redis/redis.conf'
} else {
return '${args.datadir}/redis.conf'
}
}
fn configure(args InstallArgs) ! {
c := $tmpl('template/redis_config.conf')
pathlib.template_write(c, configfilepath(args), true)!
}
pub fn check(args InstallArgs) bool {
res := os.execute('redis-cli -c -p ${args.port} ping > /dev/null 2>&1')
if res.exit_code == 0 {
return true
}
return false
}
pub fn start(args InstallArgs) ! {
if check(args) {
return
}
configure(args)!
// remove all redis in memory
osal.process_kill_recursive(name: 'redis-server')!
if osal.platform() == .osx {
osal.exec(cmd: 'redis-server ${configfilepath(args)} --daemonize yes')!
// osal.exec(cmd:"brew services start redis") or {
// osal.exec(cmd:"redis-server ${configfilepath()} --daemonize yes")!
// }
} else {
mut sm := startupmanager.get()!
sm.new(name: 'redis', cmd: 'redis-server ${configfilepath(args)}', start: true)!
}
for _ in 0 .. 100 {
if check(args) {
console.print_debug('redis started.')
return
}
time.sleep(100 * time.millisecond) // time.sleep takes nanoseconds, so give explicit units
}
return error("Redis did not start properly, could not do: 'redis-cli -c -p ${args.port} ping'")
}
pub fn stop() ! {
osal.execute_silent('redis-cli shutdown')!
}
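
// Minimal usage sketch (the import path is an assumption based on this repo's layout):
//
// import freeflowuniverse.herolib.installers.db.redis
//
// redis.install(port: 6379, start: true)!
// assert redis.check(port: 6379)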

File diff suppressed because it is too large

View File

@@ -0,0 +1,4 @@
# rfs
For more info, see https://github.com/threefoldtech/rfs
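
A minimal sketch of driving the installer from V (the module path is an assumption based on this repo's layout):

```v
import freeflowuniverse.herolib.installers.rfs

fn main() {
	// builds rfs from source and copies the binary to /usr/local/bin
	rfs.install() or { panic(err) }
}
```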

View File

@@ -0,0 +1,31 @@
module rfs
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.installers.zinit
import freeflowuniverse.herolib.ui.console
pub fn install() ! {
rust.install()!
zinit.install()!
console.print_header('install rfs')
if !osal.done_exists('install_rfs') || !osal.cmd_exists('rfs') {
osal.package_install('musl-dev,musl-tools')!
mut gs := gittools.new()!
mut repo := gs.get_repo(url: 'https://github.com/threefoldtech/rfs', reset: true)!
path := repo.get_path()!
cmd := '
cd ${path}
rustup target add x86_64-unknown-linux-musl
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
cp ${path}/target/x86_64-unknown-linux-musl/release/rfs /usr/local/bin/
'
console.print_header('build rfs')
osal.execute_stdout(cmd)!
osal.done_set('install_rfs', 'OK')!
} else {
console.print_header('rfs already done')
}
}

View File

@@ -0,0 +1,26 @@
## Vlang ZDB Client
To use:
- build 0-db from source: https://github.com/threefoldtech/0-db
- run 0-db from the root of the 0-db folder; `./zdbd/zdb --help || true` shows the available options
## To run the test
```bash
# must set a unix domain socket with the --socket argument when running zdb
# run zdb as follows:
mkdir -p ~/.zdb
zdb --socket ~/.zdb/socket --admin 1234
redis-cli -s ~/.zdb/socket
# or easier:
redis-cli -s ~/.zdb/socket --raw nsinfo default
```
Then in redis-cli you can run, e.g.:
```
nsinfo default
```
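
The same check can be done from V with the herolib zdb client; a minimal sketch, assuming the client API shown in the test file of this commit (socket path and admin secret match the example above):

```v
import freeflowuniverse.herolib.clients.zdb
import os

fn main() {
	// connect over the unix socket started above, namespace 'test'
	mut client := zdb.get('${os.home_dir()}/.zdb/socket', '1234', 'test') or { panic(err) }
	info := client.info() or { panic(err) }
	assert info.contains('server_name: 0-db')
	println(client.nslist() or { panic(err) })
}
```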

View File

@@ -0,0 +1,29 @@
module zdb
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.ui.console
// build zdb from source; does nothing if it was already installed
pub fn build() ! {
base.install()!
console.print_header('package_install install zdb')
if !osal.done_exists('install_zdb') && !osal.cmd_exists('zdb') {
mut gs := gittools.new()!
mut repo := gs.get_repo(
url: 'git@github.com:threefoldtech/0-db.git'
reset: false
pull: true
)!
path := repo.get_path()!
cmd := '
set -ex
cd ${path}
make
sudo rsync -rav ${path}/bin/zdb* /usr/local/bin/
'
osal.execute_silent(cmd) or { return error('Cannot install zdb.\n${err}') }
osal.done_set('install_zdb', 'OK')!
}
}
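
// Usage sketch (hypothetical call site):
//
// zdb.build()! // clones threefoldtech/0-db, runs make, and rsyncs the zdb* binaries to /usr/local/bin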

View File

@@ -0,0 +1,156 @@
module zdb
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.crypt.secrets
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.clients.zdb
import os
import time
@[params]
pub struct InstallArgs {
pub mut:
reset bool
secret string
start bool = true
restart bool
sequential bool // if true, zdb runs in sequential mode and auto-increments the keys
datadir string = '${os.home_dir()}/var/zdb/data'
indexdir string = '${os.home_dir()}/var/zdb/index'
rotateperiod int = 1200 // 20 min
}
pub fn install(args_ InstallArgs) ! {
mut args := args_
version := '2.0.7'
res := os.execute('${osal.profile_path_source_and()} zdb --version')
if res.exit_code == 0 {
r := res.output.split_into_lines().filter(it.trim_space().len > 0)
if r.len != 3 {
return error("couldn't parse zdb version.\n${res.output}")
}
myversion := r[1].all_after_first('server, v').all_before_last('(').trim_space()
if texttools.version(version) > texttools.version(myversion) {
args.reset = true
}
} else {
args.reset = true
}
if args.reset {
console.print_header('install zdb')
mut url := ''
if osal.is_linux_intel() {
url = 'https://github.com/threefoldtech/0-db/releases/download/v${version}/zdb-${version}-linux-amd64-static'
} else {
return error('unsupported platform, only linux 64 for zdb for now')
}
mut dest := osal.download(
url: url
minsize_kb: 1000
)!
osal.cmd_add(
cmdname: 'zdb'
source: dest.path
)!
}
if args.restart {
restart(args)!
return
}
if args.start {
start(args)!
}
}
pub fn restart(args_ InstallArgs) ! {
stop(args_)!
start(args_)!
}
pub fn stop(args_ InstallArgs) ! {
console.print_header('zdb stop')
mut sm := startupmanager.get()!
sm.stop('zdb')!
}
pub fn start(args_ InstallArgs) ! {
mut args := args_
console.print_header('zdb start')
mut box := secrets.get()!
secret := box.secret(key: 'ZDB.SECRET', default: args.secret)!
mut sm := startupmanager.get()!
mut cmd := 'zdb --socket ${os.home_dir()}/hero/var/zdb.sock --port 3355 --admin ${secret} --data ${args.datadir} --index ${args.indexdir} --dualnet --protect --rotate ${args.rotateperiod}'
if args.sequential {
cmd += ' --mode seq'
}
pathlib.get_dir(path: '${os.home_dir()}/hero/var', create: true)!
sm.start(
name: 'zdb'
cmd: cmd
)!
console.print_debug(cmd)
for _ in 0 .. 50 {
if check()! {
return
}
time.sleep(10 * time.millisecond)
}
return error('zdb not installed properly, check failed.')
}
pub fn check() !bool {
cmd := 'redis-cli -s ${os.home_dir()}/hero/var/zdb.sock PING'
result := os.execute(cmd)
if result.exit_code > 0 {
return error('${cmd} failed with exit code: ${result.exit_code} and error: ${result.output}')
}
if result.output.trim_space() == 'PONG' {
console.print_debug('zdb ping ok.')
}
// TODO: need to work on socket version
// mut db := zdb.get('${os.home_dir()}/hero/var/zdb.sock', secret()!, 'test')!
mut db := client()!
// check info returns info about zdb
info := db.info()!
// console.print_debug(info)
assert info.contains('server_name: 0-db')
console.print_debug('zdb is answering.')
return true
}
pub fn secret() !string {
mut box := secrets.get()!
secret := box.get('ZDB.SECRET')!
return secret
}
pub fn client() !zdb.ZDB {
mut db := zdb.get('localhost:3355', secret()!, 'test')!
return db
}
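
// Minimal usage sketch (values shown are the defaults declared above):
//
// zdb.install(sequential: true, start: true)!
// mut db := zdb.client()! // connects to localhost:3355 with the stored ZDB.SECRET
// println(db.info()!)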

View File

@@ -0,0 +1,22 @@
module zdb
import freeflowuniverse.herolib.clients.zdb
fn test_get() ! {
// must set unix domain with --socket argument when running zdb
// run zdb as following:
// mkdir -p ~/.zdb/ && zdb --socket ~/.zdb/socket --admin 1234
install(secret: 'hamada', start: true) or { panic(err) }
mut client := zdb.get('/root/hero/var/zdb.sock', 'hamada', 'test') or { panic(err) }
// check info returns info about zdb
info := client.info()!
assert info.contains('server_name: 0-db')
nslist := client.nslist()!
assert nslist == ['default', 'test']
nsinfo := client.nsinfo('default')!
assert nsinfo['name'] == 'default'
}