This commit is contained in:
2024-12-30 08:01:17 +01:00
parent dfafeecf2c
commit 7894f7d420
218 changed files with 8981 additions and 20 deletions

View File

@@ -5,7 +5,7 @@ works very well in combination with heroscript
## How to get the paramsparser ## How to get the paramsparser
```v ```v
import freeflowuniverse.crystallib.data.paramsparser import freeflowuniverse.herolib.data.paramsparser
// Create new params from text // Create new params from text
params := paramsparser.new("color:red size:'large' priority:1 enable:true")! params := paramsparser.new("color:red size:'large' priority:1 enable:true")!

View File

@@ -4,7 +4,7 @@
import as import as
```vlang ```vlang
import freeflowuniverse.crystallib.osal import freeflowuniverse.herolib.osal
osal.ping... osal.ping...
@@ -72,14 +72,14 @@ mut pm:=process.processmap_get()?
info returns like: info returns like:
```json ```json
}, freeflowuniverse.crystallib.process.ProcessInfo{ }, freeflowuniverse.herolib.process.ProcessInfo{
cpu_perc: 0 cpu_perc: 0
mem_perc: 0 mem_perc: 0
cmd: 'mc' cmd: 'mc'
pid: 84455 pid: 84455
ppid: 84467 ppid: 84467
rss: 3168 rss: 3168
}, freeflowuniverse.crystallib.process.ProcessInfo{ }, freeflowuniverse.herolib.process.ProcessInfo{
cpu_perc: 0 cpu_perc: 0
mem_perc: 0 mem_perc: 0
cmd: 'zsh -Z -g' cmd: 'zsh -Z -g'

View File

@@ -8,7 +8,7 @@ Chalk offers functions:- `console.color_fg(text string, color string)` - To chan
Example: Example:
```vlang ```vlang
import freeflowuniverse.crystallib.ui.console import freeflowuniverse.herolib.ui.console
# basic usage # basic usage
println('I am really ' + console.color_fg('happy', 'green')) println('I am really ' + console.color_fg('happy', 'green'))

View File

@@ -5,7 +5,7 @@ has mechanisms to print better to console, see the methods below
import as import as
```vlang ```vlang
import freeflowuniverse.crystallib.ui.console import freeflowuniverse.herolib.ui.console
``` ```

View File

@@ -6,14 +6,14 @@ this is how we want example scripts to be, see the first line
```vlang ```vlang
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run #!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.crystallib.installers.sysadmintools.daguserver import freeflowuniverse.herolib.installers.sysadmintools.daguserver
mut ds := daguserver.get()! mut ds := daguserver.get()!
println(ds) println(ds)
``` ```
the files are in ~/code/github/freeflowuniverse/crystallib/examples for crystallib the files are in ~/code/github/freeflowuniverse/herolib/examples for herolib
## important instructions ## important instructions

50
examples/README.md Normal file
View File

@@ -0,0 +1,50 @@
# HeroLib Examples
This repository contains examples and utilities for working with HeroLib, a comprehensive library for V language.
## Sync Do Script
The `sync_do.sh` script is a utility for development that:
- Synchronizes the local HeroLib codebase with a remote server
- Uses rsync to efficiently transfer only changed files
- Automatically connects to a tmux session on the remote server
- Helps maintain development environment consistency
## Examples Structure
The examples directory demonstrates various capabilities of HeroLib:
- **builder/**: Examples of builder patterns and remote execution
- **core/**: Core functionality examples including configuration, database operations, and API integrations
- **data/**: Data handling examples including encryption and encoding
- **develop/**: Development tools including git integration and OpenAI examples
- **hero/**: Hero-specific implementations and API examples
- **installers/**: Various installation scripts for different tools and services
- **lang/**: Language integration examples (e.g., Python)
- **osal/**: Operating system abstraction layer examples
- **threefold/**: ThreeFold Grid related examples and utilities
- **tools/**: Utility examples for imagemagick, tmux, etc.
- **ui/**: User interface examples including console and telegram
- **virt/**: Virtualization examples for Docker, Lima, Windows, etc.
- **webtools/**: Web-related tools and utilities
## V Script Requirements
When creating V scripts (.vsh files), always use the following shebang:
```bash
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
```
This shebang ensures:
- Direct execution of V shell scripts without needing to use the V command
- No main() function requirement in .vsh files
- Proper compilation flags and settings
- OpenSSL support enabled
- Global variables enabled
- TCC compiler usage
- No retry compilation
These examples serve as practical demonstrations and reference implementations for various HeroLib features and integrations.

View File

@@ -0,0 +1,74 @@
# Remote Executor Example
This example demonstrates how to compile and execute V code remotely using SSH.
It shows a practical implementation of the herolib builder's remote execution capabilities, its good for debugging.
## Components
### `toexec.v`
A V program that demonstrates remote execution of system operations:
- Uses herolib's osal and installer modules
- Currently configured to uninstall brew as an example operation
- Can be modified to execute any remote system commands
> important the source & target system needs to be same architecture
### `run.sh`
A bash script that:
1. Compiles the V program
2. Copies it to a remote machine using SCP
3. Executes it remotely using SSH
## Prerequisites
1. SSH access to the remote machine
2. The `SECRET` environment variable must be set
3. V compiler installed locally
## Configuration
The `run.sh` script uses the following default configuration:
```bash
remote_user='despiegk'
remote_host='192.168.99.1'
remote_path='/Users/despiegk/hero/bin/toexec'
remote_port='2222'
```
Modify these values to match your remote system configuration.
## Usage
1. Set the required environment variable:
```bash
export SECRET=your_secret_value
```
2. Make the script executable:
```bash
chmod +x run.sh
```
3. Run the script:
```bash
./run.sh
```
## Integration with Builder
This example demonstrates practical usage of the herolib builder module's remote execution capabilities. For more complex implementations, see the builder documentation in `lib/builder/readme.md`.
The builder module provides a more structured way to manage remote nodes and execute commands:
```v
import freeflowuniverse.herolib.builder
mut b := builder.new()!
mut n := b.node_new(ipaddr:"user@host:port")!
// Execute commands on the remote node
```

View File

@@ -0,0 +1,23 @@
#!/bin/bash
set -e
# Check if the SECRET environment variable is set
if [ -z "$SECRET" ]; then
echo "Error: SECRET is not set."
exit 1
fi
cd "$(dirname "$0")"
v -n -w -enable-globals toexec.v
# Specify the local file to be copied and the remote destination
local_file='toexec' # Replace with the path to your local file
remote_user='despiegk'
remote_host='192.168.99.1'
remote_path='/Users/despiegk/hero/bin/toexec'
remote_port='2222'
scp -P ${remote_port} "${local_file}" "${remote_user}@${remote_host}:${remote_path}"
ssh -t -p ${remote_port} "${remote_user}@${remote_host}" -A "/bin/zsh -c 'source ~/.zshrc && ${remote_path}' && echo 'DONE'"

View File

@@ -0,0 +1,20 @@
module main
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
fn do() ! {
//base.uninstall_brew()!
//println("something")
if osal.is_osx() {
println('IS OSX')
}
// mut job2 := osal.exec(cmd: 'ls /')!
// println(job2)
}
fn main() {
do() or { panic(err) }
}

46
examples/builder/simple.vsh Executable file
View File

@@ -0,0 +1,46 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.builder
import freeflowuniverse.herolib.core.pathlib
import os
fn do1() ! {
mut b := builder.new()!
mut n := b.node_new(ipaddr: 'root@195.192.213.2')!
n.upload(source: myexamplepath, dest: '/tmp/myexamplepath2')!
n.download(source: '/tmp/myexamplepath2', dest: '/tmp/myexamplepath2', delete: true)!
}
fn do2() ! {
mut b := builder.new()!
mut n := b.node_local()!
n.upload(source: myexamplepath, dest: '/tmp/myexamplepath3', delete: true)!
// lets now put something in examplepath3, which should be deleted
n.file_write('/tmp/myexamplepath3/something', 'something')!
r := n.file_read('/tmp/myexamplepath3/something')!
assert r == 'something'
mut p2 := pathlib.get_dir(path: '/tmp/myexamplepath2')! // needs to exist, and is a dir
mut p3 := pathlib.get_dir(path: '/tmp/myexamplepath3')!
h2 := p2.md5hex()!
mut h3 := p3.md5hex()!
assert !(h2 == h3)
n.upload(source: '/tmp/myexamplepath2', dest: '/tmp/myexamplepath3', delete: true)!
// now hash should be the same, hashes work over all files in a dir
// its a good trick to compare if 2 directories are the same
h3 = p3.md5hex()!
assert h2 == h3
// there is also a size function, this one is in KByte
size := p3.size_kb() or { 0 }
println('size: ${size} KB')
}
do1()
do2()

17
examples/builder/simple_ip4.vsh Executable file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.builder
import freeflowuniverse.herolib.core.pathlib
import os
mut b := builder.new()!
mut n := b.node_new(ipaddr: 'root@51.195.61.5')!
// mut n := b.node_new(ipaddr: 'info.ourworld.tf')!
println(n)
r:=n.exec(cmd:"ls /")!
println(r)
// n.upload(source: myexamplepath, dest: '/tmp/myexamplepath2')!
// n.download(source: '/tmp/myexamplepath2', dest: '/tmp/myexamplepath2', delete: true)!

14
examples/builder/simple_ip6.vsh Executable file
View File

@@ -0,0 +1,14 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.builder
import freeflowuniverse.herolib.core.pathlib
import os
mut b := builder.new()!
mut n := b.node_new(ipaddr: 'root@302:1d81:cef8:3049:ad01:796d:a5da:9c6')!
r:=n.exec(cmd:"ls /")!
println(r)
// n.upload(source: myexamplepath, dest: '/tmp/myexamplepath2')!
// n.download(source: '/tmp/myexamplepath2', dest: '/tmp/myexamplepath2', delete: true)!

View File

@@ -0,0 +1,20 @@
{
"folders": [
{
"path": "../lib"
},
{
"path": "../aiprompts"
},
{
"path": "../research"
},
{
"path": "../examples"
},
{
"path": "../cli"
}
],
"settings": {}
}

53
examples/data/encoder.vsh Executable file
View File

@@ -0,0 +1,53 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.data.encoder
import crypto.ed25519
import freeflowuniverse.herolib.ui.console
struct AStruct {
mut:
items []string
nr int
privkey []u8
}
_, privkey := ed25519.generate_key()!
mut a := AStruct{
items: ['a', 'b']
nr: 10
// privkey: []u8{len: 5, init: u8(0xf8)}
privkey: privkey
}
// do encoding
mut e := encoder.new()
e.add_list_string(a.items)
e.add_int(a.nr)
e.add_bytes(privkey)
console.print_debug('${e.data}')
// do decoding
mut d := encoder.decoder_new(e.data)
mut aa := AStruct{}
aa.items = d.get_list_string()
aa.nr = d.get_int()
aa.privkey = d.get_bytes()
assert a == aa
a = AStruct{
items: ['a', 'b']
nr: 10
privkey: []u8{len: 5, init: u8(0xf8)}
}
serialize_data := encoder.encode(a)!
r := encoder.decode[AStruct](serialize_data) or {
console.print_stderr('Failed to decode, error: ${err}')
return
}
console.print_debug('${r}')

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.crypt.aes_symmetric { decrypt, encrypt }
import freeflowuniverse.herolib.ui.console
msg := 'my message'.bytes()
console.print_debug('${msg}')
secret := '1234'
encrypted := encrypt(msg, secret)
console.print_debug('${encrypted}')
decrypted := decrypt(encrypted, secret)
console.print_debug('${decrypted}')
assert decrypted == msg

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.data.encoderhero
import freeflowuniverse.herolib.core.base
//this is docu at top
@[name:"teststruct " ; params]
pub struct TestStruct {
//this is docu at mid
pub mut:
id int @[hi]
descr string
secret string @[secret]
number int = 1 @[min:1 ;max:10]
yesno bool
liststr []string
listint []int
ss SubStruct
ss2 []SubStruct
}
pub struct SubStruct {
pub mut:
color string
size int
}
fn (self TestStruct) heroscript()!string {
mut out:=""
mut p := encoderhero.encode[TestStruct](self)!
// out += "!!hr.teststruct_define " + p.heroscript() + "\n"
// p = paramsparser.encode[SubStruct](self.ss)!
// p.set("teststruct_id",self.id.str())
// out += "!!hr.substruct_define " + p.heroscript() + "\n"
// for ss2 in self.ss2{
// p = paramsparser.encode[SubStruct](ss2)!
// p.set("teststruct_id",self.id.str())
// out += "!!hr.substruct_item_define " + p.heroscript() + "\n"
// }
return p
}
mut t := TestStruct{
id:100
descr: '
test
muliline
s
test
muliline
test
muliline
'
number: 2
yesno: true
liststr: ['one', 'two+two']
listint: [1, 2]
ss:SubStruct{color:"red",size:10}
}
t.ss2<< SubStruct{color:"red1",size:11}
t.ss2<< SubStruct{color:"red2",size:12}
println(t.heroscript()!)
// t2:=p.decode[TestStruct]()!
// println(t2)

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.data.paramsparser
import os
const testpath = os.dir(@FILE) + '/data'
ap := playbook.new(path: testpath)!
mut test := map[string]string{}
test['root'] = 'YEH'
test['roott'] = 'YEH2'
for action in ap.actions {
// action.params.replace(test)
mut p := action.params
p.replace(test)
println(p)
}
txt := '
this is a text \${aVAR}
this is a text \${aVAR}
\${A}
'
// println(txt)
// println(params.regexfind(txt))

View File

@@ -0,0 +1,8 @@
```javascript
//will add an action can be https file, https git, scp, or local path
!!runner.recipe_add source:'{ROOT}/core/base0' aname:'{ROOTT}/base0' execute:1
//cannot define the name when we add a directory to it
!!runner.recipe_add source:'{ROOT}/core' execute:1
```

View File

@@ -0,0 +1 @@
paramsfilter

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.data.paramsparser { Params, parse }
import time { Duration, sleep }
totalnr := 1000000
// some performance tests
mut res := []Params{}
mut sw := time.new_stopwatch()
for i in 0 .. totalnr {
mut text := "arg${i} arg2 color:red${i} priority:'incredible' description:'with spaces, lets see if ok'"
mut p := parse(text) or { panic(err) }
res << p
}
sw.stop()
mut elapsed := sw.elapsed()
println(elapsed)
sw.restart()
incl_test := ['description:*see*']
mut foundnr := 0
for i in 0 .. totalnr {
mut p2 := res[i]
e := p2.filter_match(include: incl_test)!
f := p2.filter_match(include: ['arg100'])!
if f {
foundnr += 1
}
}
assert foundnr == 1
elapsed = sw.elapsed()
println(elapsed)
// sw.restart()
mbused := 600.0
bytesused := mbused * 1000 * 1000
bytes_param := bytesused / totalnr
println('bytes used per param: ${bytes_param}')
println('nr of founds: ${foundnr}')
// sleep(Duration(60 * time.minute))
// 600 bytes per params for 1m records
// TODO: not sure needs to be redone
// takes 0.9 sec to walk over 1million records

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.data.resp
import crypto.ed25519
mut b := resp.builder_new()
b.add(resp.r_list_string(['a', 'b']))
b.add(resp.r_int(10))
b.add(resp.r_ok())
// to get some binary
pubkey, privkey := ed25519.generate_key()!
b.add(resp.r_bytestring(privkey))
// b.data now has the info as binary data
// println(b.data)
println(b.data.bytestr())
lr := resp.decode(b.data)!
println(lr)

1
examples/develop/gittools/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
gittools_example

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env -S v -cg -enable-globals run
import os
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.develop.performance
mut silent := false
coderoot := if 'CODEROOT' in os.environ() {
os.environ()['CODEROOT']
} else {os.join_path(os.home_dir(), 'code')}
mut gs := gittools.get()!
if coderoot.len > 0 {
//is a hack for now
gs = gittools.new(coderoot: coderoot)!
}
mypath := gs.do(
recursive: true
cmd: 'list'
)!
timer := performance.new('gittools')
timer.timeline()

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env -S v -cg -enable-globals run
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import time
mut gs_default := gittools.new()!
println(gs_default)
// // Initializes the Git structure with the coderoot path.
// coderoot := '/tmp/code'
// mut gs_tmo := gittools.new(coderoot: coderoot)!
// // Retrieve the specified repository.
// mut repo := gs_default.get_repo(name: 'herolib')!
// println(repo)

View File

@@ -0,0 +1,104 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import time
// Creates a new file in the specified repository path and returns its name.
fn create_new_file(repo_path string, runtime i64)! string {
coded_now := time.now().unix()
file_name := 'hello_world_${coded_now}.py'
println('Creating a new ${file_name} file.')
// Create a new file in the repository.
osal.execute_silent("echo \"print('Hello, World!')\" > ${repo_path}/${file_name}")!
return file_name
}
// Resets all configurations and caches if needed.
// gittools.cachereset()!
// Initializes the Git structure with the coderoot path.
coderoot := '~/code'
mut gs_default := gittools.new(coderoot: coderoot)!
// Retrieve the specified repository.
mut repo := gs_default.get_repo(name: 'repo3')!
// In case we need to clone it, will clone the repo2 in a folder named repo3
// mut repo := gs_default.get_repo(name: 'repo3' clone: true, url: 'https://github.com/Mahmoud-Emad/repo2.git')!
runtime := time.now().unix()
branch_name := "branch_${runtime}"
tag_name := "tag_${runtime}"
repo_path := repo.get_path()!
mut file_name := create_new_file(repo_path, runtime)!
// Create a new branch to add our changes on it.
// We can simply checkout to the newly created branch, but we need to test the checkout method functionalty.
println('Creating a new \'${branch_name}\' branch...')
repo.create_branch(branch_name: branch_name, checkout: false) or {
error("Couldn't create branch due to: ${err}")
}
// Checkout to the created branch
println('Checkout to \'${branch_name}\' branch...')
repo.checkout(branch_name: branch_name, pull: false) or {
error("Couldn't checkout to branch ${branch_name} due to: ${err}")
}
// Check for changes and stage them if present.
if repo.has_changes()! {
println('Adding the changes...')
repo.add_changes() or {
error('Cannot add the changes due to: ${err}')
}
}
// Check if a commit is needed and commit changes if necessary.
if repo.need_commit()! {
commit_msg := 'feat: Added ${file_name} file.'
println('Committing the changes, Commit message: ${commit_msg}.')
repo.commit(msg: commit_msg) or {
error('Cannot commit the changes due to: ${err}')
}
}
// Push changes to the remote repository if necessary.
if repo.need_push()! {
println('Pushing the changes...')
repo.push() or {
error('Cannot push the changes due to: ${err}')
}
}
if repo.need_pull()! {
println('Pulling the changes.')
repo.pull() or {
error('Cannot pull the changes due to: ${err}')
}
}
// Checkout to the base branch
repo.checkout(checkout_to_base_branch: true, pull: true) or {
error("Couldn't checkout to branch ${branch_name} due to: ${err}")
}
// Create a new tag and add some changes on it then push it to the remote.
println('Creating a new \'${tag_name}\' tag...')
repo.create_tag(tag_name: tag_name, checkout: false) or {
error("Couldn't create tag due to: ${err}")
}
// Push the created tag.
println('Pushing the tag...')
repo.push(push_tag: true) or {
error('Cannot push the tag due to: ${err}')
}
// Check if the created tag exists.
println('Check if the created tag exists...')
repo.is_tag_exists(tag_name: tag_name) or {
println("Tag isn't exists.")
}

View File

@@ -0,0 +1,7 @@
## Juggler Example
This example demonstrates how juggler is able to trigger DAG's on a remote dagu server, upon webhook triggers from gitea.
To run example:
- configure gitea webhook to call `trigger` endpoint in your locally running juggler server
- run `main.vsh` with the appropriate `repo_path` and `dagu server url`

View File

@@ -0,0 +1 @@
hero run -u https://github.com/freeflowuniverse/herolib/tree/development_juggler/examples/develop/juggler/hero/playbook

View File

@@ -0,0 +1,14 @@
!!juggler.configure
url: 'https://git.ourworld.tf/projectmycelium/itenv'
username: ''
password: ''
port: 8000
!!juggler.start
!!caddy.add_reverse_proxy
from: ':8000'
to: juggler.protocol.me
!!caddy.generate
!!caddy.start

View File

@@ -0,0 +1 @@
hero juggler -u https://git.ourworld.tf/projectmycelium/itenv

Binary file not shown.

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import os
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.develop.juggler
import veb
osal.load_env_file('${os.dir(@FILE)}/.env')!
mut j := juggler.configure(
url: 'https://git.ourworld.tf/projectmycelium/itenv'
username: os.getenv('JUGGLER_USERNAME')
password: os.getenv('JUGGLER_PASSWORD')
reset: true
)!
spawn j.run(8000)
println(j.info())
for{}
// TODO
// - automate caddy install/start
// - create server/caddy which only calls install & can set config file from path or url & restart (see dagu server)
// - get caddy config from the itenv through (simple driver)
// - caddy through startup manager, also for dagu
// - expose dagu UI over caddy & make sure we use secret
// - have heroscript starting from itenv to start a full env (with secret): 'hero juggler -i -s mysecret --dns juggler2.protocol.me '
// - use domain name use https://github.com/Incubaid/dns/blob/main/protocol.me.lua over git ssh

Binary file not shown.

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
mut sm := startupmanager.get()!
sm.start(
name: 'juggler'
cmd: 'hero juggler -secret planetfirst -u https://git.ourworld.tf/projectmycelium/itenv -reset true'
env: {'HOME': os.home_dir()}
restart: true
) or {panic('failed to start sm ${err}')}
// TODO
// - automate caddy install/start
// - create server/caddy which only calls install & can set config file from path or url & restart (see dagu server)
// - get caddy config from the itenv through (simple driver)
// - caddy through startup manager, also for dagu
// - expose dagu UI over caddy & make sure we use secret
// - have heroscript starting from itenv to start a full env (with secret): 'hero juggler -i -s mysecret --dns juggler2.protocol.me '
// - use domain name use https://github.com/Incubaid/dns/blob/main/protocol.me.lua over git ssh

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.develop.luadns
fn main() {
mut lua_dns := luadns.load('https://github.com/Incubaid/dns') or {
eprintln('Failed to parse LuaDNS files: $err')
return
}
lua_dns.set_domain('test.protocol.me', '65.21.132.119') or {
eprintln('Failed to set domain: $err')
return
}
lua_dns.set_domain('example.protocol.me', '65.21.132.119') or {
eprintln('Failed to set domain: $err')
return
}
for config in lua_dns.configs {
println(config)
}
for config in lua_dns.configs {
println(config)
}
}

1
examples/develop/openai/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
openai_example

View File

@@ -0,0 +1,76 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.clients.openai as op
mut ai_cli := op.new()!
mut msg := []op.Message{}
msg << op.Message{
role: op.RoleType.user
content: 'Say this is a test!'
}
mut msgs := op.Messages{
messages: msg
}
res := ai_cli.chat_completion(op.ModelType.gpt_3_5_turbo, msgs)!
print(res)
models := ai_cli.list_models()!
model := ai_cli.get_model(models.data[0].id)!
print(model)
images_created := ai_cli.create_image(op.ImageCreateArgs{
prompt: 'Calm weather'
num_images: 2
size: op.ImageSize.size_512_512
format: op.ImageRespType.url
})!
print(images_created)
images_updated := ai_cli.create_edit_image(op.ImageEditArgs{
image_path: '/path/to/image.png'
mask_path: '/path/to/mask.png'
prompt: 'Calm weather'
num_images: 2
size: op.ImageSize.size_512_512
format: op.ImageRespType.url
})!
print(images_updated)
images_variatons := ai_cli.create_variation_image(op.ImageVariationArgs{
image_path: '/path/to/image.png'
num_images: 2
size: op.ImageSize.size_512_512
format: op.ImageRespType.url
})!
print(images_variatons)
transcription := ai_cli.create_transcription(op.AudioArgs{
filepath: '/path/to/audio'
})!
print(transcription)
translation := ai_cli.create_tranlation(op.AudioArgs{
filepath: '/path/to/audio'
})!
print(translation)
file_upload := ai_cli.upload_file(filepath: '/path/to/file.jsonl', purpose: 'fine-tune')
print(file_upload)
files := ai_cli.list_filess()!
print(files)
resp := ai_cli.create_fine_tune(training_file: file.id, model: 'curie')!
print(resp)
fine_tunes := ai_cli.list_fine_tunes()!
print(fine_tunes)
fine_tune := ai_cli.get_fine_tune(fine_tunes.data[0].id)!
print(fine_tune)
moderations := ai_cli.create_moderation('Something violent', op.ModerationModel.text_moderation_latest)!
print(moderations)
embeddings := ai_cli.create_embeddings(
input: ['sample embedding input']
model: op.EmbeddingModel.text_embedding_ada
)!
print(embeddings)

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run #!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.crystallib.hero.bootstrap import freeflowuniverse.herolib.hero.bootstrap
mut al:=bootstrap.new_alpine_loader() mut al:=bootstrap.new_alpine_loader()

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env -S v -w -n -enable-globals run #!/usr/bin/env -S v -w -n -enable-globals run
import freeflowuniverse.crystallib.hero.generation import freeflowuniverse.herolib.hero.generation
generation.generate_actor( generation.generate_actor(
name: 'Example' name: 'Example'

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env -S v -w -n -enable-globals run #!/usr/bin/env -S v -w -n -enable-globals run
import freeflowuniverse.crystallib.hero.generation import freeflowuniverse.herolib.hero.generation
generation.generate_actor( generation.generate_actor(
name: 'Example' name: 'Example'

View File

@@ -1,8 +1,8 @@
module example_actor module example_actor
import os import os
import freeflowuniverse.crystallib.hero.baobab.actor {IActor, RunParams} import freeflowuniverse.herolib.hero.baobab.actor {IActor, RunParams}
import freeflowuniverse.crystallib.web.openapi import freeflowuniverse.herolib.web.openapi
import time import time
const openapi_spec_path = '${os.dir(@FILE)}/specs/openapi.json' const openapi_spec_path = '${os.dir(@FILE)}/specs/openapi.json'

View File

@@ -6,9 +6,9 @@ import veb
import json import json
import x.json2 import x.json2
import net.http import net.http
import freeflowuniverse.crystallib.web.openapi {Server, Context, Request, Response} import freeflowuniverse.herolib.web.openapi {Server, Context, Request, Response}
import freeflowuniverse.crystallib.hero.processor {Processor, ProcedureCall, ProcedureResponse, ProcessParams} import freeflowuniverse.herolib.hero.processor {Processor, ProcedureCall, ProcedureResponse, ProcessParams}
import freeflowuniverse.crystallib.clients.redisclient import freeflowuniverse.herolib.clients.redisclient
@[heap] @[heap]
struct Actor { struct Actor {

View File

@@ -6,10 +6,10 @@ import veb
import json import json
import x.json2 {Any} import x.json2 {Any}
import net.http import net.http
import freeflowuniverse.crystallib.data.jsonschema {Schema} import freeflowuniverse.herolib.data.jsonschema {Schema}
import freeflowuniverse.crystallib.web.openapi {Server, Context, Request, Response} import freeflowuniverse.herolib.web.openapi {Server, Context, Request, Response}
import freeflowuniverse.crystallib.hero.processor {Processor, ProcedureCall, ProcedureResponse, ProcessParams} import freeflowuniverse.herolib.hero.processor {Processor, ProcedureCall, ProcedureResponse, ProcessParams}
import freeflowuniverse.crystallib.clients.redisclient import freeflowuniverse.herolib.clients.redisclient
const spec_path = '${os.dir(@FILE)}/data/openapi.json' const spec_path = '${os.dir(@FILE)}/data/openapi.json'
const spec_json = os.read_file(spec_path) or { panic(err) } const spec_json = os.read_file(spec_path) or { panic(err) }

5
examples/installers/.gitignore vendored Normal file
View File

@@ -0,0 +1,5 @@
caddy
gitea
installers
postgresql
mycelium

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.sysadmintools.actrunner
import freeflowuniverse.herolib.installers.virt.herocontainers
actrunner.install()!
//herocontainers.start()!

View File

@@ -0,0 +1,6 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.fediverse.conduit
conduit.install()!

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer
coredns_installer.install()!

16
examples/installers/dagu.vsh Executable file
View File

@@ -0,0 +1,16 @@
#!/usr/bin/env -S v -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// #!/usr/bin/env -S v -n -cg -w -enable-globals run
import freeflowuniverse.herolib.installers.sysadmintools.daguserver
import freeflowuniverse.herolib.installers.infra.zinit
//make sure zinit is there and running, will restart it if needed
mut z:=zinit.get()!
z.destroy()!
z.start()!
// mut ds := daguserver.get()!
// ds.destroy()!
// ds.start()!
// println(ds)

View File

@@ -0,0 +1,61 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Example: install & start a dagu server, then register a DAG with two retrying steps.
import freeflowuniverse.herolib.installers.sysadmintools.daguserver
//will call the installer underneith
mut dserver:=daguserver.new()!
dserver.install()!
dserver.restart()!
println("DAGU installed & running")
mut dagucl:=dserver.client()!
// name string // The name of the DAG, which is optional. The default name is the name of the file.
// description ?string // A brief description of the DAG.
// tags ?string // Free tags that can be used to categorize DAGs, separated by commas.
// env ?map[string]string // Environment variables that can be accessed by the DAG and its steps.
// restart_wait_sec ?int // The number of seconds to wait after the DAG process stops before restarting it.
// hist_retention_days ?int // The number of days to retain execution history (not for log files).
// delay_sec ?int // The interval time in seconds between steps.
// max_active_runs ?int // The maximum number of parallel running steps.
// max_cleanup_time_sec ?int // The maximum time to wait after sending a TERM signal to running steps before killing them.
mut mydag:=dagucl.dag_new(
	// BUGFIX: field is `name` (see the documented field list above); `nameswhere` is not a valid argument
	name:"test11"
)
// nr int @[required]
// name string // The name of the step.
// description string // A brief description of the step.
// dir string // The working directory for the step.
// command string // The command and parameters to execute.
// stdout string // The file to which the standard output is written.
// output ?string // The variable to which the result is written.
// script ?string // The script to execute.
// signal_on_stop ?string // The signal name (e.g., SIGINT) to be sent when the process is stopped.
// continue_on_error bool
// depends string
// retry_nr int = 3
// retry_interval int = 5
mydag.step_add(
	script : "ls /tmp"
	retry_interval:1
	retry_nr:3
)!
mydag.step_add(
	script : "ls /root"
	retry_interval:1
	retry_nr:3
)!
dagresult:=dagucl.dag_register(mydag,start:true)!
println(dagresult)
println("DAGU should have new steps")

13
examples/installers/gitea.vsh Executable file
View File

@@ -0,0 +1,13 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.gitea

// Bring up a gitea instance backed by a fresh postgresql under /tmp/db.
// The postgresql superuser gets the same password as `passwd`.
mut gitea_server := gitea.new(
	passwd: '123'
	postgresql_path: '/tmp/db'
	postgresql_reset: true
	domain: 'git.meet.tf'
	appname: 'ourworld'
)!
gitea_server.restart()!

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.threefold.griddriver

// Fetch the griddriver installer object and run the installation.
mut installer := griddriver.get()!
installer.install()!

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Developer bootstrap: install v-analyzer and bring up a dagu server.
import freeflowuniverse.herolib.installers.lang.vlang
import freeflowuniverse.herolib.installers.sysadmintools.daguserver
import freeflowuniverse.herolib.installers.sysadmintools.b2 as b2_installer
// NOTE(review): b2_installer is imported but not used below — presumably kept for experimentation.
vlang.v_analyzer_install()!
daguserver.new()! //will install & start a daguserver

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env -S v -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.lang.golang
import freeflowuniverse.herolib.installers.virt.podman as podman_installer
import freeflowuniverse.herolib.installers.virt.buildah as buildah_installer

// Install the podman and buildah container tools through their installers.
mut podman := podman_installer.get()!
mut buildah := buildah_installer.get()!
// podman.destroy()! // uncomment to remove everything first
podman.install()!
buildah.install()!

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Installer playground: only the mycelium install and v-analyzer install are
// active; the remaining installers are kept commented out for quick testing.
import freeflowuniverse.herolib.data.dbfs
import freeflowuniverse.herolib.installers.lang.vlang
import freeflowuniverse.herolib.installers.db.redis as redis_installer
import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer
import freeflowuniverse.herolib.installers.sysadmintools.daguserver as dagu_installer
import freeflowuniverse.herolib.installers.sysadmintools.b2 as b2_installer
import freeflowuniverse.herolib.installers.net.mycelium as mycelium_installer
import freeflowuniverse.herolib.osal.screen
// import freeflowuniverse.herolib.osal
// redis_installer.new()!
// dagu_installer.install(passwd:"1234",secret:"1234",restart:true)!
// coredns_installer.install()!
mycelium_installer.install()!
// mycelium_installer.restart()!
// mut screens:=screen.new()!
// println(screens)
// dagu_installer.check(secret:"1234")!
vlang.v_analyzer_install()!
// b2_installer.install()!

View File

@@ -0,0 +1,4 @@
#!/usr/bin/env -S v -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Start the mycelium overlay-network daemon via its installer.
import freeflowuniverse.herolib.installers.net.mycelium as mycelium_installer
mycelium_installer.start()!

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -cg -d use_openssl -enable-globals run
import time
import freeflowuniverse.herolib.installers.db.postgresql

// Bring up a postgresql instance through the installer.
mut pg := postgresql.get()!
// pg.destroy()! // uncomment to wipe the existing installation first
pg.start()!
// pg.db_create('my_new_db')!
// pg.stop()!
// pg.start()!

10
examples/installers/youki.vsh Executable file
View File

@@ -0,0 +1,10 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.virt.youki

// Install the youki container runtime.
mut installer := youki.get()!
installer.install()!

1
examples/lang/python/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
pythonexample

View File

@@ -0,0 +1,22 @@
# V template script: the nrcount placeholder (written with an at-sign) is
# substituted by the V caller via $tmpl() before this code is executed.
import json
for counter in range(1, @nrcount): # loop from 1 to nrcount-1 (range upper bound is exclusive)
print(f"done_{counter}")
# Define a simple Python structure (e.g., a dictionary)
example_struct = {
"name": "John Doe",
"age": @nrcount,
"is_member": True,
"skills": ["Python", "Data Analysis", "Machine Learning"]
}
# Convert the structure to a JSON string
json_string = json.dumps(example_struct, indent=4)
# Print the JSON string; the ==RESULT== marker lets the caller find the payload
print("==RESULT==")
print(json_string)

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.lang.python
import json

// Mirrors the JSON document emitted by pythonexample.py after ==RESULT==.
pub struct Person {
	name string
	age int
	is_member bool
	skills []string
}

// Create a named python environment and make sure ipython is available in it.
mut pyenv := python.new(name:'test')! //a python env with name test
//pyenv.update()!
pyenv.pip("ipython")!

// nrcount is substituted into the template below via $tmpl.
nrcount := 5
cmd := $tmpl("pythonexample.py")

// Execute the templated script a few times; only the last result is kept.
mut result := ""
for attempt in 0 .. 5 {
	println(attempt)
	result = pyenv.exec(cmd: cmd)!
}
//result := pyenv.exec(cmd: cmd)!
person := json.decode(Person, result)!
println(person)

1
examples/osal/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
lima_example

View File

@@ -0,0 +1,14 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.osal { download }

// Fetch echarts from the CDN into /tmp, enforcing a plausible size range.
// NOTE(review): '@name' in url/dest is presumably expanded to `name` by download() — confirm in osal.
mut downloaded := download(
	url: 'https://cdnjs.cloudflare.com/ajax/libs/echarts/5.4.3/@name'
	name: 'echarts.min.js'
	reset: false
	dest: '/tmp/@name'
	minsize_kb: 1000
	maxsize_kb: 5000
)!
println(downloaded)

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Demo of the osal.ping result states (needs network access to be meaningful).
import freeflowuniverse.herolib.osal { ping }
assert ping(address: '338.8.8.8')! == .unknownhost // 338 is not a valid IPv4 octet, so the host cannot resolve
assert ping(address: '8.8.8.8')! == .ok // google public DNS, expected reachable
assert ping(address: '18.8.8.8')! == .timeout // assumes this address drops echo requests — may vary by network

View File

@@ -0,0 +1,11 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Forward three ports of a remote host (addressed over IPv6) to the local machine.
import freeflowuniverse.herolib.builder
// name string @[required]
// address string @[required]
// remote_port int @[required]
builder.portforward_to_local(name:"holo1",address:"[302:1d81:cef8:3049:fbe1:69ba:bd8c:52ec]",remote_port:45579)!
builder.portforward_to_local(name:"holo2",address:"[302:1d81:cef8:3049:fbe1:69ba:bd8c:52ec]",remote_port:34639)!
builder.portforward_to_local(name:"holoui",address:"[302:1d81:cef8:3049:fbe1:69ba:bd8c:52ec]",remote_port:8282)!

View File

@@ -0,0 +1,20 @@
module main

import freeflowuniverse.herolib.osal

// Demonstrate osal.exec: a failing command with die:false returns the job
// instead of aborting the program.
fn do() ? {
	if osal.is_osx() {
		println('IS OSX')
	}
	mut ok_job := osal.exec(cmd: 'ls /')?
	println(ok_job)
	// wont die, the result can be found in /tmp/execscripts
	mut failing_job := osal.exec(cmd: 'ls dsds', die: false)?
	println(failing_job)
}

fn main() {
	do() or { panic(err) }
}

View File

@@ -0,0 +1,64 @@
module main
// Experiment: drive an interactive /bin/bash via os.new_process with redirected
// stdio, polling stdout/stderr and injecting commands on a timer.
// NOTE(review): the poll loop below has no exit condition — stop manually.
import os
import time
fn main() {
do1() or { panic(err) }
}
fn do1() ! {
mut p := os.new_process("/bin/bash")
p.set_work_folder("/tmp")
p.set_redirect_stdio()
p.use_stdio_ctl = true
p.use_pgroup = true
// p.set_args(['-i','-q'])
p.run()
// p.set_args("")
// time.sleep(100 * time.millisecond)
println( "alive: ${p.is_alive()}")
assert p.is_alive()
defer {
p.wait()
p.close()
}
// for {
// println(1)
// println(p.stdout_slurp())
// println(2)
// println(p.stderr_slurp())
// println(3)
// }
mut counter:=0
// Poll loop: drain stdout/stderr every 100ms; `or {""}` treats a failed read as "no output".
for {
counter+=1
println(counter)
out:=p.pipe_read(.stdout) or {""}
if out.len>0{
println("o")
println(out)
}
err:=p.pipe_read(.stderr) or {""}
if err.len>0{
println("e")
println(err)
}
time.sleep(100 * time.millisecond)
// inject a command after ~200ms and another after ~2s
if counter==2{
p.stdin_write("echo '111'\n")
// os.fd_close(p.stdio_fd[0])
}
if counter==20{
p.stdin_write("echo '2222'\n")
// os.fd_close(p.stdio_fd[0])
}
}
}

View File

@@ -0,0 +1,131 @@
module main
import os
import time
fn main() {
// only do1 is wired up; do2 below is a second experiment kept for reference
do1() or { panic(err) }
}
// Drive an interactive python3 (-i -q) through redirected stdio, polling its
// pipes and injecting print() statements on a timer.
// NOTE(review): the poll loop has no exit condition — stop manually.
// NOTE(review): the interpreter path is homebrew/macOS specific.
fn do1() ! {
mut p := os.new_process("/opt/homebrew/bin/python3")
p.set_work_folder("/tmp")
p.set_redirect_stdio()
p.use_stdio_ctl = true
p.use_pgroup = true
p.set_args(['-i','-q'])
p.run()
// p.set_args("")
// time.sleep(100 * time.millisecond)
println( "alive: ${p.is_alive()}")
assert p.is_alive()
defer {
p.wait()
p.close()
}
// for {
// println(1)
// println(p.stdout_slurp())
// println(2)
// println(p.stderr_slurp())
// println(3)
// }
mut counter:=0
// Poll loop: drain stdout/stderr every 100ms; `or {""}` treats a failed read as "no output".
for {
counter+=1
println(counter)
out:=p.pipe_read(.stdout) or {""}
if out.len>0{
println("o")
println(out)
}
err:=p.pipe_read(.stderr) or {""}
if err.len>0{
println("e")
println(err)
}
time.sleep(100 * time.millisecond)
if counter==2{
p.stdin_write("print('something')\n\n\n")
// os.fd_close(p.stdio_fd[0])
}
if counter==20{
p.stdin_write("print('something else')\n\n\n")
// os.fd_close(p.stdio_fd[0])
}
}
}
// Variant of do1 that polls the raw stdio file descriptors with
// os.fd_is_pending/os.fd_slurp instead of pipe_read.
// NOTE(review): this function is never called from main().
fn do2() ! {
mut p := os.new_process("/opt/homebrew/bin/python3")
p.set_work_folder("/tmp")
p.set_redirect_stdio()
p.use_stdio_ctl = true
p.use_pgroup = true
p.run()
// p.set_args("")
// time.sleep(100 * time.millisecond)
println( "alive: ${p.is_alive()}")
assert p.is_alive()
defer {
p.wait()
p.close()
}
// First poll loop on raw fds; it has no break, so everything after it is unreachable.
for {
fdi:=p.stdio_fd[0]
fdo:=p.stdio_fd[1]
fde:=p.stdio_fd[2]
println(1)
if os.fd_is_pending(fdo){
println(1.1)
println(os.fd_slurp(fdo))
}
println(2)
if os.fd_is_pending(fde){
println(2.1)
println(os.fd_slurp(fde))
}
println(3)
time.sleep(100 * time.millisecond)
}
// NOTE(review): dead code from here on — the loop above never terminates.
mut counter:=0
for {
counter+=1
println(counter)
out:=p.pipe_read(.stdout) or {""}
if out.len>0{
println("o")
println(out)
}
err:=p.pipe_read(.stderr) or {""}
if err.len>0{
println("e")
println(err)
}
time.sleep(100 * time.millisecond)
if counter==2{
p.stdin_write("print('something')\n\n\n")
os.fd_close(p.stdio_fd[0])
}
if counter==20{
p.stdin_write("print('something else')\n\n\n")
os.fd_close(p.stdio_fd[0])
}
}
}

View File

@@ -0,0 +1,12 @@
module main

import freeflowuniverse.herolib.osal

// Print the process map (all processes known to the OS) collected by osal.
fn do() ? {
	// BUGFIX: was `process.processmap_get()` but no `process` module is imported
	// here — the function lives in osal (see the osal README example).
	mut pm := osal.processmap_get()?
	println(pm)
}

fn main() {
	do() or { panic(err) }
}

View File

@@ -0,0 +1,50 @@
module main
// Three rsync scenarios via osal.rsync: local->local, local->remote and remote->local.
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.builder
import os
// repo root, derived from this file's location
const myexamplepath = os.dir(@FILE) + '/../..'
// local -> local: mirror the repo into /tmp/testsync, deleting extraneous files
fn do1() ! {
tstdir := '/tmp/testsync'
// source string
// dest string
// delete bool //do we want to delete the destination
// ipaddr_src string //e.g. root@192.168.5.5:33 (can be without root@ or :port)
// ipaddr_dst string
// ignore []string //arguments to ignore e.g. ['*.pyc','*.bak']
// ignore_default bool //if set will ignore a common set
// stdout bool
osal.rsync(source: myexamplepath, dest: tstdir, delete: true)!
cmd := osal.rsync_cmd(source: myexamplepath, dest: tstdir)!
println(cmd)
//"rsync -avz --no-perms --exclude='*.pyc' --exclude='*.bak' --exclude='*dSYM' /Users/despiegk1/code/github/freeflowuniverse/herolib/examples /tmp/testsync"
}
// local -> remote: create the target dir over ssh first, then push
fn do2() ! {
mut b := builder.new()!
mut n := b.node_new(ipaddr: 'root@195.192.213.2')!
tstdir := '/tmp/testsync'
n.exec('mkdir -p ${tstdir}')!
ipaddr := 'root@195.192.213.2'
osal.rsync(source: myexamplepath, ipaddr_dst: ipaddr, dest: tstdir, delete: true)!
cmd := osal.rsync_cmd(source: myexamplepath, dest: tstdir)!
println(cmd)
}
// remote -> local: pull the same dir back from the remote host
fn do3() ! {
ipaddr := 'root@195.192.213.2:22'
tstdir := '/tmp/testsync'
osal.rsync(ipaddr_src: ipaddr, source: tstdir, dest: tstdir, delete: true)!
cmd := osal.rsync_cmd(source: tstdir, dest: tstdir)!
println(cmd)
}
fn main() {
do1() or { panic(err) }
do2() or { panic(err) }
do3() or { panic(err) }
}

View File

@@ -0,0 +1 @@
sandbox_example

View File

@@ -0,0 +1,26 @@
module main
// Bootstrap two debootstrap base images (debian + ubuntu jammy) for the
// sandbox container machinery; requires root and network access.
import freeflowuniverse.herolib.osal.sandbox
import os
fn do() ! {
sandbox.install()! // will also do an upgrade of the OS
mut f := sandbox.new(path_images: '/var/sandbox/images')!
// get 2 bootstraps to work from
f.debootstrap(imagename: 'debian', reset: false)! // if reset then will download again
f.debootstrap(
imagename: 'ubuntu22'
repository: 'http://de.archive.ubuntu.com/ubuntu'
release: 'jammy'
reset: false
)!
// mut c := f.container_new(startcmd: ["ls", "/", "/proc"])!
// c.start()!
}
fn main() {
do() or { panic(err) }
}

1
examples/osal/sshagent/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
sshagent_example

View File

@@ -0,0 +1,28 @@
module main

import freeflowuniverse.herolib.osal.sshagent

// Inspect the ssh-agent: look up two named keys, load one into the agent,
// then forget it again.
fn do1() ! {
	mut agent := sshagent.new()!
	println(agent)
	// BUGFIX: panic messages said "notgound" and did not name the missing key
	k := agent.get(name: "kds") or { panic('ssh key "kds" not found in agent') }
	println(k)
	mut k2 := agent.get(name: "books") or { panic('ssh key "books" not found in agent') }
	k2.load()!
	println(k2.agent)
	println(agent)
	k2.forget()!
	println(k2.agent)
	// println(agent)
}

fn main() {
	do1() or { panic(err) }
}

View File

@@ -0,0 +1,26 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.infra.zinit as zinitinstaller
import freeflowuniverse.herolib.sysadmin.startupmanager

// Reinstall zinit from scratch, then register, inspect and remove a startup
// entry for it through the startupmanager.
mut installer := zinitinstaller.get()!
installer.destroy()!
installer.install()!
println("zinit installed")

service_name := 'zinit'
service_cmd := '/usr/local/bin/zinit init'

mut manager := startupmanager.get()!
println(manager.list()!)
manager.new(
	name: service_name
	cmd: service_cmd
	start: false
)!
println(manager.list()!)
assert manager.exists(service_name)!
manager.delete(service_name)!

14
examples/osal/systemd.vsh Executable file
View File

@@ -0,0 +1,14 @@
#!/usr/bin/env -S v -gc none -cg -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// List systemd processes and destroy the 'zinit' unit; the commented block
// shows how a unit would be created through the factory.
import freeflowuniverse.herolib.osal.systemd
mut systemdfactory := systemd.new()!
// mut systemdprocess := systemdfactory.new(
// cmd: '/usr/local/bin/zinit init'
// name: 'zinit'
// description: 'a super easy to use startup manager.'
// )!
l:=systemd.process_list()!
println(l)
systemdfactory.destroy("zinit")!

41
examples/osal/ufw.vsh Executable file
View File

@@ -0,0 +1,41 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.osal.ufw

// Build a small ufw ruleset, apply it, then reset and re-enable the firewall.
ufw.enable()!
println(ufw.ufw_status()!)

mut rules := ufw.new()

// HTTP from one IPv4 host
rules.allow(
	port: 80
	from: '192.168.1.100'
)
// HTTPS from any IPv6 address
rules.allow(
	port: 443
	ipv6: true
)
// block SMTP from this IPv4 subnet
rules.deny(
	port: 25
	from: '10.0.0.0/24'
)
// block FTP (udp only) from one IPv6 host
rules.deny(
	port: 21
	from: '2001:db8::1'
	udp: true
	tcp: false
	ipv6: true
)

// push the ruleset to ufw
ufw.apply(rules) or { panic('Error applying ruleset: ${err}') }

ufw.reset()!
ufw.enable()!

40
examples/osal/ufw_play.vsh Executable file
View File

@@ -0,0 +1,40 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Drive ufw from heroscript: configure the firewall and add three rules via
// the playbook machinery instead of calling the API directly.
import freeflowuniverse.herolib.osal.ufw
import freeflowuniverse.herolib.core.playbook
heroscript := "
!!ufw.configure
    active: true
    ssh: true
    reset: true
!!ufw.add_rule
    allow: true
    port: 80
    from: 'any'
    tcp: true
    udp: false
    ipv6: false
!!ufw.add_rule
    allow: false
    port: 443
    from: '192.168.1.0/24'
    tcp: true
    udp: false
    ipv6: false
!!ufw.add_rule
    allow: true
    port: 53
    from: 'any'
    tcp: true
    udp: true
    ipv6: true
"
// parse the heroscript into a playbook and let the ufw module execute it
mut plbook := playbook.new(text: heroscript)!
rs:=ufw.play(mut plbook)!
println(rs)

View File

@@ -0,0 +1 @@
# zinit service unit: demo service that just sleeps for a minute
exec: "sleep 1m"

View File

@@ -0,0 +1,3 @@
# zinit service unit: sleeps for a minute; only started after service_1 is up
exec: "sleep 1m"
after:
- service_1

View File

@@ -0,0 +1,68 @@
module main

import os
import time
import freeflowuniverse.herolib.osal.zinit

// Socket path must match the -s argument zinit is spawned with below,
// otherwise the RPC client talks to a non-existent socket.
const zinit_sock = 'examples/osal/zinit/zinit.sock'

fn main() {
	do() or { panic(err) }
}

// Walk through the zinit RPC api: list, status, stop, forget, monitor, start, kill.
fn do() ! {
	start_zinit()!
	// BUGFIX: the client previously connected to
	// 'herolib/osal/zinit/zinit/zinit.sock', which is not the socket
	// start_zinit() creates.
	client := zinit.new_rpc_client(zinit_sock)
	list_services(client)!
	get_service_status(client, 'service_2')!
	stop_service(client, 'service_2')!
	forget_service(client, 'service_2')!
	monitor_service(client, 'service_2')!
	stop_service(client, 'service_2')!
	start_service(client, 'service_2')!
	kill_service(client, 'service_1', 'sigterm')!
}

// Spawn zinit on our socket with the example config dir; give it a second to boot.
fn start_zinit() ! {
	spawn os.execute('zinit -s ${zinit_sock} init -c examples/osal/zinit')
	time.sleep(time.second)
}

// Print all services zinit currently watches.
fn list_services(client zinit.Client) ! {
	mut ls := client.list()!
	println('services watched by zinit: ${ls}\n\n')
}

// Print the status of one service (small delay lets prior commands settle).
fn get_service_status(client zinit.Client, service_name string) ! {
	time.sleep(time.millisecond * 100)
	mut st := client.status(service_name)!
	println('${service_name} status: ${st}\n\n')
}

// Stop a service and show its resulting status.
fn stop_service(client zinit.Client, service_name string) ! {
	println('Stopping ${service_name}...')
	client.stop(service_name)!
	get_service_status(client, service_name)!
}

// Remove a (stopped) service from zinit's watch list.
fn forget_service(client zinit.Client, service_name string) ! {
	println('Forgetting ${service_name}...')
	client.forget(service_name)!
	list_services(client)!
}

// Put a service back under zinit's supervision.
fn monitor_service(client zinit.Client, service_name string) ! {
	println('Monitoring service ${service_name}...')
	client.monitor(service_name)!
	get_service_status(client, service_name)!
}

// Start a service and show its resulting status.
fn start_service(client zinit.Client, service_name string) ! {
	println('Starting service ${service_name}...')
	client.start(service_name)!
	get_service_status(client, service_name)!
}

// Send a signal (e.g. sigterm) to a service and show its resulting status.
fn kill_service(client zinit.Client, service_name string, sig string) ! {
	println('Killing service ${service_name}...')
	client.kill(service_name, sig)!
	get_service_status(client, service_name)!
}

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// Reset zinit and register one process definition ('test' running /bin/bash).
import os
import time
import freeflowuniverse.herolib.osal.zinit
zinit.destroy()!
mut z := zinit.new()!
// name string [required]
// cmd string [required]
// cmd_file bool //if we wanna force to run it as a file which is given to bash -c (not just a cmd in zinit)
// test string
// test_file bool
// after []string
// env map[string]string
// oneshot bool
p := z.process_new(
name: 'test'
cmd: '/bin/bash'
)!
println(p)

23
examples/sync_do.sh Executable file
View File

@@ -0,0 +1,23 @@
#!/bin/bash
# Sync the local herolib checkout to a remote host with rsync, then attach to
# (or create) a tmux session there inside examples/hero.
set -euo pipefail # stop on the first failure instead of ssh-ing after a broken sync

# SSH and rsync configuration
SSH_HOST="verse.tf"
SSH_USER="root"
SOURCE_DIR="${HOME}/code/github/freeflowuniverse/herolib"
DEST_DIR="/root/code/github/freeflowuniverse/herolib"
FINAL_DIR="/root/code/github/freeflowuniverse/herolib/examples/hero"

# Check if the source directory exists, if not stop
if [ ! -d "$SOURCE_DIR" ]; then
    echo "Source directory $SOURCE_DIR does not exist. Exiting."
    exit 1
fi

# Perform rsync over SSH; the trailing slash makes rsync copy the directory's contents
#--exclude '.git' --exclude '.venv'
rsync -avz --delete -e ssh "$SOURCE_DIR/" "$SSH_USER@$SSH_HOST:$DEST_DIR/"

set -x
# SSH into the remote machine and change to the specified directory
# (reuse the configured user/host rather than hardcoding root@verse.tf)
ssh -At "$SSH_USER@$SSH_HOST" "tmux attach-session -t main || tmux new-session -s main -c ${FINAL_DIR}"

1
examples/threefold/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
holochain_deployer

View File

@@ -0,0 +1,90 @@
# Installing `griddriver`
To be able to run the examples you need to install an updated version of `griddriver`.
## Install from herolib installer
Create a `griddriver_install.vsh` file containing the following code:
```vlang
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.threefold.griddriver as griddriverinstaller
mut reset:=true
griddriverinstaller.install(reset:reset)!
```
Make script executable and run it
```sh
chmod +x ./griddriver_install.vsh
./griddriver_install.vsh
```
## Install from repo
Checkout the `griddriver` main branch
https://github.com/threefoldtech/web3gw/tree/development_integration
Inside the web3gw directory, run:
```sh
cd griddriver
./build.sh
```
# Run examples
These example scripts demonstrate various functionalities and interactions with
the TFGrid using the Hero programming language. They provide a starting point
for developers to understand and build upon when working with the TFGrid API and
deploying resources on the grid.
## Utils
- `billing_hourly.vsh`: calculate the hourly billing for a specific contract
ID.
- `cancel_contract.vsh`: cancel a specific contract on the TFGrid.
- `cancel_contracts.vsh`: cancel multiple contracts on the TFGrid.
- `deploy_vm_high_level.vsh`: deploy a virtual machine (VM) on the TFGrid
using a high-level approach.
- `get_contracts.vsh`: retrieve a list of all active contracts associated with
the configured identity on the TFGrid.
- `list_gateways.vsh`: list all available gateways on the TFGrid.
- `tfgrid_config.vsh`: configure the connection settings for interacting with
the TFGrid.
- `zos_version.vsh`: check the version of the Zero-OS (ZOS) running on a
specific node.
## Tests
- `create_update_deployments.vsh`: create a deployment with various workloads
(network, disk, public IP, VM, logs, ZDB) and a gateway name proxy, deploy
it to a node, and update the deployment with the gateway name workload.
- `deploy_gw_fqdn.vsh`: deploy a gateway workload using a Fully Qualified
Domain Name (FQDN).
- `deploy_gw_name.vsh`: deploy a gateway workload using a name contract. It
creates a GatewayNameProxy workload, reserves the name on the grid using a
name contract, and deploys it to a specific node.
- `deploy_vm.vsh`: deploy a network (Znet) and a virtual machine (Zmachine).
- `deploy_zdb.vsh`: deploy a ZDB (Zero-DB) workload on a specific node.
- `holochain_vm.vsh`: set up a Holochain development environment on the
ThreeFold Grid without manual configuration. The script is related to
Holochain because it specifically deploys a Holochain development
environment on the ThreeFold Grid. The Flist URL used in the virtual machine
workload points to a pre-built Holochain development environment image.
Usage:
```sh
./holochain_vm.vsh --mnemonic "your_mnemonic_phrase" --ssh_key "your_public_ssh_key" [--network main|test|qa|dev] [--code_server_pass "your_password"] [--cpu 4] [--ram 8] [--disk 30] [--public_ip]
```
- `vm_with_gw_name.vsh`: deploy a VM workload along with a gateway using a
name contract. It finds a node matching the VM capacity requirements,
creates a network, a VM, and a gateway workload pointing to the VM. It then
deploys the VM and gateway workloads to their respective nodes. Usage:
```sh
./vm_with_gw_name.vsh --mnemonic "your_mnemonic_phrase" --ssh_key "your_public_ssh_key" [--network main|test|qa|dev] [--cpu 4] [--ram 4] [--disk 5] [--public_ip]
```

View File

@@ -0,0 +1,150 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid.models
import freeflowuniverse.herolib.threefold.grid as tfgrid
import json
import log
const pubkey = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTwULSsUubOq3VPWL6cdrDvexDmjfznGydFPyaNcn7gAL9lRxwFbCDPMj7MbhNSpxxHV2+/iJPQOTVJu4oc1N7bPP3gBCnF51rPrhTpGCt5pBbTzeyNweanhedkKDsCO2mIEh/92Od5Hg512dX4j7Zw6ipRWYSaepapfyoRnNSriW/s3DH/uewezVtL5EuypMdfNngV/u2KZYWoeiwhrY/yEUykQVUwDysW/xUJNP5o+KSTAvNSJatr3FbuCFuCjBSvageOLHePTeUwu6qjqe+Xs4piF1ByO/6cOJ8bt5Vcx0bAtI8/MPApplUU/JWevsPNApvnA/ntffI+u8DCwgP'
// End-to-end test against the dev chain: build a deployment holding network,
// disk, public ip, zmachine, zlogs and zdb workloads, deploy it to node 14,
// then update it with a gateway-name workload pointing at the vm.
fn test_create_and_update_deployment() ! {
mut logger := &log.Log{}
logger.set_level(.debug)
mnemonics := tfgrid.get_mnemonics()!
mut deployer := tfgrid.new_deployer(mnemonics, .dev, mut logger)!
// wireguard keypairs; index 0/1 are used below as private/public key respectively
node_privkey := deployer.client.generate_wg_priv_key()!
user_privkey := deployer.client.generate_wg_priv_key()!
twin_id := deployer.client.get_user_twin()!
println('your wireguard privatekey is ${user_privkey[0]}')
// overlay network connecting the vm and the wireguard peer
mut network := models.Znet{
ip_range: '10.1.0.0/16'
subnet: '10.1.1.0/24'
wireguard_private_key: node_privkey[0] // node private key
wireguard_listen_port: 3012
peers: [
models.Peer{
subnet: '10.1.2.0/24'
wireguard_public_key: user_privkey[1] // user public key
allowed_ips: ['10.1.2.0/24', '100.64.1.2/32']
},
]
}
mut znet_workload := models.Workload{
version: 0
name: 'networkaa'
type_: models.workload_types.network
data: json.encode_pretty(network)
description: 'test network2'
}
// 2 GiB disk, mounted into the vm at /disk1
disk_name := 'mydisk'
zmount := models.Zmount{
size: 2 * 1024 * 1024 * 1024
}
zmount_workload := zmount.to_workload(name: disk_name)
mount := models.Mount{
name: disk_name
mountpoint: '/disk1'
}
// reserve an IPv4 public ip for the vm
public_ip_name := 'mypubip'
ip := models.PublicIP{
v4: true
}
ip_workload := ip.to_workload(name: public_ip_name)
// the vm itself: 1 cpu, 2 GiB ram, attached to the network above
zmachine := models.Zmachine{
flist: 'https://hub.grid.tf/tf-official-apps/base:latest.flist'
entrypoint: '/sbin/zinit init'
network: models.ZmachineNetwork{
public_ip: public_ip_name
interfaces: [
models.ZNetworkInterface{
network: 'networkaa'
ip: '10.1.1.3'
},
]
planetary: true
}
compute_capacity: models.ComputeCapacity{
cpu: 1
memory: i64(1024) * 1024 * 1024 * 2
}
env: {
'SSH_KEY': pubkey
}
mounts: [mount]
}
mut zmachine_workload := models.Workload{
version: 0
name: 'vm2'
type_: models.workload_types.zmachine
data: json.encode(zmachine)
description: 'zmachine test'
}
// stream the vm's logs to a websocket endpoint
zlogs := models.ZLogs{
zmachine: 'vm2'
output: 'wss://example_ip.com:9000'
}
zlogs_workload := zlogs.to_workload(name: 'myzlogswl')
// small sequential-mode zdb
zdb := models.Zdb{
size: 2 * 1024 * 1024
mode: 'seq'
}
zdb_workload := zdb.to_workload(name: 'myzdb')
mut deployment := models.Deployment{
version: 0
twin_id: twin_id
description: 'zm kjasdf1nafvbeaf1234t21'
workloads: [znet_workload, zmount_workload, zmachine_workload, zlogs_workload, zdb_workload,
ip_workload]
signature_requirement: models.SignatureRequirement{
weight_required: 1
requests: [
models.SignatureRequest{
twin_id: twin_id
weight: 1
},
]
}
}
deployment.add_metadata('myproject', 'hamada')
node_id := u32(14)
solution_provider := u64(0)
contract_id := deployer.deploy(node_id, mut deployment, deployment.metadata, solution_provider)!
deployer.logger.info('created contract id ${contract_id}')
// read the deployment back to learn the vm's planetary ip
res_deployment := deployer.get_deployment(contract_id, node_id)!
mut zmachine_planetary_ip := ''
for wl in res_deployment.workloads {
if wl.name == zmachine_workload.name {
res := json.decode(models.ZmachineResult, wl.result.data)!
zmachine_planetary_ip = res.planetary_ip
break
}
}
// add a gateway name workload fronting the vm, then update the deployment
gw_name := models.GatewayNameProxy{
name: 'mygwname1'
backends: ['http://[${zmachine_planetary_ip}]:9000']
}
gw_name_wl := gw_name.to_workload(name: 'mygwname1')
name_contract_id := deployer.client.create_name_contract('mygwname1')!
deployer.logger.info('name contract id: ${name_contract_id}')
deployment.workloads << gw_name_wl
deployer.update_deployment(node_id, mut deployment, deployment.metadata)!
}
fn main() {
// report instead of crash: deployment failures are printed, not panicked on
test_create_and_update_deployment() or { println('error happened: ${err}') }
}

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid as tfgrid
import freeflowuniverse.herolib.threefold.grid.models
import log
// Deploy a GatewayFQDNProxy workload on node 14: the gateway terminates the
// given fqdn and proxies to the configured backend.
fn main() {
mut logger := &log.Log{}
logger.set_level(.debug)
mnemonics := tfgrid.get_mnemonics() or {
logger.error(err.str())
exit(1)
}
chain_network := tfgrid.ChainNetwork.dev // Use your desired network
mut deployer := tfgrid.new_deployer(mnemonics, chain_network, mut logger)!
gw := models.GatewayFQDNProxy{
tls_passthrough: false
backends: ['http://1.1.1.1:9000']
fqdn: 'domaind.gridtesting.xyz'
}
wl := gw.to_workload(name: 'mywlname')
node_id := u32(14)
// the node must have a public config to act as a gateway
logger.info('trying to get node ${node_id} public configuration')
deployer.get_node_pub_config(node_id) or {
logger.error('please select another node: ${err}')
exit(1)
}
logger.info('preparing the deployment..')
signature_requirement := models.SignatureRequirement{
weight_required: 1
requests: [
models.SignatureRequest{
twin_id: deployer.twin_id
weight: 1
},
]
}
mut deployment := models.new_deployment(
twin_id: deployer.twin_id
workloads: [wl]
signature_requirement: signature_requirement
)
node_contract_id := deployer.deploy(node_id, mut deployment, '', 0) or {
logger.error(err.str())
exit(1)
}
logger.info('node contract created with id ${node_contract_id}')
}

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid as tfgrid
import freeflowuniverse.herolib.threefold.grid.models
import log
// Deploy a GatewayNameProxy workload: first reserve the name with a name
// contract, then deploy the proxy workload to node 14.
fn main() {
mut logger := &log.Log{}
logger.set_level(.debug)
mnemonics := tfgrid.get_mnemonics() or {
logger.error(err.str())
exit(1)
}
chain_network := tfgrid.ChainNetwork.dev // Use your desired network
mut deployer := tfgrid.new_deployer(mnemonics, chain_network, mut logger)!
gw := models.GatewayNameProxy{
tls_passthrough: false
backends: ['http://1.1.1.1']
name: 'hamada_gw'
}
wl := gw.to_workload(name: 'hamada_gw')
// reserve the gateway name on the grid before deploying the workload
name_contract_id := deployer.client.create_name_contract(wl.name)!
logger.info('name contract ${wl.name} created with id ${name_contract_id}')
signature_requirement := models.SignatureRequirement{
weight_required: 1
requests: [
models.SignatureRequest{
twin_id: deployer.twin_id
weight: 1
},
]
}
mut deployment := models.new_deployment(
twin_id: deployer.twin_id
workloads: [wl]
signature_requirement: signature_requirement
)
node_id := u32(14)
node_contract_id := deployer.deploy(node_id, mut deployment, '', 0)!
logger.info('node contract created with id ${node_contract_id}')
}

View File

@@ -0,0 +1,98 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid.models
import freeflowuniverse.herolib.threefold.grid as tfgrid
import json
import log
import os
// Deploy a wireguard network plus an ubuntu zmachine (1 cpu / 2 GiB) on node 14,
// then fetch the deployment back and print the vm's result (ips etc.).
fn main() {
mut logger := &log.Log{}
logger.set_level(.debug)
mnemonics := os.getenv('TFGRID_MNEMONIC')
chain_network := tfgrid.ChainNetwork.dev // Use your desired network
mut deployer := tfgrid.new_deployer(mnemonics, chain_network, mut logger)!
node_id := u32(14)
network_name := 'network1'
wg_port := deployer.assign_wg_port(node_id)!
// overlay network with one hardcoded wireguard peer
mut network := models.Znet{
ip_range: '10.1.0.0/16'
subnet: '10.1.1.0/24'
wireguard_private_key: 'GDU+cjKrHNJS9fodzjFDzNFl5su3kJXTZ3ipPgUjOUE='
wireguard_listen_port: wg_port
peers: [
models.Peer{
subnet: '10.1.2.0/24'
wireguard_public_key: '4KTvZS2KPWYfMr+GbiUUly0ANVg8jBC7xP9Bl79Z8zM='
allowed_ips: ['10.1.2.0/24', '100.64.1.2/32']
},
]
}
mut znet_workload := network.to_workload(name: network_name, description: 'test_network1')
// the vm: ubuntu flist, attached to the network above, no public ip
zmachine := models.Zmachine{
flist: 'https://hub.grid.tf/tf-official-apps/threefoldtech-ubuntu-22.04.flist'
network: models.ZmachineNetwork{
public_ip: ''
interfaces: [
models.ZNetworkInterface{
network: network_name
ip: '10.1.1.3'
},
]
planetary: true
}
entrypoint: '/sbin/zinit init'
compute_capacity: models.ComputeCapacity{
cpu: 1
memory: i64(1024) * 1024 * 1024 * 2
}
env: {
'SSH_KEY': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTwULSsUubOq3VPWL6cdrDvexDmjfznGydFPyaNcn7gAL9lRxwFbCDPMj7MbhNSpxxHV2+/iJPQOTVJu4oc1N7bPP3gBCnF51rPrhTpGCt5pBbTzeyNweanhedkKDsCO2mIEh/92Od5Hg512dX4j7Zw6ipRWYSaepapfyoRnNSriW/s3DH/uewezVtL5EuypMdfNngV/u2KZYWoeiwhrY/yEUykQVUwDysW/xUJNP5o+KSTAvNSJatr3FbuCFuCjBSvageOLHePTeUwu6qjqe+Xs4piF1ByO/6cOJ8bt5Vcx0bAtI8/MPApplUU/JWevsPNApvnA/ntffI+u8DCwgP'
}
}
mut zmachine_workload := zmachine.to_workload(name: 'vm2', description: 'zmachine_test')
signature_requirement := models.SignatureRequirement{
weight_required: 1
requests: [
models.SignatureRequest{
twin_id: deployer.twin_id
weight: 1
},
]
}
mut deployment := models.new_deployment(
twin_id: deployer.twin_id
description: 'test deployment'
workloads: [znet_workload, zmachine_workload]
signature_requirement: signature_requirement
)
deployment.add_metadata('vm', 'SimpleVM')
contract_id := deployer.deploy(node_id, mut deployment, deployment.metadata, 0) or {
logger.error('failed to deploy deployment: ${err}')
exit(1)
}
logger.info('deployment contract id: ${contract_id}')
// read the deployment back to get the zmachine's result data
dl := deployer.get_deployment(contract_id, node_id) or {
logger.error('failed to get deployment data: ${err}')
exit(1)
}
machine_res := get_machine_result(dl)!
logger.info('zmachine result: ${machine_res}')
}
// Scan the deployment's workloads for the first zmachine and decode its result payload.
fn get_machine_result(dl models.Deployment) !models.ZmachineResult {
	for workload in dl.workloads {
		if workload.type_ != models.workload_types.zmachine {
			continue
		}
		return json.decode(models.ZmachineResult, workload.result.data)!
	}
	return error('failed to get zmachine workload')
}

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid.models
import freeflowuniverse.herolib.threefold.grid as tfgrid
import log
import os
fn test_deploy_vm_hight_level(node_id u32) ! {
mnemonics := tfgrid.get_mnemonics()!
chain_network := tfgrid.ChainNetwork.dev // User your desired network
mut logger := &log.Log{}
logger.set_level(.debug)
mut deployer := tfgrid.new_deployer(mnemonics, chain_network, mut logger)!
vm := models.VM{
name: 'vm1'
env_vars: {
'SSH_KEY': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTwULSsUubOq3VPWL6cdrDvexDmjfznGydFPyaNcn7gAL9lRxwFbCDPMj7MbhNSpxxHV2+/iJPQOTVJu4oc1N7bPP3gBCnF51rPrhTpGCt5pBbTzeyNweanhedkKDsCO2mIEh/92Od5Hg512dX4j7Zw6ipRWYSaepapfyoRnNSriW/s3DH/uewezVtL5EuypMdfNngV/u2KZYWoeiwhrY/yEUykQVUwDysW/xUJNP5o+KSTAvNSJatr3FbuCFuCjBSvageOLHePTeUwu6qjqe+Xs4piF1ByO/6cOJ8bt5Vcx0bAtI8/MPApplUU/JWevsPNApvnA/ntffI+u8DCwgP'
}
}
res := deployer.client.deploy_single_vm(node_id, 'myproject', vm, deployer.env)!
deployer.logger.info('${res}')
}
// Entry point: deploy on node 14 and print any failure instead of panicking.
fn main() {
	test_deploy_vm_hight_level(u32(14)) or { println('error happened: ${err}') }
}

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid.models
import freeflowuniverse.herolib.threefold.grid as tfgrid
import log
// Example: deploy a single 2 MiB user-mode ZDB workload on dev-net node 14.
fn main() {
	mut logger := &log.Log{}
	logger.set_level(.debug)
	mnemonics := tfgrid.get_mnemonics() or {
		logger.error(err.str())
		exit(1)
	}
	chain_network := tfgrid.ChainNetwork.dev // Use your desired network
	mut deployer := tfgrid.new_deployer(mnemonics, chain_network, mut logger)!
	// size is in bytes: 2 MiB here
	zdb := models.Zdb{
		size: u64(2) * 1024 * 1024
		mode: 'user'
		password: 'pass'
	}
	wl := zdb.to_workload(name: 'mywlname')
	// only this twin signs, with full weight
	signature_requirement := models.SignatureRequirement{
		weight_required: 1
		requests: [
			models.SignatureRequest{
				twin_id: deployer.twin_id
				weight: 1
			},
		]
	}
	mut deployment := models.new_deployment(
		twin_id: deployer.twin_id
		workloads: [wl]
		signature_requirement: signature_requirement
	)
	node_id := u32(14)
	node_contract_id := deployer.deploy(node_id, mut deployment, '', 0)!
	logger.info('node contract created with id ${node_contract_id}')
}

View File

@@ -0,0 +1,185 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid.models
import freeflowuniverse.herolib.threefold.grid as tfgrid
import freeflowuniverse.herolib.threefold.gridproxy
import flag
import rand
import json
import log
import os
// Holochain dev tool: parses CLI flags, picks a suitable node via the
// grid proxy, then deploys a network + (optional public IP) + zmachine
// running the holochain flist, and finally prints the machine result.
fn main() {
	// --- CLI flags ---
	mut fp := flag.new_flag_parser(os.args)
	fp.application('Holochain dev tool')
	fp.version('v0.0.1')
	fp.skip_executable()
	mnemonics := fp.string_opt('mnemonic', `m`, 'Your Mnemonic phrase')!
	chain_network := fp.string('network', `n`, 'main', 'Your desired chain network (main, test, qa, dev). Defaults to main')
	ssh_key := fp.string_opt('ssh_key', `s`, 'Your public ssh key')!
	code_server_pass := fp.string('code_server_pass', `p`, 'password', 'Machine code server password. This will be set as a password for the code server on the deployed machine. Defaults to password')
	cpu := fp.int('cpu', `c`, 4, 'Machine CPU provisioning. Defaults to 4')
	memory := fp.int('ram', `r`, 8, 'Machine memory provisioning in GB. Defaults to 8')
	disk := fp.int('disk', `d`, 30, 'Machine Disk space provisioning in GB. Defaults to 30')
	public_ip := fp.bool('public_ip', `i`, false, 'True to allow public ip v4')
	mut logger := &log.Log{}
	logger.set_level(.debug)
	chain_net_enum := get_chain_network(chain_network)!
	mut deployer := tfgrid.new_deployer(mnemonics, chain_net_enum, mut logger)!
	mut workloads := []models.Workload{}
	// --- node selection via grid proxy ---
	node_id := get_node_id(chain_net_enum, memory, disk, cpu, public_ip)!
	// node_id := u32(150)
	logger.info('deploying on node: ${node_id}')
	// --- private network workload ---
	network_name := 'net_${rand.string(5).to_lower()}' // autocreate a network
	wg_port := deployer.assign_wg_port(node_id)!
	mut network := models.Znet{
		ip_range: '10.1.0.0/16' // auto-assign
		subnet: '10.1.1.0/24' // auto-assign
		wireguard_private_key: 'GDU+cjKrHNJS9fodzjFDzNFl5su3kJXTZ3ipPgUjOUE=' // autocreate
		wireguard_listen_port: wg_port
		mycelium: models.Mycelium{
			hex_key: rand.string(32).bytes().hex()
		}
	}
	workloads << network.to_workload(name: network_name, description: 'test_network1')
	// --- optional public IPv4 workload ---
	mut public_ip_name := ''
	if public_ip{
		public_ip_name = rand.string(5).to_lower()
		workloads << models.PublicIP{
			v4: true
		}.to_workload(name: public_ip_name)
	}
	// --- zmachine workload (holochain flist) ---
	zmachine := models.Zmachine{
		flist: 'https://hub.grid.tf/mariobassem1.3bot/threefolddev-holochain-latest.flist' // from user or default to ubuntu
		network: models.ZmachineNetwork{
			interfaces: [
				models.ZNetworkInterface{
					network: network_name
					ip: '10.1.1.3'
				},
			]
			public_ip: public_ip_name
			planetary: true
			mycelium: models.MyceliumIP{
				network: network_name
				hex_seed: rand.string(6).bytes().hex()
			}
		}
		entrypoint: '/sbin/zinit init' // from user or default
		compute_capacity: models.ComputeCapacity{
			cpu: u8(cpu)
			memory: i64(memory) * 1024 * 1024 * 1024 // GB -> bytes
		}
		size: u64(disk) * 1024 * 1024 * 1024 // GB -> bytes
		env: {
			'SSH_KEY': ssh_key
			'CODE_SERVER_PASSWORD': code_server_pass
		}
	}
	workloads << zmachine.to_workload(
		name: 'vm_${rand.string(5).to_lower()}'
		description: 'zmachine_test'
	)
	// only this twin signs, with full weight
	signature_requirement := models.SignatureRequirement{
		weight_required: 1
		requests: [
			models.SignatureRequest{
				twin_id: deployer.twin_id
				weight: 1
			},
		]
	}
	mut deployment := models.new_deployment(
		twin_id: deployer.twin_id
		description: 'holochain deployment'
		workloads: workloads
		signature_requirement: signature_requirement
	)
	deployment.add_metadata('vm', 'SimpleVM')
	// --- deploy and report result ---
	contract_id := deployer.deploy(node_id, mut deployment, deployment.metadata, 0) or {
		logger.error('failed to deploy deployment: ${err}')
		exit(1)
	}
	logger.info('deployment contract id: ${contract_id}')
	dl := deployer.get_deployment(contract_id, node_id) or {
		logger.error('failed to get deployment data: ${err}')
		exit(1)
	}
	// logger.info('deployment:\n${dl}')
	machine_res := get_machine_result(dl)!
	logger.info('zmachine result: ${machine_res}')
}
// Returns the decoded result of the first zmachine workload found in
// the deployment; errors when the deployment has no zmachine workload.
fn get_machine_result(dl models.Deployment) !models.ZmachineResult {
	for wl in dl.workloads {
		if wl.type_ != models.workload_types.zmachine {
			continue
		}
		return json.decode(models.ZmachineResult, wl.result.data)!
	}
	return error('failed to get zmachine workload')
}
// get_chain_network maps a network name ('dev', 'qa', 'test', 'main')
// to the corresponding tfgrid.ChainNetwork enum value.
// Returns an error for any other value.
fn get_chain_network(network string) !tfgrid.ChainNetwork {
	chain_net_enum := match network {
		'dev' { tfgrid.ChainNetwork.dev }
		'qa' { tfgrid.ChainNetwork.qa }
		'test' { tfgrid.ChainNetwork.test }
		'main' { tfgrid.ChainNetwork.main }
		// fixed typo in error message: 'newtork' -> 'network'
		else { return error('invalid chain network ${network}. must be one of (dev, qa, test, main)') }
	}
	return chain_net_enum
}
// get_node_id asks the grid proxy for a node with at least the
// requested free memory/disk (GB) and cpu cores, plus a free public
// IPv4 when public_ip is set, and returns that node's id.
// Errors when no matching node is found.
fn get_node_id(network tfgrid.ChainNetwork, memory int, disk int, cpu int, public_ip bool) !u32{
	gp_net := match network {
		.dev { gridproxy.TFGridNet.dev }
		.qa { gridproxy.TFGridNet.qa }
		.test { gridproxy.TFGridNet.test }
		.main { gridproxy.TFGridNet.main }
	}
	mut gridproxy_client := gridproxy.get(gp_net, false)!
	// request one free IPv4 only when the caller wants a public ip
	mut free_ips := u64(0)
	if public_ip{
		free_ips = 1
	}
	mut node_it := gridproxy_client.get_nodes_has_resources(
		free_mru_gb: u64(memory)
		free_sru_gb: u64(disk)
		free_cpu: u64(cpu)
		free_ips: free_ips
	)
	nodes := node_it.next()
	// Fixed: the original indexed nodes_list[0] unconditionally, which
	// panics when the iterator yields an empty page; guard the length too.
	if nodes_list := nodes {
		if nodes_list.len > 0 {
			return u32(nodes_list[0].node_id)
		}
	}
	return error('cannot find a suitable node matching your specs')
}
/*
gridproxy call to assign node - done
generate private key for wireguard
add option to add public ip
*/

View File

@@ -0,0 +1,218 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid.models
import freeflowuniverse.herolib.threefold.grid as tfgrid
import freeflowuniverse.herolib.threefold.gridproxy
import freeflowuniverse.herolib.threefold.gridproxy.model {NodeFilter}
import rand
import log
import os
import flag
import json
// Returns the decoded result of the first zmachine workload found in
// the deployment; errors when the deployment has no zmachine workload.
fn get_machine_result(dl models.Deployment) !models.ZmachineResult {
	for wl in dl.workloads {
		if wl.type_ != models.workload_types.zmachine {
			continue
		}
		return json.decode(models.ZmachineResult, wl.result.data)!
	}
	return error('failed to get zmachine workload')
}
// Returns the decoded result of the first gateway_name workload found
// in the deployment; errors when none is present.
fn get_gateway_name_result(dl models.Deployment) !models.GatewayProxyResult {
	for wl in dl.workloads {
		if wl.type_ != models.workload_types.gateway_name {
			continue
		}
		return json.decode(models.GatewayProxyResult, wl.result.data)!
	}
	return error('failed to get gateway_name workload')
}
// get_chain_network maps a network name ('dev', 'qa', 'test', 'main')
// to the corresponding tfgrid.ChainNetwork enum value.
// Returns an error for any other value.
fn get_chain_network(network string) !tfgrid.ChainNetwork {
	chain_net_enum := match network {
		'dev' { tfgrid.ChainNetwork.dev }
		'qa' { tfgrid.ChainNetwork.qa }
		'test' { tfgrid.ChainNetwork.test }
		'main' { tfgrid.ChainNetwork.main }
		// fixed typo in error message: 'newtork' -> 'network'
		else { return error('invalid chain network ${network}. must be one of (dev, qa, test, main)') }
	}
	return chain_net_enum
}
// get_node_id picks one random up node via the grid proxy NodeFilter
// matching the requested resources (memory/disk in GB, converted to
// bytes for the filter) and returns its id.
// has_domain requests a gateway-capable node; available_for restricts
// results to nodes rentable by that twin.
fn get_node_id(network tfgrid.ChainNetwork, memory int, disk int, cpu int, public_ip bool, has_domain bool, available_for u64) !u32{
	gp_net := match network {
		.dev { gridproxy.TFGridNet.dev }
		.qa { gridproxy.TFGridNet.qa }
		.test { gridproxy.TFGridNet.test }
		.main { gridproxy.TFGridNet.main }
	}
	mut gridproxy_client := gridproxy.get(gp_net, false)!
	mut free_ips := u64(0)
	if public_ip{
		free_ips = 1
	}
	mut filter_ := NodeFilter{
		free_ips: free_ips
		// fixed: GB -> bytes conversion used 1204 instead of 1024
		free_mru: u64(memory) * (1024 * 1024 * 1024)
		free_sru: u64(disk) * (1024 * 1024 * 1024)
		total_cru: u64(cpu)
		domain: has_domain
		available_for: available_for
		status: 'up'
		randomize: true
		size: u64(1) // ask for exactly one random candidate
	}
	nodes := gridproxy_client.get_nodes(filter_)!
	if nodes.len != 1{
		return error('cannot find a suitable node matching your specs')
	}
	return u32(nodes[0].node_id)
}
// Script body: deploy a VM on a grid node, then put a name gateway in
// front of it (backend over the VM's planetary IP, port 9000).
// --- CLI flags ---
mut fp := flag.new_flag_parser(os.args)
fp.application('VM with gateway deployer tool')
fp.version('v0.0.1')
fp.skip_executable()
mnemonics := fp.string_opt('mnemonic', `m`, 'Your Mnemonic phrase')!
chain_network := fp.string('network', `n`, 'main', 'Your desired chain network (main, test, qa, dev). Defaults to main')
ssh_key := fp.string_opt('ssh_key', `s`, 'Your public ssh key')!
cpu := fp.int('cpu', `c`, 4, 'Machine CPU provisioning. Defaults to 4')
memory := fp.int('ram', `r`, 4, 'Machine memory provisioning in GB. Defaults to 4')
disk := fp.int('disk', `d`, 5, 'Machine Disk space provisioning in GB. Defaults to 5')
public_ip := fp.bool('public_ip', `i`, false, 'True to allow public ip v4')
mut logger := &log.Log{}
logger.set_level(.debug)
chain_net_enum := get_chain_network(chain_network)!
mut deployer := tfgrid.new_deployer(mnemonics, chain_net_enum, mut logger)!
mut workloads := []models.Workload{}
// --- pick a node matching the requested specs ---
node_id := get_node_id(chain_net_enum, memory, disk, cpu, public_ip, false, deployer.twin_id)!
// node_id := u32(150)
logger.info('deploying on node: ${node_id}')
// --- private network workload ---
network_name := 'net_${rand.string(5).to_lower()}' // autocreate a network
wg_port := deployer.assign_wg_port(node_id)!
mut network := models.Znet{
	ip_range: '10.1.0.0/16' // auto-assign
	subnet: '10.1.1.0/24' // auto-assign
	wireguard_private_key: 'GDU+cjKrHNJS9fodzjFDzNFl5su3kJXTZ3ipPgUjOUE=' // autocreate
	wireguard_listen_port: wg_port
	// mycelium: models.Mycelium{
	// 	hex_key: rand.string(32).bytes().hex()
	// }
}
workloads << network.to_workload(name: network_name, description: 'test_network1')
// --- optional public IPv4 workload ---
mut public_ip_name := ''
if public_ip{
	public_ip_name = rand.string(5).to_lower()
	workloads << models.PublicIP{
		v4: true
	}.to_workload(name: public_ip_name)
}
// --- zmachine workload ---
zmachine := models.Zmachine{
	flist: 'https://hub.grid.tf/tf-official-apps/base:latest.flist'
	network: models.ZmachineNetwork{
		interfaces: [
			models.ZNetworkInterface{
				network: network_name
				ip: '10.1.1.3'
			},
		]
		public_ip: public_ip_name
		planetary: true
		// mycelium: models.MyceliumIP{
		// 	network: network_name
		// 	hex_seed: rand.string(6).bytes().hex()
		// }
	}
	entrypoint: '/sbin/zinit init' // from user or default
	compute_capacity: models.ComputeCapacity{
		cpu: u8(cpu)
		memory: i64(memory) * 1024 * 1024 * 1024 // GB -> bytes
	}
	size: u64(disk) * 1024 * 1024 * 1024 // GB -> bytes
	env: {
		'SSH_KEY': ssh_key
	}
}
workloads << zmachine.to_workload(
	name: 'vm_${rand.string(5).to_lower()}'
	description: 'zmachine_test'
)
// only this twin signs, with full weight
signature_requirement := models.SignatureRequirement{
	weight_required: 1
	requests: [
		models.SignatureRequest{
			twin_id: deployer.twin_id
			weight: 1
		},
	]
}
mut deployment := models.new_deployment(
	twin_id: deployer.twin_id
	description: 'vm with gateway'
	workloads: workloads
	signature_requirement: signature_requirement
)
deployment.add_metadata('vm', 'SimpleVM')
// --- deploy the VM deployment and read back its result ---
contract_id := deployer.deploy(node_id, mut deployment, deployment.metadata, 0) or {
	logger.error('failed to deploy deployment: ${err}')
	exit(1)
}
logger.info('deployment contract id: ${contract_id}')
dl := deployer.get_deployment(contract_id, node_id) or {
	logger.error('failed to get deployment data: ${err}')
	exit(1)
}
machine_res := get_machine_result(dl)!
logger.info('zmachine result: ${machine_res}')
// --- gateway: name contract + gateway workload on a domain-capable node ---
gw_name := rand.string(5).to_lower()
gw := models.GatewayNameProxy{
	tls_passthrough: false
	backends: ['http://[${machine_res.planetary_ip}]:9000']
	name: gw_name
}
gw_workload := gw.to_workload(name: gw_name)
name_contract_id := deployer.client.create_name_contract(gw_name)!
logger.info('name contract ${gw_workload.name} created with id ${name_contract_id}')
mut gw_deployment := models.new_deployment(
	twin_id: deployer.twin_id
	workloads: [gw_workload]
	signature_requirement: signature_requirement
)
gw_node_id := get_node_id(chain_net_enum, 0, 0, 0, false, true, deployer.twin_id)!
gw_node_contract_id := deployer.deploy(gw_node_id, mut gw_deployment, '', 0)!
logger.info('gateway node contract created with id ${gw_node_contract_id}')
gateway_dl := deployer.get_deployment(gw_node_contract_id, gw_node_id) or {
	logger.error('failed to get deployment data: ${err}')
	exit(1)
}
gw_res := get_gateway_name_result(gateway_dl)!
logger.info('gateway: ${gw_res}')

View File

@@ -0,0 +1,39 @@
// Design sketch (pseudocode, not compilable yet): encrypted key/value
// store for deployment state, grouped per deployment name.
struct DeploymentStateDB{
	secret ... //to encrypt symmetric
	//...
}
// In-memory view of one deployment: its deployed VMs and ZDBs.
struct DeploymentState{
	name ...
	vms []VMDeployed
	zdbs []ZDBDeployed
	...
}
// Stores val under (deployment_name, key); encrypted with db.secret.
pub fn (db DeploymentStateDB) set(deployment_name string, key string, val string)! {
	//store e.g. \n separated list of all keys per deployment_name
	//encrypt
}
// Returns the decrypted value stored under (deployment_name, key).
pub fn (db DeploymentStateDB) get(deployment_name string, key string)!string {
}
// Removes the entry stored under (deployment_name, key).
pub fn (db DeploymentStateDB) delete(deployment_name string, key string)! {
}
// Lists all keys stored for deployment_name.
pub fn (db DeploymentStateDB) keys(deployment_name string)![]string {
}
// Loads the full state of one deployment.
pub fn (db DeploymentStateDB) load(deployment_name string)!DeploymentState {
}

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid as tfgrid
import log
// Cancels a single chain contract by id on dev-net and logs the result.
fn test_cancel_contract(contract_id u64) ! {
	mut logger := &log.Log{}
	logger.set_level(.debug)
	mnemonics := tfgrid.get_mnemonics()!
	chain_network := tfgrid.ChainNetwork.dev // Use your desired network
	mut deployer := tfgrid.new_deployer(mnemonics, chain_network, mut logger)!
	deployer.client.cancel_contract(contract_id)!
	deployer.logger.info('contract ${contract_id} is canceled')
}
// Entry point: cancel a hard-coded example contract id, printing failures.
fn main() {
	test_cancel_contract(u64(119497)) or { println('error happened: ${err}') }
}

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid as tfgrid
import log
// Cancels each of the given contract ids on dev-net, logging each one.
// Stops at the first failure because cancel_contract propagates errors.
fn test_cancel_contracts(contracts_ids []u64) ! {
	mut logger := &log.Log{}
	logger.set_level(.debug)
	mnemonics := tfgrid.get_mnemonics()!
	mut deployer := tfgrid.new_deployer(mnemonics, .dev, mut logger)!
	for cont_id in contracts_ids {
		deployer.client.cancel_contract(cont_id)!
		deployer.logger.info('contract ${cont_id} is canceled')
	}
}
// Entry point: cancel two hard-coded example contract ids, printing failures.
fn main() {
	test_cancel_contracts([u64(119493), u64(119492)]) or { println('error happened: ${err}') }
}

View File

@@ -0,0 +1,37 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid as tfgrid
// Example: load a named tfgrid client config, interactively complete
// missing fields, mutate and persist it, then load a second instance.
mut cl := tfgrid.get("my_config")!
mut cfg := cl.config()!
println(cl.instance)
cfg = cl.config()!
println(cfg)
if cfg.mnemonics == "" {
	// will ask questions if not filled in yet
	cl.config_interactive()!
}
println(cl.instance)
cfg = cl.config()!
println(cfg)
// cl.instance = 'new_name'
// mutate the config in memory, then persist it
cfg.mnemonics = ''
cfg.network = 'qa'
cl.config_save()!
println(cl.instance)
cfg = cl.config()!
println(cfg)
// switch to a second, not-yet-configured instance
cl = tfgrid.get("empty_config")!
println(cl.instance)
cfg = cl.config()!
println(cfg)
// TO CONFIGURE NEW
// cl.config_delete()!

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.threefold.grid as tfgrid
import freeflowuniverse.herolib.threefold.griddriver { Client }
import freeflowuniverse.herolib.ui.console
import log
// Looks up the node's twin id, then queries that twin for its zos
// version and logs it.
fn test_get_zos_version(node_id u32) ! {
	mut logger := &log.Log{}
	logger.set_level(.debug)
	mnemonics := tfgrid.get_mnemonics()!
	chain_network := tfgrid.ChainNetwork.dev // Use your desired network
	mut deployer := tfgrid.new_deployer(mnemonics, chain_network, mut logger)!
	node_twin_id := deployer.client.get_node_twin(node_id)!
	zos_version := deployer.client.get_zos_version(node_twin_id)!
	deployer.logger.info('Zos version is: ${zos_version}')
}
// Entry point: query zos version of node 14, printing any failure.
fn main() {
	test_get_zos_version(u32(14)) or { println('error happened: ${err}') }
}

View File

@@ -0,0 +1,40 @@
// Design sketch (pseudocode, not compilable yet): high-level VM
// deployment API on top of the grid.
struct VMSpecs{
	deployment_name string
	name string
	nodeid string
	pub_sshkeys []string
	flist string //if any, if used then ostype not used
	ostype OSType
}
// Base images supported when no explicit flist is given.
enum OSType{
	ubuntu_22_04
	ubuntu_24_04
	arch
	alpine
}
// Result record for a deployed VM, including its overlay IPs.
struct VMDeployed{
	name string
	nodeid string
	//size ..
	guid string
	yggdrasil_ip string
	mycelium_ip string
}
// Wraps the deployed VM as a builder.Node for remote management.
pub fn (vm VMDeployed) builder_node() builder.Node {
}
//only connect to yggdrasil and mycelium
//
// Deploys a VM per args_ and records the result in deploymentstate_db.
// NOTE(review): body references 'args' but the parameter is 'args_'.
fn vm_deploy(args_ VMSpecs) VMDeployed{
	deploymentstate_db.set(args.deployment_name,"vm_${args.name}",VMDeployed.json)
}

View File

@@ -0,0 +1,38 @@
// Design sketch (pseudocode, not compilable yet): node discovery API.
// Filter describing the node capacity/quality we are looking for.
struct NodeQuery{
	location string //how to define location
	capacity_available_hdd_gb int
	capacity_available_ssd_gb int
	capacity_available_mem_gb int
	capacity_available_vcpu int //vcpu core's
	capacity_free_hdd_gb int
	capacity_free_ssd_gb int
	capacity_free_mem_gb int
	capacity_free_vcpu int //vcpu core's
	uptime_min int = 70 //0..99
	bw_min_mb_sec int = 0 //bandwidth in mbit per second, min
}
// Node description returned by node_find; mirrors the NodeQuery fields
// plus the node's identifier.
struct NodeInfo{
	location string //how to define location
	capacity_available_hdd_gb int
	capacity_available_ssd_gb int
	capacity_available_mem_gb int
	capacity_available_vcpu int //vcpu core's
	capacity_free_hdd_gb int
	capacity_free_ssd_gb int
	capacity_free_mem_gb int
	capacity_free_vcpu int //vcpu core's
	uptime_min int = 70 //0..99
	bw_min_mb_sec int = 0 //bandwidth in mbit per second, min
	guid str
	...
}
// Returns all nodes matching the query. Implementation pending.
fn node_find(args_ NodeQuery) []NodeInfo{
}

View File

@@ -0,0 +1,18 @@
// Design sketch (pseudocode, not compilable yet): web-gateway rules.
struct WebGWArgs{
	deployment_name string
	//...
}
//connect domain name, or existing one to it
// Deploys a web gateway rule for the deployment. Implementation pending.
// NOTE(review): []VMDeployed return type looks like a placeholder — confirm.
fn webgateway_rule_deploy(args_ WebGWArgs) []VMDeployed{
}

View File

@@ -0,0 +1,30 @@
// Design sketch (pseudocode, not compilable yet): ZDB deployment API.
struct ZDBSpecs{
	deployment_name string
	nodeid string
	namespace string
	secret string
}
// Result record for a deployed ZDB namespace.
struct ZDBDeployed{
	nodeid string
	namespace string
	secret string
}
//test zdb is answering
pub fn (vm ZDBDeployed) ping() bool {
}
// Returns a redis client connected to the zdb (return type to be defined).
pub fn (vm ZDBDeployed) redisclient() redisclient... {
}
//only connect to yggdrasil and mycelium
//
// Deploys a zdb per args_. Implementation pending.
fn zdb_deploy(args_ ZDBSpecs) ZDBDeployed{
}

Some files were not shown because too many files have changed in this diff Show More