Merge branch 'development' of https://github.com/freeflowuniverse/herolib into development

2025-01-23 09:42:45 +01:00
63 changed files with 1977 additions and 1148 deletions

View File

@@ -1,4 +1,4 @@
name: Build Hero & Run tests
name: Build Hero on Linux & Run tests
permissions:
contents: write
@@ -22,7 +22,7 @@ jobs:
# os: macos-latest
# short-name: macos-arm64
# - target: x86_64-apple-darwin
# os: macos-latest
# os: macos-13
# short-name: macos-i64
runs-on: ${{ matrix.os }}
steps:
@@ -47,30 +47,24 @@ jobs:
ln -s $GITHUB_WORKSPACE/lib ~/.vmodules/freeflowuniverse/herolib
echo "Installing secp256k1..."
if [ "${{ matrix.os }}" = "macos-latest" ]; then
brew install secp256k1
elif [ "${{ matrix.os }}" = "ubuntu-latest" ]; then
# Install build dependencies
sudo apt-get install -y build-essential wget autoconf libtool
# Install build dependencies
sudo apt-get install -y build-essential wget autoconf libtool
# Download and extract secp256k1
cd /tmp
wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
tar -xvf v0.3.2.tar.gz
# Download and extract secp256k1
cd /tmp
wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
tar -xvf v0.3.2.tar.gz
# Build and install
cd secp256k1-0.3.2/
./autogen.sh
./configure
make -j 5
sudo make install
# Build and install
cd secp256k1-0.3.2/
./autogen.sh
./configure
make -j 5
sudo make install
# Cleanup
rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
# Cleanup
rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
else
echo "secp256k1 installation not implemented for ${OSNAME}"
exit 1
fi
echo "secp256k1 installation complete!"
- name: Install and Start Redis
@@ -82,32 +76,18 @@ jobs:
# Install Redis
sudo apt-get update
sudo apt-get install -y redis
# Start Redis
redis-server --daemonize yes
# Print versions
redis-cli --version
redis-server --version
# Start Redis
sudo systemctl start redis-server
redis-cli ping
- name: Build Hero
run: |
if [ "${{ matrix.os }}" = "ubuntu-latest" ]; then
v -cg -enable-globals -w -n cli/hero.v
# else if [ "${{ matrix.os }}" = "macos-latest" ]; then
# v -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals cli/hero.v
fi
v -cg -enable-globals -w -n cli/hero.v
- name: Do all the basic tests
run: |
alias vtest='v -stats -enable-globals -n -w -cg -gc none -no-retry-compilation -cc tcc test'
./test_basic.vsh
# - name: Upload to S3
# run: |
# echo 'export S3KEYID=${{ secrets.S3KEYID }}' > ${HOME}/mysecrets.sh
# echo 'export S3APPID=${{ secrets.S3APPID }}' >> ${HOME}/mysecrets.sh
# set -e && cat ${HOME}/mysecrets.sh
# sudo bash +x scripts/githubactions.sh
- name: Extract tag name
run: echo "TAG_NAME=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV

.github/workflows/hero_build_macos.yml vendored Normal file
View File

@@ -0,0 +1,66 @@
name: Build Hero on Macos & Run tests
permissions:
contents: write
on:
push:
workflow_dispatch:
jobs:
build:
strategy:
matrix:
include:
- target: aarch64-apple-darwin
os: macos-latest
short-name: macos-arm64
- target: x86_64-apple-darwin
os: macos-13
short-name: macos-i64
runs-on: ${{ matrix.os }}
steps:
- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
- run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
- run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}."
- name: Check out repository code
uses: actions/checkout@v3
- name: Setup Vlang
run: |
git clone --depth=1 https://github.com/vlang/v
cd v
make
sudo ./v symlink
cd ..
- name: Setup Herolib
run: |
mkdir -p ~/.vmodules/freeflowuniverse
ln -s $GITHUB_WORKSPACE/lib ~/.vmodules/freeflowuniverse/herolib
echo "Installing secp256k1..."
brew install secp256k1
echo "secp256k1 installation complete!"
- name: Install and Start Redis
run: |
brew update
brew install redis
# Start Redis
redis-server --daemonize yes
# Print versions
redis-cli --version
redis-server --version
- name: Build Hero
run: |
v -w -cg -gc none -no-retry-compilation -d use_openssl -enable-globals cli/hero.v
- name: Do all the basic tests
run: |
./test_basic.vsh

.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,132 @@
name: Release
on:
push:
tags:
- v*
jobs:
upload:
strategy:
matrix:
include:
- target: aarch64-apple-darwin
os: macos-latest
short-name: macos-arm64
- target: x86_64-apple-darwin
os: macos-13
short-name: macos-i64
- target: x86_64-unknown-linux-musl
os: ubuntu-latest
short-name: linux-i64
runs-on: ${{ matrix.os }}
permissions:
contents: write
steps:
- name: Check out repository code
uses: actions/checkout@v4
- name: Setup Vlang
run: |
git clone --depth=1 https://github.com/vlang/v
cd v
make
sudo ./v symlink
cd ..
- name: Setup Herolib
run: |
mkdir -p ~/.vmodules/freeflowuniverse
ln -s $GITHUB_WORKSPACE/lib ~/.vmodules/freeflowuniverse/herolib
echo "Installing secp256k1..."
if [[ ${{ matrix.os }} == 'macos-latest' || ${{ matrix.os }} == 'macos-13' ]]; then
brew install secp256k1
elif [[ ${{ matrix.os }} == 'ubuntu-latest' ]]; then
# Install build dependencies
sudo apt-get install -y build-essential wget autoconf libtool
# Download and extract secp256k1
cd /tmp
wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
tar -xvf v0.3.2.tar.gz
# Build and install
cd secp256k1-0.3.2/
./autogen.sh
./configure
make -j 5
sudo make install
else
echo "Unsupported OS: ${{ matrix.os }}"
exit 1
fi
echo "secp256k1 installation complete!"
- name: Build Hero
run: |
v -w -cg -gc none -no-retry-compilation -d use_openssl -enable-globals cli/hero.v -o cli/hero-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v4
with:
name: hero-${{ matrix.target }}
path: cli/hero-${{ matrix.target }}
release_hero:
needs: upload
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Check out repository code
uses: actions/checkout@v4
# TODO: this adds commits that don't belong to this branch, check another action
# - name: Generate changelog
# id: changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.GITHUB_TOKEN }}
# headerLabel: "# 📑 Changelog"
# breakingLabel: "### 💥 Breaking"
# enhancementLabel: "### 🚀 Enhancements"
# bugsLabel: "### 🐛 Bug fixes"
# securityLabel: "### 🛡️ Security"
# issuesLabel: "### 📁 Other issues"
# prLabel: "### 📁 Other pull requests"
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]}}'
# onlyLastTag: true
# issues: false
# issuesWoLabels: false
# pullRequests: true
# prWoLabels: true
# author: true
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
- name: Download Artifacts
uses: actions/download-artifact@v4
with:
path: cli/bins
merge-multiple: true
- name: Release
uses: softprops/action-gh-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
name: Release ${{ github.ref_name }}
draft: false
fail_on_unmatched_files: true
# body: ${{ steps.changelog.outputs.changelog }}
files: cli/bins/*

View File

@@ -6,90 +6,41 @@ import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.installers.threefold.griddriver
fn main() {
mut installer := griddriver.get()!
installer.install()!
griddriver.install()!
// v := tfgrid3deployer.get()!
// println('cred: ${v}')
// deployment_name := "my_deployment27"
v := tfgrid3deployer.get()!
println('cred: ${v}')
deployment_name := 'my_deployment27'
// // mut deployment := tfgrid3deployer.new_deployment(deployment_name)!
// mut deployment := tfgrid3deployer.get_deployment(deployment_name)!
// // deployment.add_machine(name: "my_vm1" cpu: 1 memory: 2 planetary: true mycelium: tfgrid3deployer.Mycelium{} nodes: [u32(11)])
// // deployment.add_machine(name: "my_vm3" cpu: 1 memory: 2 planetary: true mycelium: tfgrid3deployer.Mycelium{} nodes: [u32(11)])
// // deployment.add_machine(name: "my_vm3" cpu: 1 memory: 2 planetary: true mycelium: tfgrid3deployer.Mycelium{} nodes: [u32(28)])
// // deployment.add_zdb(name: "my_zdb", password: "my_passw&rd", size: 2)
// // deployment.add_webname(name: 'mywebname2', backend: 'http://37.27.132.47:8000')
// // deployment.deploy()!
mut deployment := tfgrid3deployer.new_deployment(deployment_name)!
// mut deployment := tfgrid3deployer.get_deployment(deployment_name)!
deployment.add_machine(
name: 'my_vm1'
cpu: 1
memory: 2
planetary: false
public_ip4: true
mycelium: tfgrid3deployer.Mycelium{}
nodes: [u32(167)]
)
deployment.add_machine(
name: 'my_vm2'
cpu: 1
memory: 2
planetary: false
public_ip4: true
mycelium: tfgrid3deployer.Mycelium{}
// nodes: [u32(164)]
)
// // deployment.add_machine(name: "my_vm2" cpu: 2 memory: 3 planetary: true mycelium: true nodes: [u32(28)])
// // deployment.deploy()!
deployment.add_zdb(name: 'my_zdb', password: 'my_passw&rd', size: 2)
deployment.add_webname(name: 'mywebname2', backend: 'http://37.27.132.47:8000')
deployment.deploy()!
// deployment.remove_machine("my_vm1")!
// // deployment.remove_webname("mywebname2")!
// // deployment.remove_zdb("my_zdb")!
// // deployment.deploy()!
deployment.remove_machine('my_vm1')!
deployment.remove_webname('mywebname2')!
deployment.remove_zdb('my_zdb')!
deployment.deploy()!
// // deployment.vm_get("my_vm1")!
// // // deployment.remove_machine(name: "my_vm121")
// // // deployment.update_machine(name: "my_vm121")
// println("deployment: ${deployment}")
// If not sent: The client will create a network for the deployment.
// deployment.network = NetworkSpecs{
// name: 'hamadanetcafe'
// ip_range: '10.10.0.0/16'
// }
// deployment.add_machine(name: "my_vm121" cpu: 1 memory: 2 planetary: true mycelium: true nodes: [u32(11)])
// deployment.add_zdb(name: "my_zdb", password: "my_passw&rd", size: 2)
// deployment.add_webname(name: 'mywebname2', backend: 'http://37.27.132.47:8000')
// deployment.add_machine(name: "my_vm1" cpu: 1 memory: 2 planetary: true mycelium: true nodes: [u32(28)])
// deployment.deploy()!
// vm1 := deployment.vm_get("my_vm1")!
// reachable := vm1.healthcheck()!
// println("vm reachable: ${reachable}")
// if !reachable {
// deployment.vm_delete()!
// deployment.vm_deploy()!
// }
// if !rach {
// vm1.delete()!
// vm1.deploy()!
// }
/*
TODO: Agreed on
# Deploying a new deployment
mut deployment := tfgrid3deployer.new_deployment(deployment_name)!
deployment.add_machine(name: "my_vm121" cpu: 1 memory: 2 planetary: true mycelium: true nodes: [u32(11)])
deployment.add_zdb(name: "my_zdb", password: "my_passw&rd", size: 2)
deployment.deploy()!
# if the user wants to load the deployment and do some actions on it:
mut deployment := tfgrid3deployer.get_deployment(deployment_name)!
deployment.add_webname(name: 'mywebname2', backend: 'http://37.27.132.47:8000')
deployment.add_machine(name: "my_vm1" cpu: 1 memory: 2 planetary: true mycelium: true nodes: [u32(28)])
deployment.deploy()!
# The user wants to delete the recently deployed machine
mut deployment := tfgrid3deployer.get_deployment(deployment_name)!
deployment.remove_machine(name: "my_vm1")
deployment.deploy()!
# The user wants to update the first deployed machine
mut deployment := tfgrid3deployer.get_deployment(deployment_name)!
deployment.remove_machine(name: "my_vm1")
deployment.add_machine(name: "my_vm1" cpu: 1 memory: 2 planetary: true mycelium: true nodes: [u32(28)])
deployment.deploy()!
## PS: The same goes for ZDBs and Webnames
# How deploy works:
1. Let's assume the user wants to add one more workload
*/
tfgrid3deployer.delete_deployment(deployment_name)!
}

View File

@@ -12,13 +12,4 @@ v := tfgrid3deployer.get()!
println('cred: ${v}')
deployment_name := 'vm_caddy1'
mut deployment := tfgrid3deployer.get_deployment(deployment_name)!
deployment.remove_machine('vm_caddy1')!
deployment.deploy()!
os.rm('${os.home_dir()}/hero/db/0/session_deployer/${deployment_name}')!
deployment_name2 := 'vm_caddy_gw'
mut deployment2 := tfgrid3deployer.get_deployment(deployment_name2)!
deployment2.remove_webname('gwnamecaddy')!
deployment2.deploy()!
os.rm('${os.home_dir()}/hero/db/0/session_deployer/${deployment_name2}')!
tfgrid3deployer.delete_deployment(deployment_name)!

View File

@@ -29,12 +29,9 @@ println('vm1 info: ${vm1}')
vm1_public_ip4 := vm1.public_ip4.all_before('/')
deployment_name2 := 'vm_caddy_gw'
mut deployment2 := tfgrid3deployer.new_deployment(deployment_name2)!
deployment2.add_webname(name: 'gwnamecaddy', backend: 'http://${vm1_public_ip4}:80')
deployment2.deploy()!
gw1 := deployment2.webname_get('gwnamecaddy')!
deployment.add_webname(name: 'gwnamecaddy', backend: 'http://${vm1_public_ip4}:80')
deployment.deploy()!
gw1 := deployment.webname_get('gwnamecaddy')!
println('gw info: ${gw1}')
// Retry logic to wait for the SSH server to be up

View File

@@ -0,0 +1,7 @@
!!hero_code.generate_client
name:'livekit'
classname:'LivekitClient'
singleton:0
default:1
reset:0

View File

@@ -0,0 +1,9 @@
module livekit
// Client struct with the LiveKit server URL, API keys, and other shared data
pub struct Client {
pub:
url string @[required]
api_key string @[required]
api_secret string @[required]
}

View File

@@ -0,0 +1,6 @@
module livekit
pub fn new(client Client) Client {
return Client{...client}
}

View File

@@ -0,0 +1,25 @@
# livekit
To get started:
```vlang
import freeflowuniverse.herolib.clients.livekit
mut client := livekit.get()!
client...
```
## example heroscript
```hero
!!livekit.configure
livekit_url:''
livekit_api_key:''
livekit_api_secret:''
```
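
A minimal sketch of listing rooms with a configured client (this assumes the heroscript above has already been played, so `livekit.get()` can find the configuration):

```vlang
import freeflowuniverse.herolib.clients.livekit

fn main() {
	// load the configured client
	mut client := livekit.get()!
	// list_rooms creates a token with the room_list grant and POSTs to the RoomService API
	response := client.list_rooms()!
	for room in response.rooms {
		println('room ${room.name}: ${room.num_participants} participant(s)')
	}
}
```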

View File

@@ -0,0 +1,51 @@
module livekit
import net.http
import json
@[params]
pub struct ListRoomsParams {
names []string
}
pub struct ListRoomsResponse {
pub:
rooms []Room
}
pub fn (c Client) list_rooms(params ListRoomsParams) !ListRoomsResponse {
// Prepare request body
request := params
request_json := json.encode(request)
// create token and give grant to list rooms
mut token := c.new_access_token()!
token.grants.video.room_list = true
// make POST request
url := '${c.url}/twirp/livekit.RoomService/ListRooms'
// Configure HTTP request
mut headers := http.new_header_from_map({
http.CommonHeader.authorization: 'Bearer ${token.to_jwt()!}',
http.CommonHeader.content_type: 'application/json'
})
response := http.fetch(http.FetchConfig{
url: url
method: .post
header: headers
data: request_json
})!
if response.status_code != 200 {
return error('Failed to list rooms: ${response.status_code}')
}
// Parse response
rooms_response := json.decode(ListRoomsResponse, response.body) or {
return error('Failed to parse response: ${err}')
}
return rooms_response
}

View File

@@ -0,0 +1,33 @@
module livekit
import net.http
import json
pub struct Codec {
pub:
fmtp_line string
mime string
}
pub struct Version {
pub:
ticks u64
unix_micro string
}
pub struct Room {
pub:
active_recording bool
creation_time string
departure_timeout int
empty_timeout int
enabled_codecs []Codec
max_participants int
metadata string
name string
num_participants int
num_publishers int
sid string
turn_password string
version Version
}

View File

@@ -0,0 +1,21 @@
module livekit
import os
import freeflowuniverse.herolib.osal
fn testsuite_begin() ! {
osal.load_env_file('${os.dir(@FILE)}/.env')!
}
fn new_test_client() Client {
return new(
url: os.getenv('LIVEKIT_URL')
api_key: os.getenv('LIVEKIT_API_KEY')
api_secret: os.getenv('LIVEKIT_API_SECRET')
)
}
fn test_client_list_rooms() ! {
client := new_test_client()
rooms := client.list_rooms()!
}

View File

@@ -0,0 +1,34 @@
module livekit
import time
import rand
import crypto.hmac
import crypto.sha256
import encoding.base64
import json
// Define AccessTokenOptions struct
@[params]
pub struct AccessTokenOptions {
pub mut:
ttl int = 21600 // TTL in seconds
name string // Display name for the participant
identity string // Identity of the user
metadata string // Custom metadata to be passed to participants
}
// Constructor for AccessToken
pub fn (client Client) new_access_token(options AccessTokenOptions) !AccessToken {
return AccessToken{
api_key: client.api_key
api_secret: client.api_secret
identity: options.identity
ttl: options.ttl
grants: ClaimGrants{
exp: time.now().unix() + options.ttl
iss: client.api_key
sub: options.name
name: options.name
}
}
}

View File

@@ -0,0 +1,76 @@
module livekit
import time
import rand
import crypto.hmac
import crypto.sha256
import encoding.base64
import json
// Struct representing grants
pub struct ClaimGrants {
pub mut:
video VideoGrant
iss string
exp i64
nbf int
sub string
name string
}
// VideoGrant struct placeholder
pub struct VideoGrant {
pub mut:
room string
room_join bool @[json: 'roomJoin']
room_list bool @[json: 'roomList']
can_publish bool @[json: 'canPublish']
can_publish_data bool @[json: 'canPublishData']
can_subscribe bool @[json: 'canSubscribe']
}
// SIPGrant struct placeholder
struct SIPGrant {}
// AccessToken struct
pub struct AccessToken {
mut:
api_key string
api_secret string
grants ClaimGrants
identity string
ttl int
}
// Method to add a video grant to the token
pub fn (mut token AccessToken) add_video_grant(grant VideoGrant) {
token.grants.video = grant
}
// Method to generate a JWT token
pub fn (token AccessToken) to_jwt() !string {
// Create JWT payload
payload := json.encode(token.grants)
println('payload: ${payload}')
// Create JWT header
header := '{"alg":"HS256","typ":"JWT"}'
// Encode header and payload in base64
header_encoded := base64.url_encode_str(header)
payload_encoded := base64.url_encode_str(payload)
// Create the unsigned token
unsigned_token := '${header_encoded}.${payload_encoded}'
// Create the HMAC-SHA256 signature
signature := hmac.new(token.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, sha256.block_size)
// Encode the signature in base64
signature_encoded := base64.url_encode(signature)
// Create the final JWT
jwt := '${unsigned_token}.${signature_encoded}'
return jwt
}

View File

@@ -1,6 +1,6 @@
module mailclient
// import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.base
// import freeflowuniverse.herolib.core.playbook
// __global (
@@ -45,11 +45,11 @@ module mailclient
// mailclient_default = name
// }
// fn config_exists(args_ ArgsGet) bool {
// mut args := args_get(args_)
// mut context := base.context() or { panic('bug') }
// return context.hero_config_exists('mailclient', args.name)
// }
fn config_exists(args_ ArgsGet) bool {
mut args := args_get(args_)
mut context := base.context() or { panic('bug') }
return context.hero_config_exists('mailclient', args.name)
}
// fn config_load(args_ ArgsGet) ! {
// mut args := args_get(args_)

View File

@@ -36,7 +36,7 @@ pub fn get(args_ ArgsGet) !&MailClient {
if !config_exists(args) {
if default {
mut context := base.context() or { panic('bug') }
context.hero_config_set('mailclient', model.name, heroscript_default()!)!
context.hero_config_set('mailclient', args.name, heroscript_default())!
}
}
load(args)!
@@ -44,7 +44,7 @@ pub fn get(args_ ArgsGet) !&MailClient {
}
return mailclient_global[args.name] or {
println(mailclient_global)
panic('could not get config for ${args.name} with name:${model.name}')
panic('could not get config for ${args.name} with name:${args.name}')
}
}
@@ -70,12 +70,12 @@ pub fn load(args_ ArgsGet) ! {
play(heroscript: heroscript)!
}
// save the config to the filesystem in the context
pub fn save(o MailClient) ! {
mut context := base.context()!
heroscript := encoderhero.encode[MailClient](o)!
context.hero_config_set('mailclient', model.name, heroscript)!
}
// // save the config to the filesystem in the context
// pub fn save(o MailClient) ! {
// mut context := base.context()!
// heroscript := encoderhero.encode[MailClient](o)!
// context.hero_config_set('mailclient', model.name, heroscript)!
// }
@[params]
pub struct PlayArgs {
@@ -89,7 +89,7 @@ pub fn play(args_ PlayArgs) ! {
mut model := args_
if model.heroscript == '' {
model.heroscript = heroscript_default()!
model.heroscript = heroscript_default()
}
mut plbook := model.plbook or { playbook.new(text: model.heroscript)! }
@@ -97,10 +97,7 @@ pub fn play(args_ PlayArgs) ! {
if configure_actions.len > 0 {
for config_action in configure_actions {
mut p := config_action.params
mycfg := cfg_play(p)!
console.print_debug('install action mailclient.configure\n${mycfg}')
set(mycfg)!
save(mycfg)!
cfg_play(p)!
}
}
}

View File

@@ -73,7 +73,7 @@ to call in code
import freeflowuniverse.herolib.core.generator.generic
generic.scan(path:"~/code/github/freeflowuniverse/crystallib/crystallib/installers",force:true)!
generic.scan(path:"~/code/github/freeflowuniverse/herolib/herolib/installers",force:true)!
```
@@ -81,6 +81,6 @@ generic.scan(path:"~/code/github/freeflowuniverse/crystallib/crystallib/installe
to run from bash
```bash
~/code/github/freeflowuniverse/crystallib/scripts/fix_installers.vsh
~/code/github/freeflowuniverse/herolib/scripts/fix_installers.vsh
```

View File

@@ -116,7 +116,6 @@ pub fn play(args_ PlayArgs) ! {
for install_action in install_actions {
mut p := install_action.params
cfg_play(p)!
console.print_debug("install action ${args.name}.configure\n${mycfg}")
}
}
@end

View File

@@ -103,17 +103,17 @@ fn cmd_bootstrap_execute(cmd Command) ! {
}
if compileupload {
// mycmd:='
// \${HOME}/code/github/freeflowuniverse/crystallib/scripts/package.vsh
// \${HOME}/code/github/freeflowuniverse/herolib/scripts/package.vsh
// '
// osal.exec(cmd: mycmd)!
println('please execute:\n~/code/github/freeflowuniverse/crystallib/scripts/githubactions.sh')
println('please execute:\n~/code/github/freeflowuniverse/herolib/scripts/githubactions.sh')
}
if update {
// mycmd:='
// \${HOME}/code/github/freeflowuniverse/crystallib/scripts/package.vsh
// \${HOME}/code/github/freeflowuniverse/herolib/scripts/package.vsh
// '
// osal.exec(cmd: mycmd)!
println('please execute:\n~/code/github/freeflowuniverse/crystallib/scripts/install_hero.sh')
println('please execute:\n~/code/github/freeflowuniverse/herolib/scripts/install_hero.sh')
}
}

View File

@@ -13,8 +13,8 @@ pub fn cmd_init(mut cmdroot Command) {
Initialization Helpers for Hero
-r will reset everything e.g. done states (when installing something)
-d will put the platform in development mode, get V, crystallib, hero...
-c will compile hero on local platform (requires local crystallib)
-d will put the platform in development mode, get V, herolib, hero...
-c will compile hero on local platform (requires local herolib)
'
description: 'initialize hero environment (reset, development mode, ...)'
@@ -58,7 +58,7 @@ Initialization Helpers for Hero
required: false
name: 'gitpull'
abbrev: 'gp'
description: 'will try to pull git repos for crystallib.'
description: 'will try to pull git repos for herolib.'
})
cmd_run.add_flag(Flag{

View File

@@ -40,7 +40,7 @@ pub fn cmd_installers(mut cmdroot Command) {
required: false
name: 'gitpull'
abbrev: 'gp'
description: 'e.g. in crystallib or other git repo pull changes.'
description: 'e.g. in herolib or other git repo pull changes.'
})
cmd_run.add_flag(Flag{
@@ -48,7 +48,7 @@ pub fn cmd_installers(mut cmdroot Command) {
required: false
name: 'gitreset'
abbrev: 'gr'
description: 'e.g. in crystallib or other git repo pull & reset changes.'
description: 'e.g. in herolib or other git repo pull & reset changes.'
})
cmdroot.add_command(cmd_run)
}

lib/core/log/backend_db.v Normal file
View File

@@ -0,0 +1,25 @@
module log
import db.sqlite
pub struct DBBackend {
pub:
db sqlite.DB
}
@[params]
pub struct DBBackendConfig {
pub:
db sqlite.DB
}
// factory for DBBackend
pub fn new_backend(config DBBackendConfig) !DBBackend {
sql config.db {
create table Log
} or { panic(err) }
return DBBackend{
db: config.db
}
}

lib/core/log/events.v Normal file
View File

@@ -0,0 +1,10 @@
module log
import time
@[params]
pub struct ViewEvent {
pub mut:
page string
duration time.Duration
}

lib/core/log/factory.v Normal file
View File

@@ -0,0 +1,18 @@
module log
import db.sqlite
pub struct Logger {
db_path string
// DBBackend
}
pub fn new(db_path string) !Logger {
db := sqlite.connect(db_path)!
sql db {
create table Log
} or { panic(err) }
return Logger{
db_path: db_path
}
}

lib/core/log/logger.v Normal file
View File

@@ -0,0 +1,55 @@
module log
import db.sqlite
pub fn (logger Logger) new_log(log Log) ! {
db := sqlite.connect(logger.db_path)!
sql db {
insert log into Log
}!
}
pub struct LogFilter {
Log
matches_all bool
limit int
}
pub fn (logger Logger) filter_logs(filter LogFilter) ![]Log {
db := sqlite.connect(logger.db_path)!
mut select_stmt := 'select * from Log'
mut matchers := []string{}
if filter.event != '' {
matchers << "event == '${filter.event}'"
}
if filter.subject != '' {
matchers << "subject == '${filter.subject}'"
}
if filter.object != '' {
matchers << "object == '${filter.object}'"
}
if matchers.len > 0 {
matchers_str := if filter.matches_all {
matchers.join(' AND ')
} else {
matchers.join(' OR ')
}
select_stmt += ' where ${matchers_str}'
}
responses := db.exec(select_stmt)!
mut logs := []Log{}
for response in responses {
logs << sql db {
select from Log where id == response.vals[0].int()
}!
}
return logs
}
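// A minimal usage sketch of the logger above (the path is illustrative):
//
//	logger := new('/tmp/herolib_logs.sqlite')!
//	logger.new_log(Log{ event: 'view', subject: 'user-1', object: 'page-1' })!
//	logs := logger.filter_logs(LogFilter{ Log: Log{ event: 'view' } })!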

lib/core/log/model.v Normal file
View File

@@ -0,0 +1,32 @@
module log
import time
pub struct Log {
id int @[primary; sql: serial]
pub:
timestamp time.Time
pub mut:
event string
subject string
object string
message string // a custom message that can be attached to a log
}
// pub struct Event {
// name string
// description string
// }
// // log_request logs http requests
// pub fn create_log(log Log) Log {
// return Log{
// ...log
// timestamp: time.now()
// })
// }
// // log_request logs http requests
// pub fn (mut a Analyzer) get_logs(subject string) []Log {
// return []Log{}
// }

View File

@@ -43,12 +43,15 @@ pub fn (action Action) heroscript() string {
if action.comments.len > 0 {
out += texttools.indent(action.comments, '// ')
}
if action.actiontype == .sal {
if action.actiontype == .dal {
out += '!'
} else if action.actiontype == .sal {
out += '!!'
} else if action.actiontype == .macro {
out += '!!!'
} else {
panic('only action sal and macro supported for now,\n${action}')
panic('only action sal and macro supported for now')
}
if action.actor != '' {

View File

@@ -146,7 +146,7 @@ pub fn (mut db DB) set(args_ SetArgs) !u32 {
args.id = db.parent.incr()!
pathsrc = db.path_get(args.id)!
}
console.print_debug('keydb ${pathsrc}')
if db.config.encrypted {
args.valueb = aes_symmetric.encrypt(args.valueb, db.secret()!)
pathsrc.write(base64.encode(args.valueb))!

View File

@@ -3,7 +3,7 @@
## page_not_found
path: /Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/collection/testdata/export_test/mytree/dir1/dir2/file1.md
path: /Users/timurgordon/code/github/freeflowuniverse/herolib/herolib/data/doctree/collection/testdata/export_test/mytree/dir1/dir2/file1.md
msg: page col3:file5.md not found

View File

@@ -1 +1 @@
name:col1 src:'/Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/testdata/export_test/mytree/dir1'
name:col1 src:'/Users/timurgordon/code/github/freeflowuniverse/herolib/herolib/data/doctree/testdata/export_test/mytree/dir1'

View File

@@ -3,7 +3,7 @@
## page_not_found
path: /Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/testdata/export_test/mytree/dir1/dir2/file1.md
path: /Users/timurgordon/code/github/freeflowuniverse/herolib/herolib/data/doctree/testdata/export_test/mytree/dir1/dir2/file1.md
msg: page col3:file5.md not found

View File

@@ -1 +1 @@
name:col2 src:'/Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/testdata/export_test/mytree/dir3'
name:col2 src:'/Users/timurgordon/code/github/freeflowuniverse/herolib/herolib/data/doctree/testdata/export_test/mytree/dir3'

View File

@@ -102,7 +102,7 @@ pub fn (mut gs GitStructure) gitlocation_from_url(url string) !GitLocation {
}
}
// Return a crystallib path object on the filesystem pointing to the locator
// Return a herolib path object on the filesystem pointing to the locator
pub fn (mut l GitLocation) patho() !pathlib.Path {
mut addrpath := pathlib.get_dir(path: '${l.provider}/${l.account}/${l.name}', create: false)!
if l.path.len > 0 {

View File

@@ -1,7 +1,7 @@
export PATH=${home_dir}/hero/bin:$PATH
export TERM=xterm
cd ${home_dir}/code/github/freeflowuniverse/crystallib/cli/hero
cd ${home_dir}/code/github/freeflowuniverse/herolib/cli/hero
PRF="${home_dir}/.profile"
[ -f "$PRF" ] && source "$PRF"

View File

@@ -85,7 +85,7 @@ print("==RESULT==")
print(json_string)
```
> see `crystallib/examples/lang/python/pythonexample.vsh`
> see `herolib/examples/lang/python/pythonexample.vsh`
## remark

View File

@@ -0,0 +1,9 @@
# Email authentication module
Module to verify a user's email by sending the user a link. The functions in this module can be implemented manually in a web server, but the recommended way is simply to use the API.
## API
## Examples
- see publisher/view/auth_controllers
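
For reference, a minimal client-side sketch, assuming an authentication `Controller` is already served on `http://localhost:8000` (the import path is illustrative and should point at wherever this module lives):

```vlang
import freeflowuniverse.herolib.security.authentication // illustrative path

fn main() {
	// client that talks to a running email authentication Controller
	client := authentication.EmailClient{
		url: 'http://localhost:8000'
	}
	// sends the verification mail and blocks until the user clicks the link (or times out)
	client.email_authentication(email: 'user@example.com')!
}
```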

View File

@@ -0,0 +1,245 @@
module authentication
import time
import net.smtp
import crypto.hmac
import crypto.sha256
import crypto.rand
import encoding.hex
import encoding.base64
import log
// Creates, updates, and authenticates email authentication sessions
@[noinit]
pub struct Authenticator {
secret string
mut:
config SmtpConfig @[required]
backend IBackend // Backend for authenticator
}
// Is initialized when an auth link is sent
// Represents the state of the authentication session
pub struct AuthSession {
pub mut:
email string
timeout time.Time
auth_code string // hex representation of 64 bytes
attempts_left int = 3
authenticated bool
}
@[params]
pub struct AuthenticatorConfig {
secret string
smtp SmtpConfig
backend IBackend
}
pub fn new(config AuthenticatorConfig) !Authenticator {
// send email with link in body
// mut client := smtp.new_client(
// server: config.smtp.server
// from: config.smtp.from
// port: config.smtp.port
// username: config.smtp.username
// password: config.smtp.password
// )!
return Authenticator{
config: config.smtp
// client: smtp.new_client(
// server: config.smtp.server
// from: config.smtp.from
// port: config.smtp.port
// username: config.smtp.username
// password: config.smtp.password
// )!
backend: config.backend
secret: config.secret
}
}
@[params]
pub struct SendMailConfig {
email string
mail VerificationMail
link string
}
pub struct VerificationMail {
pub:
from string = 'email_authenticator@spiderlib.ff'
subject string = 'Verify your email'
body string = 'Please verify your email by clicking the link below'
}
pub struct SmtpConfig {
server string
from string
port int
username string
password string
}
pub fn (mut auth Authenticator) email_authentication(config SendMailConfig) ! {
auth.send_verification_mail(config)!
auth.await_authentication(email: config.email)!
}
// sends mail with verification link
pub fn (mut auth Authenticator) send_verification_mail(config SendMailConfig) ! {
// create auth session
auth_code := rand.bytes(64) or { panic(err) }
auth.backend.create_auth_session(
email: config.email
auth_code: auth_code.hex()
timeout: time.now().add_seconds(180)
)!
link := '<a href="${config.link}/${config.email}/${auth_code.hex()}">Click to authenticate</a>'
mail := smtp.Mail{
to: config.email
from: config.mail.from
subject: config.mail.subject
body_type: .html
body: '${config.mail.body}\n${link}'
}
mut client := smtp.new_client(
server: auth.config.server
from: auth.config.from
port: auth.config.port
username: auth.config.username
password: auth.config.password
)!
client.send(mail) or { return error('Error resolving email address') }
client.quit() or { return error('Could not close connection to server') }
}
// sends mail with login link
pub fn (mut auth Authenticator) send_login_link(config SendMailConfig) ! {
expiration := time.now().add(5 * time.minute)
data := '${config.email}.${expiration}' // data to be signed
signature := hmac.new(hex.decode(auth.secret) or { panic(err) }, data.bytes(), sha256.sum,
sha256.block_size)
encoded_signature := base64.url_encode(signature.bytestr().bytes())
link := '<a href="${config.link}/${config.email}/${expiration.unix()}/${encoded_signature}">Click to login</a>'
mail := smtp.Mail{
to: config.email
from: config.mail.from
subject: config.mail.subject
body_type: .html
body: '${config.mail.body}\n${link}'
}
mut client := smtp.new_client(
server: auth.config.server
from: auth.config.from
port: auth.config.port
username: auth.config.username
password: auth.config.password
)!
client.send(mail) or { panic('Error resolving email address') }
client.quit() or { panic('Could not close connection to server') }
}
pub struct LoginAttempt {
pub:
email string
expiration time.Time
signature string
}
// verifies a login attempt made through a signed login link
pub fn (mut auth Authenticator) authenticate_login_attempt(attempt LoginAttempt) ! {
if time.now() > attempt.expiration {
return error('link expired')
}
data := '${attempt.email}.${attempt.expiration}' // data to be signed
signature_mirror := hmac.new(hex.decode(auth.secret) or { panic(err) }, data.bytes(),
sha256.sum, sha256.block_size).bytestr().bytes()
decoded_signature := base64.url_decode(attempt.signature)
if !hmac.equal(decoded_signature, signature_mirror) {
return error('signature mismatch')
}
}
// result of an authentication attempt
// returns time and attempts remaining
pub struct AttemptResult {
pub:
authenticated bool
attempts_left int
time_left time.Time
}
enum AuthErrorReason {
cypher_mismatch
no_remaining_attempts
session_not_found
}
struct AuthError {
Error
reason AuthErrorReason
}
// authenticates if the email/cypher combo is correct within the timeout and remaining attempts
// TODO: address-based request limits to prevent brute-force attacks
// TODO: max allowed requests per second to prevent DoS
pub fn (mut auth Authenticator) authenticate(email string, cypher string) ! {
session := auth.backend.read_auth_session(email) or {
return AuthError{
reason: .session_not_found
}
}
if session.attempts_left <= 0 { // checks if remaining attempts
return AuthError{
reason: .no_remaining_attempts
}
}
// authenticates if cypher in link matches authcode
if cypher == session.auth_code {
auth.backend.set_session_authenticated(email) or { panic(err) }
} else {
updated_session := AuthSession{
...session
attempts_left: session.attempts_left - 1
}
auth.backend.update_auth_session(updated_session)!
return AuthError{
reason: .cypher_mismatch
}
}
}
pub struct AwaitAuthParams {
email string @[required]
timeout time.Duration = 3 * time.minute
}
// function to check if an email is authenticated
pub fn (mut auth Authenticator) await_authentication(params AwaitAuthParams) ! {
stopwatch := time.new_stopwatch()
for stopwatch.elapsed() < params.timeout {
if auth.is_authenticated(params.email)! {
return
}
time.sleep(2 * time.second)
}
return error('Authentication timeout.')
}
// function to check if an email is authenticated
pub fn (mut auth Authenticator) is_authenticated(email string) !bool {
session := auth.backend.read_auth_session(email) or { return error('Cannot find session') }
return session.authenticated
}

View File

@@ -0,0 +1,14 @@
module authentication
import log
// Backend interface for creating, updating, and authenticating email authentication sessions
interface IBackend {
read_auth_session(string) ?AuthSession
mut:
logger &log.Logger
create_auth_session(AuthSession) !
update_auth_session(AuthSession) !
delete_auth_session(string) !
set_session_authenticated(string) !
}

View File

@@ -0,0 +1,93 @@
module authentication
import db.sqlite
import log
import time
// Creates, updates, and authenticates email authentication sessions
@[noinit]
struct DatabaseBackend {
mut:
db sqlite.DB
}
@[params]
pub struct DatabaseBackendConfig {
db_path string = 'email_authenticator.sqlite'
}
// factory for DatabaseBackend
pub fn new_database_backend(config DatabaseBackendConfig) !DatabaseBackend {
db := sqlite.connect(config.db_path) or { panic(err) }
sql db {
create table AuthSession
} or { panic(err) }
return DatabaseBackend{
// logger: config.logger
db: db
}
}
pub fn (auth DatabaseBackend) create_auth_session(session_ AuthSession) ! {
mut session := session_
if session.timeout.unix() == 0 {
session.timeout = time.now().add_seconds(180)
}
sql auth.db {
insert session into AuthSession
} or { panic('err:${err}') }
}
pub fn (auth DatabaseBackend) read_auth_session(email string) ?AuthSession {
session := sql auth.db {
select from AuthSession where email == '${email}'
} or { panic('err:${err}') }
return session[0] or { return none }
}
pub fn (auth DatabaseBackend) update_auth_session(session AuthSession) ! {
sql auth.db {
update AuthSession set attempts_left = session.attempts_left where email == session.email
} or { panic('err:${err}') }
}
pub fn (auth DatabaseBackend) set_session_authenticated(email string) ! {
sql auth.db {
update AuthSession set authenticated = true where email == email
} or { panic('err:${err}') }
}
pub fn (auth DatabaseBackend) delete_auth_session(email string) ! {
sql auth.db {
delete from AuthSession where email == '${email}'
} or { panic('err:${err}') }
}
// if session.attempts_left <= 0 { // checks if remaining attempts
// return AttemptResult{
// authenticated: false
// attempts_left: 0
// time_left:
// }
// }
// // authenticates if cypher in link matches authcode
// if cypher == auth.sessions[email].auth_code {
// auth.logger.debug(@FN + ':\nUser authenticated email: ${email}')
// auth.sessions[email].authenticated = true
// result := AttemptResult{
// authenticated: true
// attempts_left: auth.sessions[email].attempts_left
// }
// return result
// } else {
// auth.sessions[email].attempts_left -= 1
// result := AttemptResult{
// authenticated: false
// attempts_left: auth.sessions[email].attempts_left
// }
// return result
// }

View File

@@ -0,0 +1,59 @@
module authentication
import db.sqlite
import log
import time
const test_email = 'test@example.com'
const test_auth_code = '123ABC'
const test_db_name = 'email_authenticator.sqlite'
fn testsuite_begin() {
db := sqlite.connect(test_db_name) or { panic(err) }
sql db {
drop table AuthSession
} or { return }
}
fn testsuite_end() {
db := sqlite.connect(test_db_name) or { panic(err) }
sql db {
drop table AuthSession
} or { panic(err) }
}
fn test_database_backend() ! {
mut backend := new_database_backend()!
run_backend_tests(mut backend)!
backend.db.close()!
}
fn test_memory_backend() ! {
mut backend := new_memory_backend()!
run_backend_tests(mut backend)!
}
fn run_backend_tests(mut backend IBackend) ! {
session := AuthSession{
email: test_email
}
backend.create_auth_session(session)!
assert backend.read_auth_session(test_email)! == session
backend.update_auth_session(AuthSession{
...session
attempts_left: 1
})!
assert backend.read_auth_session(test_email)!.attempts_left == 1
backend.delete_auth_session(test_email)!
if _ := backend.read_auth_session(test_email) {
// should return none, so fails test
assert false
} else {
assert true
}
}

View File

@@ -0,0 +1,68 @@
module authentication
import net.http
import time
import json
// session controller that can be added to vweb projects
pub struct EmailClient {
url string @[required]
}
struct PostParams {
url string
data string
timeout time.Duration
}
fn (client EmailClient) post_request(params PostParams) !http.Response {
mut req := http.new_request(http.Method.post, params.url, params.data)
req.read_timeout = params.timeout
resp := req.do() or {
return error('Failed to send request to email authentication server: ${err.code}')
}
if resp.status_code == 404 {
return error('Could not find email verification endpoint, please make sure the auth client url is configured to the url the auth server is running at.')
}
if resp.status_code != 200 {
panic('Email verification request failed, this should never happen: ${resp.status_msg}')
}
return resp
}
// verify_email posts an email verification req to the email auth controller
pub fn (client EmailClient) email_authentication(params SendMailConfig) ! {
client.post_request(
url: '${client.url}/email_authentication'
data: json.encode(params)
timeout: 180 * time.second
)!
}
// verify_email posts an email verification req to the email auth controller
pub fn (client EmailClient) is_verified(address string) !bool {
resp := client.post_request(
url: '${client.url}/is_verified'
data: json.encode(address)
timeout: 180 * time.second
)!
return resp.body == 'true'
}
// send_verification_email posts an email verification req to the email auth controller
pub fn (client EmailClient) send_verification_email(params SendMailConfig) ! {
client.post_request(
url: '${client.url}/send_verification_mail'
data: json.encode(params)
) or { return error(err.msg()) }
}
// authenticate posts an authentication attempt req to the email auth controller
pub fn (c EmailClient) authenticate(address string, cypher string) !AttemptResult {
resp := http.post('${c.url}/authenticate', json.encode(AuthAttempt{
address: address
cypher: cypher
}))!
result := json.decode(AttemptResult, resp.body)!
return result
}

View File

@@ -0,0 +1,145 @@
module authentication
import vweb
import time
import json
import log
import freeflowuniverse.herolib.ui.console
const agent = 'Email Authentication Controller'
// email authentication controller that can be added to vweb projects
@[heap]
pub struct Controller {
vweb.Context
callback string @[vweb_global]
mut:
authenticator Authenticator @[vweb_global]
}
@[params]
pub struct ControllerParams {
logger &log.Logger
authenticator Authenticator @[required]
}
pub fn new_controller(params ControllerParams) Controller {
mut app := Controller{
authenticator: params.authenticator
}
return app
}
// route responsible for verifying email, email form should be posted here
@[POST]
pub fn (mut app Controller) send_verification_mail() !vweb.Result {
config := json.decode(SendMailConfig, app.req.data)!
app.authenticator.send_verification_mail(config) or { panic(err) }
return app.ok('')
}
// route responsible for checking whether an email address has been verified
@[POST]
pub fn (mut app Controller) is_verified() vweb.Result {
address := app.req.data
// checks if email verified every 2 seconds
for {
if app.authenticator.is_authenticated(address) or { panic(err) } {
// returns success message once verified
return app.ok('ok')
}
time.sleep(2 * time.second)
}
return app.html('timeout')
}
// route responsible for verifying email, email form should be posted here
@[POST]
pub fn (mut app Controller) email_authentication() vweb.Result {
config_ := json.decode(SendMailConfig, app.req.data) or {
app.set_status(422, 'Request payload does not follow anticipated formatting.')
return app.text('Request payload does not follow anticipated formatting.')
}
config := if config_.link == '' {
SendMailConfig{
...config_
link: 'http://localhost:8000/email_authenticator/authentication_link'
}
} else {
config_
}
app.authenticator.send_verification_mail(config) or { panic(err) }
// checks if email verified every 2 seconds
for {
if app.authenticator.is_authenticated(config.email) or { panic(err) } {
// returns success message once verified
return app.ok('ok')
}
time.sleep(2 * time.second)
}
return app.ok('success!')
}
// route responsible for verifying email, email form should be posted here
@[POST]
pub fn (mut app Controller) verify() vweb.Result {
config_ := json.decode(SendMailConfig, app.req.data) or {
app.set_status(422, 'Request payload does not follow anticipated formatting.')
return app.text('Request payload does not follow anticipated formatting.')
}
config := if config_.link == '' {
SendMailConfig{
...config_
link: 'http://localhost:8000/email_authenticator/authentication_link'
}
} else {
config_
}
app.authenticator.send_verification_mail(config) or { panic(err) }
// checks if email verified every 2 seconds
stopwatch := time.new_stopwatch()
for stopwatch.elapsed() < 180 * time.second {
authenticated := app.authenticator.is_authenticated(config.email) or {
return app.text(err.msg())
}
if authenticated {
console.print_debug('heyo yess')
return app.ok('success')
}
time.sleep(2 * time.second)
}
app.set_status(408, 'Email authentication timeout.')
return app.text('Email authentication timeout.')
}
pub struct AuthAttempt {
pub:
ip string
address string
cypher string
}
@[POST]
pub fn (mut app Controller) authenticate() !vweb.Result {
attempt := json.decode(AuthAttempt, app.req.data)!
app.authenticator.authenticate(attempt.address, attempt.cypher) or {
app.set_status(401, err.msg())
return app.text('Failed to authenticate')
}
return app.ok('Authentication successful')
}
@['/authentication_link/:address/:cypher']
pub fn (mut app Controller) authentication_link(address string, cypher string) !vweb.Result {
app.authenticator.authenticate(address, cypher) or {
app.set_status(401, err.msg())
return app.text('Failed to authenticate')
}
return app.html('Authentication successful')
}

View File

@@ -0,0 +1,46 @@
module authentication
import log
import net.smtp
import os
import toml
fn test_new_controller() {
mut logger := log.Logger(&log.Log{
level: .debug
})
env := toml.parse_file(os.dir(os.dir(@FILE)) + '/.env') or {
panic('Could not find .env, ${err}')
}
client := smtp.Client{
server: 'smtp-relay.brevo.com'
from: 'verify@authenticator.io'
port: 587
username: env.value('BREVO_SMTP_USERNAME').string()
password: env.value('BREVO_SMTP_PASSWORD').string()
}
controller := new_controller(logger: &logger)
}
fn test_send_verification_mail() {
// mut logger := log.Logger(&log.Log{
// level: .debug
// })
// env := toml.parse_file(os.dir(os.dir(@FILE)) + '/.env') or {
// panic('Could not find .env, ${err}')
// }
// client := smtp.Client{
// server: 'smtp-relay.brevo.com'
// from: 'verify@authenticator.io'
// port: 587
// username: env.value('BREVO_SMTP_USERNAME').string()
// password: env.value('BREVO_SMTP_PASSWORD').string()
// }
// controller := new_controller(logger: &logger)
}

View File

@@ -0,0 +1,106 @@
module authentication
import time
import crypto.hmac
import crypto.sha256
import encoding.hex
import encoding.base64
import freeflowuniverse.herolib.clients.mailclient { MailClient }
pub struct StatelessAuthenticator {
pub:
secret string
pub mut:
mail_client MailClient
}
pub fn new_stateless_authenticator(authenticator StatelessAuthenticator) !StatelessAuthenticator {
// TODO: do some checks
return StatelessAuthenticator{ ...authenticator }
}
pub struct AuthenticationMail {
RedirectURLs
pub:
to string // email address being authenticated
from string = 'email_authenticator@herolib.tf'
subject string = 'Verify your email'
body string = 'Please verify your email by clicking the link below'
callback string // callback url of authentication link
success_url string // where the user will be redirected upon successful authentication
failure_url string // where the user will be redirected upon failed authentication
}
pub fn (mut a StatelessAuthenticator) send_authentication_mail(mail AuthenticationMail) ! {
link := a.new_authentication_link(mail.to, mail.callback, mail.RedirectURLs)!
button := '<a href="${link}" style="display:inline-block; padding:10px 20px; font-size:16px; color:white; background-color:#4CAF50; text-decoration:none; border-radius:5px;">Verify Email</a>'
// send email with link in body
a.mail_client.send(
to: mail.to
from: mail.from
subject: mail.subject
body_type: .html
body: $tmpl('./templates/mail.html')
) or { return error('Error resolving email address: ${err}') }
}
@[params]
pub struct RedirectURLs {
pub:
success_url string
failure_url string
}
fn (a StatelessAuthenticator) new_authentication_link(email string, callback string, urls RedirectURLs) !string {
if urls.failure_url != '' {
panic('implement')
}
// sign email address and expiration of authentication link
expiration := time.now().add(5 * time.minute)
data := '${email}.${expiration}' // data to be signed
// QUESTION? should success url also be signed for security?
signature := hmac.new(
hex.decode(a.secret)!,
data.bytes(),
sha256.sum,
sha256.block_size
)
encoded_signature := base64.url_encode(signature.bytestr().bytes())
mut queries := ''
if urls.success_url != '' {
encoded_url := base64.url_encode(urls.success_url.bytes())
queries += '?success_url=${encoded_url}'
}
return "${callback}/${email}/${expiration.unix()}/${encoded_signature}${queries}"
}
pub struct AuthenticationAttempt {
pub:
email string
expiration time.Time
signature string
}
// verifies a signed authentication attempt from a mailed link
pub fn (auth StatelessAuthenticator) authenticate(attempt AuthenticationAttempt) ! {
if time.now() > attempt.expiration {
return error('link expired')
}
data := '${attempt.email}.${attempt.expiration}' // data to be signed
signature_mirror := hmac.new(
hex.decode(auth.secret) or {panic(err)},
data.bytes(),
sha256.sum,
sha256.block_size
).bytestr().bytes()
decoded_signature := base64.url_decode(attempt.signature)
if !hmac.equal(decoded_signature, signature_mirror) {
return error('signature mismatch')
}
}
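// A minimal round-trip sketch of the stateless flow above (values are illustrative):
//
//	mut a := new_stateless_authenticator(secret: hex_secret, mail_client: mail)!
//	a.send_authentication_mail(to: 'user@example.com', callback: 'https://myhost/auth')!
//	// the callback route parses email/expiration/signature out of the link, then:
//	a.authenticate(email: email, expiration: expiration, signature: signature)!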

View File

@@ -0,0 +1,49 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
body {
font-family: Arial, sans-serif;
color: #333333;
line-height: 1.6;
}
.container {
max-width: 600px;
margin: 0 auto;
padding: 20px;
border: 1px solid #dddddd;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.header {
font-size: 24px;
margin-bottom: 20px;
color: #4CAF50;
}
.content {
margin-bottom: 20px;
}
.footer {
font-size: 12px;
color: #888888;
margin-top: 20px;
}
</style>
</head>
<body>
<div class="container">
<div class="header">Verify Your Email</div>
<div class="content">
<p>Hello,</p>
<p>@{mail.body}</p>
<p>@{button}</p>
</div>
<div class="footer">
<p>If you did not request this email, please ignore it.</p>
<p>Thank you,<br>The OurWorld Team</p>
</div>
</div>
</body>
</html>

lib/security/jwt/jwt.v Normal file
View File

@@ -0,0 +1,175 @@
module jwt
import crypto.hmac
import crypto.sha256
import encoding.base64
import json
import x.json2
import time
import crypto.rand
import os
// JWT code in this page is from
// https://github.com/vlang/v/blob/master/examples/vweb_orm_jwt/src/auth_services.v
// credit to https://github.com/enghitalo
pub struct JsonWebToken {
JwtHeader
JwtPayload
}
struct JwtHeader {
alg string
typ string
}
// TODO: refactor to use single JWT interface
// todo: we can name these better
pub struct JwtPayload {
pub:
sub string // (subject)
iss string // (issuer)
exp time.Time // (expiration)
iat time.Time // (issued at)
aud string // (audience)
data string
}
// creates jwt with encoded payload and header
// DOESN'T handle data encryption, sensitive data should be encrypted
pub fn create_token(payload_ JwtPayload) JsonWebToken {
return JsonWebToken{
JwtHeader: JwtHeader{'HS256', 'JWT'}
JwtPayload: JwtPayload{
...payload_
iat: time.now()
}
}
}
pub fn create_secret() string {
bytes := rand.bytes(64) or { panic('Creating JWT Secret: ${err}') }
return bytes.bytestr()
}
pub fn (token JsonWebToken) sign(secret string) string {
header := base64.url_encode(json.encode(token.JwtHeader).bytes())
payload := base64.url_encode(json.encode(token.JwtPayload).bytes())
signature := base64.url_encode(hmac.new(secret.bytes(), '${header}.${payload}'.bytes(),
sha256.sum, sha256.block_size).bytestr().bytes())
return '${header}.${payload}.${signature}'
}
pub fn (token JsonWebToken) is_expired() bool {
return token.exp <= time.now()
}
pub type SignedJWT = string
pub fn (token SignedJWT) is_valid() bool {
return token.count('.') == 2
}
pub fn (token SignedJWT) verify(secret string) !bool {
if !token.is_valid() {
return error('Token `${token}` is not valid')
}
signature_mirror := hmac.new(secret.bytes(), token.all_before_last('.').bytes(), sha256.sum,
sha256.block_size).bytestr().bytes()
signature_token := base64.url_decode(token.all_after_last('.'))
return hmac.equal(signature_token, signature_mirror)
}
// decodes a signed token into a JsonWebToken
pub fn (token SignedJWT) decode() !JsonWebToken {
if !token.is_valid() {
return error('Token `${token}` is not valid')
}
header_urlencoded := token.split('.')[0]
header_json := base64.url_decode(header_urlencoded).bytestr()
header := json.decode(JwtHeader, header_json) or { panic('Decode header: ${err}') }
payload_urlencoded := token.split('.')[1]
payload_json := base64.url_decode(payload_urlencoded).bytestr()
payload := json.decode(JwtPayload, payload_json) or { panic('Decoding payload: ${err}') }
return JsonWebToken{
JwtHeader: header
JwtPayload: payload
}
}
// returns the given field from the token payload
pub fn (token SignedJWT) get_field(field string) !string {
if !token.is_valid() {
return error('Token `${token}` is not valid')
}
header_urlencoded := token.split('.')[0]
header_json := base64.url_decode(header_urlencoded).bytestr()
header := json.decode(JwtHeader, header_json) or { panic('Decode header: ${err}') }
payload_urlencoded := token.split('.')[1]
payload_json := base64.url_decode(payload_urlencoded).bytestr()
payload_raw := json2.raw_decode(payload_json) or { panic('Decoding payload: ${err}') }
payload_map := payload_raw.as_map()
return payload_map[field].str()
}
// returns the subject (sub) of a signed token
pub fn (token SignedJWT) decode_subject() !string {
decoded := token.decode()!
return decoded.sub
}
// verifies jwt cookie
pub fn verify_jwt(token string) bool {
if token == '' {
return false
}
secret := os.getenv('SECRET_KEY')
token_split := token.split('.')
signature_mirror := hmac.new(secret.bytes(), '${token_split[0]}.${token_split[1]}'.bytes(),
sha256.sum, sha256.block_size).bytestr().bytes()
signature_from_token := base64.url_decode(token_split[2])
return hmac.equal(signature_from_token, signature_mirror)
}
// verifies jwt using an asymmetric key
// todo: implement asymmetric verification
pub fn verify_jwt_assymetric(token string, pk string) bool {
return false
}
// returns the data field from the token payload
pub fn get_data(token string) !string {
if token == '' {
return error('Failed to decode token: token is empty')
}
payload := json.decode(JwtPayload, base64.url_decode(token.split('.')[1]).bytestr()) or {
panic(err)
}
return payload.data
}
// returns the decoded payload of a token
pub fn get_payload(token string) !JwtPayload {
if token == '' {
return error('Failed to decode token: token is empty')
}
encoded_payload := base64.url_decode(token.split('.')[1]).bytestr()
return json.decode(JwtPayload, encoded_payload)!
}
// // gets cookie token, returns access obj
// pub fn get_access(token string, username string) ?Access {
// if token == '' {
// return error('Cookie token is empty')
// }
// payload := json.decode(AccessPayload, base64.url_decode(token.split('.')[1]).bytestr()) or {
// panic(err)
// }
// if payload.user != username {
// return error('Access cookie is for different user')
// }
// return payload.access
// }
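// A minimal usage sketch of the API above (values are illustrative):
//
//	secret := create_secret()
//	token := create_token(sub: 'user-42', iss: 'herolib')
//	signed := SignedJWT(token.sign(secret))
//	assert signed.verify(secret)!
//	assert signed.decode_subject()! == 'user-42'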

View File

@@ -235,7 +235,7 @@ pub fn (mut d Deployer) wait_deployment(node_id u32, mut dl models.Deployment, w
if (time.now() - start).minutes() > 5 {
return error('failed to deploy deployment: contractID: ${contract_id}, some workloads are not ready after waiting 5 minutes')
} else {
d.logger.info('Waiting for deployment to become ready')
d.logger.info('Waiting for deployment with contract ${contract_id} to become ready')
time.sleep(500 * time.millisecond)
}
}

View File

@@ -1,498 +1,110 @@
module model
import json
type OptionU64 = EmptyOption | u64
type OptionBool = EmptyOption | bool
@[params]
pub struct FarmFilter {
pub mut:
page OptionU64 = EmptyOption{}
size OptionU64 = EmptyOption{}
ret_count OptionBool = EmptyOption{}
randomize OptionBool = EmptyOption{}
free_ips OptionU64 = EmptyOption{}
total_ips OptionU64 = EmptyOption{}
stellar_address string
pricing_policy_id OptionU64 = EmptyOption{}
farm_id OptionU64 = EmptyOption{}
twin_id OptionU64 = EmptyOption{}
name string
name_contains string
certification_type string
dedicated OptionBool = EmptyOption{}
country string
node_free_mru OptionU64 = EmptyOption{}
node_free_hru OptionU64 = EmptyOption{}
node_free_sru OptionU64 = EmptyOption{}
node_status string
node_rented_by OptionU64 = EmptyOption{}
node_available_for OptionU64 = EmptyOption{}
node_has_gpu OptionBool = EmptyOption{}
node_certified OptionBool = EmptyOption{}
page ?u64
size ?u64
ret_count ?bool
randomize ?bool
free_ips ?u64
total_ips ?u64
stellar_address ?string
pricing_policy_id ?u64
farm_id ?u64
twin_id ?u64
name ?string
name_contains ?string
certification_type ?string
dedicated ?bool
country ?string
node_free_mru ?u64
node_free_hru ?u64
node_free_sru ?u64
node_status ?string
node_rented_by ?u64
node_available_for ?u64
node_has_gpu ?bool
node_certified ?bool
}
// serialize FarmFilter to map
pub fn (f &FarmFilter) to_map() map[string]string {
mut m := map[string]string{}
match f.page {
EmptyOption {}
u64 {
m['page'] = f.page.str()
}
}
match f.size {
EmptyOption {}
u64 {
m['size'] = f.size.str()
}
}
match f.ret_count {
EmptyOption {}
bool {
m['ret_count'] = f.ret_count.str()
}
}
match f.randomize {
EmptyOption {}
bool {
m['randomize'] = f.randomize.str()
}
}
match f.free_ips {
EmptyOption {}
u64 {
m['free_ips'] = f.free_ips.str()
}
}
match f.total_ips {
EmptyOption {}
u64 {
m['total_ips'] = f.total_ips.str()
}
}
if f.stellar_address != '' {
m['stellar_address'] = f.stellar_address
}
match f.pricing_policy_id {
EmptyOption {}
u64 {
m['pricing_policy_id'] = f.pricing_policy_id.str()
}
}
match f.farm_id {
EmptyOption {}
u64 {
m['farm_id'] = f.farm_id.str()
}
}
match f.twin_id {
EmptyOption {}
u64 {
m['twin_id'] = f.twin_id.str()
}
}
if f.name != '' {
m['name'] = f.name
}
if f.name_contains != '' {
m['name_contains'] = f.name_contains
}
if f.certification_type != '' {
m['certification_type'] = f.certification_type
}
if f.country != '' {
m['country'] = f.country
}
match f.dedicated {
EmptyOption {}
bool {
m['dedicated'] = f.dedicated.str()
}
}
match f.node_available_for {
EmptyOption {}
u64 {
m['node_available_for'] = f.node_available_for.str()
}
}
match f.node_free_hru {
EmptyOption {}
u64 {
m['node_free_hru'] = f.node_free_hru.str()
}
}
match f.node_free_mru {
EmptyOption {}
u64 {
m['node_free_mru'] = f.node_free_mru.str()
}
}
match f.node_free_sru {
EmptyOption {}
u64 {
m['node_free_sru'] = f.node_free_sru.str()
}
}
match f.node_rented_by {
EmptyOption {}
u64 {
m['node_rented_by'] = f.node_rented_by.str()
}
}
match f.node_has_gpu {
EmptyOption {}
bool {
m['node_has_gpu'] = f.node_has_gpu.str()
}
}
match f.node_certified {
EmptyOption {}
bool {
m['node_certified'] = f.node_certified.str()
}
}
if f.node_status != '' {
m['node_status'] = f.node_status
}
return m
pub fn (f FarmFilter) to_map() map[string]string {
return to_map(f)
}
@[params]
pub struct ContractFilter {
pub mut:
page OptionU64 = EmptyOption{}
size OptionU64 = EmptyOption{}
ret_count OptionBool = EmptyOption{}
randomize OptionBool = EmptyOption{}
contract_id OptionU64 = EmptyOption{}
twin_id OptionU64 = EmptyOption{}
node_id OptionU64 = EmptyOption{}
contract_type string
state string
name string
number_of_public_ips OptionU64 = EmptyOption{}
deployment_data string
deployment_hash string
page ?u64
size ?u64
ret_count ?bool
randomize ?bool
contract_id ?u64
twin_id ?u64
node_id ?u64
contract_type ?string
state ?string
name ?string
number_of_public_ips ?u64
deployment_data ?string
deployment_hash ?string
}
// serialize ContractFilter to map
pub fn (f &ContractFilter) to_map() map[string]string {
mut m := map[string]string{}
match f.page {
EmptyOption {}
u64 {
m['page'] = f.page.str()
}
}
match f.size {
EmptyOption {}
u64 {
m['size'] = f.size.str()
}
}
match f.ret_count {
EmptyOption {}
bool {
m['ret_count'] = f.ret_count.str()
}
}
match f.randomize {
EmptyOption {}
bool {
m['randomize'] = f.randomize.str()
}
}
match f.contract_id {
EmptyOption {}
u64 {
m['contract_id'] = f.contract_id.str()
}
}
match f.twin_id {
EmptyOption {}
u64 {
m['twin_id'] = f.twin_id.str()
}
}
match f.node_id {
EmptyOption {}
u64 {
m['node_id'] = f.node_id.str()
}
}
if f.contract_type != '' {
m['type'] = f.contract_type
}
if f.state != '' {
m['state'] = f.state
}
if f.name != '' {
m['name'] = f.name
}
match f.number_of_public_ips {
EmptyOption {}
u64 {
m['number_of_public_ips'] = f.number_of_public_ips.str()
}
}
if f.deployment_data != '' {
m['deployment_data'] = f.deployment_data
}
if f.deployment_hash != '' {
m['deployment_hash'] = f.deployment_hash
}
return m
pub fn (f ContractFilter) to_map() map[string]string {
return to_map(f)
}
@[params]
pub struct NodeFilter {
pub mut:
page OptionU64 = EmptyOption{}
size OptionU64 = EmptyOption{}
ret_count OptionBool = EmptyOption{}
randomize OptionBool = EmptyOption{}
free_mru OptionU64 = EmptyOption{}
free_sru OptionU64 = EmptyOption{}
free_hru OptionU64 = EmptyOption{}
page ?u64
size ?u64
ret_count ?bool
randomize ?bool
free_mru ?u64
free_sru ?u64
free_hru ?u64
free_ips ?u64
total_mru OptionU64 = EmptyOption{}
total_sru OptionU64 = EmptyOption{}
total_hru OptionU64 = EmptyOption{}
total_cru OptionU64 = EmptyOption{}
city string
city_contains string
country string
country_contains string
farm_name string
farm_name_contains string
ipv4 OptionBool = EmptyOption{}
ipv6 OptionBool = EmptyOption{}
domain OptionBool = EmptyOption{}
status string
dedicated OptionBool = EmptyOption{}
healthy OptionBool = EmptyOption{}
rentable OptionBool = EmptyOption{}
rented_by OptionU64 = EmptyOption{}
rented OptionBool = EmptyOption{}
available_for OptionU64 = EmptyOption{}
total_mru ?u64
total_sru ?u64
total_hru ?u64
total_cru ?u64
city ?string
city_contains ?string
country ?string
country_contains ?string
farm_name ?string
farm_name_contains ?string
ipv4 ?bool
ipv6 ?bool
domain ?bool
status ?string
dedicated ?bool
healthy ?bool
rentable ?bool
rented_by ?u64
rented ?bool
available_for ?u64
farm_ids []u64
node_ids []u64
node_id ?u32
twin_id OptionU64 = EmptyOption{}
certification_type string
has_gpu OptionBool = EmptyOption{}
twin_id ?u64
certification_type ?string
has_gpu ?bool
has_ipv6 ?bool
gpu_device_id string
gpu_device_name string
gpu_vendor_id string
gpu_vendor_name string
gpu_available OptionBool = EmptyOption{}
gpu_device_id ?string
gpu_device_name ?string
gpu_vendor_id ?string
gpu_vendor_name ?string
gpu_available ?bool
features []string
}
// serialize NodeFilter to map
pub fn (p &NodeFilter) to_map() map[string]string {
mut m := map[string]string{}
match p.page {
EmptyOption {}
u64 {
m['page'] = p.page.str()
}
}
match p.size {
EmptyOption {}
u64 {
m['size'] = p.size.str()
}
}
match p.ret_count {
EmptyOption {}
bool {
m['ret_count'] = p.ret_count.str()
}
}
match p.randomize {
EmptyOption {}
bool {
m['randomize'] = p.randomize.str()
}
}
match p.free_mru {
EmptyOption {}
u64 {
m['free_mru'] = p.free_mru.str()
}
}
match p.free_sru {
EmptyOption {}
u64 {
m['free_sru'] = p.free_sru.str()
}
}
match p.free_hru {
EmptyOption {}
u64 {
m['free_hru'] = p.free_hru.str()
}
}
if v := p.free_ips {
m['free_ips'] = v.str()
}
if v := p.has_ipv6 {
m['has_ipv6'] = v.str()
}
match p.total_cru {
EmptyOption {}
u64 {
m['total_cru'] = p.total_cru.str()
}
}
match p.total_hru {
EmptyOption {}
u64 {
m['total_hru'] = p.total_hru.str()
}
}
match p.total_mru {
EmptyOption {}
u64 {
m['total_mru'] = p.total_mru.str()
}
}
match p.total_sru {
EmptyOption {}
u64 {
m['total_sru'] = p.total_sru.str()
}
}
if p.status != '' {
m['status'] = p.status
}
if p.city != '' {
m['city'] = p.city
}
if p.city_contains != '' {
m['city_contains'] = p.city_contains
}
if p.country != '' {
m['country'] = p.country
}
if p.country_contains != '' {
m['country_contains'] = p.country_contains
}
if p.farm_name != '' {
m['farm_name'] = p.farm_name
}
if p.farm_name_contains != '' {
m['farm_name_contains'] = p.farm_name_contains
}
match p.ipv4 {
EmptyOption {}
bool {
m['ipv4'] = p.ipv4.str()
}
}
match p.ipv6 {
EmptyOption {}
bool {
m['ipv6'] = p.ipv6.str()
}
}
match p.healthy {
EmptyOption {}
bool {
m['healthy'] = p.healthy.str()
}
}
match p.domain {
EmptyOption {}
bool {
m['domain'] = p.domain.str()
}
}
match p.dedicated {
EmptyOption {}
bool {
m['dedicated'] = p.dedicated.str()
}
}
match p.rentable {
EmptyOption {}
bool {
m['rentable'] = p.rentable.str()
}
}
match p.rented_by {
EmptyOption {}
u64 {
m['rented_by'] = p.rented_by.str()
}
}
match p.rented {
EmptyOption {}
bool {
m['rented'] = p.rented.str()
}
}
match p.available_for {
EmptyOption {}
u64 {
m['available_for'] = p.available_for.str()
}
}
if p.features.len > 0 {
m['features'] = json.encode(p.features).all_after('[').all_before(']')
}
if p.farm_ids.len > 0 {
m['farm_ids'] = json.encode(p.farm_ids).all_after('[').all_before(']')
}
if p.node_ids.len > 0 {
m['node_ids'] = json.encode(p.node_ids).all_after('[').all_before(']')
}
if n := p.node_id {
m['node_id'] = n.str()
}
match p.twin_id {
EmptyOption {}
u64 {
m['twin_id'] = p.twin_id.str()
}
}
if p.certification_type != '' {
m['certification_type'] = p.certification_type
}
match p.has_gpu {
EmptyOption {}
bool {
m['has_gpu'] = p.has_gpu.str()
}
}
if p.gpu_device_id != '' {
m['gpu_device_id'] = p.gpu_device_id
}
if p.gpu_device_name != '' {
m['gpu_device_name'] = p.gpu_device_name
}
if p.gpu_vendor_id != '' {
m['gpu_vendor_id'] = p.gpu_vendor_id
}
if p.gpu_vendor_name != '' {
m['gpu_vendor_name'] = p.gpu_vendor_name
}
match p.gpu_available {
EmptyOption {}
bool {
m['gpu_available'] = p.gpu_available.str()
}
}
return m
pub fn (f NodeFilter) to_map() map[string]string {
return to_map(f)
}
pub enum NodeStatus {
@@ -519,57 +131,50 @@ pub mut:
@[params]
pub struct TwinFilter {
pub mut:
page OptionU64 = EmptyOption{}
size OptionU64 = EmptyOption{}
ret_count OptionBool = EmptyOption{}
randomize OptionBool = EmptyOption{}
twin_id OptionU64 = EmptyOption{}
account_id string
relay string
public_key string
page ?u64
size ?u64
ret_count ?bool
randomize ?bool
twin_id ?u64
account_id ?string
relay ?string
public_key ?string
}
// serialize TwinFilter to map
pub fn (p &TwinFilter) to_map() map[string]string {
pub fn (f TwinFilter) to_map() map[string]string {
return to_map(f)
}
pub fn to_map[T](t T) map[string]string {
mut m := map[string]string{}
match p.page {
EmptyOption {}
u64 {
m['page'] = p.page.str()
$for field in T.fields {
value := t.$(field.name)
$if value is $option {
opt := t.$(field.name)
if opt != none {
// NOTE: for some reason, when the value is passed to another function
// it is not recognized as an Option and gets dereferenced
encode_val(field.name, value, mut m)
}
}
}
match p.size {
EmptyOption {}
u64 {
m['size'] = p.size.str()
$if value !is $option {
encode_val(field.name, value, mut m)
}
}
match p.ret_count {
EmptyOption {}
bool {
m['ret_count'] = p.ret_count.str()
}
}
match p.randomize {
EmptyOption {}
bool {
m['randomize'] = p.randomize.str()
}
}
match p.twin_id {
EmptyOption {}
u64 {
m['twin_id'] = p.twin_id.str()
}
}
if p.account_id != '' {
m['account_id'] = p.account_id
}
if p.relay != '' {
m['relay'] = p.relay
}
if p.public_key != '' {
m['public_key'] = p.public_key
}
return m
}
fn encode_val[T](field_name string, val T, mut m map[string]string) {
$if T is $array {
mut arr := []string{}
for a in val {
arr << a.str()
}
m[field_name] = arr.join(',')
} $else {
m[field_name] = val.str()
}
}
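
A hedged usage sketch for the generic to_map above: with V's native options, only the fields that were explicitly set end up in the map (FarmFilter and to_map come from this module; the values are illustrative):

f := FarmFilter{
page: u64(1)
name_contains: 'myfarm'
dedicated: true
}
m := f.to_map() // expected: {'page': '1', 'name_contains': 'myfarm', 'dedicated': 'true'}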

View File

@@ -10,14 +10,12 @@ pub:
}
pub fn (mut i NodeIterator) next() ?[]Node {
match i.filter.page {
EmptyOption {
i.filter.page = u64(1)
}
u64 {
i.filter.page = i.filter.page as u64 + 1
}
if v := i.filter.page {
i.filter.page = v + 1
} else {
i.filter.page = u64(1)
}
nodes := i.get_func(i.filter) or { return none }
if nodes.len == 0 {
return none
@@ -35,13 +33,10 @@ pub:
}
pub fn (mut i FarmIterator) next() ?[]Farm {
match i.filter.page {
EmptyOption {
i.filter.page = u64(1)
}
u64 {
i.filter.page = i.filter.page as u64 + 1
}
if v := i.filter.page {
i.filter.page = v + 1
} else {
i.filter.page = u64(1)
}
farms := i.get_func(i.filter) or { return none }
if farms.len == 0 {
@@ -60,13 +55,10 @@ pub:
}
pub fn (mut i ContractIterator) next() ?[]Contract {
match i.filter.page {
EmptyOption {
i.filter.page = u64(1)
}
u64 {
i.filter.page = i.filter.page as u64 + 1
}
if v := i.filter.page {
i.filter.page = v + 1
} else {
i.filter.page = u64(1)
}
contracts := i.get_func(i.filter) or { return none }
if contracts.len == 0 {
@@ -85,13 +77,10 @@ pub:
}
pub fn (mut i TwinIterator) next() ?[]Twin {
match i.filter.page {
EmptyOption {
i.filter.page = u64(1)
}
u64 {
i.filter.page = i.filter.page as u64 + 1
}
if v := i.filter.page {
i.filter.page = v + 1
} else {
i.filter.page = u64(1)
}
twins := i.get_func(i.filter) or { return none }
if twins.len == 0 {
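
With the option-based page field above, each iterator can be driven by a plain for loop; a sketch assuming a fetch callback of the shape next() expects (my_get_nodes is hypothetical):

mut it := NodeIterator{
filter: NodeFilter{
status: 'up'
}
get_func: my_get_nodes // hypothetical: fn (NodeFilter) ![]Node
}
for batch in it { // next() advances filter.page until a page comes back empty
println('got ${batch.len} nodes')
}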

View File

@@ -102,5 +102,3 @@ pub fn (u DropTFTUnit) str() string {
}
return '${u64(u)} dTFT' // short for dropTFT (1 TFT = 10_000_000 drops); Dylan suggested the name and we are using it until there is an official one
}
struct EmptyOption {}

View File

@@ -1,44 +0,0 @@
module models
import freeflowuniverse.herolib.threefold.grid
import log
// ContractMetaData struct to represent a deployment metadata.
pub struct ContractMetaData {
pub mut:
type_ string @[json: 'type']
name string
project_name string @[json: 'projectName']
}
// // GridMachinesModel struct to represent multiple machines in the grid
// pub struct GridMachinesModel {
// mnemonic string
// ssh_key string
// chain_network grid.ChainNetwork
// pub mut:
// client &GridClient = unsafe { nil }
// node_id int
// network NetworkInfo
// machines []MachineModel
// name string
// metadata string
// }
// // GridContracts struct to represent contracts in the grid
// pub struct GridContracts {
// pub mut:
// client &GridClient = unsafe { nil }
// network grid.ChainNetwork
// }
// // GridClient struct to represent the client interacting with the grid
// pub struct GridClient {
// pub mut:
// mnemonic string
// ssh_key string
// chain_network grid.ChainNetwork
// deployer grid.Deployer
// machines GridMachinesModel
// contracts GridContracts
// }

View File

@@ -1,16 +0,0 @@
module models
import freeflowuniverse.herolib.threefold.grid
import log
// GridClient struct to represent the client interacting with the grid
pub struct Deployment {
mut:
deployer grid.Deployer
pub mut:
mnemonic string
ssh_key string
chain_network grid.ChainNetwork
machines GridMachinesModel
contracts GridContracts
}

View File

@@ -1,20 +0,0 @@
module models
pub struct KubernetesModel {
}
pub fn (mut km KubernetesModel) deploy() {
println('Not Implemented')
}
pub fn (mut km KubernetesModel) delete() {
println('Not Implemented')
}
pub fn (mut km KubernetesModel) get() {
println('Not Implemented')
}
pub fn (mut km KubernetesModel) update() {
println('Not Implemented')
}

View File

@@ -1,264 +0,0 @@
module models
import freeflowuniverse.herolib.threefold.grid
import freeflowuniverse.herolib.threefold.grid.models as grid_models
import rand
import freeflowuniverse.herolib.ui.console
import json
// Deploy the workloads
pub fn (mut gm GridMachinesModel) deploy(vms GridMachinesModel) ! {
console.print_header('Starting deployment process.')
// Prepare Workloads
workloads := create_workloads(mut gm, vms)!
// Create and deploy deployment
contract_id := create_and_deploy_deployment(mut gm, vms, workloads)!
// Fetch deployment result
machine_res := fetch_deployment_result(mut gm.client.deployer, contract_id, u32(vms.node_id))!
console.print_header('Zmachine result: ${machine_res}')
}
// Helper function to create workloads
fn create_workloads(mut gm GridMachinesModel, vms GridMachinesModel) ![]Workload {
console.print_header('Creating workloads.')
mut workloads := []grid_models.Workload{}
// Create network workload
wg_port := gm.client.deployer.assign_wg_port(u32(vms.node_id))!
workloads << create_network_workload(vms, wg_port)
// Create machine workloads
mut public_ip_name := ''
for machine in vms.machines {
if machine.network_access.public_ip4 || machine.network_access.public_ip6 {
public_ip_name = rand.string(5).to_lower()
workloads << create_public_ip_workload(machine.network_access.public_ip4,
machine.network_access.public_ip6, public_ip_name)
}
workloads << create_zmachine_workload(machine, vms.network, gm.ssh_key, public_ip_name).to_workload(
name: machine.name
description: 'VGridClient Zmachine'
)
}
return workloads
}
// Helper function to create and deploy deployment
fn create_and_deploy_deployment(mut gm GridMachinesModel, vms GridMachinesModel, workloads []Workload) !int {
console.print_header('Creating deployment.')
mut deployment := grid_models.new_deployment(
twin_id: gm.client.deployer.twin_id
description: 'VGridClient Deployment'
workloads: workloads
signature_requirement: create_signature_requirement(gm.client.deployer.twin_id)
)
log_and_set_metadata(mut logger, mut deployment, 'vm', vms.name)
console.print_header('Deploying workloads...')
contract_id := gm.client.deployer.deploy(u32(vms.node_id), mut deployment, deployment.metadata,
0) or {
logger.error('Deployment failed: ${err}')
return err
}
console.print_header('Deployment successful. Contract ID: ${contract_id}')
return int(contract_id)
}
// Helper function to fetch deployment result
fn fetch_deployment_result(mut deployer grid.Deployer, contract_id int, node_id u32) !ZmachineResult {
dl := deployer.get_deployment(u64(contract_id), node_id) or {
logger.error('Failed to get deployment data: ${err}')
exit(1)
}
return get_machine_result(dl)!
}
// Helper function to create a Zmachine workload
fn create_zmachine_workload(machine MachineModel, network NetworkInfo, ssh_key string, public_ip_name string) Zmachine {
console.print_header('Creating Zmachine workload.')
return grid_models.Zmachine{
flist: 'https://hub.grid.tf/tf-official-vms/ubuntu-24.04-latest.flist'
network: grid_models.ZmachineNetwork{
interfaces: [
grid_models.ZNetworkInterface{
network: network.name
ip: network.ip_range.split('/')[0]
},
]
public_ip: public_ip_name
planetary: machine.network_access.planetary
mycelium: grid_models.MyceliumIP{
network: network.name
hex_seed: rand.string(6).bytes().hex()
}
}
entrypoint: '/sbin/zinit init'
compute_capacity: grid_models.ComputeCapacity{
cpu: u8(machine.capacity.cpu)
memory: i64(machine.capacity.memory) * 1024 * 1024
}
env: {
'SSH_KEY': ssh_key
}
}
}
// Helper function to create a network workload
fn create_network_workload(gm GridMachinesModel, wg_port u32) Workload {
console.print_header('Creating network workload.')
return grid_models.Znet{
ip_range: gm.network.ip_range
subnet: gm.network.subnet
wireguard_private_key: 'GDU+cjKrHNJS9fodzjFDzNFl5su3kJXTZ3ipPgUjOUE='
wireguard_listen_port: u16(wg_port)
mycelium: grid_models.Mycelium{
hex_key: rand.string(32).bytes().hex()
}
peers: [
grid_models.Peer{
subnet: gm.network.subnet
wireguard_public_key: '4KTvZS2KPWYfMr+GbiUUly0ANVg8jBC7xP9Bl79Z8zM='
allowed_ips: [gm.network.subnet]
},
]
}.to_workload(
name: gm.network.name
description: 'VGridClient Network'
)
}
// Helper function to create a public IP workload
fn create_public_ip_workload(is_v4 bool, is_v6 bool, name string) Workload {
console.print_header('Creating Public IP workload.')
return grid_models.PublicIP{
v4: is_v4
v6: is_v6
}.to_workload(name: name)
}
// Helper function to create signature requirements
fn create_signature_requirement(twin_id int) SignatureRequirement {
console.print_header('Setting signature requirement.')
return grid_models.SignatureRequirement{
weight_required: 1
requests: [
grid_models.SignatureRequest{
twin_id: u32(twin_id)
weight: 1
},
]
}
}
// Helper function to log and set metadata
fn log_and_set_metadata(mut logger log.Log, mut deployment Deployment, key string, value string) {
console.print_header('Setting ${key} metadata.')
deployment.add_metadata(key, value)
}
// Helper function to get the deployment result
fn get_machine_result(dl Deployment) !ZmachineResult {
for _, w in dl.workloads {
if w.type_ == grid_models.workload_types.zmachine {
res := json.decode(grid_models.ZmachineResult, w.result.data)!
return res
}
}
return error('Failed to get Zmachine workload')
}
pub fn (mut gm GridMachinesModel) list() ![]Deployment {
mut deployments := []grid_models.Deployment{}
console.print_header('Listing active contracts.')
contracts := gm.client.contracts.get_my_active_contracts() or {
return error('Cannot list twin contracts due to: ${err}')
}
console.print_header('Active contracts listed.')
console.print_header('Listing deployments.')
for contract in contracts {
console.print_header('Listing deployment node ${contract.details.node_id}.')
if contract.contract_type == 'node' {
dl := gm.client.deployer.get_deployment(contract.contract_id, u32(contract.details.node_id)) or {
console.print_stderror('Cannot list twin deployment for contract ${contract.contract_id} due to: ${err}.')
continue
}
deployments << dl
console.print_header('Deployment Result: ${dl}.')
}
}
return deployments
}
fn (mut gm GridMachinesModel) list_contract_names() ![]string {
contracts := gm.client.contracts.get_my_active_contracts()!
mut names := []string{}
for contract in contracts {
res := json.decode(ContractMetaData, contract.details.deployment_data) or {
return error('Cannot decode the deployment metadata due to: ${err}')
}
names << res.name
}
return names
}
pub fn (mut gm GridMachinesModel) delete(deployment_name string) ! {
console.print_header('Deleting deployment with name: ${deployment_name}.')
console.print_header('Listing the twin `${gm.client.deployer.twin_id}` active contracts.')
contracts := gm.client.contracts.get_my_active_contracts() or {
return error('Cannot list twin contracts due to: ${err}')
}
console.print_header('Active contracts listed.')
for contract in contracts {
res := json.decode(ContractMetaData, contract.details.deployment_data) or {
return error('Cannot decode the contract deployment data due to: ${err}')
}
if res.name == deployment_name {
console.print_header('Start deleting deployment ${deployment_name}.')
gm.client.deployer.client.cancel_contract(contract.contract_id) or {
return error('Cannot delete deployment due to: ${err}')
}
console.print_header('Deployment ${deployment_name} deleted!')
}
}
}
// Placeholder for get operation
pub fn (mut gm GridMachinesModel) get(deployment_name string) ![]Deployment {
mut deployments := []grid_models.Deployment{}
contracts := gm.client.contracts.get_my_active_contracts() or {
return error('Cannot list twin contracts due to: ${err}')
}
for contract in contracts {
if contract.contract_type == 'node' {
dl := gm.client.deployer.get_deployment(contract.contract_id, u32(contract.details.node_id)) or {
console.print_stderror('Cannot list twin deployment for contract ${contract.contract_id} due to: ${err}.')
continue
}
if dl.metadata.len != 0 {
res := json.decode(ContractMetaData, dl.metadata) or {
return error('Cannot decode the deployment metadata due to: ${err}')
}
if deployment_name == res.name {
deployments << dl
}
}
}
}
console.print_header('Deployments: ${deployments}')
return deployments
}

View File

@@ -75,12 +75,35 @@ pub fn get_deployment(name string) !TFDeployment {
return dl
}
pub fn delete_deployment(name string) ! {
mut deployer := get_deployer()!
mut dl := TFDeployment{
name: name
kvstore: KVStoreFS{}
deployer: &deployer
}
dl.load() or { return error('Failed to load the deployment due to: ${err}') }
console.print_header('Current deployment contracts: ${dl.contracts}')
mut contracts := []u64{}
contracts << dl.contracts.name
contracts << dl.contracts.node.values()
contracts << dl.contracts.rent.values()
dl.deployer.client.batch_cancel_contracts(contracts)!
console.print_header('Deployment contracts are canceled successfully.')
dl.kvstore.delete(dl.name)!
console.print_header('Deployment is deleted successfully.')
}
pub fn (mut self TFDeployment) deploy() ! {
console.print_header('Starting deployment process.')
self.set_nodes()!
old_deployment := self.list_deployments()!
println('old_deployment ${old_deployment}')
console.print_header('old contract ids: ${old_deployment.keys()}')
mut setup := new_deployment_setup(self.network, self.vms, self.zdbs, self.webnames,
old_deployment, mut self.deployer)!
@@ -92,6 +115,10 @@ pub fn (mut self TFDeployment) deploy() ! {
fn (mut self TFDeployment) set_nodes() ! {
for mut vm in self.vms {
if vm.node_id != 0 {
continue
}
mut node_ids := []u64{}
for node_id in vm.requirements.nodes {
@@ -104,11 +131,11 @@ fn (mut self TFDeployment) set_nodes() ! {
free_mru: convert_to_gigabytes(u64(vm.requirements.memory))
total_cru: u64(vm.requirements.cpu)
free_sru: convert_to_gigabytes(u64(vm.requirements.size))
available_for: gridproxy_models.OptionU64(u64(self.deployer.twin_id))
available_for: u64(self.deployer.twin_id)
free_ips: if vm.requirements.public_ip4 { u64(1) } else { none }
has_ipv6: if vm.requirements.public_ip6 { vm.requirements.public_ip6 } else { none }
status: 'up'
features: if vm.requirements.public_ip4 { [] } else { ['zmachine'] }
features: if vm.requirements.public_ip4 { ['zmachine'] } else { [] }
)!
if nodes.len == 0 {
@@ -118,41 +145,54 @@ fn (mut self TFDeployment) set_nodes() ! {
return error('Queried the Grid Proxy and no nodes were found.')
}
vm.node_id = self.pick_node(nodes) or { return error('Failed to pick valid node: ${err}') }
vm.node_id = u32(pick_node(mut self.deployer, nodes) or {
return error('Failed to pick valid node: ${err}')
}.node_id)
}
for mut zdb in self.zdbs {
if zdb.node_id != 0 {
continue
}
nodes := filter_nodes(
free_sru: convert_to_gigabytes(u64(zdb.requirements.size))
status: 'up'
healthy: true
node_id: zdb.requirements.node_id
available_for: gridproxy_models.OptionU64(u64(self.deployer.twin_id))
available_for: u64(self.deployer.twin_id)
)!
if nodes.len == 0 {
return error('Queried the Grid Proxy and no nodes were found.')
}
zdb.node_id = self.pick_node(nodes) or { return error('Failed to pick valid node: ${err}') }
zdb.node_id = u32(pick_node(mut self.deployer, nodes) or {
return error('Failed to pick valid node: ${err}')
}.node_id)
}
for mut webname in self.webnames {
if webname.node_id != 0 {
continue
}
nodes := filter_nodes(
domain: true
status: 'up'
healthy: true
node_id: webname.requirements.node_id
available_for: gridproxy_models.OptionU64(u64(self.deployer.twin_id))
available_for: u64(self.deployer.twin_id)
features: ['zmachine']
)!
if nodes.len == 0 {
return error('Queried the Grid Proxy and no nodes were found.')
}
webname.node_id = self.pick_node(nodes) or {
webname.node_id = u32(pick_node(mut self.deployer, nodes) or {
return error('Failed to pick valid node: ${err}')
}
}.node_id)
}
}
@@ -205,7 +245,6 @@ fn (mut self TFDeployment) finalize_deployment(setup DeploymentSetup) ! {
}
if create_name_contracts.len > 0 || create_deployments.len > 0 {
console.print_header('Attempting batch deployment')
created_name_contracts_map, ret_dls := self.deployer.batch_deploy(create_name_contracts, mut
create_deployments, none)!
@@ -473,36 +512,3 @@ pub fn (mut self TFDeployment) list_deployments() !map[u32]grid_models.Deploymen
return dls
}
fn (mut self TFDeployment) pick_node(nodes []gridproxy_models.Node) !u32 {
mut node_id := ?u32(none)
mut checked := []bool{len: nodes.len}
mut checked_cnt := 0
for checked_cnt < nodes.len {
idx := int(rand.u32() % u32(nodes.len))
if checked[idx] {
continue
}
checked[idx] = true
checked_cnt += 1
if self.ping_node(u32(nodes[idx].twin_id)) {
node_id = u32(nodes[idx].node_id)
break
}
}
if v := node_id {
return v
} else {
return error('No node is reachable.')
}
}
fn (mut self TFDeployment) ping_node(twin_id u32) bool {
if _ := self.deployer.client.get_zos_version(twin_id) {
return true
} else {
return false
}
}
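
An assumed call pattern for the new delete_deployment above (the deployment name is illustrative): it loads the stored deployment, batch-cancels its name, node and rent contracts, then drops the record from the key-value store.

delete_deployment('my_vm_deployment') or {
eprintln('teardown failed: ${err}')
}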

View File

@@ -90,6 +90,11 @@ fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, old_deployme
// Returns:
// - None
fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! {
if machines.len == 0 {
return
}
console.print_header('Preparing Zmachine workloads.')
mut used_ip_octets := map[u32][]u8{}
for machine in machines {
mut req := machine.requirements
@@ -100,7 +105,6 @@ fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! {
self.set_public_ip_workload(machine.node_id, public_ip_name, req)!
}
console.print_header('Creating Zmachine workload.')
self.set_zmachine_workload(machine, public_ip_name, mut used_ip_octets)!
}
}
@@ -114,10 +118,14 @@ fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! {
//
// Each ZDB is processed to convert the requirements into a grid workload and associated with a healthy node.
fn (mut self DeploymentSetup) setup_zdb_workloads(zdbs []ZDB) ! {
if zdbs.len == 0 {
return
}
console.print_header('Preparing ZDB workloads.')
for zdb in zdbs {
// Retrieve ZDB requirements from the result
mut req := zdb.requirements
console.print_header('Creating a ZDB workload for `${req.name}` DB.')
// Create the Zdb model with the size converted to bytes
zdb_model := grid_models.Zdb{
@@ -150,6 +158,11 @@ fn (mut self DeploymentSetup) setup_zdb_workloads(zdbs []ZDB) ! {
// Returns:
// - None
fn (mut self DeploymentSetup) setup_webname_workloads(webnames []WebName) ! {
if webnames.len == 0 {
return
}
console.print_header('Preparing WebName workloads.')
for wn in webnames {
req := wn.requirements
@@ -238,7 +251,7 @@ fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip
// - public_ip_name: Name of the public IP to assign to the workload
fn (mut self DeploymentSetup) set_public_ip_workload(node_id u32, public_ip_name string, vm VMRequirements) ! {
// Add the public IP workload
console.print_header('Creating Public IP workload.')
console.print_header('Preparing Public IP workload for node ${node_id}.')
public_ip_workload := grid_models.PublicIP{
v4: vm.public_ip4
v6: vm.public_ip6
@@ -257,7 +270,6 @@ fn (mut self DeploymentSetup) set_public_ip_workload(node_id u32, public_ip_name
// Throws:
// - Error if failed to assign a private IP in the subnet
fn (mut self DeploymentSetup) assign_private_ip(node_id u32, mut used_ip_octets map[u32][]u8) !string {
console.print_header('Assign private IP to node ${node_id}.')
ip := self.network_handler.wg_subnet[node_id].split('/')[0]
mut split_ip := ip.split('.')
last_octet := ip.split('.').last().u8()
@@ -268,7 +280,6 @@ fn (mut self DeploymentSetup) assign_private_ip(node_id u32, mut used_ip_octets
split_ip[3] = '${candidate}'
used_ip_octets[node_id] << candidate
ip_ := split_ip.join('.')
console.print_header('Private IP Assigned: ${ip_}.')
return ip_
}
return error('failed to assign private IP in subnet: ${self.network_handler.wg_subnet[node_id]}')

View File

@@ -26,4 +26,8 @@ fn (kvs KVStoreFS) get(key string) ![]u8 {
}
fn (kvs KVStoreFS) delete(key string) ! {
mut mycontext := context.context_new()!
mut session := mycontext.session_new(name: 'deployer')!
mut db := session.db_get()!
db.delete(key: key) or { return error('Cannot delete the key due to: ${err}') }
}
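
A small usage sketch, assuming KVStoreFS is the zero-field struct this hunk shows wrapping a herolib context session DB:

kvs := KVStoreFS{}
kvs.delete('my_deployment') or { eprintln('cannot delete key: ${err}') }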

View File

@@ -43,7 +43,7 @@ fn (mut self NetworkHandler) create_network(vmachines []VMachine) ! {
}
}
console.print_header('Loaded nodes: ${self.nodes}.')
console.print_header('Network nodes: ${self.nodes}.')
self.setup_wireguard_data()!
self.setup_access_node()!
}
@@ -110,15 +110,24 @@ fn (mut self NetworkHandler) setup_access_node() ! {
console.print_header('No public nodes found based on your specs.')
console.print_header('Requesting the Proxy to assign a public node.')
mut myfilter := gridproxy.nodefilter()!
myfilter.ipv4 = true // Only consider nodes with IPv4
myfilter.status = 'up'
myfilter.healthy = true
nodes := filter_nodes(myfilter)!
access_node := nodes[0]
nodes := filter_nodes(
ipv4: true
status: 'up'
healthy: true
available_for: u64(self.deployer.twin_id)
features: [
'zmachine',
]
)!
if nodes.len == 0 {
return error('Queried the Grid Proxy and no nodes were found.')
}
access_node := pick_node(mut self.deployer, nodes) or {
return error('Failed to pick valid node: ${err}')
}
self.public_node = u32(access_node.node_id)
console.print_header('Public node ${self.public_node}')
self.nodes << self.public_node
@@ -169,17 +178,10 @@ fn (mut self NetworkHandler) setup_wireguard_data() ! {
}
self.wg_ports[node_id] = self.deployer.assign_wg_port(node_id)!
console.print_header('Assign Wireguard port for node ${node_id}.')
console.print_header('Generate Wireguard keys for node ${node_id}.')
self.wg_keys[node_id] = self.deployer.client.generate_wg_priv_key()!
console.print_header('Wireguard keys for node ${node_id} are ${self.wg_keys[node_id]}.')
console.print_header('Calculate subnet for node ${node_id}.')
self.wg_subnet[node_id] = self.calculate_subnet()!
console.print_header('Node ${node_id} subnet is ${self.wg_subnet[node_id]}.')
console.print_header('Node ${node_id} public config ${public_config}.')
if public_config.ipv4.len != 0 {
self.endpoints[node_id] = public_config.ipv4.split('/')[0]

View File

@@ -13,7 +13,7 @@ __global (
@[params]
pub struct ArgsGet {
pub mut:
name string = 'default'
name string
}
fn args_get(args_ ArgsGet) ArgsGet {
@@ -30,16 +30,18 @@ fn args_get(args_ ArgsGet) ArgsGet {
pub fn get(args_ ArgsGet) !&TFGridDeployer {
mut args := args_get(args_)
if args.name !in tfgrid3deployer_global {
if !config_exists() {
if default {
config_save()!
if args.name == 'default' {
if !config_exists(args) {
if default {
config_save(args)!
}
}
config_load(args)!
}
config_load()!
}
return tfgrid3deployer_global[args.name] or {
println(tfgrid3deployer_global)
panic('bug in get from factory: ')
panic('could not get config for tfgrid3deployer with name: ${args.name}')
}
}
@@ -64,22 +66,16 @@ fn config_save(args_ ArgsGet) ! {
fn set(o TFGridDeployer) ! {
mut o2 := obj_init(o)!
tfgrid3deployer_global['default'] = &o2
tfgrid3deployer_global[o.name] = &o2
tfgrid3deployer_default = o.name
}
@[params]
pub struct PlayArgs {
pub mut:
name string = 'default'
heroscript string // if filled in then plbook will be made out of it
plbook ?playbook.PlayBook
reset bool
start bool
stop bool
restart bool
delete bool
configure bool // make sure there is at least one installed
}
pub fn play(args_ PlayArgs) ! {
@@ -94,8 +90,7 @@ pub fn play(args_ PlayArgs) ! {
if install_actions.len > 0 {
for install_action in install_actions {
mut p := install_action.params
mycfg := cfg_play(p)!
set(mycfg)!
cfg_play(p)!
}
}
}

View File

@@ -39,7 +39,7 @@ pub mut:
network Network
}
fn cfg_play(p paramsparser.Params) !TFGridDeployer {
fn cfg_play(p paramsparser.Params) ! {
network_str := p.get_default('network', 'main')!
network := match network_str {
'dev' { Network.dev }
@@ -53,7 +53,7 @@ fn cfg_play(p paramsparser.Params) !TFGridDeployer {
mnemonic: p.get_default('mnemonic', '')!
network: network
}
return mycfg
set(mycfg)!
}
fn obj_init(obj_ TFGridDeployer) !TFGridDeployer {

View File

@@ -1,9 +1,11 @@
module tfgrid3deployer
import freeflowuniverse.herolib.threefold.gridproxy
import freeflowuniverse.herolib.threefold.grid
import freeflowuniverse.herolib.threefold.grid.models as grid_models
import freeflowuniverse.herolib.threefold.gridproxy.model as gridproxy_models
import rand
import freeflowuniverse.herolib.ui.console
// Resolves the correct grid network based on the `cn.network` value.
//
@@ -53,3 +55,37 @@ pub fn filter_nodes(filter gridproxy_models.NodeFilter) ![]gridproxy_models.Node
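// converts a size expressed in gigabytes into the equivalent number of bytes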
fn convert_to_gigabytes(bytes u64) u64 {
return bytes * 1024 * 1024 * 1024
}
fn pick_node(mut deployer grid.Deployer, nodes []gridproxy_models.Node) !gridproxy_models.Node {
mut node := ?gridproxy_models.Node(none)
mut checked := []bool{len: nodes.len}
mut checked_cnt := 0
for checked_cnt < nodes.len {
idx := int(rand.u32() % u32(nodes.len))
if checked[idx] {
continue
}
checked[idx] = true
checked_cnt += 1
if ping_node(mut deployer, u32(nodes[idx].twin_id)) {
node = nodes[idx]
break
}
}
if v := node {
return v
} else {
return error('No node is reachable.')
}
}
fn ping_node(mut deployer grid.Deployer, twin_id u32) bool {
if _ := deployer.client.get_zos_version(twin_id) {
return true
} else {
console.print_stderr('Failed to ping node with twin: ${twin_id}')
return false
}
}
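
A sketch of the intended selection flow (the deployer value is assumed to be constructed elsewhere): filter candidate nodes first, then let pick_node probe them in random order until one answers the ZOS version ping.

nodes := filter_nodes(status: 'up', healthy: true)!
node := pick_node(mut deployer, nodes)!
println('picked node ${node.node_id} (twin ${node.twin_id})')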

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -cc gcc -n -w -gc none -no-retry-compilation -d use_openssl -enable-globals run
import os
import flag
@@ -119,7 +119,7 @@ fn dotest(path string, base_dir string, mut cache TestCache) ! {
return
}
cmd := 'v -stats -enable-globals -n -w -gc none -no-retry-compilation -cc tcc test ${norm_path}'
cmd := 'v -stats -enable-globals -n -w -gc none -no-retry-compilation test ${norm_path}'
println(cmd)
result := os.execute(cmd)
eprintln(result)