cleanup client for grid
This commit is contained in:
258
lib/threefold/grid3/deploy_tosort/deployment.v
Normal file
258
lib/threefold/grid3/deploy_tosort/deployment.v
Normal file
@@ -0,0 +1,258 @@
|
||||
module deploy
|
||||
|
||||
import freeflowuniverse.herolib.threefold.grid.models as grid_models
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.threefold.grid
|
||||
import freeflowuniverse.herolib.data.encoder
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import rand
|
||||
import json
|
||||
import encoding.base64
|
||||
import os
|
||||
|
||||
// TFDeployment holds the state of one grid deployment: its VMs plus the
// metadata needed to (re)create them. Persisted via save()/load() below.
@[heap]
pub struct TFDeployment {
pub mut:
	name        string = 'default' // key under which the deployment is stored on disk
	description string
	vms         []VMachine
mut:
	// Lazily initialized by new_deployer(); excluded from string dumps.
	deployer ?grid.Deployer @[skip; str: skip]
}
|
||||
|
||||
// Return the cached grid deployer, creating it on first use from the
// configured grid client's mnemonic and chain network.
fn (mut self TFDeployment) new_deployer() !grid.Deployer {
	if self.deployer == none {
		mut grid_client := get()!
		// NOTE(review): the sibling deployer module also maps a .qa network;
		// this match only covers .dev/.test/.main — confirm the client enum
		// used here really has no .qa variant.
		network := match grid_client.network {
			.dev { grid.ChainNetwork.dev }
			.test { grid.ChainNetwork.test }
			.main { grid.ChainNetwork.main }
		}
		self.deployer = grid.new_deployer(grid_client.mnemonic, network)!
	}
	// The or-branch is unreachable after the block above; kept as a guard.
	return self.deployer or { return error('Deployer not initialized') }
}
|
||||
|
||||
// Deploy a single VM described by `args_` on the ThreeFold grid.
// Picks a node, builds network/IP/zmachine workloads, signs and submits the
// deployment, then records the resulting VMachine on this TFDeployment and
// persists it to disk. Returns the created VMachine.
pub fn (mut self TFDeployment) vm_deploy(args_ VMRequirements) !VMachine {
	console.print_header('Starting deployment process.')

	mut deployer := self.new_deployer() or { return error('Failed to initialize deployer: ${err}') }

	mut node_id := get_node_id(args_) or { return error('Failed to determine node ID: ${err}') }

	// Fall back to a randomly named default network when none was requested.
	mut network := args_.network or {
		NetworkSpecs{
			name: 'net' + rand.string(5)
			ip_range: '10.249.0.0/16'
			subnet: '10.249.0.0/24'
		}
	}

	wg_port := deployer.assign_wg_port(node_id)!
	workloads := create_workloads(args_, network, wg_port)!

	console.print_header('Creating deployment.')
	mut deployment := grid_models.new_deployment(
		twin_id: deployer.twin_id
		description: 'VGridClient Deployment'
		workloads: workloads
		signature_requirement: create_signature_requirement(deployer.twin_id)
	)

	console.print_header('Setting metadata and deploying workloads.')
	deployment.add_metadata('vm', args_.name)
	contract_id := deployer.deploy(node_id, mut deployment, deployment.metadata, 0) or {
		return error('Deployment failed: ${err}')
	}

	console.print_header('Deployment successful. Contract ID: ${contract_id}')

	vm := VMachine{
		tfchain_id: '${deployer.twin_id}${args_.name}'
		tfchain_contract_id: int(contract_id)
		requirements: VMRequirements{
			name: args_.name
			description: args_.description
			cpu: args_.cpu
			memory: args_.memory
		}
	}

	self.name = args_.name
	self.description = args_.description
	self.vms << vm

	self.save()!
	// NOTE(review): load() returns a TFDeployment that is discarded here —
	// presumably a round-trip sanity check; confirm the intent.
	self.load(args_.name)!
	return vm
}
|
||||
|
||||
// Resolve which node to deploy on: prefer the first explicitly requested
// node id; otherwise ask the grid proxy to filter for a suitable one.
fn get_node_id(args_ VMRequirements) !u32 {
	if args_.nodes.len > 0 {
		return u32(args_.nodes[0])
	}
	console.print_header('Requesting the proxy to filter nodes.')
	candidates := nodefilter(args_)!
	if candidates.len == 0 {
		return error('No suitable nodes found.')
	}
	return u32(candidates[0].node_id)
}
|
||||
|
||||
// Assemble the workload list for one VM deployment: the network workload,
// an optional public-IP workload, and the zmachine itself (in that order).
fn create_workloads(args_ VMRequirements, network NetworkSpecs, wg_port u32) ![]grid_models.Workload {
	mut workloads := []grid_models.Workload{}

	workloads << create_network_workload(network, wg_port)!
	mut public_ip_name := ''
	if args_.public_ip4 || args_.public_ip6 {
		// Random lowercase name ties the IP workload to the zmachine below.
		public_ip_name = rand.string(5).to_lower()
		workloads << create_public_ip_workload(args_.public_ip4, args_.public_ip6, public_ip_name)
	}

	zmachine := create_zmachine_workload(args_, network, public_ip_name)!
	workloads << zmachine.to_workload(
		name: args_.name
		description: args_.description
	)

	return workloads
}
|
||||
|
||||
// Build the deployment signature requirement: a single signing request from
// the given twin with weight 1, and a required total weight of 1.
fn create_signature_requirement(twin_id int) grid_models.SignatureRequirement {
	console.print_header('Setting signature requirement.')
	request := grid_models.SignatureRequest{
		twin_id: u32(twin_id)
		weight: 1
	}
	return grid_models.SignatureRequirement{
		weight_required: 1
		requests: [request]
	}
}
|
||||
|
||||
// Build the Znet (WireGuard + mycelium) workload for the deployment network.
// SECURITY NOTE(review): the WireGuard private/public keys below are
// hard-coded constants shared by every deployment — confirm whether these
// should be generated per network instead.
fn create_network_workload(network NetworkSpecs, wg_port u32) !grid_models.Workload {
	console.print_header('Creating network workload.')
	return grid_models.Znet{
		ip_range: network.ip_range
		subnet: network.subnet
		wireguard_private_key: 'GDU+cjKrHNJS9fodzjFDzNFl5su3kJXTZ3ipPgUjOUE='
		wireguard_listen_port: u16(wg_port)
		mycelium: grid_models.Mycelium{
			// Fresh random hex key per network for mycelium overlay.
			hex_key: rand.string(32).bytes().hex()
		}
		peers: [
			grid_models.Peer{
				subnet: network.subnet
				wireguard_public_key: '4KTvZS2KPWYfMr+GbiUUly0ANVg8jBC7xP9Bl79Z8zM='
				allowed_ips: [network.subnet]
			},
		]
	}.to_workload(
		name: network.name
		description: 'VGridClient Network'
	)
}
|
||||
|
||||
// Wrap a PublicIP workload (IPv4 and/or IPv6) under the given workload name.
fn create_public_ip_workload(is_v4 bool, is_v6 bool, name string) grid_models.Workload {
	console.print_header('Creating Public IP workload.')
	public_ip := grid_models.PublicIP{
		v4: is_v4
		v6: is_v6
	}
	return public_ip.to_workload(name: name)
}
|
||||
|
||||
// Build the Zmachine workload for one VM: network interface on the given
// network, optional public IP, mycelium address, an Ubuntu flist, and the
// configured SSH key injected via env.
fn create_zmachine_workload(args_ VMRequirements, network NetworkSpecs, public_ip_name string) !grid_models.Zmachine {
	console.print_header('Creating Zmachine workload.')
	mut grid_client := get()!

	return grid_models.Zmachine{
		network: grid_models.ZmachineNetwork{
			interfaces: [
				grid_models.ZNetworkInterface{
					network: network.name
					// First address of the range, e.g. '10.249.0.0' from '10.249.0.0/16'.
					ip: network.ip_range.split('/')[0]
				},
			]
			public_ip: public_ip_name
			planetary: args_.planetary
			mycelium: grid_models.MyceliumIP{
				network: network.name
				hex_seed: rand.string(6).bytes().hex()
			}
		}
		flist: 'https://hub.grid.tf/tf-official-vms/ubuntu-24.04-latest.flist'
		entrypoint: '/sbin/zinit init'
		compute_capacity: grid_models.ComputeCapacity{
			cpu: u8(args_.cpu)
			// args_.memory is treated as GiB here (×1024³ to bytes) — TODO confirm unit.
			memory: i64(args_.memory) * 1024 * 1024 * 1024
		}
		env: {
			'SSH_KEY': grid_client.ssh_key
		}
	}
}
|
||||
|
||||
// Load the deployment stored under `name` and return its virtual machines.
pub fn (mut self TFDeployment) vm_get(name string) ![]VMachine {
	loaded := self.load(name)!
	return loaded.vms
}
|
||||
|
||||
// Load a previously saved deployment from
// `~/hero/var/tfgrid/deploy/<deployment_name>`: read the base64 payload
// written by save(), decode it, and return the reconstructed deployment.
pub fn (mut self TFDeployment) load(deployment_name string) !TFDeployment {
	// os.join_path instead of manual string concatenation keeps the path
	// well-formed regardless of trailing separators.
	filepath := os.join_path(os.home_dir(), 'hero/var/tfgrid/deploy', deployment_name)
	base64_string := os.read_file(filepath) or {
		return error('Failed to open file due to: ${err}')
	}
	bytes := base64.decode(base64_string)
	return self.decode(bytes)!
}
|
||||
|
||||
// Persist this deployment as a base64-encoded binary blob under
// `~/hero/var/tfgrid/deploy/<name>`, creating the directory when needed.
fn (mut self TFDeployment) save() ! {
	// os.join_path instead of manual '+' concatenation for robust paths.
	dir_path := os.join_path(os.home_dir(), 'hero/var/tfgrid/deploy')
	os.mkdir_all(dir_path)!
	file_path := os.join_path(dir_path, self.name)

	encoded_data := self.encode() or { return error('Failed to encode deployment data: ${err}') }
	base64_string := base64.encode(encoded_data)

	os.write_file(file_path, base64_string) or { return error('Failed to write to file: ${err}') }
}
|
||||
|
||||
// Binary-encode the deployment: name, VM count, then each VM as a
// length-prefixed blob produced by VMachine.encode().
// NOTE(review): `description` is not serialized — it is lost across a
// save/load round trip; confirm whether that is intended.
fn (self TFDeployment) encode() ![]u8 {
	mut b := encoder.new()
	b.add_string(self.name)
	b.add_int(self.vms.len)

	for vm in self.vms {
		vm_data := vm.encode()!
		b.add_int(vm_data.len)
		b.add_bytes(vm_data)
	}

	return b.data
}
|
||||
|
||||
// Decode a deployment previously produced by encode(): name, VM count, then
// one length-prefixed VMachine blob per VM.
fn (self TFDeployment) decode(data []u8) !TFDeployment {
	if data.len == 0 {
		return error('Data cannot be empty')
	}

	mut d := encoder.decoder_new(data)
	mut result := TFDeployment{
		name: d.get_string()
	}

	num_vms := d.get_int()

	for _ in 0 .. num_vms {
		// Skip the explicit length written by encode(); get_bytes() carries
		// its own length prefix.
		d.get_int()
		vm_data := d.get_bytes()
		// Fixed: the original wrapped vm_data in a second decoder only to
		// read `.data` straight back out — pass the bytes directly.
		vm := decode_vmachine(vm_data)!
		result.vms << vm
	}

	return result
}
|
||||
8
lib/threefold/grid3/deployer/.heroscript
Normal file
8
lib/threefold/grid3/deployer/.heroscript
Normal file
@@ -0,0 +1,8 @@
|
||||
|
||||
!!hero_code.generate_client
|
||||
name:'deployer'
|
||||
classname:'TFGridDeployer'
|
||||
singleton:0
|
||||
default:1
|
||||
hasconfig:1
|
||||
reset:0
|
||||
41
lib/threefold/grid3/deployer/contracts.v
Normal file
41
lib/threefold/grid3/deployer/contracts.v
Normal file
@@ -0,0 +1,41 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.threefold.gridproxy
|
||||
import freeflowuniverse.herolib.threefold.gridproxy.model as proxy_models
|
||||
|
||||
// Arguments for tfchain_contracts().
@[params]
pub struct ContractGetArgs {
pub mut:
	active  bool = true // when true, only contracts in active state are returned
	twin_id u64 // twin whose contracts are listed
}
|
||||
|
||||
// Retrieves all contracts (active and inactive) on the selected grid network.
|
||||
//
|
||||
// This function interacts with the Grid Proxy to retrieve all contracts associated
|
||||
// with the twin ID of the current deployer (from GridClient).
|
||||
//
|
||||
// Returns:
|
||||
// - An array of `gridproxy.Contract` containing contract information.
|
||||
//
|
||||
// Example:
|
||||
// ```
|
||||
// contracts := cn.get_my_contracts()!
|
||||
// ```
|
||||
// List contracts for the given twin via the grid proxy: only active ones
// when args.active is set, otherwise all contracts matching the twin filter.
pub fn (mut self TFDeployment) tfchain_contracts(args ContractGetArgs) ![]proxy_models.Contract {
	net := resolve_network()!
	args2 := gridproxy.GridProxyClientArgs{
		net: net
		cache: true // cached proxy responses are acceptable for listing
	}

	mut proxy := gridproxy.new(args2)!
	if args.active {
		return proxy.get_contracts_active(args.twin_id)
	} else {
		params := proxy_models.ContractFilter{
			twin_id: args.twin_id
		}
		return proxy.get_contracts(params)
	}
}
|
||||
528
lib/threefold/grid3/deployer/deployment.v
Normal file
528
lib/threefold/grid3/deployer/deployment.v
Normal file
@@ -0,0 +1,528 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.threefold.grid.models as grid_models
|
||||
import freeflowuniverse.herolib.threefold.grid
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import compress.zlib
|
||||
import encoding.hex
|
||||
import x.crypto.chacha20
|
||||
import crypto.sha256
|
||||
import json
|
||||
|
||||
// Contract ids owned by one deployment, grouped by kind, so the whole
// deployment can be cancelled later.
struct GridContracts {
pub mut:
	name []u64 // name contract ids (gateway names)
	node map[string]u64 // node id (as string) -> node contract id
	rent map[string]u64 // node id (as string) -> rent contract id
}
|
||||
|
||||
// A named multi-workload deployment (VMs, ZDBs, web gateways) plus the
// network spec and the contracts created for it on chain.
@[heap]
pub struct TFDeployment {
pub mut:
	name        string
	description string
	vms         []VMachine
	zdbs        []ZDB
	webnames    []WebName
	network     NetworkSpecs
mut:
	// Deployed contracts are kept on the deployment so the whole deployment
	// can be deleted later in one pass.
	contracts GridContracts
	deployer  &grid.Deployer @[skip; str: skip] // chain/node client, not serialized
	kvstore   KVStoreFS      @[skip; str: skip] // local persistence backend, not serialized
}
|
||||
|
||||
// Construct a grid deployer from the configured client: map the client's
// network enum onto grid.ChainNetwork and authenticate with its mnemonic.
fn get_deployer() !grid.Deployer {
	mut grid_client := get()!

	network := match grid_client.network {
		.dev { grid.ChainNetwork.dev }
		.qa { grid.ChainNetwork.qa }
		.test { grid.ChainNetwork.test }
		.main { grid.ChainNetwork.main }
	}

	return grid.new_deployer(grid_client.mnemonic, network)!
}
|
||||
|
||||
// Create a fresh, empty deployment under `name`.
// Fails when a deployment with the same name is already persisted.
pub fn new_deployment(name string) !TFDeployment {
	kvstore := KVStoreFS{}

	if _ := kvstore.get(name) {
		return error('Deployment with the same name "${name}" already exists.')
	}

	deployer := get_deployer()!
	return TFDeployment{
		name: name
		// Reuse the store we already built instead of constructing a second
		// KVStoreFS{} (the original instantiated it twice).
		kvstore: kvstore
		deployer: &deployer
	}
}
|
||||
|
||||
// Load an existing deployment named `name` from the local store,
// decrypting and decoding its persisted state.
pub fn get_deployment(name string) !TFDeployment {
	mut deployer := get_deployer()!
	mut dl := TFDeployment{
		name: name
		kvstore: KVStoreFS{}
		deployer: &deployer
	}

	// Fixed typo in the error message ('Faild' -> 'Failed').
	dl.load() or { return error('Failed to load the deployment due to: ${err}') }

	return dl
}
|
||||
|
||||
// Delete the deployment named `name`: cancel all of its name/node/rent
// contracts on chain in one batch, then remove it from the local store.
pub fn delete_deployment(name string) ! {
	mut deployer := get_deployer()!
	mut dl := TFDeployment{
		name: name
		kvstore: KVStoreFS{}
		deployer: &deployer
	}

	// Fixed typo in the error message ('Faild' -> 'Failed').
	dl.load() or { return error('Failed to load the deployment due to: ${err}') }

	console.print_header('Current deployment contracts: ${dl.contracts}')
	mut contracts := []u64{}
	contracts << dl.contracts.name
	contracts << dl.contracts.node.values()
	contracts << dl.contracts.rent.values()

	dl.deployer.client.batch_cancel_contracts(contracts)!
	console.print_header('Deployment contracts are canceled successfully.')

	dl.kvstore.delete(dl.name)!
	console.print_header('Deployment is deleted successfully.')
}
|
||||
|
||||
// Reconcile this deployment with the grid: pick nodes for unplaced
// workloads, diff against what is already deployed, create/update/cancel
// as needed, then persist the new state locally.
pub fn (mut self TFDeployment) deploy() ! {
	console.print_header('Starting deployment process.')
	self.set_nodes()!
	old_deployment := self.list_deployments()!

	console.print_header('old contract ids: ${old_deployment.keys()}')

	mut setup := new_deployment_setup(self.network, self.vms, self.zdbs, self.webnames,
		old_deployment, mut self.deployer)!

	// Apply the prepared setup against the existing on-grid state.
	self.finalize_deployment(setup)!
	self.save()!
}
|
||||
|
||||
// Assign a node id to every VM, ZDB and webname that does not have one yet,
// by querying the grid proxy for healthy nodes matching each workload's
// resource requirements and letting pick_node() choose among candidates.
fn (mut self TFDeployment) set_nodes() ! {
	// TODO: each request should run in a separate thread
	for mut vm in self.vms {
		if vm.node_id != 0 {
			// Node already chosen (explicitly or by a previous deploy).
			continue
		}

		// Restrict the search to user-requested nodes, when any were given.
		mut node_ids := []u64{}

		for node_id in vm.requirements.nodes {
			node_ids << u64(node_id)
		}

		nodes := filter_nodes(
			node_ids: node_ids
			healthy: true
			free_mru: convert_to_gigabytes(u64(vm.requirements.memory))
			total_cru: u64(vm.requirements.cpu)
			free_sru: convert_to_gigabytes(u64(vm.requirements.size))
			available_for: u64(self.deployer.twin_id)
			free_ips: if vm.requirements.public_ip4 { u64(1) } else { none }
			has_ipv6: if vm.requirements.public_ip6 { vm.requirements.public_ip6 } else { none }
			status: 'up'
			features: if vm.requirements.public_ip4 { ['zmachine'] } else { [] }
			on_hetzner: vm.requirements.use_hetzner_node
		)!

		if nodes.len == 0 {
			// Distinguish "your nodes are too small" from "nothing matched".
			if node_ids.len != 0 {
				return error("The provided vm nodes ${node_ids} don't have enough resources.")
			}
			return error('Requested the Grid Proxy and no nodes found.')
		}

		vm.node_id = u32(pick_node(mut self.deployer, nodes) or {
			return error('Failed to pick valid node: ${err}')
		}.node_id)
	}

	for mut zdb in self.zdbs {
		if zdb.node_id != 0 {
			continue
		}

		nodes := filter_nodes(
			free_sru: convert_to_gigabytes(u64(zdb.requirements.size))
			status: 'up'
			healthy: true
			node_id: zdb.requirements.node_id
			available_for: u64(self.deployer.twin_id)
			on_hetzner: zdb.requirements.use_hetzner_node
		)!

		if nodes.len == 0 {
			return error('Requested the Grid Proxy and no nodes found.')
		}

		zdb.node_id = u32(pick_node(mut self.deployer, nodes) or {
			return error('Failed to pick valid node: ${err}')
		}.node_id)
	}

	for mut webname in self.webnames {
		if webname.node_id != 0 {
			continue
		}

		// Gateways need a node with a configured public domain.
		nodes := filter_nodes(
			domain: true
			status: 'up'
			healthy: true
			node_id: webname.requirements.node_id
			available_for: u64(self.deployer.twin_id)
			features: ['zmachine']
			on_hetzner: webname.requirements.use_hetzner_node
		)!

		if nodes.len == 0 {
			return error('Requested the Grid Proxy and no nodes found.')
		}

		webname.node_id = u32(pick_node(mut self.deployer, nodes) or {
			return error('Failed to pick valid node: ${err}')
		}.node_id)
	}
}
|
||||
|
||||
// Apply the prepared DeploymentSetup against the grid in three stages:
// create (new name contracts + deployments on nodes we were not on),
// cancel (contracts no longer referenced), and update (deployments on
// nodes we already occupy). Finally refreshes local state from results.
fn (mut self TFDeployment) finalize_deployment(setup DeploymentSetup) ! {
	mut new_deployments := map[u32]&grid_models.Deployment{}
	old_deployments := self.list_deployments()!
	mut current_contracts := []u64{}
	mut create_deployments := map[u32]&grid_models.Deployment{}

	for node_id, workloads in setup.workloads {
		console.print_header('Creating deployment on node ${node_id}.')
		mut deployment := grid_models.new_deployment(
			twin_id: setup.deployer.twin_id
			description: 'VGridClient Deployment'
			workloads: workloads
			signature_requirement: grid_models.SignatureRequirement{
				weight_required: 1
				requests: [
					grid_models.SignatureRequest{
						twin_id: u32(setup.deployer.twin_id)
						weight: 1
					},
				]
			}
		)

		if d := old_deployments[node_id] {
			// Node already has one of our deployments: carry over version and
			// contract so this becomes an update rather than a create.
			deployment.version = d.version
			deployment.contract_id = d.contract_id
			current_contracts << d.contract_id
		} else {
			create_deployments[node_id] = &deployment
		}

		deployment.add_metadata('VGridClient/Deployment', self.name)
		new_deployments[node_id] = &deployment
	}

	mut create_name_contracts := []string{}
	mut delete_contracts := []u64{}

	mut returned_deployments := map[u32]&grid_models.Deployment{}
	mut name_contracts_map := setup.name_contract_map.clone()

	// Create stage.
	for contract_name, contract_id in setup.name_contract_map {
		// contract_id == 0 marks a name contract that does not exist yet.
		if contract_id == 0 {
			create_name_contracts << contract_name
		}
	}

	if create_name_contracts.len > 0 || create_deployments.len > 0 {
		created_name_contracts_map, ret_dls := self.deployer.batch_deploy(create_name_contracts, mut
			create_deployments, none)!

		for node_id, deployment in ret_dls {
			returned_deployments[node_id] = deployment
		}

		for contract_name, contract_id in created_name_contracts_map {
			name_contracts_map[contract_name] = contract_id
		}
	}

	// Cancel stage.
	for contract_id in self.contracts.name {
		// Name contracts we own but the new setup no longer references.
		if !setup.name_contract_map.values().contains(contract_id) {
			delete_contracts << contract_id
		}
	}

	for node_id, dl in old_deployments {
		// Old node deployments with no successor in the new setup.
		if _ := new_deployments[node_id] {
			continue
		}
		delete_contracts << dl.contract_id
	}

	if delete_contracts.len > 0 {
		self.deployer.client.batch_cancel_contracts(delete_contracts)!
	}

	// Update stage.
	for node_id, mut dl in new_deployments {
		mut deployment := *dl
		if _ := old_deployments[node_id] {
			self.deployer.update_deployment(node_id, mut deployment, dl.metadata)!
			returned_deployments[node_id] = deployment
		}
	}

	self.update_state(setup, name_contracts_map, returned_deployments)!
}
|
||||
|
||||
// Refresh local deployment state from the grid results: index the returned
// workloads by node and name, record the new contract ids, then copy each
// workload's JSON result (IPs, namespaces, ports, FQDNs) back onto the
// matching VM/ZDB/webname, and sync the network spec from the handler.
fn (mut self TFDeployment) update_state(setup DeploymentSetup, name_contracts_map map[string]u64, dls map[u32]&grid_models.Deployment) ! {
	// node id -> workload name -> workload, for quick result lookups below.
	mut workloads := map[u32]map[string]&grid_models.Workload{}

	for node_id, deployment in dls {
		workloads[node_id] = map[string]&grid_models.Workload{}
		for id, _ in deployment.workloads {
			workloads[node_id][deployment.workloads[id].name] = &deployment.workloads[id]
		}
	}

	// Rebuild the contract bookkeeping from scratch.
	self.contracts = GridContracts{}
	for _, contract_id in name_contracts_map {
		self.contracts.name << contract_id
	}

	for node_id, dl in dls {
		self.contracts.node['${node_id}'] = dl.contract_id
	}

	for mut vm in self.vms {
		vm_workload := workloads[vm.node_id][vm.requirements.name]
		res := json.decode(grid_models.ZmachineResult, vm_workload.result.data)!
		vm.mycelium_ip = res.mycelium_ip
		vm.planetary_ip = res.planetary_ip
		vm.wireguard_ip = res.ip
		vm.contract_id = dls[vm.node_id].contract_id

		if vm.requirements.public_ip4 || vm.requirements.public_ip6 {
			// Public IP workload name convention: '<vm name>_pubip'.
			ip_workload := workloads[vm.node_id]['${vm.requirements.name}_pubip']
			ip_res := json.decode(grid_models.PublicIPResult, ip_workload.result.data)!
			vm.public_ip4 = ip_res.ip
			vm.public_ip6 = ip_res.ip6
		}
	}

	for mut zdb in self.zdbs {
		zdb_workload := workloads[zdb.node_id][zdb.requirements.name]
		res := json.decode(grid_models.ZdbResult, zdb_workload.result.data)!
		zdb.ips = res.ips
		zdb.namespace = res.namespace
		zdb.port = res.port
		zdb.contract_id = dls[zdb.node_id].contract_id
	}

	for mut wn in self.webnames {
		wn_workload := workloads[wn.node_id][wn.requirements.name]
		res := json.decode(grid_models.GatewayProxyResult, wn_workload.result.data)!
		wn.fqdn = res.fqdn
		wn.node_contract_id = dls[wn.node_id].contract_id
		wn.name_contract_id = name_contracts_map[wn.requirements.name]
	}

	self.network.ip_range = setup.network_handler.ip_range
	self.network.mycelium = setup.network_handler.mycelium
	self.network.user_access_configs = setup.network_handler.user_access_configs.clone()
}
|
||||
|
||||
// Return the VM named `vm_name` from this deployment, or an error when
// no machine carries that name.
pub fn (mut self TFDeployment) vm_get(vm_name string) !VMachine {
	console.print_header('Getting ${vm_name} VM.')
	for vmachine in self.vms {
		if vmachine.requirements.name != vm_name {
			continue
		}
		return vmachine
	}
	return error('Machine does not exist.')
}
|
||||
|
||||
// Return the ZDB named `zdb_name` from this deployment, or an error when
// no ZDB carries that name.
pub fn (mut self TFDeployment) zdb_get(zdb_name string) !ZDB {
	console.print_header('Getting ${zdb_name} Zdb.')
	for zdb in self.zdbs {
		if zdb.requirements.name != zdb_name {
			continue
		}
		return zdb
	}
	return error('Zdb does not exist.')
}
|
||||
|
||||
// Return the webname gateway named `wn_name`, or an error when absent.
pub fn (mut self TFDeployment) webname_get(wn_name string) !WebName {
	console.print_header('Getting ${wn_name} webname.')
	for wbn in self.webnames {
		if wbn.requirements.name != wn_name {
			continue
		}
		return wbn
	}
	return error('Webname does not exist.')
}
|
||||
|
||||
// Restore this deployment's state from the key-value store:
// fetch, decrypt, decompress, then decode in place.
pub fn (mut self TFDeployment) load() ! {
	stored := self.kvstore.get(self.name)!
	plain := self.decrypt(stored)!
	raw := self.decompress(plain)!
	self.decode(raw)!
}
|
||||
|
||||
// Serialize (compress + encrypt) this deployment and persist it in the
// key-value store under its name.
fn (mut self TFDeployment) save() ! {
	payload := self.encode()!
	self.kvstore.set(self.name, payload)!
}
|
||||
|
||||
// zlib-compress the serialized deployment before encryption.
fn (self TFDeployment) compress(data []u8) ![]u8 {
	return zlib.compress(data) or { error('Cannot compress the data due to: ${err}') }
}
|
||||
|
||||
// Inverse of compress(): zlib-decompress the decrypted payload.
fn (self TFDeployment) decompress(data []u8) ![]u8 {
	return zlib.decompress(data) or { error('Cannot decompress the data due to: ${err}') }
}
|
||||
|
||||
// ChaCha20-encrypt the compressed payload. Key = SHA-256 of the mnemonic;
// nonce = first 12 bytes of SHA-256(deployment name).
// NOTE(review): the nonce is deterministic per name, so re-saving the same
// deployment reuses the (key, nonce) pair — confirm this is acceptable for
// the threat model.
fn (self TFDeployment) encrypt(compressed []u8) ![]u8 {
	key_hashed := sha256.hexhash(self.deployer.mnemonics)
	name_hashed := sha256.hexhash(self.name)
	key := hex.decode(key_hashed)!
	nonce := hex.decode(name_hashed)![..12]
	encrypted := chacha20.encrypt(key, nonce, compressed) or {
		return error('Cannot encrypt the data due to: ${err}')
	}
	return encrypted
}
|
||||
|
||||
// Inverse of encrypt(): derives the same key/nonce from the mnemonic and
// deployment name, then ChaCha20-decrypts the stored payload.
fn (self TFDeployment) decrypt(data []u8) ![]u8 {
	key_hashed := sha256.hexhash(self.deployer.mnemonics)
	name_hashed := sha256.hexhash(self.name)
	key := hex.decode(key_hashed)!
	nonce := hex.decode(name_hashed)![..12]

	compressed := chacha20.decrypt(key, nonce, data) or {
		return error('Cannot decrypt the data due to: ${err}')
	}
	return compressed
}
|
||||
|
||||
// Serialize the full deployment for storage: JSON-encode, compress, encrypt.
fn (self TFDeployment) encode() ![]u8 {
	// TODO: Change to 'encoder' (binary encoding instead of JSON)

	data := json.encode(self).bytes()

	compressed := self.compress(data)!
	encrypted := self.encrypt(compressed)!
	return encrypted
}
|
||||
|
||||
// Deserialize a JSON payload produced by encode() and copy its fields onto
// this instance (deployer/kvstore are skipped — they are runtime-only).
fn (mut self TFDeployment) decode(data []u8) ! {
	obj := json.decode(TFDeployment, data.bytestr())!
	self.vms = obj.vms
	self.zdbs = obj.zdbs
	self.webnames = obj.webnames
	self.contracts = obj.contracts
	self.network = obj.network
	self.name = obj.name
	self.description = obj.description
}
|
||||
|
||||
// Set a new machine on the deployment.
|
||||
// Register a new machine on the deployment; it is created on the grid on
// the next deploy() call.
pub fn (mut self TFDeployment) add_machine(requirements VMRequirements) {
	vm := VMachine{
		requirements: requirements
	}
	self.vms << vm
}
|
||||
|
||||
// Remove the VM named `name` via swap-with-last (O(1) removal, order not
// preserved). Errors when no VM carries that name.
pub fn (mut self TFDeployment) remove_machine(name string) ! {
	last := self.vms.len - 1
	for idx, vm in self.vms {
		if vm.requirements.name != name {
			continue
		}
		self.vms[idx], self.vms[last] = self.vms[last], self.vms[idx]
		self.vms.delete_last()
		return
	}

	return error('vm with name ${name} is not found')
}
|
||||
|
||||
// Set a new zdb on the deployment.
|
||||
// Register a new ZDB on the deployment; created on the next deploy() call.
pub fn (mut self TFDeployment) add_zdb(zdb ZDBRequirements) {
	entry := ZDB{
		requirements: zdb
	}
	self.zdbs << entry
}
|
||||
|
||||
// Remove the ZDB named `name` via swap-with-last (order not preserved).
// Errors when no ZDB carries that name.
pub fn (mut self TFDeployment) remove_zdb(name string) ! {
	last := self.zdbs.len - 1
	for idx, zdb in self.zdbs {
		if zdb.requirements.name != name {
			continue
		}
		self.zdbs[idx], self.zdbs[last] = self.zdbs[last], self.zdbs[idx]
		self.zdbs.delete_last()
		return
	}

	return error('zdb with name ${name} is not found')
}
|
||||
|
||||
// Set a new webname on the deployment.
|
||||
// Register a new webname gateway on the deployment; created on the next
// deploy() call.
pub fn (mut self TFDeployment) add_webname(requirements WebNameRequirements) {
	entry := WebName{
		requirements: requirements
	}
	self.webnames << entry
}
|
||||
|
||||
// Remove the webname named `name` via swap-with-last (order not preserved).
// Errors when no webname carries that name.
pub fn (mut self TFDeployment) remove_webname(name string) ! {
	last := self.webnames.len - 1
	for idx, wn in self.webnames {
		if wn.requirements.name != name {
			continue
		}
		self.webnames[idx], self.webnames[last] = self.webnames[last], self.webnames[idx]
		self.webnames.delete_last()
		return
	}

	return error('webname with name ${name} is not found')
}
|
||||
|
||||
// lists deployments used with vms, zdbs, and webnames
|
||||
// Fetch every node deployment referenced by this TFDeployment's node
// contracts, one grid call per node run concurrently, keyed by node id.
pub fn (mut self TFDeployment) list_deployments() !map[u32]grid_models.Deployment {
	mut threads := []thread !grid_models.Deployment{}
	mut dls := map[u32]grid_models.Deployment{}
	// Reverse index so results (which only carry contract_id) can be
	// mapped back to their node after the threads join.
	mut contract_node := map[u64]u32{}
	for node_id, contract_id in self.contracts.node {
		contract_node[contract_id] = node_id.u32()
		threads << spawn self.deployer.get_deployment(contract_id, node_id.u32())
	}

	for th in threads {
		dl := th.wait()!
		node_id := contract_node[dl.contract_id]
		dls[node_id] = dl
	}

	return dls
}
|
||||
|
||||
// Attach the given network requirements to the deployment's network spec;
// applied on the next deploy() call.
pub fn (mut self TFDeployment) configure_network(req NetworkRequirements) ! {
	self.network.requirements = req
}
|
||||
|
||||
// Return the WireGuard user-access configurations of the deployment network.
pub fn (mut self TFDeployment) get_user_access_configs() []UserAccessConfig {
	return self.network.user_access_configs
}
|
||||
306
lib/threefold/grid3/deployer/deployment_setup.v
Normal file
306
lib/threefold/grid3/deployer/deployment_setup.v
Normal file
@@ -0,0 +1,306 @@
|
||||
// This file should only contains any functions, helpers that related to the deployment setup.
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.threefold.grid.models as grid_models
|
||||
import freeflowuniverse.herolib.threefold.grid
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import rand
|
||||
|
||||
// a struct that prepare the setup for the deployment
|
||||
// Working state built while preparing a deployment: the per-node workload
// lists plus the network handler and contract bookkeeping.
struct DeploymentSetup {
mut:
	workloads       map[u32][]grid_models.Workload // node id -> workloads to place there
	network_handler NetworkHandler

	deployer          &grid.Deployer @[skip; str: skip] // runtime-only, not serialized/printed
	contracts_map     map[u32]u64 // node id -> node contract id
	name_contract_map map[string]u64 // gateway name -> name contract id (0 = not created yet)
}
|
||||
|
||||
// Sets up a new deployment with network, VM, and ZDB workloads.
|
||||
// Parameters:
|
||||
// - network_specs: NetworkSpecs struct containing network setup specifications
|
||||
// - vms: Array of VMachine instances representing the virtual machines to set up workloads for
|
||||
// - zdbs: Array of ZDB objects containing ZDB requirements
|
||||
// - webnames: Array of WebName instances representing web names
|
||||
// - deployer: Reference to the grid.Deployer for deployment operations
|
||||
// Modifies:
|
||||
// - dls: Modified DeploymentSetup struct with network, VM, and ZDB workloads set up
|
||||
// Returns:
|
||||
// - None
|
||||
// Build a DeploymentSetup for the given workload specs: configures the
// network handler from `network_specs`, then generates network, VM, ZDB and
// webname workloads, and finally aligns workload versions with the
// deployments already on the grid.
fn new_deployment_setup(network_specs NetworkSpecs, vms []VMachine, zdbs []ZDB, webnames []WebName, old_deployments map[u32]grid_models.Deployment, mut deployer grid.Deployer) !DeploymentSetup {
	mut dls := DeploymentSetup{
		deployer: deployer
		network_handler: NetworkHandler{
			req: network_specs.requirements
			deployer: deployer
			mycelium: network_specs.mycelium
			ip_range: network_specs.ip_range
			user_access_configs: network_specs.user_access_configs.clone()
		}
	}

	// Order matters: the network must exist before workloads that join it.
	dls.setup_network_workloads(vms, webnames, old_deployments)!
	dls.setup_vm_workloads(vms)!
	dls.setup_zdb_workloads(zdbs)!
	dls.setup_webname_workloads(webnames)!
	dls.match_versions(old_deployments)
	return dls
}
|
||||
|
||||
// Copy workload versions from the existing on-grid deployments onto the
// freshly generated workloads (matched by name + type per node), so that
// updates are accepted; workloads with no predecessor get version 0.
fn (mut self DeploymentSetup) match_versions(old_dls map[u32]grid_models.Deployment) {
	for node_id, dl in old_dls {
		mut wl_versions := map[string]u32{}
		for wl in dl.workloads {
			wl_versions['${wl.name}:${wl.type_}'] = wl.version
		}

		for mut wl in self.workloads[node_id] {
			wl.version = wl_versions['${wl.name}:${wl.type_}']
		}
	}
}
|
||||
|
||||
// Sets up network workloads for the deployment setup.
|
||||
// Parameters:
|
||||
// - vms: Array of VMachine instances representing the virtual machines to set up workloads for
|
||||
// Modifies:
|
||||
// - st: Modified DeploymentSetup struct with network workloads set up
|
||||
// Returns:
|
||||
// - None
|
||||
// Generate the per-node network workloads: load existing network state from
// the old deployments, (re)create the network spanning all VM/webname
// nodes, then append the generated workloads to each node's list.
fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, webnames []WebName, old_deployments map[u32]grid_models.Deployment) ! {
	st.network_handler.load_network_state(old_deployments)!
	st.network_handler.create_network(vms, webnames)!
	data := st.network_handler.generate_workloads()!

	for node_id, workload in data {
		st.workloads[node_id] << workload
	}
}
|
||||
|
||||
// Sets up VM workloads for the deployment setup.
|
||||
//
|
||||
// This method iterates over a list of VMachines, processes each machine's requirements,
|
||||
// sets up public IP if required, creates a Zmachine workload, and updates the used IP octets map.
|
||||
//
|
||||
// Parameters:
|
||||
// - machines: Array of VMachine instances representing the virtual machines to set up workloads for
|
||||
// Modifies:
|
||||
// - self: Modified DeploymentSetup struct with VM workloads set up
|
||||
// - used_ip_octets: Map of u32 to arrays of u8 representing used IP octets
|
||||
// Returns:
|
||||
// - None
|
||||
fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! {
	if machines.len == 0 {
		return
	}

	console.print_header('Preparing Zmachine workloads.')
	// tracks which last-octets are already taken per node when assigning
	// private IPs to the zmachines
	mut used_ip_octets := map[u32][]u8{}
	for machine in machines {
		mut req := machine.requirements
		mut public_ip_name := ''

		// a separate PublicIP workload is created when the VM asked for a
		// public v4/v6 address; the zmachine then references it by name
		if req.public_ip4 || req.public_ip6 {
			public_ip_name = '${req.name}_pubip'
			self.set_public_ip_workload(machine.node_id, public_ip_name, req)!
		}

		self.set_zmachine_workload(machine, public_ip_name, mut used_ip_octets)!
	}
}
|
||||
|
||||
// Sets up Zero-DB (ZDB) workloads for deployment.
|
||||
//
|
||||
// This function takes a list of ZDB results, processes each result into a ZDB workload model,
|
||||
// assigns it to a healthy node, and then adds it to the deployment workloads.
|
||||
//
|
||||
// `zdbs`: A list of ZDB objects containing the ZDB requirements.
|
||||
//
|
||||
// Each ZDB is processed to convert the requirements into a grid workload and associated with a healthy node.
|
||||
fn (mut self DeploymentSetup) setup_zdb_workloads(zdbs []ZDB) ! {
	if zdbs.len == 0 {
		return
	}

	console.print_header('Preparing ZDB workloads.')
	for zdb in zdbs {
		// Retrieve ZDB requirements from the result
		mut req := zdb.requirements

		// Create the Zdb model; req.size is in GB and the grid expects
		// bytes (convert_to_gigabytes multiplies by 1024^3)
		zdb_model := grid_models.Zdb{
			size: convert_to_gigabytes(u64(req.size))
			mode: req.mode
			public: req.public
			password: req.password
		}

		// Generate a workload based on the Zdb model
		zdb_workload := zdb_model.to_workload(
			name: req.name
			description: req.description
		)

		// Append the workload to the node's workload list in the deployment setup
		self.workloads[zdb.node_id] << zdb_workload
	}
}
|
||||
|
||||
// Sets up web name workloads for the deployment setup.
|
||||
//
|
||||
// This method processes each WebName instance in the provided array, sets up gateway name proxies based on the requirements,
|
||||
// and adds the gateway name proxy workload to the deployment workloads. It also updates the name contract map accordingly.
|
||||
//
|
||||
// Parameters:
|
||||
// - webnames: Array of WebName instances representing web names to set up workloads for
|
||||
// Modifies:
|
||||
// - self: Modified DeploymentSetup struct with web name workloads set up
|
||||
// Returns:
|
||||
// - None
|
||||
fn (mut self DeploymentSetup) setup_webname_workloads(webnames []WebName) ! {
	if webnames.len == 0 {
		return
	}

	console.print_header('Preparing WebName workloads.')
	for wn in webnames {
		req := wn.requirements

		// fall back to a random lowercase name when none was provided
		gw_name := if req.name == '' {
			rand.string(5).to_lower()
		} else {
			req.name
		}

		gw := grid_models.GatewayNameProxy{
			tls_passthrough: req.tls_passthrough
			backends: [req.backend]
			name: gw_name
			// only attach the proxy to the wireguard network when the
			// backend is reachable over it; `none` means public routing
			network: if wn.requirements.use_wireguard_network {
				self.network_handler.req.name
			} else {
				none
			}
		}

		self.workloads[wn.node_id] << gw.to_workload(
			name: gw_name
		)
		// remember which name contract belongs to this gateway name
		self.name_contract_map[gw_name] = wn.name_contract_id
	}
}
|
||||
|
||||
// Sets up a Zmachine workload for the deployment setup.
|
||||
//
|
||||
// This method prepares a Zmachine workload based on the provided VMachine, assigns private and public IPs,
|
||||
// sets up Mycelium IP if required, and configures compute capacity and environment variables.
|
||||
//
|
||||
// Parameters:
|
||||
// - vmachine: VMachine instance representing the virtual machine for which the workload is being set up
|
||||
// - public_ip_name: Name of the public IP to assign to the Zmachine
|
||||
// - used_ip_octets: Map of u32 to arrays of u8 representing used IP octets
|
||||
// Throws:
|
||||
// - Error if grid client is not available or if there are issues setting up the workload
|
||||
fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip_name string, mut used_ip_octets map[u32][]u8) ! {
	mut grid_client := get()!
	mut env_map := vmachine.requirements.env.clone()
	// the configured ssh key is always injected so the VM is reachable
	env_map['SSH_KEY'] = grid_client.ssh_key

	zmachine_workload := grid_models.Zmachine{
		network: grid_models.ZmachineNetwork{
			interfaces: [
				grid_models.ZNetworkInterface{
					network: self.network_handler.req.name
					// reuse the wireguard ip from a previous deployment when
					// present (marking its last octet as used), otherwise
					// pick a fresh private ip inside the node's subnet
					ip: if vmachine.wireguard_ip.len > 0 {
						used_ip_octets[vmachine.node_id] << vmachine.wireguard_ip.all_after_last('.').u8()
						vmachine.wireguard_ip
					} else {
						self.assign_private_ip(vmachine.node_id, mut used_ip_octets)!
					}
				},
			]
			public_ip: public_ip_name
			planetary: vmachine.requirements.planetary
			// mycelium is optional; when requested, the interface joins the
			// same network with the caller-provided hex seed
			mycelium: if mycelium := vmachine.requirements.mycelium {
				grid_models.MyceliumIP{
					network: self.network_handler.req.name
					hex_seed: mycelium.hex_seed
				}
			} else {
				none
			}
		}
		// sizes are given in GB and converted to bytes for the grid
		size: convert_to_gigabytes(u64(vmachine.requirements.size))
		flist: vmachine.requirements.flist
		entrypoint: vmachine.requirements.entrypoint
		compute_capacity: grid_models.ComputeCapacity{
			cpu: u8(vmachine.requirements.cpu)
			memory: i64(convert_to_gigabytes(u64(vmachine.requirements.memory)))
		}
		env: env_map
	}.to_workload(
		name: vmachine.requirements.name
		description: vmachine.requirements.description
	)

	self.workloads[vmachine.node_id] << zmachine_workload
}
|
||||
|
||||
// Sets up a public IP workload for a specific node.
|
||||
//
|
||||
// This method creates a PublicIP workload based on the provided VMRequirements,
|
||||
// assigns IPv4 and IPv6 addresses, and adds the workload to the DeploymentSetup workloads for the specified node.
|
||||
//
|
||||
// Parameters:
|
||||
// - node_id: u32 representing the node ID where the public IP workload will be set up
|
||||
// - public_ip_name: Name of the public IP to assign to the workload
|
||||
// Builds a PublicIP workload (IPv4/IPv6 flags taken from the VM
// requirements) and appends it to the workload list of the given node.
fn (mut self DeploymentSetup) set_public_ip_workload(node_id u32, public_ip_name string, vm VMRequirements) ! {
	console.print_header('Preparing Public IP workload for node ${node_id}.')
	ip_model := grid_models.PublicIP{
		v4: vm.public_ip4
		v6: vm.public_ip6
	}
	self.workloads[node_id] << ip_model.to_workload(name: public_ip_name)
}
|
||||
|
||||
// Assigns a private IP to a specified node based on the provided node ID and used IP octets map.
|
||||
//
|
||||
// Parameters:
|
||||
// - node_id: u32 representing the node ID to assign the private IP to
|
||||
// - used_ip_octets: Map of u32 to arrays of u8 representing the used IP octets for each node
|
||||
// Returns:
|
||||
// - string: The assigned private IP address
|
||||
// Throws:
|
||||
// - Error if failed to assign a private IP in the subnet
|
||||
// Picks an unused private IP inside the node's wireguard /24 subnet,
// records its last octet in used_ip_octets and returns the dotted address.
//
// Fix: the subnet ip was split twice (once into split_ip, once just for
// the last octet) and a pointless temporary held the joined result; the
// octets are now split once and reused.
fn (mut self DeploymentSetup) assign_private_ip(node_id u32, mut used_ip_octets map[u32][]u8) !string {
	// network address of the node's wg subnet, e.g. '10.10.3.0'
	ip := self.network_handler.wg_subnet[node_id].split('/')[0]
	mut split_ip := ip.split('.')
	last_octet := split_ip.last().u8()
	// .0 is the network address and .1 is reserved, so candidates start at
	// last_octet + 2; 255 (broadcast) is excluded
	for candidate := last_octet + 2; candidate < 255; candidate += 1 {
		if candidate in used_ip_octets[node_id] {
			continue
		}
		split_ip[3] = '${candidate}'
		used_ip_octets[node_id] << candidate
		return split_ip.join('.')
	}
	return error('failed to assign private IP in subnet: ${self.network_handler.wg_subnet[node_id]}')
}
|
||||
|
||||
/*
|
||||
TODO's:
|
||||
# TODO:
|
||||
- add action methods e.g. delete, ping...
|
||||
- cache node and user twin ids
|
||||
- change the encoding/decoding behavior
|
||||
|
||||
# Done:
|
||||
- return result after deployment
|
||||
- use batch calls for substrate
|
||||
- send deployments to nodes concurrently
|
||||
- add roll back behavior
|
||||
*/
|
||||
36
lib/threefold/grid3/deployer/filter.v
Normal file
36
lib/threefold/grid3/deployer/filter.v
Normal file
@@ -0,0 +1,36 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.threefold.gridproxy
|
||||
import freeflowuniverse.herolib.threefold.gridproxy.model as gridproxy_models
|
||||
|
||||
|
||||
//TODO: put all code in relation to filtering in file filter.v
|
||||
// Arguments for filter_nodes; embeds the grid proxy NodeFilter and adds
// an `on_hetzner` flag that narrows results to 'zmachine-light' capable nodes.
@[params]
pub struct FilterNodesArgs {
	gridproxy_models.NodeFilter
pub:
	on_hetzner bool
}
|
||||
|
||||
// Queries the grid proxy for nodes matching `args`; when `on_hetzner` is
// set, the 'zmachine-light' feature is appended to the filter.
pub fn filter_nodes(args FilterNodesArgs) ![]gridproxy_models.Node {
	// the grid proxy endpoint depends on the configured chain network
	net := resolve_network()!
	mut gp_client := gridproxy.new(net: net, cache: true)!

	// start from the embedded NodeFilter and apply the extra flag
	mut filter := args.NodeFilter
	if args.on_hetzner {
		filter.features << ['zmachine-light']
	}

	return gp_client.get_nodes(filter)!
}
|
||||
|
||||
// fn get_hetzner_node_ids(nodes []gridproxy_models.Node) ![]u64 {
|
||||
// // get farm ids that are know to be hetzner's
|
||||
// // if we need to iterate over all nodes, maybe we should use multi-threading
|
||||
// panic('Not Implemented')
|
||||
// return []
|
||||
// }
|
||||
33
lib/threefold/grid3/deployer/kvstore.v
Normal file
33
lib/threefold/grid3/deployer/kvstore.v
Normal file
@@ -0,0 +1,33 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.core.base as context
|
||||
|
||||
// Filesystem-backed key-value store scoped to the 'deployer' session.
// Will be changed when we support the logic of the TFChain one
pub struct KVStoreFS {}
|
||||
|
||||
// Stores `data` under `key` in the 'deployer' session db of the current context.
fn (kvs KVStoreFS) set(key string, data []u8) ! {
	// set in context
	mut mycontext := context.context_new()!
	mut session := mycontext.session_new(name: 'deployer')!
	mut db := session.db_get()!
	db.set(key: key, valueb: data) or { return error('Cannot set the key due to: ${err}') }
}
|
||||
|
||||
// Loads the value stored under `key`; errors when the key is missing or
// the stored value is empty.
fn (kvs KVStoreFS) get(key string) ![]u8 {
	mut mycontext := context.context_new()!
	mut session := mycontext.session_new(name: 'deployer')!
	mut db := session.db_get()!
	value := db.get(key: key) or { return error('Cannot get value of key ${key} due to: ${err}') }
	if value.len == 0 {
		return error('The value is empty.')
	}

	return value.bytes()
}
|
||||
|
||||
// Removes `key` from the 'deployer' session db of the current context.
//
// Fix: the error message was copy-pasted from set() and claimed the key
// could not be *set*; it now reports the failed delete.
fn (kvs KVStoreFS) delete(key string) ! {
	mut mycontext := context.context_new()!
	mut session := mycontext.session_new(name: 'deployer')!
	mut db := session.db_get()!
	db.delete(key: key) or { return error('Cannot delete the key due to: ${err}') }
}
|
||||
394
lib/threefold/grid3/deployer/network.v
Normal file
394
lib/threefold/grid3/deployer/network.v
Normal file
@@ -0,0 +1,394 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.threefold.grid.models as grid_models
|
||||
import freeflowuniverse.herolib.threefold.grid
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import json
|
||||
import rand
|
||||
|
||||
// NetworkInfo struct to represent network details
|
||||
// NetworkRequirements describes what the caller asks of the network: its
// name and how many external (wireguard) user-access endpoints it needs.
@[params]
pub struct NetworkRequirements {
pub mut:
	name                  string = 'net' + rand.string(5)
	user_access_endpoints int
}
|
||||
|
||||
// NetworkSpecs holds the concrete network configuration: the requirements,
// the overall ip range, the mycelium hex key and any generated
// user-access wireguard configs.
@[params]
pub struct NetworkSpecs {
pub mut:
	requirements        NetworkRequirements
	ip_range            string = '10.10.0.0/16'
	mycelium            string = rand.hex(64)
	user_access_configs []UserAccessConfig
}
|
||||
|
||||
// UserAccessConfig captures one wireguard user-access peer: the user's
// own ip and key pair plus the public node's key, endpoint and the
// network range — everything needed to render a client wireguard config.
pub struct UserAccessConfig {
pub:
	ip         string
	secret_key string
	public_key string

	peer_public_key      string
	network_ip_range     string
	public_node_endpoint string
}
|
||||
|
||||
// Renders (does not print, despite the name) a ready-to-use wireguard
// client config for this user-access peer; 100.64.0.0/16 is allowed
// through the tunnel because hidden nodes are reached via their
// 100.64.x.y wireguard routing ips.
pub fn (c UserAccessConfig) print_wg_config() string {
	return '[Interface]
Address = ${c.ip}
PrivateKey = ${c.secret_key}
[Peer]
PublicKey = ${c.peer_public_key}
AllowedIPs = ${c.network_ip_range}, 100.64.0.0/16
PersistentKeepalive = 25
Endpoint = ${c.public_node_endpoint}'
}
|
||||
|
||||
// NetworkHandler owns all per-node wireguard state for one deployment:
// ports, key pairs, subnets, endpoints, which node (if any) acts as the
// public access point, which nodes are hidden, and user-access configs.
struct NetworkHandler {
mut:
	req NetworkRequirements
	// network_name string
	nodes     []u32 // node ids participating in the network
	ip_range  string // overall network range, e.g. 10.10.0.0/16
	wg_ports  map[u32]u16 // assigned wireguard listen port per node
	wg_keys   map[u32][]string // [private, public] wireguard keys per node
	wg_subnet map[u32]string // /24 subnet per node
	endpoints map[u32]string // public endpoint ip per reachable node
	public_node               u32 // node used as wireguard access point (0 = none)
	hidden_nodes              []u32 // nodes without any public config
	none_accessible_ip_ranges []string // ranges only reachable through the public node
	mycelium                  string // mycelium hex key shared by the Znet workloads

	// user_access_endopoints int
	user_access_configs []UserAccessConfig

	deployer &grid.Deployer @[skip; str: skip]
}
|
||||
|
||||
// TODO: maybe rename to fill_network or something similar
|
||||
// Collects the distinct node ids used by the given vmachines and by the
// webnames that requested wireguard access, then prepares the wireguard
// data, the access node and the user-access peers for those nodes.
fn (mut self NetworkHandler) create_network(vmachines []VMachine, webnames []WebName) ! {
	// rebuild the node list from scratch, deduplicating as we go
	self.nodes = []

	for vmachine in vmachines {
		if vmachine.node_id !in self.nodes {
			self.nodes << vmachine.node_id
		}
	}

	for webname in webnames {
		if webname.requirements.use_wireguard_network && webname.node_id !in self.nodes {
			self.nodes << webname.node_id
		}
	}

	console.print_header('Network nodes: ${self.nodes}.')
	self.setup_wireguard_data()!
	self.setup_access_node()!
	self.setup_user_access()!
}
|
||||
|
||||
// Builds the Znet workload for one node from the stored wireguard state
// (subnet, keys, listen port) plus the given peer list and mycelium key.
fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models.Peer, mycleium_hex_key string) !grid_models.Workload {
	mut network_workload := grid_models.Znet{
		ip_range: self.ip_range
		subnet: self.wg_subnet[node_id]
		wireguard_private_key: self.wg_keys[node_id][0]
		wireguard_listen_port: self.wg_ports[node_id]
		peers: peers
		mycelium: grid_models.Mycelium{
			hex_key: mycleium_hex_key
			peers: []
		}
	}

	return network_workload.to_workload(
		name: self.req.name
		description: 'VGridClient network workload'
	)
}
|
||||
|
||||
// Peer list for a hidden node: a single peer pointing at the public
// access node (when one exists), routing the whole network range plus
// the matching 100.64.x.y/24 wireguard routing range through it.
// NOTE(review): the node_id parameter is currently unused — confirm.
fn (mut self NetworkHandler) prepare_hidden_node_peers(node_id u32) ![]grid_models.Peer {
	mut peers := []grid_models.Peer{}
	if self.public_node != 0 {
		ip_range_oct := self.ip_range.all_before('/').split('.')
		peers << grid_models.Peer{
			subnet: self.wg_subnet[self.public_node]
			wireguard_public_key: self.wg_keys[self.public_node][1]
			allowed_ips: [self.ip_range, '100.64.${ip_range_oct[1]}.${ip_range_oct[2]}/24']
			endpoint: '${self.endpoints[self.public_node]}:${self.wg_ports[self.public_node]}'
		}
	}
	return peers
}
|
||||
|
||||
// Ensures the network has a publicly reachable access node when one is
// needed (hidden nodes present, or user-access endpoints requested).
// Reuses an already-known public node when possible; otherwise asks the
// grid proxy for a healthy ipv4 node and wires it into the wg state.
fn (mut self NetworkHandler) setup_access_node() ! {
	// no user access requested and nothing hidden to relay for: no access node
	if self.req.user_access_endpoints == 0 && (self.hidden_nodes.len < 1 || self.nodes.len == 1) {
		self.public_node = 0
		return
	}

	// a public node is already known (e.g. loaded from an old deployment)
	if self.public_node != 0 {
		if !self.nodes.contains(self.public_node) {
			self.nodes << self.public_node
		}
		return
	}

	/*
	- In this case a public node should be assigned.
	- We need to store it somewhere to inform the user that the deployment has one more contract on another node,
	also delete that contract when delete the full deployment.
	- Assign the public node with the new node id.
	*/
	console.print_header('No public nodes found based on your specs.')
	console.print_header('Requesting the Proxy to assign a public node.')

	nodes := filter_nodes(
		ipv4: true
		status: 'up'
		healthy: true
		available_for: u64(self.deployer.twin_id)
		features: [
			'zmachine',
		]
	)!
	if nodes.len == 0 {
		return error('Requested the Grid Proxy and no nodes found.')
	}

	// pick a random candidate that actually answers a ping
	access_node := pick_node(mut self.deployer, nodes) or {
		return error('Failed to pick valid node: ${err}')
	}
	self.public_node = u32(access_node.node_id)

	console.print_header('Public node ${self.public_node}')

	self.nodes << self.public_node

	// allocate wireguard port, key pair and a /24 subnet for the access node
	wg_port := self.deployer.assign_wg_port(self.public_node)!
	keys := self.deployer.client.generate_wg_priv_key()! // The first index will be the private.
	mut parts := self.ip_range.split('/')[0].split('.')
	parts[2] = '${self.nodes.len + 2}'
	subnet := parts.join('.') + '/24'

	self.wg_ports[self.public_node] = wg_port
	self.wg_keys[self.public_node] = keys
	self.wg_subnet[self.public_node] = subnet
	self.endpoints[self.public_node] = access_node.public_config.ipv4.split('/')[0]
}
|
||||
|
||||
// Creates wireguard user-access configs until the requested count is
// reached; each config gets its own subnet and key pair and peers with
// the public access node. Removing existing user access is unsupported.
fn (mut self NetworkHandler) setup_user_access() ! {
	to_create_user_access := self.req.user_access_endpoints - self.user_access_configs.len
	if to_create_user_access < 0 {
		// TODO: support removing user access
		return error('removing user access is not supported')
	}

	for i := 0; i < to_create_user_access; i++ {
		wg_keys := self.deployer.client.generate_wg_priv_key()!
		self.user_access_configs << UserAccessConfig{
			// a free /24 inside the network range acts as the user's address
			ip: self.calculate_subnet()!
			secret_key: wg_keys[0]
			public_key: wg_keys[1]
			peer_public_key: self.wg_keys[self.public_node][1]
			public_node_endpoint: '${self.endpoints[self.public_node]}:${self.wg_ports[self.public_node]}'
			network_ip_range: self.ip_range
		}
	}
}
|
||||
|
||||
// Fills per-node wireguard data (port, key pair, subnet, endpoint) for
// every node in self.nodes, classifying each node as public (ipv4),
// ipv6-only, or hidden. Data already present (loaded from an earlier
// deployment) is kept; only the classification is refreshed.
fn (mut self NetworkHandler) setup_wireguard_data() ! {
	console.print_header('Setting up network workload.')
	self.hidden_nodes, self.none_accessible_ip_ranges = [], []

	for node_id in self.nodes {
		// a node without any public configuration is treated as hidden
		// rather than failing the whole setup
		mut public_config := self.deployer.get_node_pub_config(node_id) or {
			if err.msg().contains('no public configuration') {
				grid_models.PublicConfig{}
			} else {
				return error('Failed to get node public config: ${err}')
			}
		}

		if _ := self.wg_ports[node_id] {
			// The node already has wireguard data: don't re-generate it,
			// just refresh its endpoint / hidden classification
			if public_config.ipv4.len != 0 {
				self.endpoints[node_id] = public_config.ipv4.split('/')[0]
				if self.public_node == 0 {
					self.public_node = node_id
				}
			} else if public_config.ipv6.len != 0 {
				self.endpoints[node_id] = public_config.ipv6.split('/')[0]
			} else {
				self.hidden_nodes << node_id
				self.none_accessible_ip_ranges << self.wg_subnet[node_id]
				self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id])
			}

			continue
		}

		// new node: allocate a listen port, a key pair and a free /24 subnet
		self.wg_ports[node_id] = self.deployer.assign_wg_port(node_id)!

		self.wg_keys[node_id] = self.deployer.client.generate_wg_priv_key()!

		self.wg_subnet[node_id] = self.calculate_subnet()!

		if public_config.ipv4.len != 0 {
			self.endpoints[node_id] = public_config.ipv4.split('/')[0]
			// NOTE(review): unlike the existing-node branch above, this
			// unconditionally overwrites public_node with the latest ipv4
			// node — confirm this asymmetry is intended
			self.public_node = node_id
		} else if public_config.ipv6.len != 0 {
			self.endpoints[node_id] = public_config.ipv6.split('/')[0]
		} else {
			self.hidden_nodes << node_id
			self.none_accessible_ip_ranges << self.wg_subnet[node_id]
			self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id])
		}
	}
}
|
||||
|
||||
// Peer list for a publicly reachable node: every other non-hidden node
// becomes a regular peer; if this node is the access node it also peers
// with each hidden node and each user-access config (endpoint-less
// entries, since those parties dial in themselves).
fn (mut self NetworkHandler) prepare_public_node_peers(node_id u32) ![]grid_models.Peer {
	mut peers := []grid_models.Peer{}
	for peer_id in self.nodes {
		if peer_id in self.hidden_nodes || peer_id == node_id {
			continue
		}

		subnet := self.wg_subnet[peer_id]
		mut allowed_ips := [subnet, wireguard_routing_ip(subnet)]

		// traffic for hidden ranges is routed through the access node
		if peer_id == self.public_node {
			allowed_ips << self.none_accessible_ip_ranges
		}

		peers << grid_models.Peer{
			subnet: subnet
			wireguard_public_key: self.wg_keys[peer_id][1]
			allowed_ips: allowed_ips
			endpoint: '${self.endpoints[peer_id]}:${self.wg_ports[peer_id]}'
		}
	}

	if node_id == self.public_node {
		// hidden nodes have no reachable endpoint: they initiate the
		// connection, so the access node lists them without one
		for hidden_node_id in self.hidden_nodes {
			subnet := self.wg_subnet[hidden_node_id]
			routing_ip := wireguard_routing_ip(subnet)

			peers << grid_models.Peer{
				subnet: subnet
				wireguard_public_key: self.wg_keys[hidden_node_id][1]
				allowed_ips: [subnet, routing_ip]
				endpoint: ''
			}
		}

		// same for user-access clients dialing in from outside
		for user_access in self.user_access_configs {
			routing_ip := wireguard_routing_ip(user_access.ip)

			peers << grid_models.Peer{
				subnet: user_access.ip
				wireguard_public_key: user_access.public_key
				allowed_ips: [user_access.ip, routing_ip]
				endpoint: ''
			}
		}
	}

	return peers
}
|
||||
|
||||
// Finds the first free /24 subnet inside the network's ip range, skipping
// subnets already assigned to nodes or to user-access peers.
//
// Fix: the error message typo 'calcuate' is corrected; the used-subnet
// list is now built directly instead of via two intermediate arrays.
fn (mut self NetworkHandler) calculate_subnet() !string {
	mut parts := self.ip_range.split('/')[0].split('.')
	// subnets taken by nodes plus those handed to user-access peers
	mut used_subnets := self.wg_subnet.values().clone()
	used_subnets << self.user_access_configs.map(it.ip)

	// walk the /24s of the range; .0 and .1 in the third octet are reserved
	for i := 2; i <= 255; i += 1 {
		parts[2] = '${i}'
		candidate := parts.join('.') + '/24'
		if !used_subnets.contains(candidate) {
			return candidate
		}
	}

	return error('failed to calculate subnet')
}
|
||||
|
||||
// Rebuilds the in-memory wireguard/network state from previously
// deployed workloads so re-deployments reuse the name, ports, keys and
// subnets instead of generating new ones.
fn (mut self NetworkHandler) load_network_state(dls map[u32]grid_models.Deployment) ! {
	// load network from deployments

	mut network_name := ''
	mut subnet_node := map[string]u32{}
	mut subnet_to_endpoint := map[string]string{}
	for node_id, dl in dls {
		mut znet := grid_models.Znet{}
		for wl in dl.workloads {
			// NOTE(review): assigned on every iteration, but only consumed
			// after the break below, i.e. once a network workload is found
			network_name = wl.name
			if wl.type_ == grid_models.workload_types.network {
				znet = json.decode(grid_models.Znet, wl.data)!
				break
			}
		}

		if znet.subnet == '' {
			// deployment didn't have a network workload. skip..
			continue
		}

		self.req.name = network_name
		self.nodes << node_id
		self.ip_range = znet.ip_range
		self.wg_ports[node_id] = znet.wireguard_listen_port
		// only the private key is stored in the workload; re-derive public
		self.wg_keys[node_id] = [znet.wireguard_private_key,
			self.deployer.client.generate_wg_public_key(znet.wireguard_private_key)!]
		self.wg_subnet[node_id] = znet.subnet
		self.mycelium = if myclelium := znet.mycelium { myclelium.hex_key } else { '' }
		subnet_node[znet.subnet] = node_id
		for peer in znet.peers {
			subnet_to_endpoint[peer.subnet] = peer.endpoint

			if peer.endpoint == '' {
				// current node is the access node
				self.public_node = node_id
			}
		}
	}

	// translate the collected peer table into endpoints / hidden nodes
	for subnet, endpoint in subnet_to_endpoint {
		node_id := subnet_node[subnet] or {
			// this maybe a user access, not a node
			continue
		}

		if endpoint == '' {
			self.hidden_nodes << node_id
			continue
		}
		// strip the port and any ipv6 brackets from 'host:port'
		self.endpoints[node_id] = endpoint.all_before_last(':').trim('[]')
	}

	for node_id in self.hidden_nodes {
		self.none_accessible_ip_ranges << self.wg_subnet[node_id]
		self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id])
	}
}
|
||||
|
||||
// Builds one Znet workload per participating node: hidden nodes only get
// the access-node peer, all others get the full peer list.
fn (mut self NetworkHandler) generate_workloads() !map[u32]grid_models.Workload {
	mut workloads := map[u32]grid_models.Workload{}
	for node_id in self.nodes {
		peers := if node_id in self.hidden_nodes {
			self.prepare_hidden_node_peers(node_id)!
		} else {
			self.prepare_public_node_peers(node_id)!
		}
		workloads[node_id] = self.generate_workload(node_id, peers, self.mycelium)!
	}
	return workloads
}
|
||||
27
lib/threefold/grid3/deployer/readme.md
Normal file
27
lib/threefold/grid3/deployer/readme.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# deployer
|
||||
|
||||
To get started
|
||||
|
||||
```vlang
|
||||
|
||||
|
||||
|
||||
import freeflowuniverse.herolib.clients.deployer
|
||||
|
||||
mut client:= deployer.get()!
|
||||
|
||||
client...
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
|
||||
## example heroscript
|
||||
|
||||
```hero
|
||||
!!deployer.configure
|
||||
secret: '...'
|
||||
host: 'localhost'
|
||||
port: 8888
|
||||
```
|
||||
118
lib/threefold/grid3/deployer/tfgrid3deployer_factory_.v
Normal file
118
lib/threefold/grid3/deployer/tfgrid3deployer_factory_.v
Normal file
@@ -0,0 +1,118 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.installers.threefold.griddriver
|
||||
|
||||
|
||||
// In-memory registry of configured TFGridDeployer instances plus the
// name of the instance currently selected as default.
__global (
	tfgrid3deployer_global  map[string]&TFGridDeployer
	tfgrid3deployer_default string
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
// Lookup arguments for get()/exists()/delete(); `name` selects the instance.
@[params]
pub struct ArgsGet {
pub mut:
	name string
}

// Normalizes lookup arguments: an empty name falls back to 'default'.
fn args_get(args_ ArgsGet) ArgsGet {
	mut args := args_
	if args.name.len == 0 {
		args.name = 'default'
	}
	return args
}
|
||||
|
||||
// Returns the TFGridDeployer instance for args.name, installing the
// griddriver dependency first. The instance is loaded from the saved
// heroscript config when present, otherwise a fresh default object is
// created and persisted; results are cached in tfgrid3deployer_global.
//
// Fix: the freshly created object kept its default name 'default', so
// requesting a not-yet-configured instance with any other name stored it
// under the wrong key and then panicked on the final lookup. The object
// now carries the requested name before being persisted.
pub fn get(args_ ArgsGet) !&TFGridDeployer {
	// make sure the griddriver binary is available before any grid call
	mut installer := griddriver.get()!
	installer.install()!

	mut context := base.context()!
	mut args := args_get(args_)
	mut obj := TFGridDeployer{
		name: args.name
	}
	if args.name !in tfgrid3deployer_global {
		if !exists(args)! {
			// no stored config yet: persist (and cache) a default object
			set(obj)!
		} else {
			// load the stored heroscript config and cache it in memory
			heroscript := context.hero_config_get('deployer', args.name)!
			mut obj_ := heroscript_loads(heroscript)!
			set_in_mem(obj_)!
		}
	}
	return tfgrid3deployer_global[args.name] or {
		println(tfgrid3deployer_global)
		// bug if we get here because should be in globals
		panic('could not get config for deployer with name, is bug:${args.name}')
	}
}
|
||||
|
||||
// register the config for the future
|
||||
// register the config for the future
// Persists the deployer config as heroscript under its name and also
// caches it in the in-memory registry.
pub fn set(o TFGridDeployer) ! {
	set_in_mem(o)!
	mut context := base.context()!
	heroscript := heroscript_dumps(o)!
	context.hero_config_set('deployer', o.name, heroscript)!
}
|
||||
|
||||
// does the config exists?
|
||||
// does the config exists?
// Reports whether a saved heroscript config exists for the given name.
pub fn exists(args_ ArgsGet) !bool {
	mut context := base.context()!
	mut args := args_get(args_)
	return context.hero_config_exists('deployer', args.name)
}
|
||||
|
||||
// Deletes the saved config for the given name and evicts the cached
// in-memory instance.
//
// Fix: the in-memory eviction was an empty if-block with the actual
// delete commented out, so a stale instance survived until restart; the
// map entry is now removed.
pub fn delete(args_ ArgsGet) ! {
	mut args := args_get(args_)
	mut context := base.context()!
	context.hero_config_delete('deployer', args.name)!
	if args.name in tfgrid3deployer_global {
		tfgrid3deployer_global.delete(args.name)
	}
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
// only sets in mem, does not set as config
// obj_init validates/normalizes the object first; the instance then
// becomes the current default.
fn set_in_mem(o TFGridDeployer) ! {
	mut o2 := obj_init(o)!
	tfgrid3deployer_global[o.name] = &o2
	tfgrid3deployer_default = o.name
}
|
||||
|
||||
// Arguments for play(): either raw heroscript text or a ready playbook.
@[params]
pub struct PlayArgs {
pub mut:
	heroscript string // if filled in then plbook will be made out of it
	plbook     ?playbook.PlayBook
	reset      bool
}
|
||||
|
||||
// Processes 'deployer.configure' actions from a heroscript/playbook and
// persists each resulting configuration via set().
// NOTE(review): args.reset is currently unused here — confirm intended.
pub fn play(args_ PlayArgs) ! {
	mut args := args_

	mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }

	mut install_actions := plbook.find(filter: 'deployer.configure')!
	if install_actions.len > 0 {
		for install_action in install_actions {
			heroscript := install_action.heroscript()
			mut obj2 := heroscript_loads(heroscript)!
			set(obj2)!
		}
	}
}
|
||||
|
||||
// switch instance to be used for deployer
|
||||
// switch instance to be used for deployer
pub fn switch(name string) {
	tfgrid3deployer_default = name
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
77
lib/threefold/grid3/deployer/tfgrid3deployer_model.v
Normal file
77
lib/threefold/grid3/deployer/tfgrid3deployer_model.v
Normal file
@@ -0,0 +1,77 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
import os
|
||||
|
||||
pub const version = '1.0.0'
const singleton = false // multiple named deployer instances are allowed
const default = true // a 'default' instance is created automatically
|
||||
|
||||
// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
|
||||
|
||||
|
||||
// Network selects which TFGrid chain/backend the deployer talks to.
pub enum Network {
	dev
	main
	test
	qa
}
|
||||
|
||||
// TFGridDeployer is the configuration for one deployer instance: the ssh
// key injected into deployed VMs, the TFChain mnemonic and the network.
@[heap]
pub struct TFGridDeployer {
pub mut:
	name     string = 'default'
	ssh_key  string // injected as SSH_KEY env var into deployed VMs
	mnemonic string // TFChain account mnemonic used for signing
	network  Network
}
|
||||
|
||||
|
||||
// your checking & initialization code if needed
|
||||
// Validates and normalizes a TFGridDeployer config. Environment variables
// (SSH_KEY, TFGRID_MNEMONIC, TFGRID_NETWORK) override stored values when
// set; ssh_key and mnemonic must be non-empty after resolution.
//
// Fix: TFGRID_NETWORK previously defaulted to 'main' when unset, which
// silently clobbered whatever network was configured. The stored network
// is now only overridden when the env var is actually present, matching
// how SSH_KEY and TFGRID_MNEMONIC are handled.
fn obj_init(mycfg_ TFGridDeployer) !TFGridDeployer {
	mut mycfg := mycfg_
	ssh_key := os.getenv_opt('SSH_KEY') or { '' }
	if ssh_key.len > 0 {
		mycfg.ssh_key = ssh_key
	}
	mnemonic := os.getenv_opt('TFGRID_MNEMONIC') or { '' }
	if mnemonic.len > 0 {
		mycfg.mnemonic = mnemonic
	}
	network := os.getenv_opt('TFGRID_NETWORK') or { '' }
	if network.len > 0 {
		match network {
			'main' {
				mycfg.network = .main
			}
			'dev' {
				mycfg.network = .dev
			}
			'test' {
				mycfg.network = .test
			}
			'qa' {
				mycfg.network = .qa
			}
			else {
				return error("can't find network with type; ${network}")
			}
		}
	}
	if mycfg.ssh_key.len == 0 {
		return error('ssh_key cannot be empty')
	}
	if mycfg.mnemonic.len == 0 {
		return error('mnemonic cannot be empty')
	}
	return mycfg
}
|
||||
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
// Serializes the configuration to heroscript for persistence.
pub fn heroscript_dumps(obj TFGridDeployer) !string {
	return encoderhero.encode[TFGridDeployer](obj)!
}
|
||||
|
||||
// Parses a heroscript string back into a TFGridDeployer configuration.
pub fn heroscript_loads(heroscript string) !TFGridDeployer {
	return encoderhero.decode[TFGridDeployer](heroscript)!
}
|
||||
79
lib/threefold/grid3/deployer/utils.v
Normal file
79
lib/threefold/grid3/deployer/utils.v
Normal file
@@ -0,0 +1,79 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.threefold.gridproxy
|
||||
import freeflowuniverse.herolib.threefold.grid
|
||||
import freeflowuniverse.herolib.threefold.grid.models as grid_models
|
||||
import freeflowuniverse.herolib.threefold.gridproxy.model as gridproxy_models
|
||||
import rand
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
// Resolves the correct grid network based on the `cn.network` value.
|
||||
//
|
||||
// This utility function converts the custom network type of GridContracts
|
||||
// to the appropriate value in `gridproxy.TFGridNet`.
|
||||
//
|
||||
// Returns:
|
||||
// - A `gridproxy.TFGridNet` value corresponding to the grid network.
|
||||
// Maps the configured deployer Network to the gridproxy network enum so
// proxy queries hit the matching environment.
fn resolve_network() !gridproxy.TFGridNet {
	mut cfg := get()!
	return match cfg.network {
		.dev { gridproxy.TFGridNet.dev }
		.test { gridproxy.TFGridNet.test }
		.main { gridproxy.TFGridNet.main }
		.qa { gridproxy.TFGridNet.qa }
	}
}
|
||||
|
||||
// Builds the wireguard routing ip for a node subnet: 100.64.x.y/32 where
// x and y are the 2nd and 3rd octets of the node's subnet address.
// NOTE(review): assumes `ip` contains at least three dot-separated octets —
// a malformed address would panic on the index access; confirm callers
// always pass a valid subnet.
fn wireguard_routing_ip(ip string) string {
	octets := ip.split('.')
	return '100.64.${octets[1]}.${octets[2]}/32'
}
|
||||
|
||||
// Creates a new mycelium config with a randomly generated hex key and no
// static peers. The receiver is not read; it only anchors the method.
pub fn (mut deployer TFGridDeployer) mycelium_address_create() grid_models.Mycelium {
	key := rand.string(32).bytes().hex()
	return grid_models.Mycelium{
		hex_key: key
		peers: []
	}
}
|
||||
|
||||
// NOTE(review): despite the name, this multiplies by 1024^3 and therefore
// converts a *gigabyte count into bytes*, not bytes into gigabytes — the
// behavior is kept as-is since callers depend on it; confirm intent before
// renaming.
fn convert_to_gigabytes(bytes u64) u64 {
	gib := u64(1024) * 1024 * 1024
	return bytes * gib
}
|
||||
|
||||
// Picks a reachable node out of `nodes` by pinging candidates in a random
// order until one answers. Errors when every node was tried and none replied.
fn pick_node(mut deployer grid.Deployer, nodes []gridproxy_models.Node) !gridproxy_models.Node {
	mut visited := []bool{len: nodes.len}
	mut visited_count := 0
	for visited_count < nodes.len {
		candidate := int(rand.u32() % u32(nodes.len))
		if visited[candidate] {
			// this index was already tried; draw again
			continue
		}

		visited[candidate] = true
		visited_count += 1
		if ping_node(mut deployer, u32(nodes[candidate].twin_id)) {
			return nodes[candidate]
		}
	}

	return error('No node is reachable.')
}
|
||||
|
||||
// Returns true when the node behind `twin_id` answers a zos version query
// over rmb; logs and returns false otherwise.
fn ping_node(mut deployer grid.Deployer, twin_id u32) bool {
	if _ := deployer.client.get_zos_version(twin_id) {
		return true
	}
	console.print_stderr('Failed to ping node with twin: ${twin_id}')
	return false
}
|
||||
169
lib/threefold/grid3/deployer/vmachine.v
Normal file
169
lib/threefold/grid3/deployer/vmachine.v
Normal file
@@ -0,0 +1,169 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import json
|
||||
import os
|
||||
import rand
|
||||
|
||||
// Optional mycelium configuration for a VM workload.
@[params]
pub struct Mycelium {
	// 12-character random hex seed used for the mycelium network key
	hex_seed string = rand.hex(12)
}
|
||||
|
||||
// VMRequirements describes the desired virtual machine (deployment inputs).
@[params]
pub struct VMRequirements {
pub mut:
	name        string
	description string
	cpu         int // vcores
	size        u64
	memory      int // gbyte
	public_ip4  bool
	public_ip6  bool
	planetary   bool
	mycelium    ?Mycelium
	flist       string = 'https://hub.grid.tf/tf-official-vms/ubuntu-24.04-latest.flist'
	entrypoint  string = '/sbin/zinit init'
	env         map[string]string
	// if set, a node will be chosen from this list to deploy on
	nodes []u32
	// will deploy on one of hetzner nodes
	use_hetzner_node bool
}
|
||||
|
||||
// VMachine represents a deployed machine and its associated details.
pub struct VMachine {
pub mut:
	tfchain_id   string
	contract_id  u64
	requirements VMRequirements
	node_id      u32
	planetary_ip string
	mycelium_ip  string
	public_ip4   string
	wireguard_ip string
	public_ip6   string
}
|
||||
|
||||
// Serializes a VMachine to bytes.
// For now we just use JSON as the wire format; a compact binary encoding
// can replace it later without changing callers.
fn (self VMachine) encode() ![]u8 {
	return json.encode(self).bytes()
}
|
||||
|
||||
// Deserializes bytes produced by VMachine.encode back into a VMachine.
fn decode_vmachine(data []u8) !VMachine {
	return json.decode(VMachine, data.bytestr())
}
|
||||
|
||||
// Calls zos to check that the node hosting this VM is reachable, by
// resolving its twin id and requesting its zos version.
// Returns false when the version query fails; errors only when the twin
// lookup itself fails.
fn (self VMachine) check_node_up() !bool {
	console.print_header('Pinging node: ${self.node_id}')
	mut deployer := get_deployer()!
	node_twin_id := deployer.client.get_node_twin(self.node_id) or {
		// typo fix: 'faild' -> 'failed'
		return error('failed to get the node twin ID due to: ${err}')
	}
	deployer.client.get_zos_version(node_twin_id) or { return false }
	console.print_header('Node ${self.node_id} is reachable.')
	return true
}
|
||||
|
||||
// Sends a single ICMP echo with a 2 second timeout; true when the host
// replied (ping exited with status 0).
fn ping(ip string) bool {
	return os.execute('ping -c 1 -W 2 ${ip}').exit_code == 0
}
|
||||
|
||||
// Pings the VM on its first configured interface, in priority order:
// public IPv4, public IPv6, planetary, mycelium. Returns the ping result of
// that single interface, or false when no interface is configured.
fn (self VMachine) check_vm_up() bool {
	candidates := [
		['public IPv4', self.public_ip4],
		['public IPv6', self.public_ip6],
		['planetary IP', self.planetary_ip],
		['mycelium IP', self.mycelium_ip],
	]
	for candidate in candidates {
		label := candidate[0]
		addr := candidate[1]
		if addr == '' {
			continue
		}
		console.print_header('Pinging ${label}: ${addr}')
		pingable := ping(addr)
		if !pingable {
			console.print_stderr("The ${label} isn't pingable.")
		}
		// only the first configured interface is checked, matching the
		// original early-return behavior
		return pingable
	}
	return false
}
|
||||
|
||||
// Checks that the VM answers on one of its network interfaces; when it does
// not, falls back to pinging its host node so the caller can tell a dead VM
// apart from a dead node. Returns true only when the VM itself answers.
pub fn (self VMachine) healthcheck() !bool {
	console.print_header('Doing a healthcheck on machine ${self.requirements.name}')

	// BUG FIX: this previously called check_node_up() twice; the first
	// check must target the VM's own interfaces.
	is_vm_up := self.check_vm_up()
	if !is_vm_up {
		console.print_stderr("The VM isn't reachable, pinging node ${self.node_id}")
		is_node_up := self.check_node_up()!
		if !is_node_up {
			console.print_stderr("The VM node isn't reachable.")
			return false
		}
		// node is up but the VM is not
		return false
	}

	console.print_header('The VM is up and reachable.')
	return true
}
|
||||
|
||||
// RecoverArgs controls how a machine is recovered.
pub struct RecoverArgs {
pub mut:
	reinstall bool // reinstall if needed and run heroscript
}
|
||||
|
||||
// Recovers the machine (reinstalling when args.reinstall is set).
// TODO: not implemented yet.
fn (self VMachine) recover(args RecoverArgs) ! {
}
|
||||
|
||||
// DeployArgs controls how a machine is (re)deployed.
pub struct DeployArgs {
pub mut:
	reset bool // careful will delete existing machine if true
}
|
||||
|
||||
// Deploys the machine. TODO: not implemented yet.
fn (self VMachine) deploy(args DeployArgs) ! {
	// check the machine is there, if yes and reset used then delete the machine before deploying a new one
}
|
||||
29
lib/threefold/grid3/deployer/webnames.v
Normal file
29
lib/threefold/grid3/deployer/webnames.v
Normal file
@@ -0,0 +1,29 @@
|
||||
module deployer
|
||||
|
||||
import json
|
||||
|
||||
// WebNameRequirements describes a gateway name workload to deploy.
@[params]
pub struct WebNameRequirements {
pub mut:
	name                  string @[required]
	node_id               ?u32 // when unset, a node is chosen by the deployer
	use_wireguard_network bool
	use_hetzner_node      bool
	// must be in the format ip:port if tls_passthrough is set, otherwise the format should be http://ip[:port]
	backend         string @[required]
	tls_passthrough bool
}
|
||||
|
||||
// WebName is a deployed gateway name workload and its contract ids.
pub struct WebName {
pub mut:
	fqdn             string
	name_contract_id u64
	node_contract_id u64
	requirements     WebNameRequirements
	node_id          u32
}
|
||||
|
||||
// Serializes a WebName to bytes (JSON wire format).
fn (self WebName) encode() ![]u8 {
	payload := json.encode(self)
	return payload.bytes()
}
|
||||
33
lib/threefold/grid3/deployer/zdbs.v
Normal file
33
lib/threefold/grid3/deployer/zdbs.v
Normal file
@@ -0,0 +1,33 @@
|
||||
module deployer
|
||||
|
||||
import freeflowuniverse.herolib.threefold.grid.models as grid_models
|
||||
// import freeflowuniverse.herolib.ui.console
|
||||
import json
|
||||
|
||||
// ZDBRequirements describes a zdb (zero-db) workload to deploy.
@[params]
pub struct ZDBRequirements {
pub mut:
	name        string @[required]
	password    string @[required]
	size        int    @[required]
	node_id     ?u32 // when unset, a node is chosen by the deployer
	description string
	mode        grid_models.ZdbMode = 'user'
	public      bool
	use_hetzner_node bool
}
|
||||
|
||||
// ZDB is a deployed zdb workload and its access details.
pub struct ZDB {
pub mut:
	ips          []string
	port         u32
	namespace    string
	contract_id  u64
	requirements ZDBRequirements
	node_id      u32
}
|
||||
|
||||
// Serializes a ZDB to bytes (JSON wire format).
fn (self ZDB) encode() ![]u8 {
	payload := json.encode(self)
	return payload.bytes()
}
|
||||
7
lib/threefold/grid3/deployer2_sort/README.md
Normal file
7
lib/threefold/grid3/deployer2_sort/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# TFGrid Deployments
|
||||
|
||||
|
||||
Create workloads in the native low-level format, then use the griddriver Go binary to post them to TFChain as well as send them to ZOS.
|
||||
|
||||
|
||||
//TODO: not sure how to use this one
|
||||
319
lib/threefold/grid3/deployer2_sort/deployer.v
Normal file
319
lib/threefold/grid3/deployer2_sort/deployer.v
Normal file
@@ -0,0 +1,319 @@
|
||||
module deployer2
|
||||
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import log
|
||||
import freeflowuniverse.herolib.threefold.grid.models
|
||||
import freeflowuniverse.herolib.threefold.griddriver
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
// Deployer wraps a griddriver client plus the chain/relay endpoints for one
// chain network, and is the entry point for creating, updating and deleting
// grid deployments.
@[heap]
pub struct Deployer {
pub:
	mnemonics     string
	substrate_url string
	twin_id       u32
	relay_url     string
	chain_network ChainNetwork
	env           string
pub mut:
	client griddriver.Client
	logger log.Log
}
|
||||
|
||||
// ChainNetwork selects which TFChain environment to talk to.
pub enum ChainNetwork {
	dev
	qa
	test
	main
}
|
||||
|
||||
// Substrate (TFChain) websocket endpoint per chain network.
const substrate_url = {
	ChainNetwork.dev:  'wss://tfchain.dev.grid.tf/ws'
	ChainNetwork.qa:   'wss://tfchain.qa.grid.tf/ws'
	ChainNetwork.test: 'wss://tfchain.test.grid.tf/ws'
	ChainNetwork.main: 'wss://tfchain.grid.tf/ws'
}

// Short environment name per chain network.
const envs = {
	ChainNetwork.dev:  'dev'
	ChainNetwork.qa:   'qa'
	ChainNetwork.test: 'test'
	ChainNetwork.main: 'main'
}

// RMB relay websocket endpoint per chain network.
const relay_url = {
	ChainNetwork.dev:  'wss://relay.dev.grid.tf'
	ChainNetwork.qa:   'wss://relay.qa.grid.tf'
	ChainNetwork.test: 'wss://relay.test.grid.tf'
	ChainNetwork.main: 'wss://relay.grid.tf'
}
|
||||
|
||||
// Reads the user's mnemonic from the TFGRID_MNEMONIC environment variable;
// errors with a hint when it is not set.
pub fn get_mnemonics() !string {
	mnemonics := os.getenv('TFGRID_MNEMONIC')
	if mnemonics != '' {
		return mnemonics
	}
	return error('failed to get mnemonics, run `export TFGRID_MNEMONIC=....`')
}
|
||||
|
||||
// Builds a Deployer for the given chain network: resolves the substrate and
// relay endpoints, creates the griddriver client and looks up the caller's
// twin id on chain.
pub fn new_deployer(mnemonics string, chain_network ChainNetwork) !Deployer {
	// resolve the endpoints once instead of repeating the map lookups
	substrate := substrate_url[chain_network]
	relay := relay_url[chain_network]

	mut logger := &log.Log{}
	logger.set_level(.debug)

	mut client := griddriver.Client{
		mnemonic: mnemonics
		substrate: substrate
		relay: relay
	}
	twin_id := client.get_user_twin() or { return error('failed to get twin ${err}') }

	return Deployer{
		mnemonics: mnemonics
		substrate_url: substrate
		twin_id: twin_id
		chain_network: chain_network
		relay_url: relay
		env: envs[chain_network]
		logger: logger
		client: client
	}
}
|
||||
|
||||
// Signs the deployment, pushes it to the node over rmb and waits until every
// workload reports a state. A fresh deployment expects all workloads at
// version 0.
fn (mut d Deployer) handle_deploy(node_id u32, mut dl models.Deployment, hash_hex string) ! {
	signature := d.client.sign_deployment(hash_hex)!
	dl.add_signature(d.twin_id, signature)
	payload := dl.json_encode()

	node_twin_id := d.client.get_node_twin(node_id)!
	d.rmb_deployment_deploy(node_twin_id, payload)!

	// every workload of a fresh deployment starts at version 0
	mut versions := map[string]u32{}
	for wl in dl.workloads {
		versions[wl.name] = 0
	}
	d.wait_deployment(node_id, mut dl, versions)!
}
|
||||
|
||||
// Updates an on-chain deployment in place:
// 1. fetch the currently deployed version from the node
// 2. bump the deployment/workload versions of changed workloads
// 3. update the node contract with the new hash
// 4. push the updated deployment to the node and wait for it
pub fn (mut d Deployer) update_deployment(node_id u32, mut dl models.Deployment, body string) ! {
	old_dl := d.get_deployment(dl.contract_id, node_id)!
	// NOTE(review): is_deployment_up_to_date returns true when the
	// deployment HAS changes (its name is inverted), so `!...` here means
	// "nothing changed" — see the note on that helper.
	if !is_deployment_up_to_date(old_dl, dl) {
		console.print_header('deployment with contract id ${dl.contract_id} is already up-to-date')
		return
	}

	new_versions := d.update_versions(old_dl, mut dl)

	// hash must be computed after the version bump so it matches what is
	// sent to the node
	hash_hex := dl.challenge_hash().hex()
	signature := d.client.sign_deployment(hash_hex)!
	dl.add_signature(d.twin_id, signature)
	payload := dl.json_encode()

	d.client.update_node_contract(dl.contract_id, body, hash_hex)!

	node_twin_id := d.client.get_node_twin(node_id)!
	d.rmb_deployment_update(node_twin_id, payload)!
	d.wait_deployment(node_id, mut dl, new_versions)!
}
|
||||
|
||||
// update_versions increments the deployment version
// and updates the updated workloads versions.
// Returns the expected version per workload name, used by wait_deployment.
fn (mut d Deployer) update_versions(old_dl models.Deployment, mut new_dl models.Deployment) map[string]u32 {
	// index the previously deployed workloads by name: hash and version
	mut old_hashes := map[string]string{}
	mut old_versions := map[string]u32{}
	mut new_versions := map[string]u32{}

	for wl in old_dl.workloads {
		hash := wl.challenge_hash().hex()
		old_hashes[wl.name] = hash
		old_versions[wl.name] = wl.version
	}

	// the deployment as a whole always gets a new version
	new_dl.version = old_dl.version + 1

	for mut wl in new_dl.workloads {
		hash := wl.challenge_hash().hex()

		// changed (or new) workloads move to the new deployment version,
		// unchanged ones keep the version they were deployed with
		if old_hashes[wl.name] != hash {
			wl.version = new_dl.version
		} else {
			wl.version = old_versions[wl.name]
		}

		new_versions[wl.name] = wl.version
	}

	return new_versions
}
|
||||
|
||||
// same_workloads checks if both deployments have the same workload name set,
// even if updated. This has to be done since a workload name is not included
// in a deployment's hash, so a user could just replace a workload's name and
// still get the same deployment hash but a totally different workload —
// a workload is identified by its name.
fn same_workloads(dl1 models.Deployment, dl2 models.Deployment) bool {
	if dl1.workloads.len != dl2.workloads.len {
		return false
	}

	mut names := map[string]bool{}
	for wl in dl1.workloads {
		names[wl.name] = true
	}

	// same length + every dl2 name known in dl1 => same name set
	return dl2.workloads.all(names[it.name])
}
|
||||
|
||||
// is_deployment_up_to_date checks if new_dl is different from old_dl.
// NOTE(review): despite its name this returns TRUE when the deployments
// DIFFER (hash mismatch or different workload name set) — i.e. "needs
// update". update_deployment relies on this inverted meaning; a rename to
// something like `deployment_changed` would be clearer but must be done in
// both places together.
fn is_deployment_up_to_date(old_dl models.Deployment, new_dl models.Deployment) bool {
	old_hash := old_dl.challenge_hash().hex()
	new_hash := new_dl.challenge_hash().hex()
	if old_hash != new_hash {
		return true
	}

	// identical hashes can still hide a renamed workload (names are not hashed)
	return !same_workloads(old_dl, new_dl)
}
|
||||
|
||||
// Creates a node contract for the deployment, pushes it to the node and
// waits for it to become ready. On node-side failure the freshly created
// contract is cancelled so no orphan contract is left behind.
// Returns the contract id.
pub fn (mut d Deployer) deploy(node_id u32, mut dl models.Deployment, body string, solution_provider u64) !u64 {
	public_ips := dl.count_public_ips()
	hash_hex := dl.challenge_hash().hex()
	contract_id := d.client.create_node_contract(node_id, body, hash_hex, public_ips,
		solution_provider)!
	d.logger.info('ContractID: ${contract_id}')
	dl.contract_id = contract_id

	// roll the contract back if the node-side deploy fails
	d.handle_deploy(node_id, mut dl, hash_hex) or {
		d.logger.info('Rolling back...')
		d.logger.info('deleting contract id: ${contract_id}')
		d.client.cancel_contract(contract_id)!
		return err
	}
	return dl.contract_id
}
|
||||
|
||||
// Polls the node until every workload of the deployment reaches the `ok`
// state at its expected version. Fails immediately when a workload reports
// an error, or when no progress is made for 5 minutes — the timer restarts
// whenever an additional workload becomes ready.
pub fn (mut d Deployer) wait_deployment(node_id u32, mut dl models.Deployment, workload_versions map[string]u32) ! {
	mut start := time.now()
	num_workloads := dl.workloads.len
	contract_id := dl.contract_id
	mut last_state_ok := 0
	for {
		mut cur_state_ok := 0
		mut new_workloads := []models.Workload{}
		changes := d.deployment_changes(node_id, contract_id)!
		for wl in changes {
			if version := workload_versions[wl.name] {
				if wl.version == version && wl.result.state == models.result_states.ok {
					cur_state_ok += 1
					new_workloads << wl
				} else if wl.version == version && wl.result.state == models.result_states.error {
					return error('failed to deploy deployment due error: ${wl.result.message}')
				}
			}
		}

		// progress was made: reset the stall timer
		if cur_state_ok > last_state_ok {
			last_state_ok = cur_state_ok
			start = time.now()
		}

		if cur_state_ok == num_workloads {
			dl.workloads = new_workloads
			return
		}

		if (time.now() - start).minutes() > 5 {
			// typo fix: 'wating' -> 'waiting'
			return error('failed to deploy deployment: contractID: ${contract_id}, some workloads are not ready after waiting 5 minutes')
		} else {
			d.logger.info('Waiting for deployment with contract ${contract_id} to become ready')
			time.sleep(500 * time.millisecond)
		}
	}
}
|
||||
|
||||
// Fetches the deployment identified by `contract_id` from the node over rmb.
pub fn (mut d Deployer) get_deployment(contract_id u64, node_id u32) !models.Deployment {
	twin_id := d.client.get_node_twin(node_id)!
	request := json.encode({
		'contract_id': contract_id
	})
	res := d.rmb_deployment_get(twin_id, request) or {
		return error('failed to get deployment with contract id ${contract_id} due to: ${err}')
	}
	return json.decode(models.Deployment, res)
}
|
||||
|
||||
// Asks the node to delete the deployment identified by `contract_id`;
// returns the deployment as reported by the node.
pub fn (mut d Deployer) delete_deployment(contract_id u64, node_id u32) !models.Deployment {
	twin_id := d.client.get_node_twin(node_id)!
	request := json.encode({
		'contract_id': contract_id
	})
	res := d.rmb_deployment_delete(twin_id, request)!
	return json.decode(models.Deployment, res)
}
|
||||
|
||||
// Lists the workload changes the node reports for a contract.
pub fn (mut d Deployer) deployment_changes(node_id u32, contract_id u64) ![]models.Workload {
	twin_id := d.client.get_node_twin(node_id)!
	raw := d.rmb_deployment_changes(twin_id, contract_id)!
	return json.decode([]models.Workload, raw)
}
|
||||
|
||||
// Creates all contracts (name + node) for a batch of deployments in one
// substrate call, then pushes each node deployment concurrently. On any
// node-side failure every contract created by this batch is cancelled.
// Returns the name contracts (name -> contract id) and the node deployments
// with their contract ids filled in.
pub fn (mut d Deployer) batch_deploy(name_contracts []string, mut dls map[u32]&models.Deployment, solution_provider ?u64) !(map[string]u64, map[u32]&models.Deployment) {
	mut batch_create_contract_data := []griddriver.BatchCreateContractData{}
	for name_contract in name_contracts {
		batch_create_contract_data << griddriver.BatchCreateContractData{
			name: name_contract
		}
	}

	// remember each deployment's hash so it can be signed after contract creation
	mut hash_map := map[u32]string{}
	for node, dl in dls {
		public_ips := dl.count_public_ips()
		hash_hex := dl.challenge_hash().hex()
		hash_map[node] = hash_hex
		batch_create_contract_data << griddriver.BatchCreateContractData{
			node: node
			body: dl.metadata
			hash: hash_hex
			public_ips: public_ips
			solution_provider_id: solution_provider
		}
	}

	contract_ids := d.client.batch_create_contracts(batch_create_contract_data)!
	mut name_contracts_map := map[string]u64{}
	mut threads := []thread !{}
	for idx, data in batch_create_contract_data {
		contract_id := contract_ids[idx]
		if data.name != '' {
			// name contract: nothing to push to a node
			name_contracts_map[data.name] = contract_id
			continue
		}

		mut dl := dls[data.node] or { return error('Node ${data.node} not found in dls map') }
		dl.contract_id = contract_id
		threads << spawn d.handle_deploy(data.node, mut dl, hash_map[data.node])
	}

	for th in threads {
		th.wait() or {
			// typo fixes in the rollback messages ('cancling the depolyed',
			// 'Faild ... dut to')
			console.print_stderr('Rolling back: canceling the deployed contracts: ${contract_ids} due to ${err}')
			d.client.batch_cancel_contracts(contract_ids) or {
				return error('Failed to cancel contracts due to: ${err}')
			}
			return error('Deployment failed: ${err}')
		}
	}

	return name_contracts_map, dls
}
|
||||
35
lib/threefold/grid3/deployer2_sort/deployment_state.v
Normal file
35
lib/threefold/grid3/deployer2_sort/deployment_state.v
Normal file
@@ -0,0 +1,35 @@
|
||||
module deployer2
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
// DeploymentStateDB persists deployment state in redis.
struct DeploymentStateDB {
	redis  redisclient.Redis
	secret string // to encrypt symmetric
}
|
||||
|
||||
// DeploymentState is the stored state of one named deployment: the VMs and
// zdbs it produced.
struct DeploymentState {
	name string
	vms  []VMDeployed
	zdbs []ZDBDeployed
}
|
||||
|
||||
// Stores a key/value pair under a deployment name.
// TODO: not implemented yet.
pub fn (db DeploymentStateDB) set(deployment_name string, key string, val string) ! {
	// store e.g. \n separated list of all keys per deployment_name
	// encrypt
}
|
||||
|
||||
// pub fn (db DeploymentStateDB) get(deployment_name string, key string)!string {
|
||||
|
||||
// }
|
||||
|
||||
// pub fn (db DeploymentStateDB) delete(deployment_name string, key string)! {
|
||||
|
||||
// }
|
||||
|
||||
// pub fn (db DeploymentStateDB) keys(deployment_name string)![]string {
|
||||
|
||||
// }
|
||||
|
||||
// pub fn (db DeploymentStateDB) load(deployment_name string)!DeploymentState {
|
||||
|
||||
// }
|
||||
69
lib/threefold/grid3/deployer2_sort/factory.v
Normal file
69
lib/threefold/grid3/deployer2_sort/factory.v
Normal file
@@ -0,0 +1,69 @@
|
||||
module deployer2
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
// pub struct TFGridClient[T] {
|
||||
// base.BaseConfig[T]
|
||||
// }
|
||||
|
||||
// @[params]
|
||||
// pub struct Config {
|
||||
// pub mut:
|
||||
// mnemonics string @[secret]
|
||||
// network string
|
||||
// }
|
||||
|
||||
// pub fn get(instance string, cfg Config) !TFGridClient[Config] {
|
||||
// mut self := TFGridClient[Config]{}
|
||||
// if cfg.mnemonics.len > 0 {
|
||||
// // first the type of the instance, then name of instance, then action
|
||||
// self.init('tfgridclient', instance, .set, cfg)!
|
||||
// } else {
|
||||
// self.init('tfgridclient', instance, .get)!
|
||||
// }
|
||||
// return self
|
||||
// }
|
||||
|
||||
// pub fn heroplay(mut plbook playbook.PlayBook) ! {
|
||||
// for mut action in plbook.find(filter: 'tfgridclient.define')! {
|
||||
// mut p := action.params
|
||||
// instance := p.get_default('instance', 'default')!
|
||||
// mut cl := get(instance)!
|
||||
// mut cfg := cl.config_get()!
|
||||
// cfg.mnemonics = p.get('mnemonics')!
|
||||
// cfg.network = p.get('network')!
|
||||
// cl.config_save()!
|
||||
// }
|
||||
// }
|
||||
|
||||
// pub fn (mut self TFGridClient[Config]) config_interactive() ! {
|
||||
// mut myui := ui.new()!
|
||||
// // console.clear()
|
||||
// console.print_debug('\n## Configure TFGrid client')
|
||||
// console.print_debug('==========================')
|
||||
// console.print_debug('## Instance: ${self.instance}')
|
||||
// console.print_debug('==========================\n\n')
|
||||
|
||||
// mut cfg := self.config()!
|
||||
|
||||
// // self.instance = myui.ask_question(
|
||||
// // question: 'name for configuration instance'
|
||||
// // default: self.instance
|
||||
// // )!
|
||||
|
||||
// cfg.mnemonics = myui.ask_question(
|
||||
// question: 'please enter your mnemonics here'
|
||||
// minlen: 24
|
||||
// default: cfg.mnemonics
|
||||
// )!
|
||||
|
||||
// cfg.network = myui.ask_dropdown(
|
||||
// question: 'choose environment'
|
||||
// items: envs.values()
|
||||
// )!
|
||||
|
||||
// self.config_save()!
|
||||
// }
|
||||
156
lib/threefold/grid3/deployer2_sort/graphql.v
Normal file
156
lib/threefold/grid3/deployer2_sort/graphql.v
Normal file
@@ -0,0 +1,156 @@
|
||||
module deployer2
|
||||
|
||||
import net.http
|
||||
import json
|
||||
import x.json2
|
||||
import log
|
||||
import freeflowuniverse.herolib.threefold.grid.models
|
||||
|
||||
// GraphQl is a minimal client for the tfgrid graphql endpoint at `url`.
pub struct GraphQl {
	url string
pub mut:
	logger log.Log
}
|
||||
|
||||
// Contract is one chain contract as returned by the graphql endpoint.
pub struct Contract {
pub:
	contract_id     string @[json: contractID]
	deployment_data string @[json: deploymentData]
	state           string
	node_id         u32 @[json: nodeID]
	name            string
}
|
||||
|
||||
// Contracts groups a twin's contracts by kind (name / node / rent).
pub struct Contracts {
pub mut:
	name_contracts []Contract @[json: nameContracts]
	node_contracts []Contract @[json: nodeContracts]
	rent_contracts []Contract @[json: rentContracts]
}
|
||||
|
||||
// contractsList, err := c.ListContractsByTwinID([]string{"Created, GracePeriod"})
// Lists all contracts (name / node / rent) owned by `twin_id` that are in
// one of the given `states`. The per-kind totals are fetched first so the
// main query can request everything in a single page.
pub fn (mut g GraphQl) list_twin_contracts(twin_id u32, states []string) !Contracts {
	state := '[${states.join(', ')}]'

	options := '(where: {twinID_eq: ${twin_id}, state_in: ${state}}, orderBy: twinID_ASC)'
	name_contracts_count := g.get_item_total_count('nameContracts', options)!
	node_contracts_count := g.get_item_total_count('nodeContracts', options)!
	rent_contracts_count := g.get_item_total_count('rentContracts', options)!
	contracts_data := g.query('query getContracts(\$nameContractsCount: Int!, \$nodeContractsCount: Int!, \$rentContractsCount: Int!){
            nameContracts(where: {twinID_eq: ${twin_id}, state_in: ${state}}, limit: \$nameContractsCount) {
              contractID
              state
              name
            }
            nodeContracts(where: {twinID_eq: ${twin_id}, state_in: ${state}}, limit: \$nodeContractsCount) {
              contractID
              deploymentData
              state
              nodeID
            }
            rentContracts(where: {twinID_eq: ${twin_id}, state_in: ${state}}, limit: \$rentContractsCount) {
              contractID
              state
              nodeID
            }
          }', // map[string]u32{}
		{
		'nodeContractsCount': node_contracts_count
		'nameContractsCount': name_contracts_count
		'rentContractsCount': rent_contracts_count
	})!

	return json.decode(Contracts, contracts_data.str())!
}
|
||||
|
||||
// GetItemTotalCount return count of items.
// Issues a `<item_name>Connection` query with `options` (a where/orderBy
// clause) and extracts `totalCount` from the response.
fn (g GraphQl) get_item_total_count(item_name string, options string) !u32 {
	count_body := 'query { items: ${item_name}Connection${options} { count: totalCount } }'
	request_body := {
		'query': count_body
	}
	json_body := json.encode(request_body)

	resp := http.post_json(g.url, json_body)!
	query_data := json2.raw_decode(resp.body)!
	query_map := query_data.as_map()

	// the endpoint reports failures inside the body, not via http status
	errors := query_map['errors'] or { '' }.str()
	if errors != '' {
		return error('graphQl query error: ${errors}')
	}

	data := query_map['data']! as map[string]json2.Any
	items := data['items']! as map[string]json2.Any
	count := u32(items['count']!.int())
	return count
}
|
||||
|
||||
// QueryRequest is the standard graphql request envelope.
struct QueryRequest {
	query     string
	variables map[string]u32
}
|
||||
|
||||
// Query queries graphql: posts `body` with `variables` and returns the
// decoded `data` object of the response.
fn (g GraphQl) query(body string, variables map[string]u32) !map[string]json2.Any {
	mut request_body := QueryRequest{
		query: body
		variables: variables
	}
	json_body := json.encode(request_body)
	resp := http.post_json(g.url, json_body)!

	query_data := json2.raw_decode(resp.body)!
	data_map := query_data.as_map()
	result := data_map['data']!.as_map()
	return result
}
|
||||
|
||||
// Returns the twin's active contracts (states Created/GracePeriod) that
// belong to `project_name`: node contracts whose deployment data carries the
// project name, plus the name contracts backing their gateway workloads.
pub fn (mut g GraphQl) get_contract_by_project_name(mut deployer Deployer, project_name string) !Contracts {
	mut contracts := Contracts{}

	g.logger.debug('Getting user twin')
	twin_id := deployer.client.get_user_twin()!
	g.logger.debug('Getting twin ${twin_id} contracts...')

	contract_list := g.list_twin_contracts(twin_id, ['Created', 'GracePeriod'])!

	g.logger.debug('filtering contract with project name: ${project_name}')
	for contract in contract_list.node_contracts {
		data := json.decode(models.DeploymentData, contract.deployment_data)!
		if data.project_name == project_name {
			contracts.node_contracts << contract
		}
	}
	g.logger.debug('filtering name contracts related to project name: ${project_name}')
	// name contracts have no deployment data; match them via the gateway
	// workloads found inside the project's node contracts
	gw_workload := name_gw_in_node_contract(mut deployer, contracts.node_contracts)!
	contracts.name_contracts << filter_name_contract(contract_list.name_contracts, gw_workload)!
	return contracts
}
|
||||
|
||||
// Collects all gateway-name workloads found inside the given node contracts'
// deployments.
fn name_gw_in_node_contract(mut deployer Deployer, node_contracts []Contract) ![]models.Workload {
	mut gw_workloads := []models.Workload{}
	for contract in node_contracts {
		dl := deployer.get_deployment(contract.contract_id.u64(), contract.node_id) or {
			return error("Couldn't get deployment workloads: ${err}")
		}
		gw_workloads << dl.workloads.filter(it.type_ == models.workload_types.gateway_name)
	}
	return gw_workloads
}
|
||||
|
||||
// Keeps the name contracts whose name matches a gateway workload.
// A contract is appended once per matching workload (names are assumed
// unique across workloads).
fn filter_name_contract(name_contract []Contract, gw_workload []models.Workload) ![]Contract {
	mut matched := []Contract{}
	for contract in name_contract {
		for workload in gw_workload {
			if workload.name == contract.name {
				matched << contract
			}
		}
	}
	return matched
}
|
||||
45
lib/threefold/grid3/deployer2_sort/rmb.v
Normal file
45
lib/threefold/grid3/deployer2_sort/rmb.v
Normal file
@@ -0,0 +1,45 @@
|
||||
module deployer2
|
||||
|
||||
import json
|
||||
import freeflowuniverse.herolib.threefold.grid.models
|
||||
|
||||
// TODO: decode/encode the params/result here
// Asks a node (addressed by its twin id) for the workload changes of a
// contract; returns the raw json response.
pub fn (mut d Deployer) rmb_deployment_changes(dst u32, contract_id u64) !string {
	payload := json.encode({
		'contract_id': contract_id
	})
	return d.client.rmb_call(dst, 'zos.deployment.changes', payload)!
}
|
||||
|
||||
// Fetches a deployment from a node (by twin id); `data` is the json-encoded
// request payload. Returns the raw json response.
pub fn (mut d Deployer) rmb_deployment_get(dst u32, data string) !string {
	res := d.client.rmb_call(dst, 'zos.deployment.get', data)!
	return res
}
|
||||
|
||||
// Pushes a new deployment to a node (by twin id); `data` is the
// json-encoded deployment.
pub fn (mut d Deployer) rmb_deployment_deploy(dst u32, data string) !string {
	return d.client.rmb_call(dst, 'zos.deployment.deploy', data)!
}
|
||||
|
||||
// Pushes an updated deployment to a node (by twin id); `data` is the
// json-encoded deployment.
pub fn (mut d Deployer) rmb_deployment_update(dst u32, data string) !string {
	return d.client.rmb_call(dst, 'zos.deployment.update', data)!
}
|
||||
|
||||
// Asks a node (by twin id) to delete a deployment; `data` is the
// json-encoded request payload.
pub fn (mut d Deployer) rmb_deployment_delete(dst u32, data string) !string {
	return d.client.rmb_call(dst, 'zos.deployment.delete', data)!
}
|
||||
|
||||
// Fetches a node's public network configuration over rmb.
pub fn (mut d Deployer) get_node_pub_config(node_id u32) !models.PublicConfig {
	node_twin := d.client.get_node_twin(node_id)!
	// the call takes no arguments; a json-encoded empty string is sent as payload
	data := json.encode('')
	res := d.client.rmb_call(node_twin, 'zos.network.public_config_get', data)!
	public_config := json.decode(models.PublicConfig, res)!
	return public_config
}
|
||||
|
||||
// Picks a free wireguard port on the node — one not in the node's list of
// already-taken ports.
pub fn (mut d Deployer) assign_wg_port(node_id u32) !u16 {
	node_twin := d.client.get_node_twin(node_id)!
	taken_ports := d.client.list_wg_ports(node_twin)!
	return models.rand_port(taken_ports) or {
		return error("can't assign wireguard port: ${err}")
	}
}
|
||||
97
lib/threefold/grid3/deployer2_sort/vm.v
Normal file
97
lib/threefold/grid3/deployer2_sort/vm.v
Normal file
@@ -0,0 +1,97 @@
|
||||
module deployer2
|
||||
|
||||
import json
|
||||
import log
|
||||
import freeflowuniverse.herolib.builder
|
||||
import freeflowuniverse.herolib.threefold.grid.models
|
||||
|
||||
// VMSpecs describes the virtual machine a caller wants deployed on the grid.
struct VMSpecs {
	deployment_name string // logical deployment this VM belongs to
	name            string // workload name of the VM
	nodeid          u32 // grid node to deploy on
	pub_sshkeys     []string // public SSH keys injected into the VM; vm_deploy requires at least one
	flist           string // if any, if used then ostype not used
	size            u32 // size of the rootfs disk in bytes
	// NOTE(review): u32 bytes caps the rootfs at ~4 GiB — confirm the unit
	compute_capacity models.ComputeCapacity // CPU/memory requirements forwarded to the Zmachine workload
	ostype           OSType // OS image selection, ignored when flist is set
}
|
||||
|
||||
// OSType enumerates the operating system images supported for VM deployment
// when no explicit flist is given in VMSpecs.
enum OSType {
	ubuntu_22_04
	ubuntu_24_04
	arch
	alpine
}
|
||||
|
||||
// VMDeployed is the result of a successful VM deployment.
struct VMDeployed {
	name         string // workload name of the deployed VM
	nodeid       u32 // node the VM runs on
	guid         string // unique id of the VM (currently set to the workload name by vm_deploy)
	yggdrasil_ip string // planetary network address; not yet filled in by vm_deploy
	mycelium_ip  string // mycelium overlay network address, used by builder_node()
}
|
||||
|
||||
// builder_node returns a builder.Node connected to this VM over its mycelium
// overlay address, so follow-up provisioning can be executed on it.
pub fn (vm VMDeployed) builder_node() !&builder.Node {
	mut node_factory := builder.new()!
	node := node_factory.node_new(ipaddr: vm.mycelium_ip)!
	return node
}
|
||||
|
||||
// vm_deploy deploys a single virtual machine described by `args_` on the grid
// and returns a VMDeployed describing the running machine.
// Only mycelium (and, later, yggdrasil) connectivity is configured.
// Returns an error when the specs are invalid, when the node rejects the
// deployment, or when the resulting workload carries no mycelium address.
fn (mut deployer Deployer) vm_deploy(args_ VMSpecs) !VMDeployed {
	mut args := args_

	if args.pub_sshkeys.len == 0 {
		return error('at least one ssh key needed to deploy vm')
	}
	// deploymentstate_db.set(args.deployment_name,"vm_${args.name}",json.encode(VMDeployed))!

	mut env_vars := {
		'SSH_KEY': args.pub_sshkeys[0]
	}
	// QUESTION: how to implement multiple ssh keys
	// NOTE(review): this loop re-adds the first key as SSH_KEY0; confirm zos
	// honours the numbered variables before changing the scheme.
	for i, key in args.pub_sshkeys[0..] {
		env_vars['SSH_KEY${i}'] = key
	}

	machine := models.Zmachine{
		flist:            args.flist
		size:             args.size
		compute_capacity: args.compute_capacity
		env:              env_vars
	}

	mut deployment := models.new_deployment(
		// twin_id:
		workloads: [machine.to_workload()]
		metadata:  models.DeploymentData{
			name: args.deployment_name
		}
	)

	contract_id := deployer.deploy(args.nodeid, mut deployment, args.name, 0)!
	deployed := deployer.get_deployment(contract_id, args.nodeid)!
	// a vm deployment must carry at least the zmachine workload; a node that
	// returns none is a recoverable failure, not a programming bug
	if deployed.workloads.len < 1 {
		return error('deployment ${contract_id} has no workloads, expected at least the vm')
	}
	vm_workload := deployed.workloads[0]
	zmachine := json.decode(models.Zmachine, vm_workload.data)!
	mycelium_ip := zmachine.network.mycelium or {
		return error('deployed vm ${vm_workload.name} has no mycelium ip')
	}
	vm_deployed := VMDeployed{
		name:   vm_workload.name
		nodeid: args.nodeid
		guid:   vm_workload.name
		// yggdrasil_ip: zmachine.network.
		mycelium_ip: '${mycelium_ip.network}${mycelium_ip.hex_seed}'
	}

	return vm_deployed
}
|
||||
22
lib/threefold/grid3/deployer2_sort/vm_test.v
Normal file
22
lib/threefold/grid3/deployer2_sort/vm_test.v
Normal file
@@ -0,0 +1,22 @@
|
||||
module deployer2
|
||||
|
||||
import freeflowuniverse.herolib.installers.threefold.griddriver
|
||||
import os
|
||||
|
||||
// testsuite_begin makes sure the external `griddriver` binary is installed
// before any test in this module runs.
fn testsuite_begin() ! {
	griddriver.install()!
}
|
||||
|
||||
// test_vm_deploy deploys a small test VM on mainnet node 24.
// Requires the TFGRID_MNEMONIC and SSH_KEY environment variables to be set;
// note this test creates a real contract on the selected network.
fn test_vm_deploy() ! {
	mnemonics := os.getenv('TFGRID_MNEMONIC')
	ssh_key := os.getenv('SSH_KEY')

	chain_network := ChainNetwork.main // Use your desired network
	mut deployer := new_deployer(mnemonics, chain_network)!
	deployer.vm_deploy(
		name:            'test_vm'
		deployment_name: 'test_deployment'
		nodeid:          24
		pub_sshkeys:     [ssh_key]
	)!
}
|
||||
90
lib/threefold/grid3/deployer2_sort/zdb.v
Normal file
90
lib/threefold/grid3/deployer2_sort/zdb.v
Normal file
@@ -0,0 +1,90 @@
|
||||
module deployer2
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
// ZDBSpecs describes a zero-db (0-db) instance to deploy.
struct ZDBSpecs {
	deployment_name string // logical deployment this zdb belongs to
	nodeid          string // target grid node — NOTE(review): sibling specs use u32 node ids, confirm string is intended
	namespace       string // zdb namespace to create
	secret          string // access secret for the namespace
}
|
||||
|
||||
// ZDBDeployed describes a running zero-db instance and how to reach it.
struct ZDBDeployed {
	nodeid       string // node the zdb runs on — NOTE(review): sibling types use u32 node ids, confirm
	namespace    string // namespace created on the zdb
	secret       string // access secret for the namespace
	yggdrasil_ip string // planetary network address
	mycelium_ip  string // mycelium overlay address, used by redisclient()
}
|
||||
|
||||
// //only connect to yggdrasil and mycelium
|
||||
// fn (mut deployer Deployer) vm_deploy(args_ VMSpecs) !VMDeployed {
|
||||
// mut args := args_
|
||||
|
||||
// if args.pub_sshkeys.len == 0 {
|
||||
// return error('at least one ssh key needed to deploy vm')
|
||||
// }
|
||||
// // deploymentstate_db.set(args.deployment_name,"vm_${args.name}",json.encode(VMDeployed))!
|
||||
|
||||
// vm := models.VM {
|
||||
// name: 'vm1'
|
||||
// env_vars: {
|
||||
// 'SSH_KEY': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTwULSsUubOq3VPWL6cdrDvexDmjfznGydFPyaNcn7gAL9lRxwFbCDPMj7MbhNSpxxHV2+/iJPQOTVJu4oc1N7bPP3gBCnF51rPrhTpGCt5pBbTzeyNweanhedkKDsCO2mIEh/92Od5Hg512dX4j7Zw6ipRWYSaepapfyoRnNSriW/s3DH/uewezVtL5EuypMdfNngV/u2KZYWoeiwhrY/yEUykQVUwDysW/xUJNP5o+KSTAvNSJatr3FbuCFuCjBSvageOLHePTeUwu6qjqe+Xs4piF1ByO/6cOJ8bt5Vcx0bAtI8/MPApplUU/JWevsPNApvnA/ntffI+u8DCwgP'
|
||||
// }
|
||||
// }
|
||||
|
||||
// mut env_vars := {'SSH_KEY': args.pub_sshkeys[0]}
|
||||
// // QUESTION: how to implement multiple ssh keys
|
||||
// for i, key in args.pub_sshkeys[0..] {
|
||||
// env_vars['SSH_KEY${i}'] = key
|
||||
// }
|
||||
|
||||
// machine := models.Zmachine{
|
||||
// flist: args.flist
|
||||
// size: args.size
|
||||
// compute_capacity: args.compute_capacity
|
||||
// env: env_vars
|
||||
// }
|
||||
|
||||
// mut deployment := models.new_deployment(
|
||||
// // twin_id:
|
||||
// workloads: [machine.to_workload()]
|
||||
// metadata: models.DeploymentData{
|
||||
// name: args.deployment_name
|
||||
// }
|
||||
// )
|
||||
|
||||
// contract_id := deployer.deploy(args.nodeid, mut deployment, '', 0)!
|
||||
// deployed := deployer.get_deployment(contract_id, args.nodeid)!
|
||||
// if deployed.workloads.len < 1 {
|
||||
// panic('deployment should have at least one workload for vm')
|
||||
// }
|
||||
// vm_workload := deployed.workloads[0]
|
||||
// zmachine := json.decode(models.Zmachine, vm_workload.data)!
|
||||
// mycelium_ip := zmachine.network.mycelium or {panic('deployed vm must have mycelium ip')}
|
||||
// vm_deployed := grid.VMDeployed{
|
||||
// name: vm_workload.name
|
||||
// nodeid: args.nodeid
|
||||
// guid: vm_workload.name
|
||||
// // yggdrasil_ip: zmachine.network.
|
||||
// mycelium_ip: '${mycelium_ip.network}${mycelium_ip.hex_seed}'
|
||||
// }
|
||||
|
||||
// return vm_deployed
|
||||
// }
|
||||
|
||||
// test zdb is answering
// ping is meant to verify that the deployed zdb instance responds.
// TODO: not implemented yet — calling this currently always panics.
pub fn (zdb ZDBDeployed) ping() bool {
	panic('implement')
}
|
||||
|
||||
// redisclient opens a RESP connection to the zdb over its mycelium address
// on the default zdb port 6379.
pub fn (zdb ZDBDeployed) redisclient() !&redisclient.Redis {
	return redisclient.new('${zdb.mycelium_ip}:6379')!
}
|
||||
|
||||
// //only connect to yggdrasil and mycelium
|
||||
// //
|
||||
// fn zdb_deploy(args_ ZDBSpecs) ZDBDeployed{
|
||||
|
||||
// }
|
||||
12
lib/threefold/grid3/griddriver/client.v
Normal file
12
lib/threefold/grid3/griddriver/client.v
Normal file
@@ -0,0 +1,12 @@
|
||||
module griddriver
|
||||
|
||||
// Client wraps the external `griddriver` CLI, which talks to TFChain
// (substrate) and the RMB relay on our behalf.
pub struct Client {
pub:
	mnemonic  string // secret mnemonic of the identity to operate with
	substrate string // substrate (TFChain) url
	relay     string // RMB relay url
mut:
	node_twin map[u32]u32 // cache: node id -> twin id, filled by get_node_twin
}
|
||||
|
||||
// TODO: add the rest of griddriver functionalities
|
||||
33
lib/threefold/grid3/griddriver/rmb.v
Normal file
33
lib/threefold/grid3/griddriver/rmb.v
Normal file
@@ -0,0 +1,33 @@
|
||||
module griddriver
|
||||
|
||||
import os
|
||||
import x.json2
|
||||
import json
|
||||
|
||||
// rmb_call sends `cmd` with `payload` to twin `dst` over RMB using the
// external griddriver binary, returning the raw response text.
// NOTE(review): payload and mnemonic are interpolated into a shell command
// inside single quotes — a value containing a single quote would break or
// escape the command; confirm inputs are trusted.
pub fn (mut c Client) rmb_call(dst u32, cmd string, payload string) !string {
	res := os.execute("griddriver rmb --cmd '${cmd}' --dst '${dst}' --payload '${payload}' --substrate '${c.substrate}' --mnemonics '${c.mnemonic}' --relay '${c.relay}'")
	if res.exit_code != 0 {
		return error(res.output)
	}
	return res.output
}
|
||||
|
||||
// Version holds the software versions reported by a zos node.
pub struct Version {
	zinit string // zinit (init system) version
	zos   string // zos daemon version
}
|
||||
|
||||
// get_zos_version queries the node behind twin `dst` for the zos and zinit
// versions it runs, via the `zos.system.version` RMB command.
pub fn (mut c Client) get_zos_version(dst u32) !Version {
	// the call takes no arguments; an encoded empty string is sent as payload
	data := json.encode('')
	res := c.rmb_call(dst, 'zos.system.version', data)!
	ver := json2.decode[Version](res)!
	return ver
}
|
||||
|
||||
// list_wg_ports returns the WireGuard ports already reserved on the node
// behind twin `dst`, as reported over RMB through the griddriver CLI.
pub fn (mut c Client) list_wg_ports(dst u32) ![]u16 {
	res := os.execute("griddriver rmb-taken-ports --dst ${dst} --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --relay \"${c.relay}\"")
	if res.exit_code != 0 {
		return error(res.output)
	}
	// griddriver prints the taken ports as a JSON array of integers
	return json.decode([]u16, res.output)!
}
|
||||
111
lib/threefold/grid3/griddriver/substrate.v
Normal file
111
lib/threefold/grid3/griddriver/substrate.v
Normal file
@@ -0,0 +1,111 @@
|
||||
module griddriver
|
||||
|
||||
import os
|
||||
import strconv
|
||||
import json
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
// get_node_twin resolves the twin id that belongs to grid node `node_id`.
// Results are cached in c.node_twin so the chain is only queried once per node.
pub fn (mut c Client) get_node_twin(node_id u64) !u32 {
	if u32(node_id) in c.node_twin {
		return c.node_twin[u32(node_id)]
	}

	res := os.execute("griddriver node-twin --substrate \"${c.substrate}\" --node_id ${node_id}")
	if res.exit_code != 0 {
		return error(res.output)
	}

	// griddriver prints the twin id as a decimal number
	// NOTE(review): output is not trimmed — presumably griddriver prints no
	// trailing newline; parse_uint would fail otherwise, confirm
	twin_id := u32(strconv.parse_uint(res.output, 10, 32)!)
	c.node_twin[u32(node_id)] = twin_id
	return twin_id
}
|
||||
|
||||
// get_user_twin returns the twin id registered on chain for the client's own
// mnemonic identity.
pub fn (mut c Client) get_user_twin() !u32 {
	result := os.execute("griddriver user-twin --mnemonics \"${c.mnemonic}\" --substrate \"${c.substrate}\"")
	if result.exit_code != 0 {
		return error(result.output)
	}
	twin := strconv.parse_uint(result.output, 10, 32)!
	return u32(twin)
}
|
||||
|
||||
// create_node_contract creates a node contract on chain for `node_id`.
// `hash` is the deployment hash, `body` the deployment data, `public_ips`
// the number of public ips to reserve, and `solution_provider` the solution
// provider id. Returns the id of the newly created contract.
pub fn (mut c Client) create_node_contract(node_id u32, body string, hash string, public_ips u32, solution_provider u64) !u64 {
	console.print_debug('url: ${c.substrate}')
	res := os.execute("griddriver new-node-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --node_id ${node_id} --hash \"${hash}\" --public_ips ${public_ips} --body \"${body}\" --solution_provider ${solution_provider}")
	if res.exit_code != 0 {
		return error(res.output)
	}

	// griddriver prints the contract id as a decimal number
	return strconv.parse_uint(res.output, 10, 64)!
}
|
||||
|
||||
// create_name_contract registers a name contract for `name` on chain and
// returns the resulting contract id.
pub fn (mut c Client) create_name_contract(name string) !u64 {
	result := os.execute("griddriver new-name-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --name ${name}")
	if result.exit_code != 0 {
		return error(result.output)
	}
	contract_id := strconv.parse_uint(result.output, 10, 64)!
	return contract_id
}
|
||||
|
||||
// update_node_contract updates an existing node contract's deployment `body`
// and `hash` on chain.
pub fn (mut c Client) update_node_contract(contract_id u64, body string, hash string) ! {
	res := os.execute("griddriver update-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contract_id ${contract_id} --body \"${body}\" --hash \"${hash}\"")
	if res.exit_code != 0 {
		return error(res.output)
	}
}
|
||||
|
||||
// cancel_contract cancels the chain contract identified by `contract_id`.
pub fn (mut c Client) cancel_contract(contract_id u64) ! {
	result := os.execute("griddriver cancel-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contract_id ${contract_id}")
	if result.exit_code != 0 {
		return error(result.output)
	}
}
|
||||
|
||||
// BatchCreateContractData is one entry in a batch_create_contracts call.
pub struct BatchCreateContractData {
pub mut:
	node                 u32 // node id for node contracts
	body                 string // deployment data; see batch_create_contracts for how it is transported
	hash                 string // deployment hash
	public_ips           u32 // number of public ips to reserve
	solution_provider_id ?u64 // optional solution provider
	// for name contracts. if set the contract is assumed to be a name contract and other fields are ignored
	name string
}
|
||||
|
||||
// Hamada appears to be a leftover debug/scratch struct; it is not referenced
// by any function visible in this file. TODO(review): confirm unused and remove.
struct Hamada {
	key []BatchCreateContractData
}
|
||||
|
||||
// batch_create_contracts creates multiple contracts on chain in a single
// griddriver invocation and returns the new contract ids.
//
// The deployment body is stripped out of every entry and passed once via
// --contracts-body; the last non-empty body in the list wins. This assumes
// all node contracts in the batch share the same body.
// NOTE(review): confirm this single-body convention against the griddriver CLI.
pub fn (mut c Client) batch_create_contracts(contracts_data_ []BatchCreateContractData) ![]u64 {
	mut contracts_data := contracts_data_.clone()
	mut body := ''

	for mut contract in contracts_data {
		if contract.body.len > 0 {
			body = contract.body
		}

		// the body travels separately; clear it so it is not duplicated
		// inside --contracts-data
		contract.body = ''
	}

	data := json.encode(contracts_data)
	res := os.execute("griddriver batch-create-contract --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contracts-data '${data}' --contracts-body \"${body}\"")

	if res.exit_code != 0 {
		return error(res.output)
	}

	// griddriver prints the created contract ids as a JSON array
	contract_ids := json.decode([]u64, res.output) or {
		return error('Cannot decode the result due to ${err}')
	}
	return contract_ids
}
|
||||
|
||||
// batch_cancel_contracts cancels every contract in `contract_ids` with a
// single griddriver invocation.
pub fn (mut c Client) batch_cancel_contracts(contract_ids []u64) ! {
	encoded := json.encode(contract_ids)
	result := os.execute("griddriver batch-cancel-contract --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contract-ids \"${encoded}\"")
	if result.exit_code != 0 {
		return error(result.output)
	}
}
|
||||
38
lib/threefold/grid3/griddriver/utils.v
Normal file
38
lib/threefold/grid3/griddriver/utils.v
Normal file
@@ -0,0 +1,38 @@
|
||||
module griddriver
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.threefold.grid.models
|
||||
|
||||
// sign_deployment signs the deployment `hash` with the client's identity and
// returns the signature exactly as printed by griddriver.
pub fn (mut c Client) sign_deployment(hash string) !string {
	res := os.execute("griddriver sign --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --hash \"${hash}\"")
	if res.exit_code != 0 {
		return error(res.output)
	}
	return res.output
}
|
||||
|
||||
// deploy_single_vm deploys one VM workload described by `vm` on node `node_id`
// through the griddriver CLI and returns the CLI output.
// `solution_type` tags the contract; `env` selects the target grid network.
pub fn (mut c Client) deploy_single_vm(node_id u32, solution_type string, vm models.VM, env string) !string {
	data := vm.json_encode()
	res := os.execute("griddriver deploy-single --mnemonics \"${c.mnemonic}\" --env ${env} --solution_type \"${solution_type}\" --node ${node_id} --data '${data}'")
	// bug fix: a failing griddriver invocation previously returned its error
	// text as a success value; surface it as an error like every sibling call
	if res.exit_code != 0 {
		return error(res.output)
	}
	return res.output
}
|
||||
|
||||
// generate_wg_priv_key generates a fresh WireGuard key pair via griddriver.
// Returns [private_key, public_key] (griddriver prints them space separated).
pub fn (mut c Client) generate_wg_priv_key() ![]string {
	res := os.execute('griddriver generate-wg-key')
	// bug fix: check the exit code so a CLI failure is reported directly
	// instead of only indirectly through the key.len check below
	if res.exit_code != 0 {
		return error('could not generate private key: ${res.output}')
	}
	key := res.output.split(' ')
	if key.len != 2 {
		return error('could not generate private key: ${res.output}')
	}
	return key
}
|
||||
|
||||
// generate_wg_public_key derives the WireGuard public key for the private
// `key` via griddriver and returns it.
// (bug fix: previous doc comment was copied from generate_wg_priv_key and
// wrongly claimed a priv/pub pair is returned.)
pub fn (mut c Client) generate_wg_public_key(key string) !string {
	res := os.execute('griddriver generate-wg-public-key --key "${key}"')
	// bug fix: check the exit code so a CLI failure is reported directly
	// instead of only indirectly through the split check below
	if res.exit_code != 0 {
		return error('could not generate public key: ${res.output}')
	}
	public_key := res.output.split(' ')
	if public_key.len != 1 {
		return error('could not generate public key: ${res.output}')
	}
	return public_key[0]
}
|
||||
93
lib/threefold/grid3/gridproxy/README.md
Normal file
93
lib/threefold/grid3/gridproxy/README.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# GridProxy API client
|
||||
|
||||
Easily access Threefold grid APIs from vlang. `gridproxy` is a V module that includes the API client along with API-specific information such as the root URL for the different networks available in the Threefold grid. It also includes, in the sub-module `model`, types that represent entities in the context of the API and are useful for converting between JSON objects and V objects, as well as types with helper methods to convert the machine-friendly units returned by the API into more human-friendly units.
|
||||
|
||||
### import the client:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.threefold.gridproxy
|
||||
|
||||
// create a client for the testnet, with API cache disabled
|
||||
// you can pass true as second arg to enable cache
|
||||
mut gp_client := gridproxy.get(.test, false)!
|
||||
|
||||
```
|
||||
|
||||
### use the client to interact with the gridproxy API:
|
||||
|
||||
```v
|
||||
// get farm list
|
||||
farms := gp_client.get_farms()! // you should handle any possible errors in your code
|
||||
// get gateway list
|
||||
gateways := gp_client.get_gateways()!
|
||||
// get node list
|
||||
nodes := gp_client.get_nodes()!
|
||||
// get contract list
|
||||
contracts := gp_client.get_contracts()!
|
||||
// get grid stats
|
||||
stats := gp_client.get_stats()!
|
||||
// get node by id
|
||||
node := gp_client.get_node_by_id(u64(16))!
|
||||
// get node stats
|
||||
node_stats := gp_client.get_node_stats_by_id(u64(16))!
|
||||
// get twins
|
||||
twins := gp_client.get_twins()!
|
||||
```
|
||||
|
||||
for all available methods on the client, see [GridProxy API client modules doc](./docs/)
|
||||
|
||||
### filtering:
|
||||
|
||||
```v
|
||||
// getting only dedicated farms
|
||||
farms_dedicated := gp_client.get_farms(dedicated: true)!
|
||||
// getting only farms with at least one free ip
|
||||
farms_with_free_ips := gp_client.get_farms(free_ips: u64(1))!
|
||||
// pagination options:
|
||||
// get first page of farms
|
||||
farms_first_page := gp_client.get_farms(page: u64(1))!
|
||||
// you can mix any filters and pagination options
|
||||
farms_first_page_dedicated := gp_client.get_farms(page: u64(1), dedicated: true)!
|
||||
// access the field of first farm in the list
|
||||
// the API could return an empty list if no farm is found
|
||||
// you should handle this case in your code
|
||||
if farms_first_page.len > 0 {
|
||||
println(farms_first_page[0].name)
|
||||
}
|
||||
```
|
||||
|
||||
for all available filters, see [GridProxy API client modules doc](./docs/)
|
||||
|
||||
### helper methods:
|
||||
|
||||
```v
|
||||
node := nodes[0]
|
||||
node.updated_at // 1655940222
|
||||
node.created // 1634637306
|
||||
// you can convert the timestamp to V Time object easily with the helper method
|
||||
node.created.to_time() // 2021-10-19 09:55:06
|
||||
node.created.to_time().local() // 2021-10-19 11:55:06
|
||||
node.created.to_time().relative() // last Oct 19
|
||||
node.created.to_time().relative_short() // 246d ago
|
||||
// lets check another field with different type
|
||||
node.uptime // 18958736
|
||||
// you can convert the seconds to a human-readable duration with the helper method
|
||||
node.uptime.to_days() // 219.42981481481482
|
||||
node.uptime.to_hours() // 5266.315555555556
|
||||
node.uptime.to_minutes() // 315978.93333333335
|
||||
// now to the capacity helper methods
|
||||
node.total_resources.mru // 202803036160
|
||||
// you can `to_megabytes`, `to_gigabytes` and `to_terabytes` methods on any resources field.
|
||||
node.total_resources.mru.to_gigabytes() // 202.80303616
|
||||
// the helper methods available for the billing to help you convert the TFT units as well
|
||||
```
|
||||
|
||||
for all available helper methods, see [GridProxy API client modules doc](./docs/)
|
||||
|
||||
TODO:
|
||||
|
||||
- Documented the client iterators and higher-level methods
|
||||
|
||||
## Client Examples
|
||||
|
||||
there are scripts available to serve as examples in the [examples](../examples/) directory. [Docs](../examples/README.md)
|
||||
489
lib/threefold/grid3/gridproxy/gridproxy_core.v
Normal file
489
lib/threefold/grid3/gridproxy/gridproxy_core.v
Normal file
@@ -0,0 +1,489 @@
|
||||
module gridproxy
|
||||
|
||||
// client library for threefold gridproxy API.
|
||||
import json
|
||||
import math
|
||||
import freeflowuniverse.herolib.threefold.gridproxy.model { Bill, Contract, ContractFilter, ContractIterator, Farm, FarmFilter, FarmIterator, GridStat, Node, NodeFilter, NodeIterator, NodeStats, Node_, StatFilter, Twin, TwinFilter, TwinIterator }
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
/*
|
||||
all errors returned by the gridproxy API or the client are wrapped in a standard `Error` object with two fields.
|
||||
{
|
||||
msg string
|
||||
code int // could be API call error code or client error code
|
||||
}
|
||||
|
||||
`code` is an error code that can be used to identify the error.
|
||||
in API call errors, `code` represents the HTTP status code. (100..599)
|
||||
|
||||
Client errors codes are represented by numbers in the range of 1..99
|
||||
currently, the following client error codes are used:
|
||||
id not found error code: 4
|
||||
json parsing error code: 10
|
||||
http client error code: 11
|
||||
invalid response from server (e.g. empty response) error code: 24
|
||||
*/
|
||||
// client error codes
|
||||
const err_not_found = 4 // requested object does not exist
const err_json_parse = 10 // response body could not be decoded
const err_http_client = 11 // transport-level failure in the http client
const err_invalid_resp = 24 // invalid response from server (e.g. empty body)
const err_grid_client = 30 // generic grid client failure
|
||||
|
||||
// get_node_by_id fetches specific node information by node id.
|
||||
//
|
||||
// * `node_id` (u64): node id.
|
||||
//
|
||||
// returns: `Node` or `Error`.
|
||||
pub fn (mut c GridProxyClient) get_node_by_id(node_id u64) !Node {
	// needed to allow to use threads
	mut http_client := c.http_client

	res := http_client.send(prefix: 'nodes/', id: '${node_id}') or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}

	// non-OK responses carry the API's error text in res.data
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}

	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}

	node := json.decode(Node, res.data) or {
		return error_with_code('error to get jsonstr for node data, json decode: node id: ${node_id}, data: ${res.data}',
			err_json_parse)
	}
	return node
}
|
||||
|
||||
// get_node_stats_by_id fetches specific node statistics by node id.
|
||||
//
|
||||
// * `node_id` (u64): node id.
|
||||
//
|
||||
// returns: `NodeStats` or `Error`.
|
||||
pub fn (mut c GridProxyClient) get_node_stats_by_id(node_id u64) !NodeStats {
	mut http_client := c.http_client // needed to allow to use threads

	res := http_client.send(prefix: 'nodes/', id: '${node_id}/statistics') or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}
	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}
	stats := json.decode(NodeStats, res.data) or {
		return error_with_code('error to get jsonstr for node data, json decode: node id: ${node_id}, data: ${res.data}',
			err_json_parse)
	}
	return stats
}
|
||||
|
||||
// get_gateway_by_id fetches specific gateway information by node id.
|
||||
//
|
||||
// * `node_id` (u64): node id.
|
||||
//
|
||||
// returns: `Node` or `Error`.
|
||||
pub fn (mut c GridProxyClient) get_gateway_by_id(node_id u64) !Node {
	mut http_client := c.http_client // needed to allow to use threads

	res := http_client.send(prefix: 'gateways/', id: '${node_id}') or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}
	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}
	gateway := json.decode(Node, res.data) or {
		return error_with_code('error to get jsonstr for gateway data, json decode: gateway id: ${node_id}, data: ${res.data}',
			err_json_parse)
	}
	return gateway
}
|
||||
|
||||
// get_nodes fetches nodes information and public configurations with pagination.
|
||||
//
|
||||
// * `available_for` (u64): Available for twin id. [optional].
|
||||
// * `certification_type` (string): Certificate type NotCertified, Silver or Gold. [optional].
|
||||
// * `city_contains` (string): Node partial city filter. [optional].
|
||||
// * `city` (string): Node city filter. [optional].
|
||||
// * `country_contains` (string): Node partial country filter. [optional].
|
||||
// * `country` (string): Node country filter. [optional].
|
||||
// * `dedicated` (bool): Set to true to get the dedicated nodes only. [optional].
|
||||
// * `domain` (string): Set to true to filter nodes with domain. [optional].
|
||||
// * `farm_ids` ([]u64): List of farm ids. [optional].
|
||||
// * `farm_name_contains` (string): Get nodes for specific farm. [optional].
|
||||
// * `farm_name` (string): Get nodes for specific farm. [optional].
|
||||
// * `free_hru` (u64): Min free reservable hru in bytes. [optional].
|
||||
// * `free_ips` (u64): Min number of free ips in the farm of the node. [optional].
|
||||
// * `free_mru` (u64): Min free reservable mru in bytes. [optional].
|
||||
// * `free_sru` (u64): Min free reservable sru in bytes. [optional].
|
||||
// * `gpu_available` (bool): Filter nodes that have available GPU. [optional].
|
||||
// * `gpu_device_id` (string): Filter nodes based on GPU device ID. [optional].
|
||||
// * `gpu_device_name` (string): Filter nodes based on GPU device partial name. [optional].
|
||||
// * `gpu_vendor_id` (string): Filter nodes based on GPU vendor ID. [optional].
|
||||
// * `gpu_vendor_name` (string): Filter nodes based on GPU vendor partial name. [optional].
|
||||
// * `has_gpu`: Filter nodes on whether they have GPU support or not. [optional].
|
||||
// * `ipv4` (string): Set to true to filter nodes with ipv4. [optional].
|
||||
// * `ipv6` (string): Set to true to filter nodes with ipv6. [optional].
|
||||
// * `node_id` (u64): Node id. [optional].
|
||||
// * `page` (u64): Page number. [optional].
|
||||
// * `rentable` (bool): Set to true to filter the available nodes for renting. [optional].
|
||||
// * `rented_by` (u64): Rented by twin id. [optional].
|
||||
// * `ret_count` (bool): Set nodes' count on headers based on filter. [optional].
|
||||
// * `size` (u64): Max result per page. [optional].
|
||||
// * `status` (string): Node status filter, set to 'up' to get online nodes only. [optional].
|
||||
// * `total_cru` (u64): Min total cru in bytes. [optional].
|
||||
// * `total_hru` (u64): Min total hru in bytes. [optional].
|
||||
// * `total_mru` (u64): Min total mru in bytes. [optional].
|
||||
// * `total_sru` (u64): Min total sru in bytes. [optional].
|
||||
// * `twin_id` (u64): Twin id. [optional].
|
||||
//
|
||||
// returns: `[]Node` or `Error`.
|
||||
pub fn (mut c GridProxyClient) get_nodes(params NodeFilter) ![]Node {
	// needed to allow to use threads
	mut http_client := c.http_client
	params_map := params.to_map()
	res := http_client.send(prefix: 'nodes/', params: params_map) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}

	// non-OK responses carry the API's error text in res.data
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}

	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}

	// decode the flat wire struct first, then fold its capacity fields into
	// the nested Node representation
	nodes_ := json.decode([]Node_, res.data) or {
		return error_with_code('error to get jsonstr for node list data, json decode: node filter: ${params_map}, data: ${res.data}',
			err_json_parse)
	}
	nodes := nodes_.map(it.with_nested_capacity())
	return nodes
}
|
||||
|
||||
// get_gateways fetches gateways information, public configurations and domains with pagination.
|
||||
//
|
||||
// * `available_for` (u64): Available for twin id. [optional].
|
||||
// * `certification_type` (string): Certificate type NotCertified, Silver or Gold. [optional].
|
||||
// * `city_contains` (string): Node partial city filter. [optional].
|
||||
// * `city` (string): Node city filter. [optional].
|
||||
// * `country_contains` (string): Node partial country filter. [optional].
|
||||
// * `country` (string): Node country filter. [optional].
|
||||
// * `dedicated` (bool): Set to true to get the dedicated nodes only. [optional].
|
||||
// * `domain` (bool): Set to true to filter nodes with domain. [optional].
|
||||
// * `farm_ids` ([]u64): List of farm ids. [optional].
|
||||
// * `farm_name_contains` (string): Get nodes for specific farm. [optional].
|
||||
// * `farm_name` (string): Get nodes for specific farm. [optional].
|
||||
// * `free_hru` (u64): Min free reservable hru in bytes. [optional].
|
||||
// * `free_ips` (u64): Min number of free ips in the farm of the node. [optional].
|
||||
// * `free_mru` (u64): Min free reservable mru in bytes. [optional].
|
||||
// * `free_sru` (u64): Min free reservable sru in bytes. [optional].
|
||||
// * `gpu_available` (bool): Filter nodes that have available GPU. [optional].
|
||||
// * `gpu_device_id` (string): Filter nodes based on GPU device ID. [optional].
|
||||
// * `gpu_device_name` (string): Filter nodes based on GPU device partial name. [optional].
|
||||
// * `gpu_vendor_id` (string): Filter nodes based on GPU vendor ID. [optional].
|
||||
// * `gpu_vendor_name` (string): Filter nodes based on GPU vendor partial name. [optional].
|
||||
// * `has_gpu`: Filter nodes on whether they have GPU support or not. [optional].
|
||||
// * `ipv4` (string): Set to true to filter nodes with ipv4. [optional].
|
||||
// * `ipv6` (string): Set to true to filter nodes with ipv6. [optional].
|
||||
// * `node_id` (u64): Node id. [optional].
|
||||
// * `page` (u64): Page number. [optional].
|
||||
// * `rentable` (bool): Set to true to filter the available nodes for renting. [optional].
|
||||
// * `rented_by` (u64): Rented by twin id. [optional].
|
||||
// * `ret_count` (bool): Set nodes' count on headers based on filter. [optional].
|
||||
// * `size` (u64): Max result per page. [optional].
|
||||
// * `status` (string): Node status filter, set to 'up' to get online nodes only. [optional].
|
||||
// * `total_cru` (u64): Min total cru in bytes. [optional].
|
||||
// * `total_hru` (u64): Min total hru in bytes. [optional].
|
||||
// * `total_mru` (u64): Min total mru in bytes. [optional].
|
||||
// * `total_sru` (u64): Min total sru in bytes. [optional].
|
||||
// * `twin_id` (u64): Twin id. [optional].
|
||||
//
|
||||
// returns: `[]Node` or `Error`.
|
||||
pub fn (mut c GridProxyClient) get_gateways(params NodeFilter) ![]Node {
	mut http_client := c.http_client // needed to allow to use threads
	params_map := params.to_map()
	res := http_client.send(prefix: 'gateways/', params: params_map) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}
	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}
	// decode the flat wire struct, then nest the capacity fields
	raw := json.decode([]Node_, res.data) or {
		return error_with_code('error to get jsonstr for gateways list data, json decode: gateway filter: ${params_map}, data: ${res.data}',
			err_json_parse)
	}
	return raw.map(it.with_nested_capacity())
}
|
||||
|
||||
// get_stats fetches stats about the grid.
|
||||
//
|
||||
// * `status` (string): Node status filter, set to 'up' to get online nodes only.. [optional].
|
||||
//
|
||||
// returns: `GridStat` or `Error`.
|
||||
pub fn (mut c GridProxyClient) get_stats(filter StatFilter) !GridStat {
	// needed to allow to use threads
	mut http_client := c.http_client
	mut params_map := map[string]string{}
	// the API expects status=up for online-only stats, empty for everything
	params_map['status'] = match filter.status {
		.all { '' }
		.online { 'up' }
	}

	res := http_client.send(prefix: 'stats/', params: params_map) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}

	// non-OK responses carry the API's error text in res.data
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}

	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}

	stats := json.decode(GridStat, res.data) or {
		return error_with_code('error to get jsonstr for grid stats data, json decode: stats filter: ${params_map}, data: ${res.data}',
			err_json_parse)
	}
	return stats
}
|
||||
|
||||
// get_twins fetches twins information with pagination.
|
||||
//
|
||||
// * `account_id` (string): Account address. [optional].
|
||||
// * `page` (u64): Page number. [optional].
|
||||
// * `public_key` (string): twin public key used for e2e encryption. [optional].
|
||||
// * `relay` (string): relay domain name. [optional].
|
||||
// * `ret_count` (bool): Set farms' count on headers based on filter. [optional].
|
||||
// * `size` (u64): Max result per page. [optional].
|
||||
// * `twin_id` (u64): Twin id. [optional].
|
||||
//
|
||||
// returns: `[]Twin` or `Error`.
|
||||
pub fn (mut c GridProxyClient) get_twins(params TwinFilter) ![]Twin {
|
||||
// needed to allow to use threads
|
||||
mut http_client := c.http_client
|
||||
params_map := params.to_map()
|
||||
res := http_client.send(prefix: 'twins/', params: params_map) or {
|
||||
return error_with_code('http client error: ${err.msg()}', err_http_client)
|
||||
}
|
||||
|
||||
if !res.is_ok() {
|
||||
return error_with_code(res.data, res.code)
|
||||
}
|
||||
|
||||
if res.data == '' {
|
||||
return error_with_code('empty response', err_invalid_resp)
|
||||
}
|
||||
|
||||
twins := json.decode([]Twin, res.data) or {
|
||||
return error_with_code('error to get jsonstr for twin list data, json decode: twin filter: ${params_map}, data: ${res.data}',
|
||||
err_json_parse)
|
||||
}
|
||||
return twins
|
||||
}
|
||||
|
||||
// get_contracts fetches contracts information with pagination.
//
// * `contract_id` (u64): Contract id. [optional].
// * `contract_type` (string): Contract type 'node', 'name', or 'rent'. [optional].
// * `deployment_data` / `deployment_hash` (string): deployment info for 'node' contracts. [optional].
// * `name` (string): Contract name in case of 'name' contracts. [optional].
// * `node_id` (u64): Node id the contract is deployed on ('rent' or 'node' contracts). [optional].
// * `number_of_public_ips` (u64): Min number of public ips in the 'node' contract. [optional].
// * `page` (u64) / `size` (u64): pagination. [optional].
// * `randomize` (bool) / `ret_count` (bool): result shaping flags. [optional].
// * `state` (string): Contract state 'Created', or 'Deleted'. [optional].
// * `twin_id` (u64): Twin id. [optional].
//
// returns: `[]Contract` or `Error`.
pub fn (mut c GridProxyClient) get_contracts(params ContractFilter) ![]Contract {
	// work on a copy of the connection so the call is safe to use from threads
	mut http_client := c.http_client
	params_map := params.to_map()

	res := http_client.send(prefix: 'contracts/', params: params_map) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}
	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}
	return json.decode([]Contract, res.data) or {
		return error_with_code('error to get jsonstr for contract list data, json decode: contract filter: ${params_map}, data: ${res.data}',
			err_json_parse)
	}
}
|
||||
|
||||
// get_contract_bill fetches the billing records of a single contract.
//
// * `contract_id`: id of the contract whose bills are requested.
//
// returns: `[]Bill` or `Error`.
pub fn (mut c GridProxyClient) get_contract_bill(contract_id u64) ![]Bill {
	// work on a copy of the connection so the call is safe to use from threads
	mut http_client := c.http_client

	res := http_client.send(prefix: 'contracts/', id: '${contract_id}/bills') or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}
	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}
	console.print_debug(res.data)
	return json.decode([]Bill, res.data) or {
		return error_with_code('error to get jsonstr for billing data, json decode: contract_id id: ${contract_id}, data: ${res.data}',
			err_json_parse)
	}
}
|
||||
|
||||
// get_contract_hourly_bill estimates the hourly billing rate of a contract in TFT.
//
// The billing window is the gap between the two most recent bills; with a single
// bill, the contract's creation time is used as the start of the window instead.
//
// * `contract_id`: contract id.
//
// returns: hourly rate as `f64` (0 when there are no bills or no elapsed hours), or `Error`.
pub fn (mut c GridProxyClient) get_contract_hourly_bill(contract_id u64) !f64 {
	bills := c.get_contract_bill(contract_id)!
	if bills.len == 0 {
		return f64(0)
	}
	// length of the most recent billing window, in whole hours
	mut duration := u64(0)
	if bills.len >= 2 {
		duration = (bills[0].timestamp - bills[1].timestamp) / 3600 // one hour
	} else if bills.len == 1 {
		// only one bill: fall back to the contract creation time as window start
		contracts := c.get_contracts(contract_id: contract_id)!
		if contracts.len > 0 {
			duration = (bills[0].timestamp - contracts[0].created_at) / 3600
		}
	}
	if duration > 0 {
		// amount_billed appears to be expressed in 1e-7 TFT units — TODO confirm
		// the divisor against the gridproxy API documentation
		return bills[0].amount_billed / duration / math.pow(10, 7)
	}
	return f64(0)
}
|
||||
|
||||
// get_farms fetches farms information and public ips.
//
// * `certification_type` (string): Certificate type DIY or Certified. [optional].
// * `country` (string): Farm country. [optional].
// * `dedicated` (bool): Farm is dedicated. [optional].
// * `farm_id` (u64): Farm id. [optional].
// * `free_ips` / `total_ips` (u64): min free / total ips in the farm. [optional].
// * `name` / `name_contains` (string): exact / substring farm name match. [optional].
// * `node_available_for` / `node_rented_by` (u64): twin-based node availability filters. [optional].
// * `node_certified` / `node_has_gpu` (bool): node capability filters. [optional].
// * `node_free_hru` / `node_free_mru` / `node_free_sru` (u64): min free resources (bytes) on at least one node. [optional].
// * `node_status` (string): status of at least one node in the farm. [optional].
// * `page` (u64) / `size` (u64): pagination. [optional].
// * `pricing_policy_id` (u64): Pricing policy id. [optional].
// * `randomize` (bool) / `ret_count` (bool): result shaping flags. [optional].
// * `stellar_address` (string): Farm stellar_address. [optional].
// * `twin_id` (u64): Twin id associated with the farm. [optional].
//
// returns: `[]Farm` or `Error`.
pub fn (mut c GridProxyClient) get_farms(params FarmFilter) ![]Farm {
	// work on a copy of the connection so the call is safe to use from threads
	mut http_client := c.http_client
	params_map := params.to_map()

	res := http_client.send(prefix: 'farms/', params: params_map) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if !res.is_ok() {
		return error_with_code(res.data, res.code)
	}
	if res.data == '' {
		return error_with_code('empty response', err_invalid_resp)
	}
	return json.decode([]Farm, res.data) or {
		return error_with_code('error to get jsonstr for farm list data, json decode: farm filter: ${params_map}, data: ${res.data}',
			err_json_parse)
	}
}
|
||||
|
||||
// is_pingable checks if the API server is reachable and responding.
//
// returns: `true` when the ping endpoint answers with 'pong', `false` on any
// transport error, non-OK status, undecodable body, or unexpected answer.
pub fn (mut c GridProxyClient) is_pingable() !bool {
	mut http_client := c.http_client
	res := http_client.send(prefix: 'ping/') or { return false }
	if !res.is_ok() {
		return false
	}
	health_map := json.decode(map[string]string, res.data) or { return false }
	return health_map['ping'] == 'pong'
}
|
||||
|
||||
// Iterators have the next() method, which returns the next page of the objects.
// They can be used in a loop to get all available results, or to lazily traverse pages until a specific condition is met.
|
||||
|
||||
// get_nodes_iterator creates an iterator through node pages with custom filter
fn (mut c GridProxyClient) get_nodes_iterator(filter NodeFilter) NodeIterator {
	return NodeIterator{filter, c.get_nodes}
}

// get_gateways_iterator creates an iterator through gateway pages with custom filter
fn (mut c GridProxyClient) get_gateways_iterator(filter NodeFilter) NodeIterator {
	// same iterator type as nodes; only the page-fetching method differs
	return NodeIterator{filter, c.get_gateways}
}

// get_farms_iterator creates an iterator through farms pages with custom filter
fn (mut c GridProxyClient) get_farms_iterator(filter FarmFilter) FarmIterator {
	return FarmIterator{filter, c.get_farms}
}

// get_twins_iterator creates an iterator through twin pages with custom filter
fn (mut c GridProxyClient) get_twins_iterator(filter TwinFilter) TwinIterator {
	return TwinIterator{filter, c.get_twins}
}

// get_contracts_iterator creates an iterator through contracts pages with custom filter
fn (mut c GridProxyClient) get_contracts_iterator(filter ContractFilter) ContractIterator {
	return ContractIterator{filter, c.get_contracts}
}
|
||||
111
lib/threefold/grid3/gridproxy/gridproxy_factory.v
Normal file
111
lib/threefold/grid3/gridproxy/gridproxy_factory.v
Normal file
@@ -0,0 +1,111 @@
|
||||
module gridproxy
|
||||
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import freeflowuniverse.herolib.threefold.gridproxy.model
|
||||
// import freeflowuniverse.herolib.installers.threefold.griddriver
|
||||
|
||||
// GridProxyClient is a thin wrapper around an HTTP connection to a gridproxy server.
@[heap]
pub struct GridProxyClient {
pub mut:
	// underlying HTTP connection used by every request of this client
	http_client httpconnection.HTTPConnection
}
|
||||
|
||||
// TFGridNet enumerates the ThreeFold grid networks a client can target.
pub enum TFGridNet {
	main
	test
	dev
	qa
}
|
||||
|
||||
// GridproxyFactory caches one GridProxyClient instance per network.
@[heap]
struct GridproxyFactory {
mut:
	// map from net name ('main', 'test', 'dev', 'qa') to its cached client
	instances map[string]&GridProxyClient
}
|
||||
|
||||
// init_factory builds the empty factory backing the module-level singleton.
fn init_factory() GridproxyFactory {
	return GridproxyFactory{}
}
|
||||
|
||||
// Singleton creation
|
||||
const factory = init_factory()
|
||||
|
||||
// factory_get returns a reference to the module-level singleton factory.
fn factory_get() &GridproxyFactory {
	return &factory
}
|
||||
|
||||
// gridproxy_url_get maps a grid network to its public gridproxy base URL.
fn gridproxy_url_get(net TFGridNet) string {
	return match net {
		.main { 'https://gridproxy.grid.tf' }
		.test { 'https://gridproxy.test.grid.tf' }
		.dev { 'https://gridproxy.dev.grid.tf' }
		// no trailing slash, consistent with the other networks (a trailing
		// slash would produce '//' when the client joins base URL and prefix)
		.qa { 'https://gridproxy.qa.grid.tf' }
	}
}
|
||||
|
||||
// tfgrid_net_string returns the short name of the given grid network.
fn tfgrid_net_string(net TFGridNet) string {
	match net {
		.main { return 'main' }
		.test { return 'test' }
		.dev { return 'dev' }
		.qa { return 'qa' }
	}
}
|
||||
|
||||
// GridProxyClientArgs are the options accepted by new().
@[params]
pub struct GridProxyClientArgs {
pub mut:
	// target grid network; defaults to mainnet
	net TFGridNet = .main
	// enable the HTTP response cache on the underlying connection
	cache bool
}
|
||||
|
||||
// new returns a gridproxy client for the given net, creating and caching the
// underlying HTTP connection on first use.
//
//```
// net TFGridNet = .main
// cache bool
//```
pub fn new(args GridProxyClientArgs) !&GridProxyClient {
	mut f := factory_get()
	netstr := tfgrid_net_string(args.net)
	// fixed: check the mutable factory reference (`f`), not the `factory` const,
	// so the membership test and the insert below target the same map
	if netstr !in f.instances {
		url := gridproxy_url_get(args.net)
		mut httpconn := httpconnection.new(
			name: 'gridproxy_${netstr}'
			url: url
			cache: args.cache
		)!
		// do the settings on the connection
		httpconn.cache.expire_after = 7200 // make the cache timeout 2h
		mut connection := GridProxyClient{
			http_client: httpconn
		}
		f.instances[netstr] = &connection
	}
	return f.instances[netstr] or {
		// typo fixed: 'unknow' -> 'unknown'
		return error_with_code('http client error: unknown error happened while trying to access the GridProxyClient instance',
			err_grid_client)
	}
}
|
||||
|
||||
// nodefilter returns an empty NodeFilter ready to be customized.
pub fn nodefilter() !model.NodeFilter {
	return model.NodeFilter{}
}

// contractfilter returns an empty ContractFilter ready to be customized.
pub fn contractfilter() !model.ContractFilter {
	return model.ContractFilter{}
}

// farmfilter returns an empty FarmFilter ready to be customized.
pub fn farmfilter() !model.FarmFilter {
	return model.FarmFilter{}
}

// twinfilter returns an empty TwinFilter ready to be customized.
pub fn twinfilter() !model.TwinFilter {
	return model.TwinFilter{}
}

// statfilter returns a default StatFilter (counts all nodes).
pub fn statfilter() !model.StatFilter {
	return model.StatFilter{}
}
|
||||
169
lib/threefold/grid3/gridproxy/gridproxy_highlevel.v
Normal file
169
lib/threefold/grid3/gridproxy/gridproxy_highlevel.v
Normal file
@@ -0,0 +1,169 @@
|
||||
module gridproxy
|
||||
|
||||
import freeflowuniverse.herolib.threefold.gridproxy.model { Contract, ContractFilter, Farm, FarmFilter, Node, NodeFilter, ResourceFilter, Twin }
|
||||
|
||||
// get_twin_by_id fetches specific twin information by twin id.
//
// * `twin_id`: twin id.
//
// returns: `Twin` or `Error`.
pub fn (mut c GridProxyClient) get_twin_by_id(twin_id u64) !Twin {
	twins := c.get_twins(twin_id: twin_id) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if twins.len > 0 {
		return twins[0]
	}
	return error_with_code('no twin found for id: ${twin_id}', err_not_found)
}
|
||||
|
||||
// get_twin_by_account fetches specific twin information by account address.
//
// * `account_id`: account id.
//
// returns: `Twin` or `Error`.
pub fn (mut c GridProxyClient) get_twin_by_account(account_id string) !Twin {
	twins := c.get_twins(account_id: account_id) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if twins.len > 0 {
		return twins[0]
	}
	return error_with_code('no twin found for account_id: ${account_id}', err_not_found)
}
|
||||
|
||||
// get_farm_by_id fetches specific farm information by id.
//
// * `farm_id`: farm id.
//
// returns: `Farm` or `Error`.
pub fn (mut c GridProxyClient) get_farm_by_id(farm_id u64) !Farm {
	farms := c.get_farms(farm_id: farm_id) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if farms.len > 0 {
		return farms[0]
	}
	return error_with_code('no farm found for id: ${farm_id}', err_not_found)
}
|
||||
|
||||
// get_farm_by_name fetches specific farm information by farm name.
//
// * `farm_name`: farm name.
//
// returns: `Farm` or `Error`.
pub fn (mut c GridProxyClient) get_farm_by_name(farm_name string) !Farm {
	farms := c.get_farms(name: farm_name) or {
		return error_with_code('http client error: ${err.msg()}', err_http_client)
	}
	if farms.len > 0 {
		return farms[0]
	}
	return error_with_code('no farm found with name: ${farm_name}', err_not_found)
}
|
||||
|
||||
// get_farms_by_twin_id collects all farms associated with a specific twin,
// walking every result page.
//
// * `twin_id`: twin id.
//
// returns: `[]Farm` (possibly empty).
pub fn (mut c GridProxyClient) get_farms_by_twin_id(twin_id u64) []Farm {
	mut iter := c.get_farms_iterator(FarmFilter{ twin_id: twin_id })
	mut farms := []Farm{}
	for page in iter {
		farms << page
	}
	return farms
}
|
||||
|
||||
// get_contracts_by_twin_id collects all contracts owned by a specific twin,
// walking every result page.
//
// * `twin_id`: twin id.
//
// returns: `[]Contract` (possibly empty).
pub fn (mut c GridProxyClient) get_contracts_by_twin_id(twin_id u64) []Contract {
	mut iter := c.get_contracts_iterator(ContractFilter{ twin_id: twin_id })
	mut contracts := []Contract{}
	for page in iter {
		contracts << page
	}
	return contracts
}
|
||||
|
||||
// get_contracts_active collects the contracts in state 'created' owned by a
// specific twin, walking every result page.
//
// * `twin_id`: twin id.
//
// returns: `[]Contract` (possibly empty).
pub fn (mut c GridProxyClient) get_contracts_active(twin_id u64) []Contract {
	mut iter := c.get_contracts_iterator(ContractFilter{
		twin_id: twin_id
		state: 'created'
	})
	mut contracts := []Contract{}
	for page in iter {
		contracts << page
	}
	return contracts
}
|
||||
|
||||
// get_contracts_by_node_id collects all contracts deployed on a specific node,
// walking every result page.
//
// * `node_id`: node id.
//
// returns: `[]Contract` (possibly empty).
pub fn (mut c GridProxyClient) get_contracts_by_node_id(node_id u64) []Contract {
	mut iter := c.get_contracts_iterator(ContractFilter{ node_id: node_id })
	mut contracts := []Contract{}
	for page in iter {
		contracts << page
	}
	return contracts
}
|
||||
|
||||
// get_nodes_has_resources returns all nodes satisfying minimum free reservable
// resources, walking every result page.
//
// * `free_ips` (u64): minimum free public ips. [optional].
// * `free_mru_gb` / `free_sru_gb` / `free_hru_gb` (u64): minimum free memory/ssd/hdd in GB. [optional].
// * `free_cpu` (u64): minimum total cores. [optional].
//
// returns: `[]Node`.
fn (mut c GridProxyClient) get_nodes_has_resources(filter ResourceFilter) []Node {
	// 1 GB = 1024^3 bytes; the previous code multiplied by 1204^3, silently
	// inflating every threshold by ~62%
	gb := u64(1024) * 1024 * 1024
	mut filter_ := NodeFilter{
		free_ips: filter.free_ips
		free_mru: filter.free_mru_gb * gb
		free_sru: filter.free_sru_gb * gb
		free_hru: filter.free_hru_gb * gb
		total_cru: filter.free_cpu
	}
	mut iter := c.get_nodes_iterator(filter_)
	mut result := []Node{}
	for f in iter {
		result << f
	}
	return result
}
|
||||
247
lib/threefold/grid3/gridproxy/gridproxy_test.v
Normal file
247
lib/threefold/grid3/gridproxy/gridproxy_test.v
Normal file
@@ -0,0 +1,247 @@
|
||||
module gridproxy
|
||||
|
||||
import freeflowuniverse.herolib.threefold.gridproxy.model
|
||||
import time
|
||||
|
||||
// run the live-network tests without the HTTP response cache
const cache = false

// dummy_node is a fixed Node fixture used by the unit-conversion tests below.
const dummy_node = model.Node{
	id: '0000129706-000001-c1e78'
	node_id: 1
	farm_id: 2
	twin_id: 8
	grid_version: 3
	uptime: model.SecondUnit(86400) // 86400 seconds = 1440 minutes = 24 hours = 1 day
	created: model.UnixTime(1654848126) // GMT: 2022-06-10 08:02:06
	farming_policy_id: 1
	updated_at: model.UnixTime(1654848132) // GMT: 2022-06-10 08:02:12
	capacity: model.NodeCapacity{
		total_resources: model.NodeResources{
			cru: 4
			mru: model.ByteUnit(5178437632) // 5178437632 bytes = 5178.437632 megabytes = 5.2 gigabytes = 0.005178437632 terabytes
			sru: model.ByteUnit(1610612736000) // 1610612736000 bytes = 1610612.736000 megabytes = 1610.612736 gigabytes = 1.61 terabytes
			hru: model.ByteUnit(1073741824000) // 1073741824000 bytes = 1073741.824 megabytes = 1073.741824 gigabytes = 1.07 terabytes
		}
		// node is entirely free: all used resources are zero (relied on by
		// test_calc_available_resources_on_node)
		used_resources: model.NodeResources{
			cru: 0
			mru: model.ByteUnit(0)
			sru: model.ByteUnit(0)
			hru: model.ByteUnit(0)
		}
	}
	location: model.NodeLocation{
		country: 'Belgium'
		city: 'Lochristi'
	}
	public_config: model.PublicConfig{
		domain: ''
		gw4: ''
		gw6: ''
		ipv4: ''
		ipv6: ''
	}
	certification: 'Diy'
	status: 'down'
	dedicated: false
	rent_contract_id: 0
	rented_by_twin_id: 0
}

// dummy_contract_billing is a fixed billing record for the TFT-unit conversion tests.
const dummy_contract_billing = model.ContractBilling{
	amount_billed: model.DropTFTUnit(10000000) // 1 TFT == 1000 mTFT == 1000000 uTFT
	discount_received: 'None'
	timestamp: model.UnixTime(1655118966)
}
|
||||
|
||||
// NOTE(review): these tests call a module-level get(net, cache) helper that is
// not visible in this factory file (which exposes new(args)) — confirm get()
// exists elsewhere in the module.

// checks a client for the qa net can be built and the server answers ping
fn test_create_gridproxy_client_qa() {
	mut gp := get(.qa, cache)!
	assert gp.is_pingable()! == true
}

// checks a client for the dev net can be built and the server answers ping
fn test_create_gridproxy_client_dev() {
	mut gp := get(.dev, cache)!
	assert gp.is_pingable()! == true
}

// checks a client for the test net can be built and the server answers ping
fn test_create_gridproxy_client_test() {
	mut gp := get(.test, cache)!
	assert gp.is_pingable()! == true
}

// checks a client for the main net can be built and the server answers ping
fn test_create_gridproxy_client_main() {
	mut gp := get(.main, cache)!
	assert gp.is_pingable()! == true
}
|
||||
|
||||
// live tests: fetching nodes must return a non-empty first page on every net

fn test_get_nodes_qa() {
	mut gp := get(.qa, cache)!
	nodes := gp.get_nodes() or { panic('Failed to get nodes') }
	assert nodes.len > 0
}

fn test_get_nodes_dev() {
	mut gp := get(.dev, cache)!
	nodes := gp.get_nodes() or { panic('Failed to get nodes') }
	assert nodes.len > 0
}

fn test_get_nodes_test() {
	mut gp := get(.test, cache)!
	nodes := gp.get_nodes() or { panic('Failed to get nodes') }
	assert nodes.len > 0
}

fn test_get_nodes_main() {
	mut gp := get(.main, cache)!
	nodes := gp.get_nodes() or { panic('Failed to get nodes') }
	assert nodes.len > 0
}

// live tests: fetching gateways must return a non-empty first page on every net

fn test_get_gateways_qa() {
	mut gp := get(.qa, cache)!
	nodes := gp.get_gateways() or { panic('Failed to get gateways') }
	assert nodes.len > 0
}

fn test_get_gateways_dev() {
	mut gp := get(.dev, cache)!
	nodes := gp.get_gateways() or { panic('Failed to get gateways') }
	assert nodes.len > 0
}

fn test_get_gateways_test() {
	mut gp := get(.test, cache)!
	nodes := gp.get_gateways() or { panic('Failed to get gateways') }
	assert nodes.len > 0
}

fn test_get_gateways_main() {
	mut gp := get(.main, cache)!
	nodes := gp.get_gateways() or { panic('Failed to get gateways') }
	assert nodes.len > 0
}
|
||||
|
||||
// live tests: fetching twins must return a non-empty first page on every net

fn test_get_twins_qa() {
	mut gp := get(.qa, cache)!
	twins := gp.get_twins() or { panic('Failed to get twins') }
	assert twins.len > 0
}

fn test_get_twins_dev() {
	mut gp := get(.dev, cache)!
	twins := gp.get_twins() or { panic('Failed to get twins') }
	assert twins.len > 0
}

fn test_get_twins_test() {
	mut gp := get(.test, cache)!
	twins := gp.get_twins() or { panic('Failed to get twins') }
	assert twins.len > 0
}

fn test_get_twins_main() {
	mut gp := get(.main, cache)!
	twins := gp.get_twins() or { panic('Failed to get twins') }
	assert twins.len > 0
}

// live tests: grid stats must report at least one node on every net

fn test_get_stats_qa() {
	mut gp := get(.qa, cache)!
	stats := gp.get_stats() or { panic('Failed to get stats') }
	assert stats.nodes > 0
}

fn test_get_stats_dev() {
	mut gp := get(.dev, cache)!
	stats := gp.get_stats() or { panic('Failed to get stats') }
	assert stats.nodes > 0
}

fn test_get_stats_test() {
	mut gp := get(.test, cache)!
	stats := gp.get_stats() or { panic('Failed to get stats') }
	assert stats.nodes > 0
}
||||
|
||||
// test_get_stats_main checks stats retrieval against mainnet.
fn test_get_stats_main() {
	// fixed: was get(.test, cache) — a copy/paste slip that made this test
	// exercise testnet instead of mainnet
	mut gp := get(.main, cache)!
	stats := gp.get_stats() or { panic('Failed to get stats') }
	assert stats.nodes > 0
}
|
||||
|
||||
fn test_get_contracts_qa() {
|
||||
mut gp := get(.qa, cache)!
|
||||
contracts := gp.get_contracts() or { panic('Failed to get contracts') }
|
||||
assert contracts.len > 0
|
||||
}
|
||||
|
||||
fn test_get_contracts_dev() {
|
||||
mut gp := get(.dev, cache)!
|
||||
contracts := gp.get_contracts() or { panic('Failed to get contracts') }
|
||||
assert contracts.len > 0
|
||||
}
|
||||
|
||||
fn test_get_contracts_test() {
|
||||
mut gp := get(.test, cache)!
|
||||
contracts := gp.get_contracts() or { panic('Failed to get contracts') }
|
||||
assert contracts.len > 0
|
||||
}
|
||||
|
||||
fn test_get_contracts_main() {
|
||||
mut gp := get(.main, cache)!
|
||||
contracts := gp.get_contracts() or { panic('Failed to get contracts') }
|
||||
assert contracts.len > 0
|
||||
}
|
||||
|
||||
fn test_get_farms_qa() {
|
||||
mut gp := get(.qa, cache)!
|
||||
farms := gp.get_farms() or { panic('Failed to get farms') }
|
||||
assert farms.len > 0
|
||||
}
|
||||
|
||||
fn test_get_farms_dev() {
|
||||
mut gp := get(.dev, cache)!
|
||||
farms := gp.get_farms() or { panic('Failed to get farms') }
|
||||
assert farms.len > 0
|
||||
}
|
||||
|
||||
fn test_get_farms_test() {
|
||||
mut gp := get(.test, cache)!
|
||||
farms := gp.get_farms() or { panic('Failed to get farms') }
|
||||
assert farms.len > 0
|
||||
}
|
||||
|
||||
fn test_get_farms_main() {
|
||||
mut gp := get(.main, cache)!
|
||||
farms := gp.get_farms() or { panic('Failed to get farms') }
|
||||
assert farms.len > 0
|
||||
}
|
||||
|
||||
// offline unit tests against the fixed fixtures declared above

// SecondUnit conversions: 86400 s is one day
fn test_elapsed_seconds_conversion() {
	assert dummy_node.uptime.to_minutes() == 1440
	assert dummy_node.uptime.to_hours() == 24
	assert dummy_node.uptime.to_days() == 1
}

// UnixTime converts to the matching time.Time value
fn test_timestamp_conversion() {
	assert dummy_node.created.to_time() == time.unix(1654848126)
	assert dummy_node.updated_at.to_time() == time.unix(1654848132)
}

// ByteUnit conversions between MB/GB/TB
fn test_storage_unit_conversion() {
	assert dummy_node.capacity.total_resources.mru.to_megabytes() == 5178.437632
	assert dummy_node.capacity.total_resources.mru.to_gigabytes() == 5.178437632
	assert dummy_node.capacity.total_resources.mru.to_terabytes() == 0.005178437632
}

// DropTFTUnit conversions between TFT/mTFT/uTFT
fn test_tft_conversion() {
	assert dummy_contract_billing.amount_billed.to_tft() == 1
	assert dummy_contract_billing.amount_billed.to_mtft() == 1000
	assert dummy_contract_billing.amount_billed.to_utft() == 1000000
}

// with zero used resources, available capacity equals total capacity
fn test_calc_available_resources_on_node() {
	// dummy node was created with 0 used resources
	assert dummy_node.calc_available_resources().mru == dummy_node.capacity.total_resources.mru
	assert dummy_node.calc_available_resources().hru == dummy_node.capacity.total_resources.hru
	assert dummy_node.calc_available_resources().sru == dummy_node.capacity.total_resources.sru
	assert dummy_node.calc_available_resources().cru == dummy_node.capacity.total_resources.cru
}
|
||||
52
lib/threefold/grid3/gridproxy/model/contract.v
Normal file
52
lib/threefold/grid3/gridproxy/model/contract.v
Normal file
@@ -0,0 +1,52 @@
|
||||
module model
|
||||
|
||||
// ContractBilling is a single billing event of a contract as reported by the proxy.
pub struct ContractBilling {
pub:
	// billed amount — presumably in 1e-7 TFT units (see DropTFTUnit); TODO confirm
	amount_billed     DropTFTUnit @[json: amountBilled]
	discount_received string      @[json: discountReceived]
	timestamp         UnixTime    @[json: timestamp]
}
|
||||
|
||||
// NodeContractDetails holds the node-specific payload of a 'node' contract.
pub struct NodeContractDetails {
pub:
	node_id              u64    @[json: nodeId]
	deployment_data      string @[json: deployment_data]
	deployment_hash      string @[json: deployment_hash]
	number_of_public_ips u64    @[json: number_of_public_ips]
}
|
||||
|
||||
// Contract is a grid contract as returned by the /contracts endpoint.
pub struct Contract {
pub:
	contract_id u64
	twin_id     u64
	// contract state, e.g. 'Created' or 'Deleted'
	state      UnixTime? // placeholder comment removed below; see field list
	created_at UnixTime @[json: created_at]
}
|
||||
|
||||
// Bill is one billing record of a contract from the /contracts/{id}/bills endpoint.
pub struct Bill {
pub:
	// billed amount in raw proxy units (u64, unlike ContractBilling's DropTFTUnit)
	amount_billed     u64      @[json: amountBilled]
	timestamp         UnixTime @[json: timestamp]
	discount_received string   @[json: discountReceived]
}
|
||||
|
||||
// total_billed returns the total amount billed for the contract.
|
||||
//
|
||||
// returns: `DropTFTUnit`
|
||||
// pub fn (c &Contract) total_billed() DropTFTUnit {
|
||||
// if c.billing.len == 0 {
|
||||
// return 0
|
||||
// }
|
||||
// mut total := u64(0)
|
||||
// for b in c.billing {
|
||||
// total += b.amount_billed
|
||||
// }
|
||||
// return DropTFTUnit(total)
|
||||
// }
|
||||
|
||||
// TODO: Implement Limit struct (size, page, retcount, randomize)
// and embed it in other structs like Contract to avoid duplicated code
|
||||
// TODO: check if RetCount is bool or string as swagger doc says
// TODO: check if Randomize can be used in the client and where; it is not documented in swagger
|
||||
22
lib/threefold/grid3/gridproxy/model/farm.v
Normal file
22
lib/threefold/grid3/gridproxy/model/farm.v
Normal file
@@ -0,0 +1,22 @@
|
||||
module model
|
||||
|
||||
// PublicIP is a public IP entry owned by a farm.
pub struct PublicIP {
pub:
	id          string
	ip          string
	farm_id     string @[json: farmId]
	// 0 when the IP is free, otherwise the reserving contract id
	contract_id int    @[json: contractId]
	gateway     string
}
|
||||
|
||||
// Farm is a grid farm as returned by the /farms endpoint.
pub struct Farm {
pub:
	name               string
	farm_id            u64        @[json: farmId]
	twin_id            u64        @[json: twinId]
	pricing_policy_id  u64        @[json: pricingPolicyId]
	certification_type string     @[json: certificationType]
	stellar_address    string     @[json: stellarAddress]
	dedicated          bool
	public_ips         []PublicIP @[json: publicIps]
}
|
||||
180
lib/threefold/grid3/gridproxy/model/filter.v
Normal file
180
lib/threefold/grid3/gridproxy/model/filter.v
Normal file
@@ -0,0 +1,180 @@
|
||||
module model
|
||||
|
||||
// FarmFilter is the query filter accepted by the /farms endpoint.
// All fields are optional; unset fields are omitted from the request.
@[params]
pub struct FarmFilter {
pub mut:
	page               ?u64
	size               ?u64
	ret_count          ?bool
	randomize          ?bool
	free_ips           ?u64
	total_ips          ?u64
	stellar_address    ?string
	pricing_policy_id  ?u64
	farm_id            ?u64
	twin_id            ?u64
	name               ?string
	name_contains      ?string
	certification_type ?string
	dedicated          ?bool
	country            ?string
	// node_* fields constrain at least one node belonging to the farm
	node_free_mru      ?u64
	node_free_hru      ?u64
	node_free_sru      ?u64
	node_status        ?string
	node_rented_by     ?u64
	node_available_for ?u64
	node_has_gpu       ?bool
	node_certified     ?bool
}
|
||||
|
||||
// serialize FarmFilter to a query-parameter map
pub fn (f FarmFilter) to_map() map[string]string {
	return to_map(f)
}
|
||||
|
||||
// ContractFilter is the query filter accepted by the /contracts endpoint.
// All fields are optional; unset fields are omitted from the request.
@[params]
pub struct ContractFilter {
pub mut:
	page                 ?u64
	size                 ?u64
	ret_count            ?bool
	randomize            ?bool
	contract_id          ?u64
	twin_id              ?u64
	node_id              ?u64
	contract_type        ?string
	state                ?string
	name                 ?string
	number_of_public_ips ?u64
	deployment_data      ?string
	deployment_hash      ?string
}
|
||||
|
||||
// serialize ContractFilter to a query-parameter map
pub fn (f ContractFilter) to_map() map[string]string {
	return to_map(f)
}
|
||||
|
||||
// NodeFilter describes every query parameter accepted by the gridproxy
// /nodes endpoint. All fields are optional; only the set ones are sent
// (see to_map / encode_val).
@[params]
pub struct NodeFilter {
pub mut:
	// pagination / response shaping
	page      ?u64
	size      ?u64
	ret_count ?bool
	randomize ?bool
	// free capacity requirements (bytes / counts)
	free_mru  ?u64
	free_sru  ?u64
	free_hru  ?u64
	free_ips  ?u64
	// total capacity requirements
	total_mru ?u64
	total_sru ?u64
	total_hru ?u64
	total_cru ?u64
	// location
	city             ?string
	city_contains    ?string
	country          ?string
	country_contains ?string
	farm_name          ?string
	farm_name_contains ?string
	// network capabilities
	ipv4   ?bool
	ipv6   ?bool
	domain ?bool
	status    ?string
	dedicated ?bool
	healthy   ?bool
	rentable      ?bool
	rented_by     ?u64
	rented        ?bool
	available_for ?u64
	farm_ids []u64
	node_ids []u64
	node_id  ?u32
	twin_id  ?u64
	certification_type ?string
	// GPU filtering
	has_gpu         ?bool
	has_ipv6        ?bool
	gpu_device_id   ?string
	gpu_device_name ?string
	gpu_vendor_id   ?string
	gpu_vendor_name ?string
	gpu_available   ?bool
	features []string
}
|
||||
|
||||
// to_map serializes the node filter into the query-parameter map
// consumed by the gridproxy client; unset options are skipped.
pub fn (f NodeFilter) to_map() map[string]string {
	params := to_map(f)
	return params
}
|
||||
|
||||
pub enum NodeStatus {
|
||||
all
|
||||
online
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct ResourceFilter {
|
||||
pub mut:
|
||||
free_mru_gb u64
|
||||
free_sru_gb u64
|
||||
free_hru_gb u64
|
||||
free_cpu u64
|
||||
free_ips u64
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct StatFilter {
|
||||
pub mut:
|
||||
status NodeStatus
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct TwinFilter {
|
||||
pub mut:
|
||||
page ?u64
|
||||
size ?u64
|
||||
ret_count ?bool
|
||||
randomize ?bool
|
||||
twin_id ?u64
|
||||
account_id ?string
|
||||
relay ?string
|
||||
public_key ?string
|
||||
}
|
||||
|
||||
// to_map serializes the twin filter into gridproxy query parameters;
// optional fields that are `none` are left out.
pub fn (f TwinFilter) to_map() map[string]string {
	params := to_map(f)
	return params
}
|
||||
|
||||
// to_map walks T's fields at compile time and stringifies each one into
// a query-parameter map. Optional fields are only emitted when set;
// non-optional fields (plain arrays such as farm_ids) are always emitted.
pub fn to_map[T](t T) map[string]string {
	mut m := map[string]string{}
	$for field in T.fields {
		value := t.$(field.name)
		$if value is $option {
			opt := t.$(field.name)
			if opt != none {
				// NOTE: for some reason when passing the value to another function
				// it is not recognized as an Option and is dereferenced
				encode_val(field.name, value, mut m)
			}
		}

		$if value !is $option {
			encode_val(field.name, value, mut m)
		}
	}
	return m
}
|
||||
|
||||
// encode_val stringifies a single filter field into the query map.
// Arrays become a comma-separated list; any other type uses its
// default .str() rendering.
fn encode_val[T](field_name string, val T, mut m map[string]string) {
	$if T is $array {
		mut arr := []string{}
		for a in val {
			arr << a.str()
		}

		m[field_name] = arr.join(',')
	} $else {
		m[field_name] = val.str()
	}
}
|
||||
90
lib/threefold/grid3/gridproxy/model/iterators.v
Normal file
90
lib/threefold/grid3/gridproxy/model/iterators.v
Normal file
@@ -0,0 +1,90 @@
|
||||
module model
|
||||
|
||||
pub type NodeGetter = fn (NodeFilter) ![]Node
|
||||
|
||||
pub struct NodeIterator {
|
||||
pub mut:
|
||||
filter NodeFilter
|
||||
pub:
|
||||
get_func NodeGetter @[required]
|
||||
}
|
||||
|
||||
// next advances the iterator one page (pages are 1-based; an unset page
// means "start at page 1") and fetches it. Returns none when the fetch
// fails or the page is empty, ending iteration.
pub fn (mut i NodeIterator) next() ?[]Node {
	current := i.filter.page or { u64(0) }
	i.filter.page = current + 1

	page := i.get_func(i.filter) or { return none }
	if page.len == 0 {
		return none
	}
	return page
}
|
||||
|
||||
pub type FarmGetter = fn (FarmFilter) ![]Farm
|
||||
|
||||
pub struct FarmIterator {
|
||||
pub mut:
|
||||
filter FarmFilter
|
||||
pub:
|
||||
get_func FarmGetter @[required]
|
||||
}
|
||||
|
||||
// next advances to the following result page (1-based; unset means
// page 1) and fetches it, returning none on error or an empty page.
pub fn (mut i FarmIterator) next() ?[]Farm {
	current := i.filter.page or { u64(0) }
	i.filter.page = current + 1

	page := i.get_func(i.filter) or { return none }
	if page.len == 0 {
		return none
	}
	return page
}
|
||||
|
||||
pub type ContractGetter = fn (ContractFilter) ![]Contract
|
||||
|
||||
pub struct ContractIterator {
|
||||
pub mut:
|
||||
filter ContractFilter
|
||||
pub:
|
||||
get_func ContractGetter @[required]
|
||||
}
|
||||
|
||||
// next advances to the following result page (1-based; unset means
// page 1) and fetches it, returning none on error or an empty page.
pub fn (mut i ContractIterator) next() ?[]Contract {
	current := i.filter.page or { u64(0) }
	i.filter.page = current + 1

	page := i.get_func(i.filter) or { return none }
	if page.len == 0 {
		return none
	}
	return page
}
|
||||
|
||||
pub type TwinGetter = fn (TwinFilter) ![]Twin
|
||||
|
||||
pub struct TwinIterator {
|
||||
pub mut:
|
||||
filter TwinFilter
|
||||
pub:
|
||||
get_func TwinGetter @[required]
|
||||
}
|
||||
|
||||
// next advances to the following result page (1-based; unset means
// page 1) and fetches it, returning none on error or an empty page.
pub fn (mut i TwinIterator) next() ?[]Twin {
	current := i.filter.page or { u64(0) }
	i.filter.page = current + 1

	page := i.get_func(i.filter) or { return none }
	if page.len == 0 {
		return none
	}
	return page
}
|
||||
104
lib/threefold/grid3/gridproxy/model/model.v
Normal file
104
lib/threefold/grid3/gridproxy/model/model.v
Normal file
@@ -0,0 +1,104 @@
|
||||
module model
|
||||
|
||||
import time { Time }
|
||||
import math { floor, pow10 }
|
||||
|
||||
// ByteUnit is a size in bytes as reported by the gridproxy API.
type ByteUnit = u64

// to_megabytes converts to decimal megabytes (1 MB = 1e6 bytes).
pub fn (u ByteUnit) to_megabytes() f64 {
	return f64(u) / 1e+6
}

// to_gigabytes converts to decimal gigabytes (1 GB = 1e9 bytes).
pub fn (u ByteUnit) to_gigabytes() f64 {
	return f64(u) / 1e+9
}

// to_terabytes converts to decimal terabytes (1 TB = 1e12 bytes).
pub fn (u ByteUnit) to_terabytes() f64 {
	return f64(u) / 1e+12
}

// str renders the size with the largest decimal unit that fits,
// falling back to a plain byte count below 1 MB.
pub fn (u ByteUnit) str() string {
	if u >= 1e+12 {
		return '${u.to_terabytes():.2} TB'
	} else if u >= 1e+9 {
		return '${u.to_gigabytes():.2} GB'
	} else if u >= 1e+6 {
		return '${u.to_megabytes():.2} MB'
	}
	return '${u64(u)} Bytes'
}
|
||||
|
||||
// SecondUnit represents a duration in seconds
type SecondUnit = u64

// to_minutes converts the duration to fractional minutes.
pub fn (u SecondUnit) to_minutes() f64 {
	return f64(u) / 60
}

// to_hours converts the duration to fractional hours.
pub fn (u SecondUnit) to_hours() f64 {
	return f64(u) / (60 * 60)
}

// to_days converts the duration to fractional days.
pub fn (u SecondUnit) to_days() f64 {
	return f64(u) / (60 * 60 * 24)
}

// str renders the duration as 'D days H hours M minutes S seconds',
// omitting zero components.
// NOTE(review): a duration of 0 renders as the empty string, and the
// result keeps a trailing space when the seconds component is 0 —
// confirm callers are fine with that before changing.
pub fn (u SecondUnit) str() string {
	sec_num := u64(u)
	// integer division then floor/fmod keeps each component in range
	d := floor(sec_num / 86400)
	h := math.fmod(floor(sec_num / 3600), 24)
	m := math.fmod(floor(sec_num / 60), 60)
	s := sec_num % 60
	mut str := ''
	if d > 0 {
		str += '${d} days '
	}
	if h > 0 {
		str += '${h} hours '
	}
	if m > 0 {
		str += '${m} minutes '
	}
	if s > 0 {
		str += '${s} seconds'
	}
	return str
}
|
||||
|
||||
// UnixTime represents time in seconds since epoch (timestamp)
type UnixTime = u64

// to_time converts the timestamp into a time.Time value.
pub fn (t UnixTime) to_time() Time {
	return time.unix(t)
}

// str renders the timestamp in the machine's local timezone.
pub fn (t UnixTime) str() string {
	return '${t.to_time().local()}'
}
|
||||
|
||||
// DropTFTUnit is the smallest unit used to calculate billing and the one
// natively fetched from the API.
// 1 TFT = 10_000_000 drops = 1_000 mTFT = 1_000_000 uTFT
type DropTFTUnit = u64

// to_tft converts drops to whole TFT.
pub fn (t DropTFTUnit) to_tft() f64 {
	return f64(t) / pow10(7) // 1 TFT = 10_000_000 drops
}

// to_mtft converts drops to milliTFT.
pub fn (t DropTFTUnit) to_mtft() f64 {
	return f64(t) / pow10(4) // 1 mTFT (milliTFT) = 10_000 drops
}

// to_utft converts drops to microTFT.
pub fn (t DropTFTUnit) to_utft() f64 {
	return f64(t) / 10.0 // 1 uTFT (microTFT) = 10 drops
}

// str renders the amount with the largest unit that fits, falling back
// to raw drops below 1 uTFT.
pub fn (u DropTFTUnit) str() string {
	if u >= pow10(7) {
		return '${u.to_tft():.3} TFT'
	} else if u >= pow10(4) {
		return '${u.to_mtft():.3} mTFT'
	} else if u >= 10 {
		return '${u.to_utft():.3} uTFT'
	}
	return '${u64(u)} dTFT' // short for dropTFT (1 TFT = 10_000_000 drops); placeholder until there is an official name
}
|
||||
128
lib/threefold/grid3/gridproxy/model/node.v
Normal file
128
lib/threefold/grid3/gridproxy/model/node.v
Normal file
@@ -0,0 +1,128 @@
|
||||
module model
|
||||
|
||||
pub struct NodeResources {
|
||||
pub:
|
||||
cru u64
|
||||
mru ByteUnit
|
||||
sru ByteUnit
|
||||
hru ByteUnit
|
||||
}
|
||||
|
||||
pub struct NodeCapacity {
|
||||
pub:
|
||||
total_resources NodeResources
|
||||
used_resources NodeResources
|
||||
}
|
||||
|
||||
pub struct NodeLocation {
|
||||
pub:
|
||||
country string
|
||||
city string
|
||||
}
|
||||
|
||||
pub struct PublicConfig {
|
||||
pub:
|
||||
domain string
|
||||
gw4 string
|
||||
gw6 string
|
||||
ipv4 string
|
||||
ipv6 string
|
||||
}
|
||||
|
||||
// This is ugly, but it works. We need two models for `Node` and reimplement
// the same fields except for the capacity structure.
// It's a hack to make the json parser work as the gridproxy API has some
// inconsistencies.
// See for more context: https://github.com/threefoldtech/tfgridclient_proxy/issues/164
pub struct Node_ {
pub:
	id                string
	node_id           u64 @[json: nodeId]
	farm_id           u64 @[json: farmId]
	twin_id           u64 @[json: twinId]
	grid_version      u64 @[json: gridVersion]
	uptime            SecondUnit
	created           UnixTime @[json: created]
	farming_policy_id u64 @[json: farmingPolicyId]
	updated_at        UnixTime @[json: updatedAt]
	// flat resource fields — Node nests these under `capacity`
	total_resources   NodeResources
	used_resources    NodeResources
	location          NodeLocation
	public_config     PublicConfig @[json: publicConfig]
	certification     string @[json: certificationType]
	status            string
	dedicated         bool
	healthy           bool
	rent_contract_id  u64 @[json: rentContractId]
	rented_by_twin_id u64 @[json: rentedByTwinId]
}
|
||||
|
||||
pub struct Node {
|
||||
pub:
|
||||
id string
|
||||
node_id u64 @[json: nodeId]
|
||||
farm_id u64 @[json: farmId]
|
||||
twin_id u64 @[json: twinId]
|
||||
grid_version u64 @[json: gridVersion]
|
||||
uptime SecondUnit
|
||||
created UnixTime @[json: created]
|
||||
farming_policy_id u64 @[json: farmingPolicyId]
|
||||
updated_at UnixTime @[json: updatedAt]
|
||||
capacity NodeCapacity
|
||||
location NodeLocation
|
||||
public_config PublicConfig @[json: publicConfig]
|
||||
certification string @[json: certificationType]
|
||||
status string
|
||||
dedicated bool
|
||||
healthy bool
|
||||
rent_contract_id u64 @[json: rentContractId]
|
||||
rented_by_twin_id u64 @[json: rentedByTwinId]
|
||||
}
|
||||
|
||||
// saturating_sub returns a - b, or 0 when b exceeds a.
fn saturating_sub(a u64, b u64) u64 {
	if b > a {
		return 0
	}
	return a - b
}

// calc_available_resources subtracts the used from the total capacity.
// Subtractions are clamped at zero: the gridproxy API can report
// used > total (see the Node_/Node inconsistency note above this file's
// models), and a plain u64 subtraction would wrap around into huge
// bogus "available" values.
fn calc_available_resources(total_resources NodeResources, used_resources NodeResources) NodeResources {
	return NodeResources{
		cru: saturating_sub(total_resources.cru, used_resources.cru)
		mru: ByteUnit(saturating_sub(u64(total_resources.mru), u64(used_resources.mru)))
		sru: ByteUnit(saturating_sub(u64(total_resources.sru), u64(used_resources.sru)))
		hru: ByteUnit(saturating_sub(u64(total_resources.hru), u64(used_resources.hru)))
	}
}
|
||||
|
||||
// calc_available_resources calculates the reservable capacity of the node
// (total minus used, per resource).
//
// Returns: `NodeResources`
pub fn (n &Node) calc_available_resources() NodeResources {
	total_resources := n.capacity.total_resources
	used_resources := n.capacity.used_resources
	return calc_available_resources(total_resources, used_resources)
}
|
||||
|
||||
// with_nested_capacity converts a flat Node_ (as decoded from the
// inconsistent gridproxy payload) into the canonical Node with its
// resources nested under `capacity`, so clients only ever see one model.
pub fn (n &Node_) with_nested_capacity() Node {
	return Node{
		id: n.id
		node_id: n.node_id
		farm_id: n.farm_id
		twin_id: n.twin_id
		grid_version: n.grid_version
		uptime: n.uptime
		created: n.created
		farming_policy_id: n.farming_policy_id
		updated_at: n.updated_at
		// the only structural difference between Node_ and Node
		capacity: NodeCapacity{
			total_resources: n.total_resources
			used_resources: n.used_resources
		}
		location: n.location
		public_config: n.public_config
		certification: n.certification
		status: n.status
		dedicated: n.dedicated
		healthy: n.healthy
		rent_contract_id: n.rent_contract_id
		rented_by_twin_id: n.rented_by_twin_id
	}
}
|
||||
|
||||
// is_online reports whether the node's status string is 'up';
// any other status (down, standby, ...) counts as offline.
pub fn (n &Node) is_online() bool {
	return match n.status {
		'up' { true }
		else { false }
	}
}
|
||||
44
lib/threefold/grid3/gridproxy/model/stats.v
Normal file
44
lib/threefold/grid3/gridproxy/model/stats.v
Normal file
@@ -0,0 +1,44 @@
|
||||
module model
|
||||
|
||||
pub struct GridStat {
|
||||
pub:
|
||||
nodes u64
|
||||
farms u64
|
||||
countries u64
|
||||
total_cru u64 @[json: totalCru]
|
||||
total_sru ByteUnit @[json: totalSru]
|
||||
total_mru ByteUnit @[json: totalMru]
|
||||
total_hru ByteUnit @[json: totalHru]
|
||||
public_ips u64 @[json: publicIps]
|
||||
access_nodes u64 @[json: accessNodes]
|
||||
gateways u64
|
||||
twins u64
|
||||
contracts u64
|
||||
nodes_distribution map[string]u64 @[json: nodesDistribution]
|
||||
}
|
||||
|
||||
pub struct NodeStatisticsResources {
|
||||
pub:
|
||||
cru u64
|
||||
hru ByteUnit
|
||||
ipv4u u64
|
||||
mru ByteUnit
|
||||
sru ByteUnit
|
||||
}
|
||||
|
||||
pub struct NodeStatisticsUsers {
|
||||
pub:
|
||||
deployments u64
|
||||
workloads u64
|
||||
}
|
||||
|
||||
pub struct NodeStats {
|
||||
pub:
|
||||
system NodeStatisticsResources
|
||||
|
||||
total NodeStatisticsResources
|
||||
|
||||
used NodeStatisticsResources
|
||||
|
||||
users NodeStatisticsUsers
|
||||
}
|
||||
8
lib/threefold/grid3/gridproxy/model/twin.v
Normal file
8
lib/threefold/grid3/gridproxy/model/twin.v
Normal file
@@ -0,0 +1,8 @@
|
||||
module model
|
||||
|
||||
pub struct Twin {
|
||||
pub:
|
||||
twin_id u64 @[json: twinId]
|
||||
account_id string @[json: accountId]
|
||||
ip string
|
||||
}
|
||||
16
lib/threefold/grid3/models/computecapacity.v
Normal file
16
lib/threefold/grid3/models/computecapacity.v
Normal file
@@ -0,0 +1,16 @@
|
||||
module models
|
||||
|
||||
pub struct ComputeCapacity {
|
||||
pub mut:
|
||||
// cpu cores
|
||||
cpu u8
|
||||
// memory in bytes, minimal 100 MB
|
||||
memory i64
|
||||
}
|
||||
|
||||
// challenge returns the canonical string for this capacity used when
// hashing a workload for signing: cpu count directly followed by the
// memory byte count, no separators.
pub fn (mut c ComputeCapacity) challenge() string {
	return '${c.cpu}${c.memory}'
}
|
||||
188
lib/threefold/grid3/models/deployment.v
Normal file
188
lib/threefold/grid3/models/deployment.v
Normal file
@@ -0,0 +1,188 @@
|
||||
module models
|
||||
|
||||
import crypto.md5
|
||||
import json
|
||||
|
||||
pub struct SignatureRequest {
|
||||
pub mut:
|
||||
// unique id as used in TFGrid DB
|
||||
twin_id u32
|
||||
// if put on required then this twin_id needs to sign
|
||||
required bool
|
||||
// signing weight
|
||||
weight int
|
||||
}
|
||||
|
||||
// challenge computes the canonical string for this signature request:
// twin_id, required flag and weight concatenated without separators.
pub fn (request SignatureRequest) challenge() string {
	return '${request.twin_id}${request.required}${request.weight}'
}
|
||||
|
||||
pub struct Signature {
|
||||
pub mut:
|
||||
// unique id as used in TFGrid DB
|
||||
twin_id u32
|
||||
// signature (done with private key of the twin_id)
|
||||
signature string
|
||||
signature_type string
|
||||
}
|
||||
|
||||
pub struct SignatureRequirement {
|
||||
pub mut:
|
||||
// the requests which can allow to get to required quorum
|
||||
requests []SignatureRequest
|
||||
// minimal weight which needs to be achieved to let this workload become valid
|
||||
weight_required int
|
||||
signatures []Signature
|
||||
signature_style string
|
||||
}
|
||||
|
||||
// challenge computes the canonical string for the whole requirement:
// every request's challenge in order, then the required weight, then
// the signature style — all concatenated without separators.
pub fn (requirement SignatureRequirement) challenge() string {
	mut buf := ''
	for request in requirement.requests {
		buf += request.challenge()
	}
	buf += '${requirement.weight_required}'
	buf += '${requirement.signature_style}'
	return buf
}
|
||||
|
||||
// A deployment is given to each Zero-OS that needs to deploy something;
// the zero-os'es will only take out what is relevant for them.
// If the signature is not done on the main Deployment, nothing will happen.
@[heap]
pub struct Deployment {
pub mut:
	// increments for each new iteration of this model;
	// signatures need to be re-achieved when the version goes up
	version u32 = 1
	// the twin who is responsible for this deployment
	twin_id u32
	// each deployment has a unique id (in relation to originator)
	contract_id u64
	// when the full workload will stop working
	// default, 0 means no expiration
	expiration i64
	metadata    string
	description string
	// list of all workloads
	workloads []Workload

	signature_requirement SignatureRequirement
}
|
||||
|
||||
@[params]
|
||||
pub struct DeploymentArgs {
|
||||
pub:
|
||||
version ?u32
|
||||
twin_id u32
|
||||
contract_id u64
|
||||
expiration ?i64
|
||||
metadata DeploymentData
|
||||
description ?string
|
||||
workloads []Workload
|
||||
signature_requirement SignatureRequirement
|
||||
}
|
||||
|
||||
// challenge builds the canonical string that is hashed and signed for this
// deployment. Field order is part of the signing contract: version,
// twin_id, metadata, description, expiration, each workload's challenge
// in order, then the signature requirement's challenge.
pub fn (deployment Deployment) challenge() string {
	// The payload escapes '"' as '\"' to stay valid JSON; undo that here,
	// otherwise the challenge would be computed over the escaped form and
	// the signature would not validate.
	metadata := deployment.metadata.replace('\\"', '"')
	mut out := []string{}
	out << '${deployment.version}'
	out << '${deployment.twin_id}'
	out << '${metadata}'
	out << '${deployment.description}'
	out << '${deployment.expiration}'
	for workload in deployment.workloads {
		out << workload.challenge()
	}
	out << deployment.signature_requirement.challenge()
	ret := out.join('')
	return ret
}
|
||||
|
||||
// challenge_hash computes the md5 digest of the challenge string signed
// by the user; used for validation.
pub fn (deployment Deployment) challenge_hash() []u8 {
	payload := deployment.challenge().bytes()
	return md5.sum(payload)
}
|
||||
|
||||
// add_signature records a twin's signature on the deployment. An existing
// entry for the twin is updated in place; otherwise a new sr25519
// signature entry is appended.
pub fn (mut d Deployment) add_signature(twin u32, signature string) {
	for mut existing in d.signature_requirement.signatures {
		if existing.twin_id == twin {
			existing.signature = signature
			return
		}
	}

	// first signature from this twin
	d.signature_requirement.signatures << Signature{
		twin_id: twin
		signature: signature
		signature_type: 'sr25519'
	}
}
|
||||
|
||||
// json_encode serializes the deployment by hand instead of via json.encode,
// so that each workload's pre-escaped `data`/`metadata` strings are embedded
// verbatim (metadata is emitted raw inside quotes — it is expected to be
// already escaped, see DeploymentData.json_encode).
pub fn (mut d Deployment) json_encode() string {
	mut encoded_workloads := []string{}
	for mut w in d.workloads {
		encoded_workloads << w.json_encode()
	}

	workloads := '[${encoded_workloads.join(',')}]'
	return '{"version":${d.version},"twin_id":${d.twin_id},"contract_id":${d.contract_id},"expiration":${d.expiration},"metadata":"${d.metadata}","description":"${d.description}","workloads":${workloads},"signature_requirement":${json.encode(d.signature_requirement)}}'
}
|
||||
|
||||
// count_public_ips returns how many public-IP workloads the deployment
// contains (needed when creating the node contract).
pub fn (dl Deployment) count_public_ips() u8 {
	return u8(dl.workloads.filter(it.type_ == workload_types.public_ip).len)
}
|
||||
|
||||
// new_deployment builds a Deployment from the optional-field args,
// serializing the structured metadata into its escaped JSON string form.
// NOTE(review): an unset version defaults to 0 here while the Deployment
// struct declares a default of 1 — confirm which is intended.
pub fn new_deployment(args DeploymentArgs) Deployment {
	return Deployment{
		version: args.version or { 0 }
		twin_id: args.twin_id
		contract_id: args.contract_id
		expiration: args.expiration or { 0 }
		metadata: args.metadata.json_encode()
		description: args.description or { '' }
		workloads: args.workloads
		signature_requirement: args.signature_requirement
	}
}
|
||||
|
||||
pub struct DeploymentData {
|
||||
pub:
|
||||
type_ string @[json: 'type']
|
||||
name string
|
||||
project_name string @[json: 'projectName']
|
||||
}
|
||||
|
||||
// json_encode renders the metadata as a JSON object whose quotes are
// pre-escaped (\"), because the result is embedded verbatim inside the
// hand-built deployment JSON string (see Deployment.json_encode).
pub fn (data DeploymentData) json_encode() string {
	return "{\\\"type\\\":\\\"${data.type_}\\\",\\\"name\\\":\\\"${data.name}\\\",\\\"projectName\\\":\\\"${data.project_name}\\\"}"
}
|
||||
|
||||
// add_metadata sets the deployment's metadata string from a workload
// type and project name.
// NOTE(review): `name` is set to the bare project name while
// `project_name` is set to '<type>/<name>' (per the dashboard-listing
// comment) — confirm this swap-looking assignment is intentional.
pub fn (mut dl Deployment) add_metadata(type_ string, project_name string) {
	mut data := DeploymentData{
		type_: type_
		name: project_name
		project_name: '${type_}/${project_name}' // To be listed in the dashboard.
	}
	dl.metadata = data.json_encode()
}
|
||||
|
||||
// parse_metadata decodes the metadata string back into structured
// DeploymentData, propagating any JSON decode error.
pub fn (mut d Deployment) parse_metadata() !DeploymentData {
	data := json.decode(DeploymentData, d.metadata)!
	return data
}
|
||||
35
lib/threefold/grid3/models/gw_fqdn.v
Normal file
35
lib/threefold/grid3/models/gw_fqdn.v
Normal file
@@ -0,0 +1,35 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
|
||||
pub struct GatewayFQDNProxy {
|
||||
pub:
|
||||
tls_passthrough bool
|
||||
backends []string // The backends of the gateway proxy. must be in the format ip:port if tls_passthrough is set, otherwise the format should be http://ip[:port]
|
||||
network ?string // Network name to join, if backend IP is private.
|
||||
fqdn string // The fully qualified domain name of the deployed workload.
|
||||
}
|
||||
|
||||
// challenge returns the canonical signing string for this gateway:
// fqdn, tls flag, each backend in order, then the network name
// (empty when unset), all concatenated without separators.
pub fn (g GatewayFQDNProxy) challenge() string {
	mut parts := [g.fqdn, '${g.tls_passthrough}']
	parts << g.backends
	parts << g.network or { '' }

	return parts.join('')
}
|
||||
|
||||
pub fn (g GatewayFQDNProxy) to_workload(args WorkloadArgs) Workload {
|
||||
return Workload{
|
||||
version: args.version or { 0 }
|
||||
name: args.name
|
||||
type_: workload_types.gateway_fqdn
|
||||
data: json.encode(g)
|
||||
metadata: args.metadata or { '' }
|
||||
description: args.description or { '' }
|
||||
result: args.result or { WorkloadResult{} }
|
||||
}
|
||||
}
|
||||
41
lib/threefold/grid3/models/gw_name.v
Normal file
41
lib/threefold/grid3/models/gw_name.v
Normal file
@@ -0,0 +1,41 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
|
||||
pub struct GatewayNameProxy {
|
||||
pub:
|
||||
tls_passthrough bool
|
||||
backends []string // The backends of the gateway proxy. must be in the format ip:port if tls_passthrough is set, otherwise the format should be http://ip[:port]
|
||||
network ?string // Network name to join, if backend IP is private.
|
||||
name string // Domain prefix. The fqdn will be <name>.<gateway-domain>. This has to be unique within the deployment. Must contain only alphanumeric and underscore characters.
|
||||
}
|
||||
|
||||
// challenge returns the canonical signing string for this gateway:
// name, tls flag, each backend in order, then the network name
// (empty when unset), all concatenated without separators.
pub fn (g GatewayNameProxy) challenge() string {
	mut parts := [g.name, '${g.tls_passthrough}']
	parts << g.backends
	parts << g.network or { '' }

	return parts.join('')
}
|
||||
|
||||
// GatewayProxyResult results
|
||||
pub struct GatewayProxyResult {
|
||||
pub mut:
|
||||
fqdn string
|
||||
}
|
||||
|
||||
pub fn (g GatewayNameProxy) to_workload(args WorkloadArgs) Workload {
|
||||
return Workload{
|
||||
version: args.version or { 0 }
|
||||
name: args.name
|
||||
type_: workload_types.gateway_name
|
||||
data: json.encode(g)
|
||||
metadata: args.metadata or { '' }
|
||||
description: args.description or { '' }
|
||||
result: args.result or { WorkloadResult{} }
|
||||
}
|
||||
}
|
||||
37
lib/threefold/grid3/models/ip.v
Normal file
37
lib/threefold/grid3/models/ip.v
Normal file
@@ -0,0 +1,37 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
|
||||
pub struct PublicIP {
|
||||
pub:
|
||||
v4 bool
|
||||
v6 bool
|
||||
}
|
||||
|
||||
// challenge returns the canonical signing string for a public IP
// reservation: the textual v4 flag immediately followed by the v6 flag.
pub fn (p PublicIP) challenge() string {
	return '${p.v4}${p.v6}'
}
|
||||
|
||||
// PublicIPResult result returned by publicIP reservation
|
||||
struct PublicIPResult {
|
||||
pub mut:
|
||||
ip string
|
||||
ip6 string
|
||||
gateway string
|
||||
}
|
||||
|
||||
pub fn (p PublicIP) to_workload(args WorkloadArgs) Workload {
|
||||
return Workload{
|
||||
version: args.version or { 0 }
|
||||
name: args.name
|
||||
type_: workload_types.public_ip
|
||||
data: json.encode(p)
|
||||
metadata: args.metadata or { '' }
|
||||
description: args.description or { '' }
|
||||
result: args.result or { WorkloadResult{} }
|
||||
}
|
||||
}
|
||||
52
lib/threefold/grid3/models/qsfs.v
Normal file
52
lib/threefold/grid3/models/qsfs.v
Normal file
@@ -0,0 +1,52 @@
|
||||
module models
|
||||
|
||||
pub struct QuantumSafeFS {
|
||||
cache u64
|
||||
config QuantumSafeFSConfig
|
||||
}
|
||||
|
||||
// QuantumSafeFSConfig holds the erasure-coding, encryption and metadata
// settings of a QSFS workload.
pub struct QuantumSafeFSConfig {
	minimal_shards        u32
	expected_shards       u32
	redundant_groups      u32
	redundant_nodes       u32
	max_zdb_data_dir_size u32
	encryption            Encryption
	meta                  QuantumSafeMeta
	// NOTE(review): 'goups' looks like a typo for 'groups', but renaming
	// the field would change the JSON wire format — confirm against the
	// zos QSFS schema before fixing.
	goups                 []ZDBGroup
	compression           QuantumCompression
}
|
||||
|
||||
pub struct Encryption {
|
||||
algorithm string = 'AES' // configuration to use for the encryption stage. Currently only AES is supported.
|
||||
key []u8 // 64 long hex encoded encryption key (e.g. 0000000000000000000000000000000000000000000000000000000000000000).
|
||||
}
|
||||
|
||||
pub struct QuantumSafeMeta {
|
||||
type_ string = 'ZDB' @[json: 'type'] // configuration for the metadata store to use, currently only ZDB is supported.
|
||||
config QuantumSafeConfig
|
||||
}
|
||||
|
||||
pub struct ZDBGroup {
|
||||
backends []ZDBBackend
|
||||
}
|
||||
|
||||
pub struct ZDBBackend {
|
||||
address string // Address of backend ZDB (e.g. [300:a582:c60c:df75:f6da:8a92:d5ed:71ad]:9900 or 60.60.60.60:9900).
|
||||
namespace string // ZDB namespace.
|
||||
password string // Namespace password.
|
||||
}
|
||||
|
||||
pub struct QuantumCompression {
|
||||
algorithm string = 'snappy' // configuration to use for the compression stage. Currently only snappy is supported.
|
||||
}
|
||||
|
||||
pub struct QuantumSafeConfig {
|
||||
prefix string // Data stored on the remote metadata is prefixed with.
|
||||
encryption Encryption
|
||||
backends []ZDBBackend
|
||||
}
|
||||
|
||||
// challenge returns the signing string for a QSFS workload.
// NOTE(review): it is intentionally(?) empty, meaning QSFS data
// contributes nothing to the deployment hash — confirm this matches
// what zos computes on its side.
pub fn (qsfs QuantumSafeFS) challenge() string {
	return ''
}
|
||||
166
lib/threefold/grid3/models/workload.v
Normal file
166
lib/threefold/grid3/models/workload.v
Normal file
@@ -0,0 +1,166 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
import crypto.md5
|
||||
|
||||
pub struct WorkloadTypes {
|
||||
pub:
|
||||
zmachine string = 'zmachine'
|
||||
zmount string = 'zmount'
|
||||
network string = 'network'
|
||||
zdb string = 'zdb'
|
||||
public_ip string = 'ip'
|
||||
qsfs string = 'qsfs'
|
||||
gateway_name string = 'gateway-name-proxy'
|
||||
gateway_fqdn string = 'gateway-fqdn-proxy'
|
||||
zlogs string = 'zlogs'
|
||||
}
|
||||
|
||||
pub const workload_types = WorkloadTypes{}
|
||||
|
||||
type WorkloadType = string
|
||||
|
||||
pub struct ResultStates {
|
||||
pub:
|
||||
error ResultState = 'error'
|
||||
ok ResultState = 'ok'
|
||||
deleted ResultState = 'deleted'
|
||||
}
|
||||
|
||||
pub const result_states = ResultStates{}
|
||||
|
||||
type ResultState = string
|
||||
|
||||
// challenge decodes a workload's raw JSON `data` according to its type
// and returns that payload's own challenge string. Unknown types yield
// an empty string (so they contribute nothing to the hash); a decode
// failure propagates as an error.
pub fn challenge(data string, type_ string) !string {
	match type_ {
		workload_types.zmount {
			mut w := json.decode(Zmount, data)!
			return w.challenge()
		}
		workload_types.network {
			mut w := json.decode(Znet, data)!
			return w.challenge()
		}
		workload_types.zdb {
			mut w := json.decode(Zdb, data)!
			return w.challenge()
		}
		workload_types.zmachine {
			mut w := json.decode(Zmachine, data)!
			return w.challenge()
		}
		workload_types.qsfs {
			mut w := json.decode(QuantumSafeFS, data)!
			return w.challenge()
		}
		workload_types.public_ip {
			mut w := json.decode(PublicIP, data)!
			return w.challenge()
		}
		workload_types.gateway_name {
			mut w := json.decode(GatewayNameProxy, data)!
			return w.challenge()
		}
		workload_types.gateway_fqdn {
			mut w := json.decode(GatewayFQDNProxy, data)!
			return w.challenge()
		}
		workload_types.zlogs {
			mut w := json.decode(ZLogs, data)!
			return w.challenge()
		}
		else {
			return ''
		}
	}
}
|
||||
|
||||
pub enum Right {
|
||||
restart
|
||||
delete
|
||||
stats
|
||||
logs
|
||||
}
|
||||
|
||||
// Access Control Entry
|
||||
pub struct ACE {
|
||||
// the administrator twin id
|
||||
twin_ids []int
|
||||
rights []Right
|
||||
}
|
||||
|
||||
pub struct WorkloadResult {
|
||||
pub mut:
|
||||
created i64
|
||||
state ResultState
|
||||
error string
|
||||
data string @[raw] // also json.RawMessage
|
||||
message string
|
||||
}
|
||||
|
||||
// Workload is one unit of a deployment (vm, disk, network, ...);
// its type-specific payload travels as raw JSON in `data`.
pub struct Workload {
pub mut:
	version u32
	// unique name per Deployment
	name  string
	type_ WorkloadType @[json: 'type']
	// this should be something like json.RawMessage in golang
	data        string @[raw] // serialize({size: 10}) ---> "data": {size:10},
	metadata    string
	description string
	// list of Access Control Entries
	// what can an administrator do
	// not implemented in zos
	// acl []ACE

	result WorkloadResult
}
|
||||
|
||||
// challenge builds the canonical signing string for this workload:
// version, name, type, metadata, description, then the type-specific
// data challenge. Field order is part of the signing contract.
pub fn (workload Workload) challenge() string {
	mut out := []string{}
	out << '${workload.version}'
	out << '${workload.name}'
	out << '${workload.type_}'
	out << '${workload.metadata}'
	out << '${workload.description}'
	// if decoding the data fails, the partial challenge (without the data
	// component) is returned instead of an error
	out << challenge(workload.data, workload.type_) or { return out.join('') }

	return out.join('')
}
|
||||
|
||||
// challenge_hash returns the md5 digest of the workload's challenge
// string; used for signature validation.
pub fn (workload Workload) challenge_hash() []u8 {
	payload := workload.challenge().bytes()
	return md5.sum(payload)
}
|
||||
|
||||
// json_encode serializes the workload by hand so the raw `data` JSON is
// embedded verbatim (unquoted) while the other fields are quoted;
// strings are interpolated without escaping, so they must be pre-escaped.
pub fn (mut w Workload) json_encode() string {
	return '{"version":${w.version},"name":"${w.name}","type":"${w.type_}","data":${w.data},"metadata":"${w.metadata}","description":"${w.description}"}'
}
|
||||
|
||||
type WorkloadData = GatewayFQDNProxy
|
||||
| GatewayNameProxy
|
||||
| PublicIP
|
||||
| QuantumSafeFS
|
||||
| ZLogs
|
||||
| Zdb
|
||||
| Zmachine
|
||||
| Zmount
|
||||
| Znet
|
||||
type WorkloadDataResult = GatewayProxyResult
|
||||
| PublicIPResult
|
||||
| ZdbResult
|
||||
| ZmachineResult
|
||||
| ZmountResult
|
||||
|
||||
// pub fn(mut w WorkloadData) challenge() string {
|
||||
// return w.challenge()
|
||||
// }
|
||||
|
||||
@[params]
|
||||
pub struct WorkloadArgs {
|
||||
pub:
|
||||
version ?u32
|
||||
name string
|
||||
description ?string
|
||||
metadata ?string
|
||||
result ?WorkloadResult
|
||||
}
|
||||
61
lib/threefold/grid3/models/zdb.v
Normal file
61
lib/threefold/grid3/models/zdb.v
Normal file
@@ -0,0 +1,61 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
|
||||
pub type ZdbMode = string
|
||||
|
||||
pub struct ZdbModes {
|
||||
pub:
|
||||
seq string = 'seq'
|
||||
user string = 'user'
|
||||
}
|
||||
|
||||
pub const zdb_modes = ZdbModes{}
|
||||
|
||||
type DeviceType = string
|
||||
|
||||
pub struct DeviceTypes {
|
||||
pub:
|
||||
hdd string = 'hdd'
|
||||
ssd string = 'ssd'
|
||||
}
|
||||
|
||||
pub const device_types = DeviceTypes{}
|
||||
|
||||
pub struct Zdb {
|
||||
pub mut:
|
||||
// size in bytes
|
||||
size u64
|
||||
mode ZdbMode
|
||||
password string
|
||||
public bool
|
||||
}
|
||||
|
||||
// challenge returns the canonical signing string for a zdb workload:
// size, mode, password and public flag concatenated without separators.
pub fn (mut z Zdb) challenge() string {
	return '${z.size}${z.mode}${z.password}${z.public}'
}
|
||||
|
||||
// ZdbResult is the node's reply for a deployed zdb workload.
pub struct ZdbResult {
pub mut:
	namespace string   @[json: 'Namespace'] // namespace allocated on the node
	ips       []string @[json: 'IPs']       // addresses the zdb listens on
	port      u32      @[json: 'Port']
}
|
||||
|
||||
// to_workload wraps this zdb spec into a generic grid Workload,
// applying the defaults documented on WorkloadArgs.
pub fn (z Zdb) to_workload(args WorkloadArgs) Workload {
	version := args.version or { 0 }
	metadata := args.metadata or { '' }
	description := args.description or { '' }
	result := args.result or { WorkloadResult{} }
	return Workload{
		version: version
		name: args.name
		type_: workload_types.zdb
		data: json.encode(z)
		metadata: metadata
		description: description
		result: result
	}
}
|
||||
29
lib/threefold/grid3/models/zlogs.v
Normal file
29
lib/threefold/grid3/models/zlogs.v
Normal file
@@ -0,0 +1,29 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
|
||||
// ZLogs describes a log-streaming workload attached to a zmachine.
pub struct ZLogs {
pub:
	zmachine string // zmachine name to stream logs of
	output   string // the `target` location to stream the logs to, it must be a redis or web-socket url
}
|
||||
|
||||
// challenge returns the deterministic string hashed when signing a
// deployment containing this zlogs workload.
pub fn (z ZLogs) challenge() string {
	return z.zmachine + z.output
}
|
||||
|
||||
// to_workload wraps this zlogs spec into a generic grid Workload,
// applying the defaults documented on WorkloadArgs.
pub fn (z ZLogs) to_workload(args WorkloadArgs) Workload {
	version := args.version or { 0 }
	metadata := args.metadata or { '' }
	description := args.description or { '' }
	result := args.result or { WorkloadResult{} }
	return Workload{
		version: version
		name: args.name
		type_: workload_types.zlogs
		data: json.encode(z)
		metadata: metadata
		description: description
		result: result
	}
}
|
||||
139
lib/threefold/grid3/models/zmachine.v
Normal file
139
lib/threefold/grid3/models/zmachine.v
Normal file
@@ -0,0 +1,139 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
|
||||
// Zmachine describes a virtual machine / container workload.
pub struct Zmachine {
pub mut:
	flist            string // if full url means custom flist meant for containers, if just name should be an official vm
	network          ZmachineNetwork
	size             u64 // size of the rootfs disk in bytes
	compute_capacity ComputeCapacity
	mounts           []Mount
	entrypoint       string // how to invoke that in a vm?
	env              map[string]string // environment for the zmachine
	corex            bool
	gpu              []string
}

// ZmachineNetwork describes how a zmachine attaches to networks.
pub struct ZmachineNetwork {
pub mut:
	public_ip  string // PublicIP optional public IP attached to this machine. If set it must be a valid name of a PublicIP workload in the same deployment
	interfaces []ZNetworkInterface // Interfaces list of user znets to join
	planetary  bool // Planetary support planetary network
	mycelium   ?MyceliumIP
}

// ZNetworkInterface joins a zmachine to one user znet.
pub struct ZNetworkInterface {
pub mut:
	network string // Network name (znet name) to join
	ip      string // IP of the zmachine on this network must be a valid Ip in the selected network
}

// MyceliumIP configures the machine's mycelium address on a network.
pub struct MyceliumIP {
pub mut:
	network  string
	hex_seed string
}
|
||||
|
||||
// challenge returns the deterministic string hashed when signing a
// deployment containing this network spec.
pub fn (mut n ZmachineNetwork) challenge() string {
	mut parts := []string{}
	parts << n.public_ip
	parts << n.planetary.str()
	for iface in n.interfaces {
		parts << iface.network
		parts << iface.ip
	}
	if m := n.mycelium {
		parts << m.challenge()
	}
	return parts.join('')
}
|
||||
|
||||
// challenge returns the deterministic string hashed for this mycelium
// IP configuration.
pub fn (m MyceliumIP) challenge() string {
	return m.network + m.hex_seed
}
|
||||
|
||||
// Mount attaches a named disk (e.g. a Zmount) into a zmachine.
pub struct Mount {
pub mut:
	name       string
	mountpoint string // the path to mount the disk into e.g. '/disk1'
}
|
||||
|
||||
// challenge returns the deterministic string hashed for this mount.
pub fn (mut m Mount) challenge() string {
	return m.name + m.mountpoint
}
|
||||
|
||||
// challenge returns the deterministic string hashed when signing a
// deployment containing this zmachine. env keys are sorted so the
// result is stable regardless of map iteration order.
pub fn (mut m Zmachine) challenge() string {
	mut parts := []string{}
	parts << m.flist
	parts << m.network.challenge()
	parts << '${m.size}'
	parts << m.compute_capacity.challenge()
	for mut mnt in m.mounts {
		parts << mnt.challenge()
	}
	parts << m.entrypoint
	mut keys := m.env.keys()
	keys.sort()
	for key in keys {
		parts << key
		parts << '='
		parts << m.env[key]
	}
	return parts.join('')
}
|
||||
|
||||
// ZmachineResult is the node's reply for a deployed zmachine.
pub struct ZmachineResult {
pub mut:
	// name unique per deployment, re-used in request & response
	id           string
	ip           string
	planetary_ip string
	mycelium_ip  string
	console_url  string
}
|
||||
|
||||
// to_workload wraps this zmachine spec into a generic grid Workload,
// applying the defaults documented on WorkloadArgs.
pub fn (z Zmachine) to_workload(args WorkloadArgs) Workload {
	version := args.version or { 0 }
	metadata := args.metadata or { '' }
	description := args.description or { '' }
	result := args.result or { WorkloadResult{} }
	return Workload{
		version: version
		name: args.name
		type_: workload_types.zmachine
		data: json.encode(z)
		metadata: metadata
		description: description
		result: result
	}
}
|
||||
|
||||
// VM is a high-level, user-facing machine description; defaults give a
// minimal bootable machine on the official base flist.
pub struct VM {
pub:
	name        string = 'myvm'
	flist       string = 'https://hub.grid.tf/tf-official-apps/base:latest.flist'
	entrypoint  string = '/sbin/zinit init'
	env_vars    map[string]string
	cpu         int = 1    // number of virtual cores
	memory      int = 1024 // NOTE(review): presumably MB — confirm against consumers
	rootfs_size int
}
|
||||
|
||||
// json_encode serializes the VM by hand into JSON.
// NOTE(review): values are interpolated without JSON escaping — env
// values or names containing `"` would break the output; confirm inputs.
pub fn (vm VM) json_encode() string {
	mut env_vars := []string{}
	for k, v in vm.env_vars {
		env_vars << '"${k}": "${v}"'
	}

	return '{"name":"${vm.name}","flist":"${vm.flist}","entrypoint":"${vm.entrypoint}","env_vars":{${env_vars.join(',')}},"cpu":${vm.cpu},"memory":${vm.memory}, "rootfs_size": ${vm.rootfs_size}}'
}
|
||||
32
lib/threefold/grid3/models/zmount.v
Normal file
32
lib/threefold/grid3/models/zmount.v
Normal file
@@ -0,0 +1,32 @@
|
||||
// ssd mounts under zmachine
|
||||
|
||||
module models
|
||||
|
||||
import json
|
||||
|
||||
// Zmount is an ssd-backed volume to mount under a zmachine.
// ONLY possible on SSD
pub struct Zmount {
pub mut:
	size i64 // bytes
}
|
||||
|
||||
// challenge returns the deterministic string hashed for this zmount
// (its size, in decimal).
pub fn (mut mount Zmount) challenge() string {
	return mount.size.str()
}
|
||||
|
||||
// ZmountResult is the node's reply for a deployed zmount.
pub struct ZmountResult {
pub mut:
	volume_id string
}
|
||||
|
||||
// to_workload wraps this zmount spec into a generic grid Workload,
// applying the defaults documented on WorkloadArgs.
pub fn (z Zmount) to_workload(args WorkloadArgs) Workload {
	version := args.version or { 0 }
	metadata := args.metadata or { '' }
	description := args.description or { '' }
	result := args.result or { WorkloadResult{} }
	return Workload{
		version: version
		name: args.name
		type_: workload_types.zmount
		data: json.encode(z)
		metadata: metadata
		description: description
		result: result
	}
}
|
||||
117
lib/threefold/grid3/models/znet.v
Normal file
117
lib/threefold/grid3/models/znet.v
Normal file
@@ -0,0 +1,117 @@
|
||||
module models
|
||||
|
||||
import json
|
||||
import rand
|
||||
// wg network reservation (znet)
|
||||
|
||||
// Znet is a wireguard private-network reservation on one node.
pub struct Znet {
pub mut:
	// unique nr for each network chosen, this identified private networks as connected to a container or vm or ...
	// corresponds to the 2nd number of a class B ipv4 address
	// is a class C of a chosen class B
	// IPV4 subnet for this network resource
	// this must be a valid subnet of the entire network ip range.
	// for example 10.1.1.0/24
	subnet string
	// IP range of the network, must be an IPv4 /16
	// for example a 10.1.0.0/16
	ip_range string
	// wireguard private key, curve25519
	wireguard_private_key string // can be generated using `wg genkey` command
	wireguard_listen_port u16
	peers                 []Peer
	mycelium              ?Mycelium
}

// Mycelium configures the mycelium overlay for this network resource.
pub struct Mycelium {
pub mut:
	hex_key string
	peers   []string
}
|
||||
|
||||
// challenge returns the deterministic string hashed when signing a
// deployment containing this network reservation.
pub fn (mut n Znet) challenge() string {
	mut parts := []string{}
	parts << n.ip_range
	parts << n.subnet
	parts << n.wireguard_private_key
	parts << n.wireguard_listen_port.str()
	for mut p in n.peers {
		parts << p.challenge()
	}
	if m := n.mycelium {
		parts << m.challenge()
	}
	return parts.join('')
}
|
||||
|
||||
// challenge returns the deterministic string hashed for this mycelium
// configuration (key followed by all peers, concatenated).
pub fn (m Mycelium) challenge() string {
	return m.hex_key + m.peers.join('')
}
|
||||
|
||||
// Peer is a remote wireguard client which can connect to this node.
pub struct Peer {
pub mut:
	subnet string // IPV4 subnet of the network resource of the peer
	// WGPublicKey of the peer (driven from its private key)
	wireguard_public_key string // can be generated by `echo <PRIVATE_KEY> | wg pubkey` command
	// is ipv4 or ipv6 address from a wireguard client who connects
	// this should be the node's subnet and the wireguard routing ip that should start with `100.64`
	// then the 2nd and 3rd part of the node's subnet
	// e.g. ["10.20.2.0/24", "100.64.20.2/32"]
	allowed_ips []string
	// Entrypoint of the peer; ipv4 or ipv6,
	// can be empty, one of the 2 need to be filled in though
	// e.g. [2a10:b600:0:9:225:90ff:fe82:7130]:7777
	endpoint string
}

// PublicConfig is a node's public network configuration.
pub struct PublicConfig {
pub:
	type_  string // Type define if we need to use the Vlan field or the MacVlan
	ipv4   string
	ipv6   string
	gw4    string
	gw6    string
	domain string // Domain is the node domain name e.g. gent01.devnet.grid.tf
}
|
||||
|
||||
// challenge returns the deterministic string hashed for this peer.
pub fn (mut p Peer) challenge() string {
	mut out := p.wireguard_public_key + p.endpoint + p.subnet
	for ip in p.allowed_ips {
		out += ip
	}
	return out
}
|
||||
|
||||
// to_workload wraps this znet spec into a generic grid Workload,
// applying the defaults documented on WorkloadArgs.
pub fn (z Znet) to_workload(args WorkloadArgs) Workload {
	version := args.version or { 0 }
	metadata := args.metadata or { '' }
	description := args.description or { '' }
	result := args.result or { WorkloadResult{} }
	return Workload{
		version: version
		name: args.name
		type_: workload_types.network
		data: json.encode(z)
		metadata: metadata
		description: description
		result: result
	}
}
|
||||
|
||||
// rand_port picks a random wireguard listen port in [2000, 8000) that
// is not already present in takenPorts.
//
// Returns an error when no free port is found after a bounded number of
// attempts — the original looped forever once the range was exhausted.
pub fn rand_port(takenPorts []u16) !u16 {
	// 10000 draws over a 6000-port range makes failure on a non-full
	// range astronomically unlikely, while guaranteeing termination.
	for _ in 0 .. 10000 {
		port := u16(rand.u32n(u32(6000))! + 2000)
		if !takenPorts.any(it == port) {
			return port
		}
	}
	return error('could not find a free wireguard port in range 2000..8000')
}
|
||||
32
lib/threefold/grid3/rmb/model_rmb.v
Normal file
32
lib/threefold/grid3/rmb/model_rmb.v
Normal file
@@ -0,0 +1,32 @@
|
||||
module rmb
|
||||
|
||||
// RmbMessage is the request envelope pushed onto the rmb-peer redis
// queue ('msgbus.system.local').
pub struct RmbMessage {
pub mut:
	ver int = 1   // protocol version
	cmd string    // command, e.g. 'zos.system.version'
	src string
	ref string
	exp u64       // expiration in seconds
	dat string    // base64-encoded payload
	dst []u32     // destination twin ids
	ret string    // redis queue name the reply is pushed to
	now u64       // unix timestamp when the message was created
	shm string
}

// RmbError is the error part of an RmbResponse; message == '' means ok.
pub struct RmbError {
pub mut:
	code    int
	message string
}

// RmbResponse is the reply envelope popped from the return queue.
pub struct RmbResponse {
pub mut:
	ver int = 1
	ref string // todo: define
	dat string // base64-encoded reply payload
	dst string // todo: define what is this
	now u64
	shm string // todo: what is this?
	err RmbError
}
|
||||
34
lib/threefold/grid3/rmb/readme.md
Normal file
34
lib/threefold/grid3/rmb/readme.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# RMB
|
||||
|
||||
Reliable Message Bus
|
||||
|
||||
Can talk to ZOS'es, ...
|
||||
|
||||
## requirements
|
||||
|
||||
We need client to rmb-rs
|
||||
|
||||
compile rmb-rs (see below)
|
||||
|
||||
```bash
|
||||
rmb-peer --mnemonics "$(cat mnemonic.txt)" --relay wss://relay.dev.grid.tf:443 --substrate wss://tfchain.dev.grid.tf:443
|
||||
|
||||
#OR:
|
||||
|
||||
export TFCHAINSECRET='something here'
|
||||
|
||||
rmb-peer --mnemonics "$TFCHAINSECRET" --relay wss://relay.dev.grid.tf:443 --substrate wss://tfchain.dev.grid.tf:443
|
||||
|
||||
```
|
||||
|
||||
### for developers
|
||||
|
||||
more info see https://github.com/threefoldtech/rmb-rs
|
||||
the message format of RMB itself https://github.com/threefoldtech/rmb-rs/blob/main/proto/types.proto
|
||||
|
||||
|
||||
> TODO: implement each endpoint on the zerohub here at client
|
||||
|
||||
> TODO: the original code comes from code/github/threefoldtech/farmerbot/farmerbot/system/zos.v
|
||||
|
||||
|
||||
30
lib/threefold/grid3/rmb/rmb_calls_zos.v
Normal file
30
lib/threefold/grid3/rmb/rmb_calls_zos.v
Normal file
@@ -0,0 +1,30 @@
|
||||
module rmb
|
||||
|
||||
import encoding.base64
|
||||
import json
|
||||
|
||||
// zos_has_public_ipaddr reports whether the ZOS node behind twin `dst`
// has a public network config: true exactly when the
// public_config_get call returns no error payload.
pub fn (mut z RMBClient) zos_has_public_ipaddr(dst u32) !bool {
	response := z.rmb_request('zos.network.public_config_get', dst, '')!
	return response.err.message == ''
}
|
||||
|
||||
// get_zos_system_version returns the ZOS version string of the node
// behind twin `dst`, or an error when the node reports one.
pub fn (mut z RMBClient) get_zos_system_version(dst u32) !string {
	response := z.rmb_request('zos.system.version', dst, '')!
	if response.err.message.len > 0 {
		return error(response.err.message)
	}
	return base64.decode_str(response.dat)
}
|
||||
|
||||
// get_zos_wg_ports returns the wireguard listen ports already in use on
// the node behind twin `dst`, so a new deployment can avoid them.
// TODO: point to documentation where it explains what this means, what is zos_wg_port and why do we need it
pub fn (mut z RMBClient) get_zos_wg_ports(dst u32) ![]u16 {
	response := z.rmb_request('zos.network.list_wg_ports', dst, '')!
	if response.err.message != '' {
		return error('${response.err.message}')
	}
	// json.decode returns a result in V; the original was missing the `!`
	// and therefore did not propagate decode failures.
	return json.decode([]u16, base64.decode_str(response.dat))!
}
|
||||
29
lib/threefold/grid3/rmb/rmb_calls_zos_statistics.v
Normal file
29
lib/threefold/grid3/rmb/rmb_calls_zos_statistics.v
Normal file
@@ -0,0 +1,29 @@
|
||||
module rmb
|
||||
|
||||
import json
|
||||
import encoding.base64
|
||||
|
||||
// ZosResources is one capacity vector as reported by zos.statistics.get.
pub struct ZosResources {
pub mut:
	cru   u64 // compute units (cores)
	sru   u64 // ssd storage
	hru   u64 // hdd storage
	mru   u64 // memory
	ipv4u u64 // public ipv4 addresses
}

// ZosResourcesStatistics groups a node's total/used/system capacity.
pub struct ZosResourcesStatistics {
pub mut:
	total  ZosResources
	used   ZosResources
	system ZosResources
}
|
||||
|
||||
// get_zos_statistics returns the capacity statistics of the node behind
// twin `dst`.
pub fn (mut z RMBClient) get_zos_statistics(dst u32) !ZosResourcesStatistics {
	// the original called `rmb_client_request`, which does not exist on
	// RMBClient; the method defined (and used by the other zos calls)
	// is `rmb_request`.
	response := z.rmb_request('zos.statistics.get', dst, '')!
	if response.err.message != '' {
		return error('${response.err.message}')
	}
	return json.decode(ZosResourcesStatistics, base64.decode_str(response.dat))!
}
|
||||
42
lib/threefold/grid3/rmb/rmb_calls_zos_storagepools.v
Normal file
42
lib/threefold/grid3/rmb/rmb_calls_zos_storagepools.v
Normal file
@@ -0,0 +1,42 @@
|
||||
module rmb
|
||||
|
||||
import json
|
||||
import encoding.base64
|
||||
|
||||
// ZosPoolJSON is the raw wire shape of one storage pool entry as
// returned by zos.storage.pools (internal decode target).
struct ZosPoolJSON {
mut:
	name      string
	pool_type string @[json: 'type'] // TODO: this should be an enum and we need to define what it is
	size      int // TODO: what does it mean? used how much what type?
	used      int // TODO: what does it mean? used how much what type?
}

// ZosPool is the public, typed representation of a storage pool.
pub struct ZosPool {
pub mut:
	name      string
	pool_type PoolType
	size      int
	used      int
}

// PoolType classifies a storage pool; only a placeholder variant so far.
enum PoolType {
	dontknow // TODO:
}
|
||||
|
||||
// get_storage_pools returns the storage pools of the zos node behind
// twin `dst`.
//
// Fixes vs the original (which did not compile):
//   - `rmb_client_request` does not exist; the method is `rmb_request`
//   - `json.decode` returns a result and needs `!`
//   - the result list was discarded (`_ :=`), assigned instead of
//     appended, and never returned
pub fn (mut z RMBClient) get_storage_pools(dst u32) ![]ZosPool {
	response := z.rmb_request('zos.storage.pools', dst, '')!
	if response.err.message != '' {
		return error('${response.err.message}')
	}
	objs := json.decode([]ZosPoolJSON, base64.decode_str(response.dat))!
	mut res := []ZosPool{cap: objs.len}
	for o in objs {
		res << ZosPool{
			name: o.name
			size: o.size
			used: o.used
			pool_type: .dontknow // TODO: map o.pool_type once PoolType is defined
		}
	}
	return res
}
|
||||
80
lib/threefold/grid3/rmb/rmb_client.v
Normal file
80
lib/threefold/grid3/rmb/rmb_client.v
Normal file
@@ -0,0 +1,80 @@
|
||||
module rmb
|
||||
|
||||
// import freeflowuniverse.herolib.core.httpconnection
|
||||
import freeflowuniverse.herolib.core.redisclient { RedisURL }
|
||||
import os
|
||||
|
||||
// RMBClient talks to a locally running rmb-peer through redis queues.
pub struct RMBClient {
pub mut:
	relay_url        string // e.g. wss://relay.dev.grid.tf:443
	tfchain_url      string // e.g. wss://tfchain.dev.grid.tf:443
	tfchain_mnemonic string // TFChain mnemonic used by the rmb peer
	redis            &redisclient.Redis @[str: skip]
}
|
||||
|
||||
// TFNetType selects which ThreeFold grid network to target; when not
// `unspecified`, `new` fills in the matching relay/tfchain urls.
pub enum TFNetType {
	unspecified
	main
	test
	dev
	qa
}
|
||||
|
||||
// RMBClientArgs are the named parameters for `new`.
//
// Fixes vs the original: `new` reads and writes `args.tfchain_mnemonic`
// and overwrites `relay_url`/`tfchain_url` on its local copy, but the
// struct neither declared a `tfchain_mnemonic` field nor made its
// fields mutable. Declaring the field and using `pub mut:` keeps every
// existing call site working (pure widening).
@[params]
pub struct RMBClientArgs {
pub mut:
	nettype          TFNetType
	relay_url        string
	tfchain_url      string
	tfchain_mnemonic string // falls back to the TFCHAINSECRET env var, see `new`
}
|
||||
|
||||
// new creates an RMB client.
//
// params:
//   nettype           default unspecified; main/test/dev/qa fill in the
//                     well-known relay and tfchain urls
//   relay_url         e.g. "wss://relay.dev.grid.tf:443" (required when nettype unspecified)
//   tfchain_url       e.g. "wss://tfchain.dev.grid.tf:443" (required when nettype unspecified)
//   tfchain_mnemonic  falls back to the TFCHAINSECRET environment variable
pub fn new(args_ RMBClientArgs) !RMBClient {
	mut args := args_
	if args.tfchain_mnemonic == '' {
		// the original referenced an undeclared `tfchain_mnemonic` variable
		// and treated `os.environ` (a function) as a map; use the env
		// lookup helper instead.
		args.tfchain_mnemonic = os.getenv_opt('TFCHAINSECRET') or {
			return error('need to specify TFCHAINSECRET (menomics for TFChain) as env argument or inside client')
		}
	}
	// well-known urls per network; unspecified keeps whatever was passed in
	match args.nettype {
		.main {
			args.relay_url = 'wss://relay.grid.tf:443'
			args.tfchain_url = 'wss://tfchain.grid.tf:443'
		}
		.test {
			args.relay_url = 'wss://relay.test.grid.tf:443'
			args.tfchain_url = 'wss://tfchain.test.grid.tf:443'
		}
		.dev {
			args.relay_url = 'wss://relay.dev.grid.tf:443'
			args.tfchain_url = 'wss://tfchain.dev.grid.tf:443'
		}
		.qa {
			args.relay_url = 'wss://relay.qa.grid.tf:443'
			args.tfchain_url = 'wss://tfchain.qa.grid.tf:443'
		}
		.unspecified {}
	}

	// validate before doing any work
	if args.relay_url == '' || args.tfchain_url == '' {
		return error('need to specify relay_url and tfchain_url.')
	}
	if args.tfchain_mnemonic.len < 20 {
		return error('need to specify tfchain mnemonic, now too short.')
	}

	mut redis := redisclient.core_get(RedisURL{})!

	// TODO: there should be a check here that rmb peer is accessible and working

	return RMBClient{
		redis: redis
		relay_url: args.relay_url
		tfchain_url: args.tfchain_url
		tfchain_mnemonic: args.tfchain_mnemonic
	}
}
|
||||
23
lib/threefold/grid3/rmb/rmb_request.v
Normal file
23
lib/threefold/grid3/rmb/rmb_request.v
Normal file
@@ -0,0 +1,23 @@
|
||||
module rmb
|
||||
|
||||
import encoding.base64
import json
import rand
import time
|
||||
|
||||
// rmb_request sends `cmd` (e.g. 'zos.system.version') over the RMB
// message bus to twin `dst` and waits up to 5 seconds for the reply on
// a unique return queue.
//
// Fix vs the original: it called `rand.uuid_v4()` but this module did
// not import `rand` (added to the module imports).
pub fn (mut z RMBClient) rmb_request(cmd string, dst u32, payload string) !RmbResponse {
	msg := RmbMessage{
		ver: 1
		cmd: cmd
		exp: 5 // message expiration, seconds
		dat: base64.encode_str(payload)
		dst: [dst]
		ret: rand.uuid_v4() // unique reply queue so concurrent requests don't collide
		now: u64(time.now().unix())
	}
	request := json.encode_pretty(msg)
	// rmb-peer consumes from this well-known local queue
	z.redis.lpush('msgbus.system.local', request)!
	// blpop returns [queue_name, value]; index 1 is the payload
	response_json := z.redis.blpop(msg.ret, 5)!
	response := json.decode(RmbResponse, response_json[1])!
	return response
}
|
||||
13
lib/threefold/grid3/rmb/rmb_test.v
Normal file
13
lib/threefold/grid3/rmb/rmb_test.v
Normal file
@@ -0,0 +1,13 @@
|
||||
module rmb
|
||||
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
// Smoke test: fetch statistics for twin 1 on devnet and print them.
// Requires a running rmb-peer configured for devnet.
fn test_main() ? {
	mut cl := new(nettype: .dev)!

	mut r := cl.get_zos_statistics(1)!

	console.print_debug(r)
	// removed leftover `panic('ddd')` debug statement which made the
	// test fail unconditionally even when everything worked
}
|
||||
3
lib/threefold/grid3/tfrobot/README.md
Normal file
3
lib/threefold/grid3/tfrobot/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# TFRobot
|
||||
|
||||
Wrapper for TFGrid mass deployer `tfrobot`
|
||||
46
lib/threefold/grid3/tfrobot/cancel.v
Normal file
46
lib/threefold/grid3/tfrobot/cancel.v
Normal file
@@ -0,0 +1,46 @@
|
||||
module tfrobot
|
||||
|
||||
import json
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.osal
|
||||
|
||||
// CancelConfig is the json config handed to `tfrobot cancel`.
pub struct CancelConfig {
mut:
	name        string        @[required] // deployment name, used for the config file path
	mnemonic    string        @[required] // falls back to the instance config when empty
	network     Network       @[required]
	node_groups []CancelGroup @[required] // groups to cancel
}

// CancelGroup names one node group to cancel.
pub struct CancelGroup {
	name string @[required]
}
|
||||
|
||||
// cancel tears down the given node groups by writing a cancel config
// file under ${tfrobot_dir}/deployments and invoking `tfrobot cancel`.
pub fn (mut robot TFRobot[Config]) cancel(mut config CancelConfig) ! {
	cfg := robot.config()!
	if config.mnemonic == '' {
		config.mnemonic = cfg.mnemonics
	}
	// NOTE(review): the passed-in network is always overwritten from the
	// stored instance config, same as in deploy() — confirm intended.
	config.network = Network.from(cfg.network)!
	check_cancel_config(config)!

	mut cancel_file := pathlib.get_file(
		path: '${tfrobot_dir}/deployments/${config.name}_cancel.json'
		create: true
	)!

	cancel_file.write(json.encode(config))!
	osal.exec(
		cmd: 'tfrobot cancel -c ${cancel_file.path}'
		stdout: true
	)!
}
|
||||
|
||||
// check_cancel_config validates that at least one node group is given
// and that every group has a name.
fn check_cancel_config(config CancelConfig) ! {
	if config.node_groups.len == 0 {
		return error('No node groups specified to cancel.')
	}
	for group in config.node_groups {
		if group.name == '' {
			return error('Cannot cancel deployment without node_group name.')
		}
	}
}
|
||||
69
lib/threefold/grid3/tfrobot/cancel_test.v
Normal file
69
lib/threefold/grid3/tfrobot/cancel_test.v
Normal file
@@ -0,0 +1,69 @@
|
||||
module tfrobot
|
||||
|
||||
import os
|
||||
import toml
|
||||
|
||||
__global (
|
||||
mnemonics string
|
||||
ssh_key string
|
||||
)
|
||||
|
||||
const test_name = 'cancel_test'
|
||||
const test_flist = 'https://hub.grid.tf/mariobassem1.3bot/threefolddev-holochain-latest.flist'
|
||||
const test_entrypoint = '/usr/local/bin/entrypoint.sh'
|
||||
|
||||
// testsuite_begin loads TFGRID_MNEMONIC and SSH_KEY from the
// environment, falling back to a .env file next to this test; panics
// when neither source provides them.
fn testsuite_begin() ! {
	env := toml.parse_file(os.dir(@FILE) + '/.env') or { toml.Doc{} }
	mnemonics = os.getenv_opt('TFGRID_MNEMONIC') or {
		env.value_opt('TFGRID_MNEMONIC') or {
			panic('TFGRID_MNEMONIC variable should either be set as environment variable or set in .env file for this test')
		}.string()
	}
	ssh_key = os.getenv_opt('SSH_KEY') or {
		env.value_opt('SSH_KEY') or {
			panic('SSH_KEY variable should either be set as environment variable or set in .env file for this test')
		}.string()
	}
}
|
||||
|
||||
// test_cancel deploys one minimal VM on mainnet and then cancels its
// node group; requires real credentials (see testsuite_begin) and
// network access, so this is an integration test.
fn test_cancel() ! {
	mut robot := new()!
	result := robot.deploy(
		name: '${test_name}_deployment'
		mnemonic: mnemonics
		network: .main
		node_groups: [
			NodeGroup{
				name: '${test_name}_group'
				nodes_count: 1
				free_cpu: 1
				free_mru: 256
			},
		]
		vms: [
			VMConfig{
				name: '${test_name}_vm'
				vms_count: 1
				cpu: 1
				mem: 256
				node_group: '${test_name}_group'
				ssh_key: '${test_name}_key'
				entry_point: test_entrypoint
				flist: test_flist
			},
		]
		ssh_keys: {
			'${test_name}_key': ssh_key
		}
	)!

	assert result.ok.keys() == ['${test_name}_group']
	robot.cancel(
		name: '${test_name}_deployment'
		mnemonic: mnemonics
		network: .main
		node_groups: [CancelGroup{
			name: '${test_name}_group'
		}]
	)!
}
|
||||
184
lib/threefold/grid3/tfrobot/deploy.v
Normal file
184
lib/threefold/grid3/tfrobot/deploy.v
Normal file
@@ -0,0 +1,184 @@
|
||||
module tfrobot
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import json
|
||||
import os
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.osal.sshagent
|
||||
|
||||
const tfrobot_dir = '${os.home_dir()}/hero/tfrobot' // path to tfrobot dir in fs
|
||||
|
||||
// DeployConfig is the json config handed to `tfrobot deploy`.
pub struct DeployConfig {
pub mut:
	name        string            // deployment name, used for file paths and the redis key
	mnemonic    string            // falls back to the instance config when empty
	network     Network = .main
	node_groups []NodeGroup @[required] // node requirement groups
	vms         []VMConfig  @[required] // vm templates to instantiate
	ssh_keys    map[string]string       // key name -> public key
	debug       bool                    // print config/command/results while deploying
}
|
||||
|
||||
// NodeGroup describes the capacity requirements for a set of nodes.
pub struct NodeGroup {
	name        string
	nodes_count int @[required]
	free_cpu    int @[required] // number of logical cores
	free_mru    int @[required] // amount of memory in GB
	free_ssd    int // amount of ssd storage in GB
	free_hdd    int // amount of hdd storage in GB
	dedicated   bool // are nodes dedicated
	public_ip4  bool
	public_ip6  bool
	certified   bool // should the nodes be certified(if false the nodes could be certified of diyed)
	region      string // region could be the name of the continents the nodes are located in (africa, americas, antarctic, antarctic ocean, asia, europe, oceania, polar)
}

// VMConfig is one vm template; vms_count instances are created on
// nodes of node_group.
pub struct VMConfig {
pub mut:
	name        string @[required]
	vms_count   int = 1 @[required]
	node_group  string // defaults to the first node group, see deploy()
	cpu         int = 4 @[required]
	mem         int = 4 @[required] // in GB
	public_ip4  bool
	public_ip6  bool
	ygg_ip      bool = true
	mycelium_ip bool = true
	flist       string @[required]
	entry_point string @[required]
	root_size   int = 20
	ssh_key     string // defaults to the first configured ssh key, see deploy()
	env_vars    map[string]string
}
|
||||
|
||||
// DeployResult is tfrobot's output file: successful groups map to
// their vms, failed groups map to an error message.
pub struct DeployResult {
pub:
	ok    map[string][]VMOutput
	error map[string]string
}

// VMOutput describes one deployed vm as written by tfrobot; node_group
// and deployment_name are filled in afterwards by deploy().
pub struct VMOutput {
pub mut:
	name            string  @[json: 'Name'; required]
	network_name    string  @[json: 'NetworkName'; required]
	node_group      string  // set by deploy(), not by tfrobot
	deployment_name string  // set by deploy(), not by tfrobot
	public_ip4      string  @[json: 'PublicIP4'; required]
	public_ip6      string  @[json: 'PublicIP6'; required]
	yggdrasil_ip    string  @[json: 'YggIP'; required]
	mycelium_ip     string  @[json: 'MyceliumIP'; required]
	ip              string  @[json: 'IP'; required]
	mounts          []Mount @[json: 'Mounts'; required]
	node_id         u32     @[json: 'NodeID']
	contract_id     u64     @[json: 'ContractID']
}

// Mount is one disk attached to a deployed vm.
pub struct Mount {
pub:
	disk_name   string
	mount_point string
}
|
||||
|
||||
// sshagent_keys_add adds the public part of every key loaded in the
// ssh-agent to config.ssh_keys (keyed by key name); errors when the
// agent holds no keys at all.
pub fn sshagent_keys_add(mut config DeployConfig) ! {
	mut agent := sshagent.new()!
	if agent.keys.len == 0 {
		return error('no ssh-keys found in ssh-agent, cannot add to tfrobot deploy config.')
	}
	mut loaded := agent.keys_loaded()!
	for mut key in loaded {
		pubkey := key.keypub()!
		config.ssh_keys[key.name] = pubkey.trim('\n')
	}
}
|
||||
|
||||
// deploy runs `tfrobot deploy` for the given config:
//   1. fills in mnemonic/network defaults from the instance config
//   2. validates ssh keys and node groups, defaulting per-vm values
//   3. writes the config json, runs the CLI, reads the output json
//   4. persists config + per-vm records under redis key 'tfrobot:<name>'
pub fn (mut robot TFRobot[Config]) deploy(config_ DeployConfig) !DeployResult {
	mut config := config_
	cfg := robot.config()!
	if config.mnemonic == '' {
		config.mnemonic = cfg.mnemonics
	}
	// NOTE(review): the passed-in network is always overwritten from the
	// stored instance config — confirm intended.
	config.network = Network.from(cfg.network)!

	if config.ssh_keys.len == 0 {
		return error('no ssh-keys found in config')
	}

	if config.node_groups.len == 0 {
		return error('there are no node requirement groups defined')
	}

	// default group for vms that don't specify one
	node_group := config.node_groups.first().name

	for mut vm in config.vms {
		if vm.ssh_key.len == 0 {
			vm.ssh_key = config.ssh_keys.keys().first() // first one of the dict
		}
		if vm.ssh_key !in config.ssh_keys {
			return error('Could not find specified sshkey: ${vm.ssh_key} in known sshkeys.\n${config.ssh_keys.values()}')
		}
		if vm.node_group == '' {
			vm.node_group = node_group
		}
	}

	check_deploy_config(config)!

	mut config_file := pathlib.get_file(
		path: '${tfrobot_dir}/deployments/${config.name}_config.json'
		create: true
	)!
	mut output_file := pathlib.get_file(
		path: '${tfrobot_dir}/deployments/${config.name}_output.json'
		create: false
	)!
	config_json := json.encode(config)
	config_file.write(config_json)!
	cmd := 'tfrobot deploy -c ${config_file.path} -o ${output_file.path}'
	if config.debug {
		console.print_debug(config.str())
		console.print_debug(cmd)
	}
	_ := osal.exec(
		cmd: cmd
		stdout: true
	) or { return error('TFRobot command ${cmd} failed:\n${err}') }
	// tfrobot writes its result into the output file, not stdout
	output := output_file.read()!
	mut res := json.decode(DeployResult, output)!

	if res.ok.len == 0 {
		return error('No vm was deployed, empty result')
	}

	mut redis := redisclient.core_get()!

	// persist the config and each deployed vm for later lookup
	redis.hset('tfrobot:${config.name}', 'config', config_json)!
	for groupname, mut vms in res.ok {
		for mut vm in vms {
			if config.debug {
				console.print_header('vm deployed: ${vm.name}')
				console.print_debug(vm.str())
			}
			vm.node_group = groupname // remember the groupname
			vm.deployment_name = config.name
			vm_json := json.encode(vm)
			redis.hset('tfrobot:${config.name}', vm.name, vm_json)!
		}
	}
	return res
}
|
||||
|
||||
// check_deploy_config validates cross-references in the config: every
// vm's node_group must exist in node_groups and every vm's ssh_key
// must exist in ssh_keys.
fn check_deploy_config(config DeployConfig) ! {
	group_names := config.node_groups.map(it.name)
	bad_vms := config.vms.filter(it.node_group !in group_names)
	if bad_vms.len > 0 {
		error_msgs := bad_vms.map('Node group: `${it.node_group}` for VM: `${it.name}`')
		return error('${error_msgs.join(',')} not found.')
	}

	unknown_keys := config.vms.filter(it.ssh_key !in config.ssh_keys).map(it.ssh_key)
	if unknown_keys.len > 0 {
		return error('SSH Keys [${unknown_keys.join(',')}] not found.')
	}
}
|
||||
64
lib/threefold/grid3/tfrobot/deploy_test.v
Normal file
64
lib/threefold/grid3/tfrobot/deploy_test.v
Normal file
@@ -0,0 +1,64 @@
|
||||
module tfrobot
|
||||
|
||||
import os
|
||||
import toml
|
||||
|
||||
__global (
|
||||
mnemonics string
|
||||
ssh_key string
|
||||
)
|
||||
|
||||
// testsuite_begin loads TFGRID_MNEMONIC and SSH_KEY from the
// environment, falling back to a .env file next to this test; panics
// when neither source provides them.
fn testsuite_begin() ! {
	env := toml.parse_file(os.dir(@FILE) + '/.env') or { toml.Doc{} }
	mnemonics = os.getenv_opt('TFGRID_MNEMONIC') or {
		env.value_opt('TFGRID_MNEMONIC') or {
			panic('TFGRID_MNEMONIC variable should either be set as environment variable or set in .env file for this test')
		}.string()
	}
	ssh_key = os.getenv_opt('SSH_KEY') or {
		env.value_opt('SSH_KEY') or {
			panic('SSH_KEY variable should either be set as environment variable or set in .env file for this test')
		}.string()
	}
}
|
||||
|
||||
// test_deploy deploys one minimal VM on mainnet and checks the result
// shape; integration test, needs real credentials and network access.
fn test_deploy() ! {
	mut robot := new()!
	result := robot.deploy(
		name: 'test'
		mnemonic: mnemonics
		network: .main
		node_groups: [
			NodeGroup{
				name: 'test_group'
				nodes_count: 1
				free_cpu: 1
				free_mru: 256
			},
		]
		vms: [
			VMConfig{
				name: 'test'
				vms_count: 1
				cpu: 1
				mem: 256
				node_group: 'test_group'
				ssh_key: 'test_key'
				entry_point: '/usr/local/bin/entrypoint.sh'
				flist: 'https://hub.grid.tf/mariobassem1.3bot/threefolddev-holochain-latest.flist'
			},
		]
		ssh_keys: {
			'test_key': ssh_key
		}
	)!

	assert result.error.keys().len == 0
	assert result.ok.keys() == ['test_group']
	assert result.ok['test_group'].len == 1
	assert result.ok['test_group'][0].name == 'test0'
	assert result.ok['test_group'][0].public_ip4 == ''
	assert result.ok['test_group'][0].public_ip6 == ''
	// VMOutput has no `planetary_ip` field (the original referenced one);
	// the yggdrasil/planetary address is `yggdrasil_ip`
	assert result.ok['test_group'][0].yggdrasil_ip == ''
	assert result.ok['test_group'][0].mounts.len == 0
}
|
||||
80
lib/threefold/grid3/tfrobot/factory.v
Normal file
80
lib/threefold/grid3/tfrobot/factory.v
Normal file
@@ -0,0 +1,80 @@
|
||||
module tfrobot
|
||||
|
||||
import freeflowuniverse.herolib.installers.threefold.tfrobot as tfrobot_installer
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.ui
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
pub struct TFRobot[T] {
|
||||
base.BaseConfig[T]
|
||||
pub mut:
|
||||
jobs map[string]Job
|
||||
}
|
||||
|
||||
// Config is the persisted per-instance configuration for a TFRobot client.
@[params]
pub struct Config {
pub mut:
	configtype string = 'tfrobot' // needs to be defined
	// mnemonic phrase of the account used to sign grid deployments
	mnemonics string
	// target grid network: 'main', 'test', 'qa' or 'dev'
	network string = 'main'
}
|
||||
|
||||
// get returns a TFRobot client for the named instance, loading its stored
// configuration. The tfrobot binary is installed first.
// NOTE(review): `reset: true` forces a reinstall on every call — confirm
// this is intended rather than install-once.
pub fn get(instance string) !TFRobot[Config] {
	tfrobot_installer.install(reset: true)!
	mut robot := TFRobot[Config]{}
	robot.init('tfrobot', instance, .get)!
	return robot
}
|
||||
|
||||
// configure creates (or overwrites) the configuration of a named tfrobot
// instance and returns a client bound to it.
pub fn configure(instance string, config_ Config) !TFRobot[Config] {
	// tfrobot_installer.install()!
	mut config := config_
	mut robot := get(instance)!
	mut cfg := robot.config()!
	// replace the loaded config reference with the caller-supplied one,
	// then persist it via init(.set)
	cfg = &config
	robot.init('tfrobot', instance, .set, cfg)!
	return robot
}
|
||||
|
||||
// pub fn heroplay(args play.PLayBookAddArgs) ! {
|
||||
// // make session for configuring from heroscript
|
||||
// mut session := play.session_new(session_name: 'config')!
|
||||
// session.playbook_add(path: args.path, text: args.text, git_url: args.git_url)!
|
||||
// for mut action in session.plbook.find(filter: 'tfrobot.define')! {
|
||||
// mut p := action.params
|
||||
// instance := p.get_default('instance', 'default')!
|
||||
// mut cl := get(instance: instance)!
|
||||
// mut cfg := cl.config()!
|
||||
// cfg.description = p.get('description')!
|
||||
// cfg.mnemonics = p.get('mnemonics')!
|
||||
// cfg.network = p.get('network')!
|
||||
// cl.config_save()!
|
||||
// }
|
||||
// }
|
||||
|
||||
// pub fn (mut self TFRobot[Config]) config_interactive() ! {
|
||||
// mut myui := ui.new()!
|
||||
// console.clear()
|
||||
// console.print_debug('\n## Configure tfrobot')
|
||||
// console.print_debug('========================\n\n')
|
||||
|
||||
// mut cfg := self.config()!
|
||||
|
||||
// self.instance = myui.ask_question(
|
||||
// question: 'name for tfrobot'
|
||||
// default: self.instance
|
||||
// )!
|
||||
// cfg.mnemonics = myui.ask_question(
|
||||
// question: 'please enter your mnemonics here'
|
||||
// minlen: 24
|
||||
// default: cfg.mnemonics
|
||||
// )!
|
||||
|
||||
// envs := ['main', 'qa', 'test', 'dev']
|
||||
// cfg.network = myui.ask_dropdown(
|
||||
// question: 'choose environment'
|
||||
// items: envs
|
||||
// )!
|
||||
|
||||
// self.config_save()!
|
||||
// }
|
||||
5
lib/threefold/grid3/tfrobot/factory_test.v
Normal file
5
lib/threefold/grid3/tfrobot/factory_test.v
Normal file
@@ -0,0 +1,5 @@
|
||||
module tfrobot
|
||||
|
||||
// test_new checks that a default TFRobot client can be constructed.
// The signature needs `!` so that `new()!` can propagate errors, and the
// result must be discarded with `_` (V rejects unused variables).
fn test_new() ! {
	_ := new()!
}
|
||||
153
lib/threefold/grid3/tfrobot/job.v
Normal file
153
lib/threefold/grid3/tfrobot/job.v
Normal file
@@ -0,0 +1,153 @@
|
||||
module tfrobot
|
||||
|
||||
// import os
|
||||
// import arrays
|
||||
// import freeflowuniverse.herolib.core.pathlib
|
||||
// import freeflowuniverse.herolib.osal
|
||||
// import json
|
||||
// import freeflowuniverse.herolib.ui.console
|
||||
|
||||
// VirtualMachine represents the VM info outputted by tfrobot
|
||||
// VirtualMachine represents the VM info outputted by tfrobot
pub struct VirtualMachine {
	name string
	// public IPv4 address (empty when none assigned)
	ip4 string
	// public IPv6 address (empty when none assigned)
	ip6 string
	// planetary (yggdrasil) network address — presumed from the field name
	yggip string
	// private network address
	ip string
	// mounts []string
}
|
||||
|
||||
// Job groups a set of VM deployments executed together by tfrobot,
// along with the credentials and ssh keys they need.
pub struct Job {
pub:
	name    string
	network Network
	// NOTE(review): field is spelled 'mneumonic' (sic); renaming would break
	// callers and the config templates, so it is kept as-is
	mneumonic string @[required]
pub mut:
	// ssh keys by name; VM configs reference keys by this name
	ssh_keys map[string]string
	// queued deployment instructions (see deploy_vms)
	deployments []Deployment
	// VMs produced by running the job, keyed by vm name
	vms map[string]VirtualMachine
}
|
||||
|
||||
// Deployment is an instruction to deploy a quantity of VMs with a given configuration
|
||||
// Deployment is an instruction to deploy a quantity of VMs with a given configuration
pub struct Deployment {
pub:
	// the VM template to instantiate
	config VMConfig
	// how many identical VMs to create from it
	quantity int
}
|
||||
|
||||
// pub struct VMConfig {
|
||||
// pub:
|
||||
// name string
|
||||
// region string
|
||||
// nrcores int
|
||||
// flist string
|
||||
// memory_gb int
|
||||
// ssh_key string
|
||||
// pub_ip bool
|
||||
// env_vars map[string]string
|
||||
// }
|
||||
|
||||
// pub struct Output {
|
||||
// ok map[string][]VMOutput
|
||||
// error map[string]string
|
||||
// }
|
||||
|
||||
// pub struct VMOutput {
|
||||
// name string
|
||||
// public_ip4 string
|
||||
// public_ip6 string
|
||||
// ygg_ip string
|
||||
// ip string
|
||||
// mounts []Mount
|
||||
// }
|
||||
|
||||
// pub struct Mount {
|
||||
// disk_name string
|
||||
// mount_point string
|
||||
// }
|
||||
|
||||
// Network is the TFGrid chain/network a job deploys against.
pub enum Network {
	main
	dev
	qa
	test
}
|
||||
|
||||
// job_new registers the job on this robot under its name and returns it.
pub fn (mut r TFRobot[Config]) job_new(job Job) !Job {
	key := job.name
	r.jobs[key] = job
	return r.jobs[key]
}
|
||||
|
||||
// deploy_vms queues `quantity` VMs with the given configuration on the job.
// Nothing is deployed until the job is executed.
pub fn (mut j Job) deploy_vms(config VMConfig, quantity int) {
	deployment := Deployment{
		config: config
		quantity: quantity
	}
	j.deployments << deployment
}
|
||||
|
||||
// pub fn (mut j Job) run() ![]VMOutput {
|
||||
// if j.deployments.len == 0 {
|
||||
// return error('Nothing to deploy.')
|
||||
// }
|
||||
// if j.ssh_keys.keys().len == 0 {
|
||||
// return error('Job requires at least one ssh key.')
|
||||
// }
|
||||
|
||||
// jsonfile := pathlib.get_file(
|
||||
// path: '${os.home_dir()}/hero/tfrobot/jobs/${j.name}.json'
|
||||
// create: true
|
||||
// )!
|
||||
// config := $tmpl('./templates/config.json')
|
||||
// // console.print_debug('config file*******\n${config}\n****')
|
||||
// pathlib.template_write(config, jsonfile.path, true)!
|
||||
// j.
|
||||
// result := osal.exec(cmd: 'tfrobot deploy -c ${jsonfile.path}', stdout: true)!
|
||||
|
||||
// vms := parse_output(result.output)!
|
||||
// // for vm in vms {
|
||||
// // j.vms[vm.name] = vm
|
||||
// // }
|
||||
// return vms
|
||||
// }
|
||||
|
||||
// vm_get looks up a deployed VM by name; returns none when the job
// has no VM with that name.
pub fn (j Job) vm_get(name string) ?VirtualMachine {
	if name in j.vms {
		return j.vms[name]
	}
	return none
}
|
||||
|
||||
// add_ssh_key registers a named ssh public key on the job; VM configs
// reference keys by this name.
pub fn (mut j Job) add_ssh_key(name string, key string) {
	j.ssh_keys[name] = key
}
|
||||
|
||||
// // parse_output parses the output of the tfrobot cli command
|
||||
// fn parse_output(output string) ![]VMOutput {
|
||||
// res := json.decode(Output, output) or { return error('invalid json syntax. output:\n${output}') }
|
||||
// if res.error.len > 0{
|
||||
// return error('TFRobot CLI Error, output:\n${output}')
|
||||
// }
|
||||
|
||||
// mut vms := []VMOutput{}
|
||||
// for k, v in res.ok{
|
||||
// vms << v
|
||||
// }
|
||||
|
||||
// return vms
|
||||
// // if !output.trim_space().starts_with('ok:') {
|
||||
// // return error('TFRobot CLI Error, output:\n${output}')
|
||||
// // }
|
||||
|
||||
// // to_parse := output.trim_space().trim_string_left('ok:\n')
|
||||
// // trimmed := to_parse.trim_space().trim_string_left('[').trim_string_right(']').trim_space()
|
||||
// // vms_lst := arrays.chunk(trimmed.split_into_lines()[1..], 6)
|
||||
// // vms := vms_lst.map(VirtualMachine{
|
||||
// // name: it[0].trim_space().trim_string_left('name: ')
|
||||
// // ip4: it[1].trim_string_left('publicip4: ')
|
||||
// // ip6: it[2].trim_string_left('publicip6: ')
|
||||
// // yggip: it[3].trim_string_left('yggip: ')
|
||||
// // ip: it[4].trim_string_left('ip: ')
|
||||
// // mounts: []
|
||||
// // })
|
||||
// // return vms
|
||||
// }
|
||||
35
lib/threefold/grid3/tfrobot/job_test.v
Normal file
35
lib/threefold/grid3/tfrobot/job_test.v
Normal file
@@ -0,0 +1,35 @@
|
||||
module tfrobot
|
||||
|
||||
const test_ssh_key = ''
|
||||
const test_mneumonic = ''
|
||||
const test_flist = 'https://hub.grid.tf/mariobassem1.3bot/threefolddev-holochain-latest.flist'
|
||||
|
||||
// test_job_new checks that a job can be registered on a fresh client.
// The signature needs `!` so the `!` propagation on new()/job_new() compiles.
fn test_job_new() ! {
	mut bot := new()!
	bot.job_new(
		name: 'test_job'
		mneumonic: test_mneumonic
	)!
}
|
||||
|
||||
// test_job_run builds a job with one vm config and runs it.
// NOTE(review): Job.run() is commented out in job.v, and this VMConfig
// literal uses fields (region, nrcores, memory_mb, pub_ip) that do not match
// the VMConfig fields used by vm_deploy (cpu, mem, ...) — confirm which
// VMConfig definition this test targets; as written it cannot compile
// against the current code. The signature also lacks `!` despite `!` calls.
fn test_job_run() {
	mut bot := new()!
	mut job := bot.job_new(
		name: 'test_job'
		mneumonic: test_mneumonic
	)!

	job.add_ssh_key('my_key', test_ssh_key)
	vm_config := VMConfig{
		name: 'holo_vm'
		region: 'europe'
		nrcores: 4
		memory_mb: 4096
		ssh_key: 'my_key'
		flist: test_flist
		pub_ip: true
	}

	job.deploy_vms(vm_config, 10)
	job.run()!
}
|
||||
57
lib/threefold/grid3/tfrobot/templates/config.json
Normal file
57
lib/threefold/grid3/tfrobot/templates/config.json
Normal file
@@ -0,0 +1,57 @@
|
||||
{
|
||||
"node_groups":
|
||||
@for deployment in j.deployments
|
||||
[
|
||||
{
|
||||
"name": "@{deployment.config.name}_group",
|
||||
"nodes_count": @{deployment.quantity},
|
||||
"free_cpu": @{deployment.config.nrcores},
|
||||
"free_mru": @{deployment.config.memory_gb},
|
||||
"free_ssd": 100,
|
||||
"free_hdd": 50,
|
||||
"dedicated": false,
|
||||
"public_ip4": @{deployment.config.pub_ip},
|
||||
"public_ip6": true,
|
||||
"certified": false,
|
||||
"region": "@{deployment.config.region}"
|
||||
}
|
||||
],
|
||||
@end
|
||||
@for deployment in j.deployments
|
||||
"vms": [
|
||||
{
|
||||
"name": "@{deployment.config.name}",
|
||||
"vms_count": @{deployment.quantity},
|
||||
"node_group": "@{deployment.config.name}_group",
|
||||
"cpu": @{deployment.config.nrcores},
|
||||
"mem": @{deployment.config.memory_gb},
|
||||
"ssd": [
|
||||
{
|
||||
"size": 15,
|
||||
"mount_point": "/mnt/ssd"
|
||||
}
|
||||
],
|
||||
"public_ip4": @{deployment.config.pub_ip},
|
||||
"public_ip6": true,
|
||||
"flist": "@{deployment.config.flist}",
|
||||
"entry_point": "/usr/local/bin/entrypoint.sh",
|
||||
"root_size": 0,
|
||||
"ssh_key": "@{deployment.config.ssh_key}",
|
||||
"env_vars": {
|
||||
@for key, val in deployment.config.env_vars
|
||||
"@{key}": "@{val}"
|
||||
@end
|
||||
}
|
||||
}
|
||||
],
|
||||
@end
|
||||
"ssh_keys": {
|
||||
@for key, val in j.ssh_keys
|
||||
"@{key}": "${val}"
|
||||
@end
|
||||
},
|
||||
"mnemonic": "@{j.mneumonic}",
|
||||
"network": "@{j.network}",
|
||||
"max_retries": 5
|
||||
}
|
||||
|
||||
40
lib/threefold/grid3/tfrobot/templates/config.yaml
Normal file
40
lib/threefold/grid3/tfrobot/templates/config.yaml
Normal file
@@ -0,0 +1,40 @@
|
||||
node_groups:
|
||||
@for deployment in j.deployments
|
||||
- name: @{deployment.config.name}_group
|
||||
nodes_count: @{deployment.quantity} # amount of nodes to be found
|
||||
free_cpu: @{deployment.config.nrcores} # number of logical cores
|
||||
free_mru: @{deployment.config.memory_gb} # amount of memory in GB
|
||||
free_ssd: 100 # amount of ssd storage in GB
|
||||
free_hdd: 50 # amount of hdd storage in GB
|
||||
dedicated: false # are nodes dedicated
|
||||
public_ip4: @{deployment.config.pub_ip}
|
||||
public_ip6: true
|
||||
certified: false # should the nodes be certified(if false the nodes could be certified of diyed)
|
||||
region: @{deployment.config.region} # region could be the name of the continents the nodes are located in (africa, americas, antarctic, antarctic ocean, asia, europe, oceania, polar)
|
||||
@end
|
||||
vms:
|
||||
@for deployment in j.deployments
|
||||
- name: @{deployment.config.name}
|
||||
vms_count: @{deployment.quantity} # amount of vms with the same configurations
|
||||
node_group: @{deployment.config.name}_group # the name of the predefined group of nodes
|
||||
cpu: @{deployment.config.nrcores} # number of logical cores
|
||||
mem: @{deployment.config.memory_gb} # amount of memory in GB
|
||||
public_ip4: @{deployment.config.pub_ip}
|
||||
public_ip6: true
|
||||
flist: @{deployment.config.flist}
|
||||
entry_point: /usr/local/bin/entrypoint.sh
|
||||
root_size: 0 # root size in GB
|
||||
ssh_key: @{deployment.config.ssh_key} # the name of the predefined ssh key
|
||||
env_vars: # env vars are passed to the newly created vms
|
||||
@for key, val in deployment.config.env_vars
|
||||
@{key}: "${val}"
|
||||
@end
|
||||
@end
|
||||
|
||||
ssh_keys: # map of ssh keys with key=name and value=the actual ssh key
|
||||
@for key, val in j.ssh_keys
|
||||
@{key}: "${val}"
|
||||
@end
|
||||
|
||||
mnemonic: "@{j.mneumonic}" # mnemonic of the user
|
||||
network: @{j.network} # eg: main, test, qa, dev
|
||||
47
lib/threefold/grid3/tfrobot/tfrobot_redis.v
Normal file
47
lib/threefold/grid3/tfrobot/tfrobot_redis.v
Normal file
@@ -0,0 +1,47 @@
|
||||
module tfrobot
|
||||
|
||||
import json
|
||||
// import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
// config_get loads the DeployConfig stored in redis for a named tfrobot
// deployment (hash 'tfrobot:<configname>', field 'config').
// Errors when the config is missing or the stored json is invalid.
pub fn config_get(configname string) !DeployConfig {
	mut redis := redisclient.core_get()!
	data := redis.hget('tfrobot:${configname}', 'config')!
	if data.len == 0 {
		return error("couldn't find tfrobot config with name:${configname}")
	}
	return json.decode(DeployConfig, data)!
}
|
||||
|
||||
// vms_get returns all VM outputs stored in redis for a deployment.
// Every field of the 'tfrobot:<configname>' hash is a VM record, except
// the reserved 'config' field which holds the DeployConfig.
pub fn vms_get(configname string) ![]VMOutput {
	mut redis := redisclient.core_get()!
	fields := redis.hkeys('tfrobot:${configname}')!
	mut result := []VMOutput{}
	for field in fields {
		if field != 'config' {
			result << vm_get(configname, field)!
		}
	}
	return result
}
|
||||
|
||||
// vm_get loads a single VM's stored output from redis by deployment name
// and vm name.
pub fn vm_get(configname string, name string) !VMOutput {
	mut redis := redisclient.core_get()!
	raw := redis.hget('tfrobot:${configname}', name)!
	if raw.len > 0 {
		return json.decode(VMOutput, raw)!
	}
	return error("couldn't find tfrobot config with name:${name}")
}
|
||||
|
||||
// vm_config_get finds the VMConfig that produced a given vm, matching the
// vm name by prefix (deployed vm names presumably carry a numeric suffix
// on top of the config name).
pub fn vm_config_get(configname string, name string) !VMConfig {
	deploy_config := config_get(configname)!
	matching := deploy_config.vms.filter(name.starts_with(it.name))
	if matching.len > 0 {
		return matching[0]
	}
	return error('Could not find vmconfig for ${configname}:${name}')
}
|
||||
214
lib/threefold/grid3/tfrobot/vm.v
Normal file
214
lib/threefold/grid3/tfrobot/vm.v
Normal file
@@ -0,0 +1,214 @@
|
||||
module tfrobot
|
||||
|
||||
// import os
|
||||
import freeflowuniverse.herolib.builder
|
||||
import freeflowuniverse.herolib.osal
|
||||
// import freeflowuniverse.herolib.servers.daguserver as dagu
|
||||
// import freeflowuniverse.herolib.clients.daguclient as dagu_client
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import time
|
||||
|
||||
// pub fn (vm VMOutput) ssh_interactive(key_path string) ! {
|
||||
// // b := builder.new()
|
||||
// // node := b.node_new(ipaddr:"root@${vm.ip4}")!
|
||||
// // node.exec_interactive('${homedir}/hero/bin/install.sh')!
|
||||
// // time.sleep(15 * time.second)
|
||||
// if vm.public_ip4 != '' {
|
||||
// osal.execute_interactive('ssh -i ${key_path} root@${vm.public_ip4.all_before('/')}')!
|
||||
// } else if vm.yggdrasil_ip != '' {
|
||||
// osal.execute_interactive('ssh -i ${key_path} root@${vm.yggdrasil_ip}')!
|
||||
// } else {
|
||||
// return error('no public nor planetary ip available to use')
|
||||
// }
|
||||
// }
|
||||
|
||||
// NodeArgs selects which address channels node() may probe and for how long.
@[params]
pub struct NodeArgs {
pub mut:
	ip4 bool = true // try the public ipv4 address
	ip6 bool = true // try the public ipv6 address
	planetary bool = true // try the yggdrasil (planetary) address
	// NOTE(review): mycelium is accepted but never checked in node() — confirm
	mycelium bool = true
	timeout int = 120 // timeout in sec
}
|
||||
|
||||
// return ssh node (can be used to do actions remotely)
|
||||
// will check all available channels till it can ssh into the node
|
||||
// return ssh node (can be used to do actions remotely)
// will check all available channels till it can ssh into the node
//
// Probes ipv4, then ipv6, then the planetary (yggdrasil) address, testing
// TCP port 22 on each, and returns a builder.Node for the first channel
// that answers. Retries every 100ms until args.timeout seconds elapse.
// NOTE(review): args.mycelium is never consulted here — confirm.
pub fn (vm VMOutput) node(args NodeArgs) !&builder.Node {
	mut b := builder.new()!
	// unix_milli: all time arithmetic below is in milliseconds
	start_time := time.now().unix_milli()
	mut run_time := 0.0
	for true {
		if args.ip4 && vm.public_ip4.len > 0 {
			console.print_debug('test ipv4 to: ${vm.public_ip4} for ${vm.name}')
			if osal.tcp_port_test(address: vm.public_ip4, port: 22, timeout: 2000) {
				console.print_debug('SSH port test ok')
				return b.node_new(
					ipaddr: 'root@${vm.public_ip4}'
					name: '${vm.deployment_name}_${vm.name}'
				)
			}
		}
		if args.ip6 && vm.public_ip6.len > 0 {
			console.print_debug('test ipv6 to: ${vm.public_ip6} for ${vm.name}')
			if osal.tcp_port_test(address: vm.public_ip6, port: 22, timeout: 2000) {
				console.print_debug('SSH port test ok')
				// ipv6 targets must be bracketed in the ssh address
				return b.node_new(
					ipaddr: 'root@[${vm.public_ip6}]'
					name: '${vm.deployment_name}_${vm.name}'
				)
			}
		}
		if args.planetary && vm.yggdrasil_ip.len > 0 {
			console.print_debug('test planetary to: ${vm.yggdrasil_ip} for ${vm.name}')
			if osal.tcp_port_test(address: vm.yggdrasil_ip, port: 22, timeout: 2000) {
				console.print_debug('SSH port test ok')
				return b.node_new(
					ipaddr: 'root@[${vm.yggdrasil_ip}]'
					name: '${vm.deployment_name}_${vm.name}'
				)
			}
		}
		run_time = time.now().unix_milli()
		// args.timeout is seconds, times are milliseconds
		if run_time > start_time + args.timeout * 1000 {
			break
		}
		time.sleep(100 * time.millisecond)
	}
	return error("couldn't connect to node, probably timeout.")
}
|
||||
|
||||
// tcpport_addr_get waits until the given TCP port answers on the VM's
// yggdrasil address and returns that address.
// Retries every 100ms with a hard-coded 20s deadline — NOTE(review): unlike
// node(), the timeout is not configurable here; confirm that's intended.
pub fn (vm VMOutput) tcpport_addr_get(port int) !string {
	start_time := time.now().unix_milli()
	mut run_time := 0.0
	for true {
		if vm.yggdrasil_ip.len > 0 {
			console.print_debug('test planetary for port ${port}: ${vm.yggdrasil_ip} for ${vm.name}')
			if osal.tcp_port_test(address: vm.yggdrasil_ip, port: port, timeout: 2000) {
				console.print_debug('port test ok')
				return vm.yggdrasil_ip
			}
		}

		// if vm.public_ip4.len>0 {
		// 	console.print_debug("test ipv4 to: ${vm.public_ip4} for ${vm.name}")
		// 	if osal.tcp_port_test(address:vm.public_ip4,port:22, timeout:2000) {
		// 		console.print_debug("SSH port test ok")
		// 		return b.node_new(ipaddr:"root@${vm.public_ip4}",name:"${vm.deployment_name}_${vm.name}")!
		// 	}
		// }
		// if args.ip6 && vm.public_ip6.len>0 {
		// 	console.print_debug("test ipv6 to: ${vm.public_ip6} for ${vm.name}")
		// 	if osal.tcp_port_test(address:vm.public_ip6, port:22, timeout:2000) {
		// 		console.print_debug("SSH port test ok")
		// 		return b.node_new(ipaddr:"root@[${vm.public_ip6}]",name:"${vm.deployment_name}_${vm.name}")!
		// 	}
		// }
		run_time = time.now().unix_milli()
		// fixed 20000ms (20s) deadline
		if run_time > start_time + 20000 {
			break
		}
		time.sleep(100 * time.millisecond)
	}
	return error("couldn't connect to node, probably timeout.")
}
|
||||
|
||||
// // create new DAG
|
||||
// // ```
|
||||
// // name string // The name of the DAG (required)
|
||||
// // description ?string // A brief description of the DAG.
|
||||
// // tags ?string // Free tags that can be used to categorize DAGs, separated by commas.
|
||||
// // env ?map[string]string // Environment variables that can be accessed by the DAG and its steps.
|
||||
// // restart_wait_sec ?int // The number of seconds to wait after the DAG process stops before restarting it.
|
||||
// // hist_retention_days ?int // The number of days to retain execution history (not for log files).
|
||||
// // delay_sec ?int // The interval time in seconds between steps.
|
||||
// // max_active_runs ?int // The maximum number of parallel running steps.
|
||||
// // max_cleanup_time_sec ?int // The maximum time to wait after sending a TERM signal to running steps before killing them.
|
||||
// // ```
|
||||
// pub fn (mut vm VMOutput) tasks_new(args_ dagu.DAGArgs) &dagu.DAG {
|
||||
// mut args := args_
|
||||
// mut d := dagu.dag_new(
|
||||
// name: args.name
|
||||
// description: args.description
|
||||
// tags: args.tags
|
||||
// env: args.env
|
||||
// restart_wait_sec: args.restart_wait_sec
|
||||
// hist_retention_days: args.hist_retention_days
|
||||
// delay_sec: args.delay_sec
|
||||
// max_active_runs: args.max_active_runs
|
||||
// max_cleanup_time_sec: args.max_cleanup_time_sec
|
||||
// )
|
||||
|
||||
// d.env = {
|
||||
// 'PATH': '/root/.nix-profile/bin:/root/hero/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:\$PATH'
|
||||
// }
|
||||
|
||||
// return &d
|
||||
// }
|
||||
|
||||
// // name is the name of the tasker (dag), which has set of staps we will execute
|
||||
// pub fn (vm VMOutput) tasks_run(dag &dagu.DAG) ! {
|
||||
// // console.print_debug(dag)
|
||||
// r := vm.dagu_addr_get()!
|
||||
// console.print_debug('connect to dagu on ${vm.name} -> ${r.addr}')
|
||||
// mut client := dagu_client.get(instance: 'robot_dagu')!
|
||||
// mut cfg := client.config()!
|
||||
// cfg.url = 'http://${r.addr}:${r.port}'
|
||||
// cfg.username = r.username
|
||||
// cfg.password = r.password
|
||||
|
||||
// if dag.name in client.list_dags()!.dags.map(it.dag.name) {
|
||||
// console.print_debug('delete dag: ${dag.name}')
|
||||
// client.delete_dag(dag.name)!
|
||||
// }
|
||||
|
||||
// console.print_header('send dag to node: ${dag.name}')
|
||||
// console.print_debug(dag.str())
|
||||
// client.new_dag(dag)! // will post it
|
||||
// client.start_dag(dag.name)!
|
||||
// }
|
||||
|
||||
// pub fn (vm VMOutput) tasks_see(dag &dagu.DAG) ! {
|
||||
// r := vm.dagu_addr_get()!
|
||||
// // http://[302:1d81:cef8:3049:fbe1:69ba:bd8c:52ec]:8081/dags/holochain_scaffold
|
||||
// cmd3 := "open 'http://[${r.addr}]:8081/dags/${dag.name}'"
|
||||
// // console.print_debug(cmd3)
|
||||
// osal.exec(cmd: cmd3)!
|
||||
// }
|
||||
|
||||
// vscode opens the code-server web UI of the VM (port 8080) in the local
// browser, over the address resolved by dagu_addr_get.
// NOTE(review): `open` is the macOS launcher — on Linux this would need
// xdg-open; confirm target platforms.
pub fn (vm VMOutput) vscode() ! {
	r := vm.dagu_addr_get()!
	cmd3 := "open 'http://[${r.addr}]:8080'"
	osal.exec(cmd: cmd3)!
}
|
||||
|
||||
// vscode_holochain opens the VM's code-server UI directly in the holochain
// hello-world example folder.
// NOTE(review): `open` is macOS-only — see vscode().
pub fn (vm VMOutput) vscode_holochain() ! {
	r := vm.dagu_addr_get()!
	cmd3 := "open 'http://[${r.addr}]:8080/?folder=/root/Holochain/hello-world'"
	osal.exec(cmd: cmd3)!
}
|
||||
|
||||
// vscode_holochain_proxy opens the code-server proxy endpoint (port 8282
// behind code-server on 8080) of the VM in the local browser.
// NOTE(review): `open` is macOS-only — see vscode().
pub fn (vm VMOutput) vscode_holochain_proxy() ! {
	r := vm.dagu_addr_get()!
	// bugfix: the single quote around the URL was unterminated, which made
	// the generated shell command invalid — close it like the sibling fns do
	cmd3 := "open 'http://[${r.addr}]:8080/proxy/8282/'"
	osal.exec(cmd: cmd3)!
}
|
||||
|
||||
// DaguInfo bundles the connection details for the dagu service on a VM.
struct DaguInfo {
mut:
	// reachable address of the VM (yggdrasil ip, see dagu_addr_get)
	addr     string
	username string
	password string
	port     int
}
|
||||
|
||||
// dagu_addr_get resolves the address and credentials of the dagu service on
// this VM. Username/password are taken from the VM's DAGU_BASICAUTH_* env
// vars, with fallbacks 'admin'/'planetfirst'.
fn (vm VMOutput) dagu_addr_get() !DaguInfo {
	mut vm_config := vm_config_get(vm.deployment_name, vm.name)!
	mut env := vm_config.env_vars.clone()
	mut r := DaguInfo{}
	r.username = env['DAGU_BASICAUTH_USERNAME'] or { 'admin' }
	r.password = env['DAGU_BASICAUTH_PASSWORD'] or { 'planetfirst' }
	r.port = 8081
	// blocks (up to ~20s) until the port answers — see tcpport_addr_get
	r.addr = vm.tcpport_addr_get(r.port)!
	return r
}
|
||||
102
lib/threefold/grid3/tfrobot/vm_deploy.v
Normal file
102
lib/threefold/grid3/tfrobot/vm_deploy.v
Normal file
@@ -0,0 +1,102 @@
|
||||
module tfrobot
|
||||
|
||||
import rand
|
||||
|
||||
// VMSpecs describes one VM to deploy through vm_deploy.
struct VMSpecs {
	deployment_name string
	name            string
	nodeid          u32
	// public ssh keys to install; at least one is required by vm_deploy
	pub_sshkeys []string
	flist       string // if any, if used then ostype not used
	// size of the rootfs disk — NOTE(review): this comment previously said
	// "bytes", but vm_deploy clamps it to >= 20 and forwards it as
	// free_ssd/root_size, which look like GB; confirm the intended unit
	size   u32
	cores  int // number of virtual cores
	memory u32 // ram in mb
	ostype OSType
}
|
||||
|
||||
// OSType names a base operating-system image.
// NOTE(review): currently unused by vm_deploy, which only looks at flist.
enum OSType {
	ubuntu_22_04
	ubuntu_24_04
	arch
	alpine
}
|
||||
|
||||
// only connect to yggdrasil and mycelium
|
||||
// only connect to yggdrasil and mycelium
// vm_deploy deploys a single VM described by args_ through tfrobot and
// returns its output record.
//
// Behaviour:
//  - at least one public ssh key is required; the first key is exposed to
//    the VM as env var SSH_KEY, additional keys as SSH_KEY1, SSH_KEY2, ...
//  - the rootfs size is clamped to a minimum of 20 (forwarded as
//    free_ssd/root_size)
//  - when no flist is given a default ubuntu 20.04 flist is used
//  - a random node-group name is generated so repeated deploys don't collide
pub fn (mut robot TFRobot[Config]) vm_deploy(args_ VMSpecs) !VMOutput {
	mut args := args_

	if args.pub_sshkeys.len == 0 {
		return error('at least one ssh key needed to deploy vm')
	}

	size := if args.size < 20 {
		20
	} else {
		args.size
	}
	// deploymentstate_db.set(args.deployment_name,"vm_${args.name}",json.encode(VMDeployed))!

	mut ssh_keys := {
		'SSH_KEY': args.pub_sshkeys[0]
	}
	// bugfix: the loop previously iterated pub_sshkeys[0..], which duplicated
	// the first key as both SSH_KEY and SSH_KEY0; start numbering at 1
	for i in 1 .. args.pub_sshkeys.len {
		ssh_keys['SSH_KEY${i}'] = args.pub_sshkeys[i]
	}

	flist := if args.flist == '' {
		'https://hub.grid.tf/samehabouelsaad.3bot/abouelsaad-grid3_ubuntu20.04-latest.flist'
	} else {
		args.flist
	}

	// unique group name: spec dimensions + random suffix
	node_group := 'ng_${args.cores}_${args.memory}_${args.size}_${rand.string(8).to_lower()}'

	config := robot.config()!
	mneumonics := config.mnemonics
	output := robot.deploy(
		name: args.name
		mnemonic: mneumonics
		network: .main
		node_groups: [
			NodeGroup{
				name: node_group
				nodes_count: 1
				free_cpu: args.cores
				free_mru: int(args.memory)
				free_ssd: int(size)
			},
		]
		vms: [
			VMConfig{
				name: args.name
				vms_count: 1
				cpu: args.cores
				mem: int(args.memory)
				root_size: int(size)
				node_group: node_group
				ssh_key: 'SSH_KEY'
				flist: flist
				entry_point: '/sbin/zinit init'
			},
		]
		ssh_keys: ssh_keys
	) or { return error('\nTFRobot deploy error:\n - ${err}') }

	if output.ok.len < 1 {
		// a failed deploy must carry at least one error entry
		if output.error.len < 1 {
			panic('this should never happen')
		}

		err := output.error[output.error.keys()[0]]
		return error('failed to deploy vm ${err}')
	}

	// exactly one group with exactly one vm was requested
	vm_outputs := output.ok[output.ok.keys()[0]]
	if vm_outputs.len != 1 {
		panic('this should never happen ${vm_outputs}')
	}

	vm_output := vm_outputs[0]
	return vm_output
}
|
||||
32
lib/threefold/grid3/tfrobot/vm_deploy_test.v
Normal file
32
lib/threefold/grid3/tfrobot/vm_deploy_test.v
Normal file
@@ -0,0 +1,32 @@
|
||||
module tfrobot
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.osal
|
||||
|
||||
const testdata_dir = '${os.dir(@FILE)}/testdata'
|
||||
|
||||
// testsuite_begin loads test credentials from testdata/.env before any test runs.
fn testsuite_begin() ! {
	osal.load_env_file('${testdata_dir}/.env')!
}
|
||||
|
||||
// test_vm_deploy deploys one minimal vm and checks it received network
// addresses. Needs MNEUMONICS and SSH_KEY in the environment (loaded from
// testdata/.env by testsuite_begin) — online integration test.
fn test_vm_deploy() ! {
	mneumonics := os.getenv('MNEUMONICS')
	ssh_key := os.getenv('SSH_KEY')

	mut robot := configure('testrobot',
		mnemonics: mneumonics
		network: 'main'
	)!
	result := robot.vm_deploy(
		deployment_name: 'test_deployment'
		name: 'test_vm'
		cores: 1
		memory: 256
		pub_sshkeys: [ssh_key]
	)!
	// bugfix: a leftover debug `panic(result)` here made the asserts below
	// unreachable — removed so the test actually verifies the result
	assert result.name.starts_with('test_vm')
	assert result.yggdrasil_ip.len > 0
	assert result.mycelium_ip.len > 0
}
|
||||
2
lib/threefold/grid3/tokens/readme.md
Normal file
2
lib/threefold/grid3/tokens/readme.md
Normal file
@@ -0,0 +1,2 @@
|
||||
|
||||
> TODO:! please make example and see it works
|
||||
422
lib/threefold/grid3/tokens/tokens_fetch.v
Normal file
422
lib/threefold/grid3/tokens/tokens_fetch.v
Normal file
@@ -0,0 +1,422 @@
|
||||
module tokens
|
||||
|
||||
import json
|
||||
import freeflowuniverse.herolib.httpcache
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
//
|
||||
// Raw JSON struct
|
||||
//
|
||||
struct Raw_Wallet {
|
||||
address string
|
||||
description string
|
||||
liquid bool
|
||||
amount string
|
||||
}
|
||||
|
||||
struct Raw_FoundationAccountInfo {
|
||||
category string
|
||||
wallets []Raw_Wallet
|
||||
}
|
||||
|
||||
struct Raw_StatsTFT {
|
||||
total_tokens string
|
||||
total_accounts string
|
||||
total_locked_tokens string
|
||||
total_vested_tokens string
|
||||
total_liquid_foundation_tokens string
|
||||
total_illiquid_foundation_tokens string
|
||||
total_liquid_tokens string
|
||||
foundation_accounts_info []Raw_FoundationAccountInfo
|
||||
locked_tokens_info []string
|
||||
}
|
||||
|
||||
struct Raw_Balance {
|
||||
amount string
|
||||
asset string
|
||||
}
|
||||
|
||||
struct Raw_Account {
|
||||
address string
|
||||
balances []Raw_Balance
|
||||
vesting_accounts []Raw_VestingAccount
|
||||
locked_amounts []Raw_LockedAmount
|
||||
}
|
||||
|
||||
struct Raw_VestingAccount {
|
||||
address string
|
||||
vestingscheme string
|
||||
balances []Raw_Balance
|
||||
}
|
||||
|
||||
struct Raw_LockedAmount {
|
||||
address string
|
||||
locked_until string
|
||||
balances []Raw_Balance
|
||||
}
|
||||
|
||||
struct Raw_StellarBalance {
|
||||
asset string
|
||||
balance string
|
||||
}
|
||||
|
||||
struct Raw_StellarHistory {
|
||||
ts int
|
||||
payments int
|
||||
trades int
|
||||
balances []Raw_StellarBalance
|
||||
}
|
||||
|
||||
struct Raw_StellarAccount {
|
||||
account string
|
||||
history []Raw_StellarHistory
|
||||
}
|
||||
|
||||
//
|
||||
// Improved struct
|
||||
//
|
||||
pub struct Wallet {
|
||||
pub mut:
|
||||
address string
|
||||
description string
|
||||
liquid bool
|
||||
amount f64
|
||||
}
|
||||
|
||||
pub struct FoundationAccountInfo {
|
||||
pub mut:
|
||||
category string
|
||||
wallets []Wallet
|
||||
}
|
||||
|
||||
struct LockedTokensInfo {
|
||||
pub mut:
|
||||
amount f64
|
||||
until string
|
||||
}
|
||||
|
||||
struct StatsTFT {
|
||||
pub mut:
|
||||
total_tokens f64
|
||||
total_accounts f64
|
||||
total_locked_tokens f64
|
||||
total_vested_tokens f64
|
||||
total_liquid_foundation_tokens f64
|
||||
total_illiquid_foundation_tokens f64
|
||||
total_liquid_tokens f64
|
||||
foundation_accounts_info []FoundationAccountInfo
|
||||
locked_tokens_info []LockedTokensInfo
|
||||
}
|
||||
|
||||
struct Balance {
|
||||
pub:
|
||||
amount f64
|
||||
asset string
|
||||
}
|
||||
|
||||
struct Account {
|
||||
pub mut:
|
||||
address string
|
||||
balances []Balance
|
||||
vesting_accounts []VestingAccount
|
||||
locked_amounts []LockedAmount
|
||||
}
|
||||
|
||||
struct VestingAccount {
|
||||
pub mut:
|
||||
address string
|
||||
vestingscheme string
|
||||
balances []Balance
|
||||
}
|
||||
|
||||
struct LockedAmount {
|
||||
pub mut:
|
||||
address string
|
||||
locked_until string
|
||||
balances []Balance
|
||||
}
|
||||
|
||||
struct Group {
|
||||
pub mut:
|
||||
name string
|
||||
distribution f32 // in percent from 0..1
|
||||
farmed f64 // in tokens
|
||||
done f64
|
||||
amount f64
|
||||
remain f64
|
||||
}
|
||||
|
||||
//
|
||||
// Workflow
|
||||
//
|
||||
// account_url builds the stellar-stats API endpoint URL for one account.
fn account_url(account string) string {
	return 'https://statsdata.threefoldtoken.com/stellar_stats/api/account/${account}'
}
|
||||
|
||||
// parsef parses a human-formatted number (e.g. '1,234.5') into f64 by
// stripping the thousands separators first.
fn parsef(f string) f64 {
	return f.replace(',', '').f64()
}
|
||||
|
||||
// parse merges the raw TFT and TFTA statistics into one numeric StatsTFT:
// totals are summed pairwise, foundation wallets are merged per
// (category, address) with their amounts added, and locked-token lines are
// parsed into amount + date.
// NOTE(review): the `stellar` parameter is never used in this function —
// confirm whether it was meant to contribute to the merge.
fn parse(tft Raw_StatsTFT, tfta Raw_StatsTFT, stellar Raw_StellarAccount) StatsTFT {
	mut final := StatsTFT{}

	final.total_tokens = parsef(tft.total_tokens) + parsef(tfta.total_tokens)
	final.total_accounts = parsef(tft.total_accounts) + parsef(tfta.total_accounts)
	final.total_locked_tokens = parsef(tft.total_locked_tokens) + parsef(tfta.total_locked_tokens)
	final.total_vested_tokens = parsef(tft.total_vested_tokens) + parsef(tfta.total_vested_tokens)
	final.total_liquid_foundation_tokens = parsef(tft.total_liquid_foundation_tokens) +
		parsef(tfta.total_liquid_foundation_tokens)
	final.total_illiquid_foundation_tokens = parsef(tft.total_illiquid_foundation_tokens) +
		parsef(tfta.total_illiquid_foundation_tokens)
	final.total_liquid_tokens = parsef(tft.total_liquid_tokens) + parsef(tfta.total_liquid_tokens)

	// category -> address -> merged wallet
	mut info := map[string]map[string]Wallet{}
	src := [tft, tfta]

	//
	// FoundationAccountInfo
	//
	for source in src {
		for entry in source.foundation_accounts_info {
			for wal in entry.wallets {
				// map access yields a zero Wallet on first sight of an address,
				// so += below accumulates across both sources
				mut found := info[entry.category][wal.address]

				found.address = wal.address
				found.description = wal.description
				found.liquid = wal.liquid
				found.amount += parsef(wal.amount)

				info[entry.category][wal.address] = found
			}
		}
	}

	// flatten the merged map back into per-category wallet lists
	for cat, val in info {
		mut accountinfo := FoundationAccountInfo{
			category: cat
		}

		for _, wal in val {
			accountinfo.wallets << wal
		}

		final.foundation_accounts_info << accountinfo
	}

	//
	// LockedTokensInfo
	//
	for source in src {
		for locked in source.locked_tokens_info {
			// each line is whitespace-separated; field 0 is the amount,
			// fields 3+4 presumably form the unlock date+time —
			// NOTE(review): indexing panics if a line has fewer fields
			x := locked.fields()

			final.locked_tokens_info << LockedTokensInfo{
				amount: parsef(x[0])
				until: x[3] + ' ' + x[4]
			}
		}
	}

	return final
}
|
||||
|
||||
// parse_special splits the total TFT supply over the tokenomics distribution
// groups and returns them keyed by a short slug (e.g. 'ecosystem-grants').
// Each Group carries its target distribution fraction and the amount `done`;
// the 'advisors-founders' group receives whatever remains after all other
// groups are accounted for. Amounts are summed from wallet descriptions and
// liquidity flags — assumes StatsTFT was produced by parse() (TODO confirm).
pub fn parse_special(s StatsTFT) map[string]Group {
	// fixed 4 billion tokens
	// master_total_tokens := f64(4000000000)
	total_tokens := s.total_tokens

	// mut liquidity := tokens.FoundationAccountInfo{}
	mut contribution := FoundationAccountInfo{}
	mut council := FoundationAccountInfo{}

	// pick out the two wallet categories whose amounts feed the grant total
	for info in s.foundation_accounts_info {
		if info.category == 'threefold contribution wallets' {
			contribution = info
		}

		/*
		if info.category == "liquidity wallets" {
			liquidity = info
		}
		*/

		if info.category == 'wisdom council wallets' {
			council = info
		}
	}

	// console.print_debug(liquidity)

	mut group := map[string]Group{}

	// Farming rewards after April 19 2018 (***)
	// everything minted beyond the genesis pool counts as farming rewards
	group['farming-rewards-2018'] = Group{
		name: 'Farming rewards after April 19 2018'
		distribution: 0.75
		done: s.total_tokens - 695000000 // Genesis pool
	}

	mut grant_amount := f64(0)

	// grants held in the contribution category
	for wallet in contribution.wallets {
		if wallet.description == 'TF Grants Wallet' {
			grant_amount += f64(wallet.amount)
		}
	}

	// grants held by the wisdom council
	for wallet in council.wallets {
		if wallet.description == 'TF Grants Wisdom' {
			grant_amount += f64(wallet.amount)
		}
	}

	// Ecosystem Grants (*)
	group['ecosystem-grants'] = Group{
		name: 'Ecosystem Grants'
		distribution: 0.03
		done: grant_amount
	}

	// Promotion & Marketing Effort
	group['promotion-marketing'] = Group{
		name: 'Promotion & Marketing Effort '
		distribution: 0.05
		done: 100000000 // estimation
	}

	mut liquidity_amount := i64(0)

	// liquid wallets across ALL foundation categories count as liquidity
	for info in s.foundation_accounts_info {
		for wallet in info.wallets {
			if wallet.liquid == true {
				liquidity_amount += i64(wallet.amount)
			}
		}
	}

	// Ecosystem Contribution, Liquidity Exchanges
	group['ecosystem-contribution'] = Group{
		name: 'Ecosystem Contribution, Liquidity Exchanges'
		distribution: 0.04
		done: liquidity_amount
	}

	// Technology Acquisition + Starting Team (40p)
	group['technology'] = Group{
		name: 'Technology Acquisition + Starting Team'
		distribution: 0.07
		done: 290000000
	}

	// Advisors, Founders & Team
	// done is filled in below as the remainder of the supply
	group['advisors-founders'] = Group{
		name: 'Advisors, Founders & Team'
		distribution: 0.06
	}

	sum := group['farming-rewards-2018'].done + group['ecosystem-grants'].done +
		group['promotion-marketing'].done + group['ecosystem-contribution'].done +
		group['technology'].done

	// remainder of the supply is attributed to advisors/founders/team
	group['advisors-founders'].done = total_tokens - sum

	return group
}
|
||||
|
||||
// parse_balance turns a raw (string-amount) balance into a typed Balance.
fn parse_balance(bal Raw_Balance) Balance {
	amount := parsef(bal.amount)
	return Balance{
		amount: amount
		asset: bal.asset
	}
}
|
||||
|
||||
// account_info converts a raw (json-decoded) stellar account into the
// nicer Account representation, parsing every balance amount to f64.
fn account_info(account Raw_Account) Account {
	mut acc := Account{
		address: account.address
	}

	// plain balances on the account itself
	for b in account.balances {
		acc.balances << parse_balance(b)
	}

	// each vesting account carries its own balances
	for v in account.vesting_accounts {
		mut vest := VestingAccount{
			address: v.address
			vestingscheme: v.vestingscheme
		}
		for b in v.balances {
			vest.balances << parse_balance(b)
		}
		acc.vesting_accounts << vest
	}

	// locked amounts: balances unavailable until locked_until
	for l in account.locked_amounts {
		mut lock := LockedAmount{
			address: l.address
			locked_until: l.locked_until
		}
		for b in l.balances {
			lock.balances << parse_balance(b)
		}
		acc.locked_amounts << lock
	}

	return acc
}
|
||||
|
||||
// load_tokens fetches the TFT and TFTA statistics (plus one extra stellar
// account missing from the TFT stats) and merges them via parse().
// HTTP responses are cached for 24h (86400s) through httpcache.
// On json decode failure an EMPTY StatsTFT is returned (not an error) —
// callers cannot distinguish that from a genuinely empty result.
pub fn load_tokens() ?StatsTFT {
	mut hc := httpcache.newcache()

	urltft := 'https://statsdata.threefoldtoken.com/stellar_stats/api/stats?detailed=true'
	urltfta := 'https://statsdata.threefoldtoken.com/stellar_stats/api/stats?detailed=true&tokencode=TFTA'

	// console.print_debug("[+] fetching tokens data from redis")
	rtft := hc.getex(urltft, 86400)?
	rtfta := hc.getex(urltfta, 86400)?

	// extra stellar account for missing account in tft
	addac := 'GB2C5HCZYWNGVM6JGXDWQBJTMUY4S2HPPTCAH63HFAQVL2ALXDW7SSJ7'
	addurl := account_url(addac)
	rstel := hc.getex(addurl, 86400)?

	tft := json.decode(Raw_StatsTFT, rtft) or {
		console.print_debug('Failed to decode json (statsdata: ${urltft})')
		return StatsTFT{}
	}

	tfta := json.decode(Raw_StatsTFT, rtfta) or {
		console.print_debug('Failed to decode json (statsdata: ${urltfta})')
		return StatsTFT{}
	}

	stellar := json.decode(Raw_StellarAccount, rstel) or {
		console.print_debug('Failed to decode json (account: ${addurl})')
		return StatsTFT{}
	}

	// merge TFT + TFTA stats and fold in the extra account
	merged := parse(tft, tfta, stellar)

	return merged
}
|
||||
|
||||
// load_account fetches one stellar account by id (responses cached for 24h)
// and returns it in parsed form; on decode failure an empty Account comes back.
pub fn load_account(accid string) ?Account {
	mut hc := httpcache.newcache()

	accurl := account_url(accid)
	raccount := hc.getex(accurl, 86400)?

	raw := json.decode(Raw_Account, raccount) or {
		console.print_debug('Failed to decode json (stellar: ${accurl})')
		return Account{}
	}

	return account_info(raw)
}
|
||||
176
lib/threefold/grid3/zerohub/flist.v
Normal file
176
lib/threefold/grid3/zerohub/flist.v
Normal file
@@ -0,0 +1,176 @@
|
||||
module zerohub
|
||||
|
||||
import net.http
|
||||
import json
|
||||
import x.json2
|
||||
import os
|
||||
|
||||
// Repository is one hub repository (user namespace) as returned by
// the /api/repositories endpoint.
pub struct Repository {
pub:
	name     string // repository name on the hub
	official bool   // true for repositories maintained by ThreeFold — confirm against API docs
}
|
||||
|
||||
// FlistInfo is the metadata the hub reports for one flist entry.
pub struct FlistInfo {
pub:
	name     string // flist file name
	size     string // size as reported by the hub (string, human formatted) — confirm
	updated  i64    // last-update timestamp — presumably unix seconds, confirm
	type_    string // entry type (regular flist vs symlink) — confirm against API
	linktime i64    // link timestamp for symlinked entries — confirm
	target   string // link target when the entry is a symlink
}
|
||||
|
||||
// FlistContents is the dump of a single flist: entry counters plus the
// complete file listing.
pub struct FlistContents {
pub:
	regular   i32    // number of regular files
	failure   i32    // number of failed entries — confirm semantics against API
	directory i32    // number of directories
	symlink   string // NOTE(review): string while the other counters are i32 — verify against API schema
	fullsize  i64    // total size in bytes — presumably uncompressed, confirm
	content   []File // individual files contained in the flist
}
|
||||
|
||||
// File is one entry inside an flist dump.
pub struct File {
pub:
	size i64    // file size in bytes
	path string // path of the file inside the flist
}
|
||||
|
||||
// get_flists returns the names of all flists known to the hub.
pub fn (mut cl ZeroHubClient) get_flists() ![]string {
	endpoint := 'https://${cl.url}/api/flist'
	resp := http.get(endpoint)!
	return json.decode([]string, resp.body)!
}
|
||||
|
||||
// get_repos lists every repository (user namespace) on the hub.
pub fn (mut cl ZeroHubClient) get_repos() ![]Repository {
	endpoint := 'https://${cl.url}/api/repositories'
	resp := http.get(endpoint)!
	return json.decode([]Repository, resp.body)!
}
|
||||
|
||||
// get_files returns, per repository name, the list of flists it contains.
pub fn (mut cl ZeroHubClient) get_files() !map[string][]FlistInfo {
	resp := http.get('https://${cl.url}/api/fileslist')!
	// propagate a decode failure with `!` (was returned unhandled,
	// unlike the sibling endpoint wrappers)
	return json.decode(map[string][]FlistInfo, resp.body)!
}
|
||||
|
||||
// get_repo_flists lists the flists inside one repository.
pub fn (mut cl ZeroHubClient) get_repo_flists(repo_name string) ![]FlistInfo {
	resp := http.get('https://${cl.url}/api/flist/${repo_name}')!
	// propagate a decode failure with `!` (was returned unhandled,
	// unlike the sibling endpoint wrappers)
	return json.decode([]FlistInfo, resp.body)!
}
|
||||
|
||||
// get_flist_dump returns the full contents (counters + file list) of one flist.
pub fn (mut cl ZeroHubClient) get_flist_dump(repo_name string, flist_name string) !FlistContents {
	endpoint := 'https://${cl.url}/api/flist/${repo_name}/${flist_name}'
	resp := http.get(endpoint)!
	return json.decode(FlistContents, resp.body)!
}
|
||||
|
||||
// get_me returns the authenticated user's flist overview as raw json
// (requires cl.header to carry the Authorization bearer token).
pub fn (mut cl ZeroHubClient) get_me() !json2.Any {
	request := http.Request{
		method: http.Method.get
		header: cl.header
		url: 'https://${cl.url}/api/flist/me'
	}
	resp := request.do()!
	return json2.raw_decode(resp.body)!
}
|
||||
|
||||
// get_my_flist dumps the contents of one of the authenticated user's flists.
pub fn (mut cl ZeroHubClient) get_my_flist(flist string) !FlistContents {
	request := http.Request{
		method: http.Method.get
		header: cl.header
		url: 'https://${cl.url}/api/flist/me/${flist}'
	}
	resp := request.do()!
	return json.decode(FlistContents, resp.body)!
}
|
||||
|
||||
// remove_my_flist deletes one of the authenticated user's flists and
// returns the hub's raw json reply.
pub fn (mut cl ZeroHubClient) remove_my_flist(flist string) !json2.Any {
	request := http.Request{
		method: http.Method.delete
		header: cl.header
		url: 'https://${cl.url}/api/flist/me/${flist}'
	}
	resp := request.do()!
	return json2.raw_decode(resp.body)!
}
|
||||
|
||||
// symlink creates `linkname` pointing at `source` inside our own repository.
pub fn (mut cl ZeroHubClient) symlink(source string, linkname string) !string {
	request := http.Request{
		method: http.Method.get
		header: cl.header
		url: 'https://${cl.url}/api/flist/me/${source}/link/${linkname}'
	}
	resp := request.do()!
	return resp.body
}
|
||||
|
||||
// cross_symlink creates `linkname` in our repository pointing at
// `source` inside another repository `repo`.
pub fn (mut cl ZeroHubClient) cross_symlink(repo string, source string, linkname string) !string {
	request := http.Request{
		method: http.Method.get
		header: cl.header
		url: 'https://${cl.url}/api/flist/me/${linkname}/crosslink/${repo}/${source}'
	}
	resp := request.do()!
	return resp.body
}
|
||||
|
||||
// rename renames an flist `source` in our repository to `dest`.
pub fn (mut cl ZeroHubClient) rename(source string, dest string) !string {
	request := http.Request{
		method: http.Method.get
		header: cl.header
		url: 'https://${cl.url}/api/flist/me/${source}/rename/${dest}'
	}
	resp := request.do()!
	return resp.body
}
|
||||
|
||||
// promote copies the cross-repository flist source_repo/source_name into
// our own repository under `localname`.
pub fn (mut cl ZeroHubClient) promote(source_repo string, source_name string, localname string) !string {
	request := http.Request{
		method: http.Method.get
		header: cl.header
		url: 'https://${cl.url}/api/flist/me/promote/${source_repo}/${source_name}/${localname}'
	}
	resp := request.do()!
	return resp.body
}
|
||||
|
||||
// convert asks the hub to turn the given docker image into an flist
// owned by the authenticated user; returns the raw response body.
pub fn (mut cl ZeroHubClient) convert(image string) !string {
	endpoint := 'https://${cl.url}/api/flist/me/docker'
	form := http.PostMultipartFormConfig{
		form: {
			'image': image
		}
		header: cl.header
	}
	resp := http.post_multipart_form(endpoint, form)!
	return resp.body
}
|
||||
|
||||
// merge_flists asks the hub to merge the given flists (repo/name entries)
// into a new flist called `target` under our account.
pub fn (mut cl ZeroHubClient) merge_flists(flists []string, target string) !string {
	payload := json.encode(flists)
	request := http.Request{
		method: http.Method.post
		header: cl.header
		url: 'https://${cl.url}/api/flist/me/merge/${target}'
		data: payload
	}
	resp := request.do()!
	return resp.body
}
|
||||
|
||||
// upload_flist uploads an already-built flist file to the authenticated
// account via the hub's upload-flist endpoint.
// NOTE(review): shells out to curl and interpolates `path` and the secret
// into the command line — do not pass untrusted input here; should move
// to net.http multipart upload eventually.
pub fn (mut cl ZeroHubClient) upload_flist(path string) !os.Result {
	// HTTP method names are case-sensitive: must be POST, not Post
	cmd := "curl -X POST -H 'Authorization: Bearer ${cl.secret}' -F 'file=@${path}' https://${cl.url}/api/flist/me/upload-flist"

	res := os.execute(cmd)
	return res
}
|
||||
|
||||
// upload_archive uploads a source archive (e.g. tar.gz) to the hub, which
// converts it into an flist under the authenticated account.
// NOTE(review): shells out to curl and interpolates `path` and the secret
// into the command line — do not pass untrusted input here.
pub fn (mut cl ZeroHubClient) upload_archive(path string) !os.Result {
	// HTTP method names are case-sensitive: must be POST, not Post
	cmd := "curl -X POST -H 'Authorization: Bearer ${cl.secret}' -F 'file=@${path}' https://${cl.url}/api/flist/me/upload"

	res := os.execute(cmd)
	return res
}
|
||||
18
lib/threefold/grid3/zerohub/readme.md
Normal file
18
lib/threefold/grid3/zerohub/readme.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# ZeroHub
|
||||
|
||||
This is a SAL for the ZeroHub
|
||||
|
||||
The default hub we connect to is https://hub.grid.tf/
|
||||
|
||||
### for developers
|
||||
|
||||
more info see https://github.com/threefoldtech/0-hub#public-api-endpoints-no-authentication-needed
|
||||
|
||||
|
||||
> TODO: implement each endpoint on the zerohub here at client
|
||||
|
||||
|
||||
## Hub Authorization
|
||||
ZeroHub's authorized endpoints can be accessed by exporting a JWT in an environment variable. To do so:
|
||||
- go to https://hub.grid.tf, and on the login section try `Generate API Token`
|
||||
- copy the token you got and `export HUB_JWT=<jwt>`
|
||||
42
lib/threefold/grid3/zerohub/zerohub.v
Normal file
42
lib/threefold/grid3/zerohub/zerohub.v
Normal file
@@ -0,0 +1,42 @@
|
||||
module zerohub
|
||||
|
||||
import net.http
|
||||
|
||||
// import freeflowuniverse.herolib.core.httpconnection
|
||||
|
||||
// TODO: curl -H "Authorization: bearer 6Pz6giOpHSaA3KdYI6LLpGSLmDmzmRkVdwvc7S-E5PVB0-iRfgDKW9Rb_ZTlj-xEW4_uSCa5VsyoRsML7DunA1sia3Jpc3RvZi4zYm90IiwgMTY3OTIxNTc3MF0=" https://hub.grid.tf/api/flist/
|
||||
|
||||
// ZeroHubClient is a thin REST client for a ZeroHub instance
// (default https://hub.grid.tf). Authenticated endpoints send `header`,
// which should carry the Authorization bearer token.
pub struct ZeroHubClient {
pub mut:
	url    string // hub host without scheme, e.g. 'hub.grid.tf'
	secret string // is called bearer in documentation
	header http.Header // prebuilt headers used for authenticated requests
}
|
||||
|
||||
// ZeroHubClientArgs are the named construction parameters for new().
@[params]
pub struct ZeroHubClientArgs {
pub:
	url    string = 'hub.grid.tf' // hub host without scheme
	secret string // is called bearer in documentation
}
|
||||
|
||||
// new creates a ZeroHubClient for the given hub (default hub.grid.tf).
// When a secret (bearer token) is passed, the Authorization header is
// prepared as well so authenticated endpoints work without the caller
// having to build cl.header by hand.
// see https://hub.grid.tf/
// more info see https://github.com/threefoldtech/0-hub#public-api-endpoints-no-authentication-needed
pub fn new(args ZeroHubClientArgs) !ZeroHubClient {
	// TODO: use our caching rest client (httpclient)
	mut cl := ZeroHubClient{
		url: args.url
		secret: args.secret
	}
	if args.secret != '' {
		// authenticated endpoints expect: Authorization: bearer <token>
		cl.header = http.new_header(
			key: http.CommonHeader.authorization
			value: 'bearer ${args.secret}'
		)
	}
	// TODO: there should be a check here that its accessible
	return cl
}
|
||||
68
lib/threefold/grid3/zerohub/zerohub_test.v
Normal file
68
lib/threefold/grid3/zerohub/zerohub_test.v
Normal file
@@ -0,0 +1,68 @@
|
||||
module zerohub
|
||||
|
||||
import net.http
|
||||
import os
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
const secret = '6Pz6giOpHSaA3KdYI6LLpGSLmDmzmRkVdwvc7S-E5PVB0-iRfgDKW9Rb_ZTlj-xEW4_uSCa5VsyoRsML7DunA1sia3Jpc3RvZi4zYm90IiwgMTY3OTIxNTc3MF0='
|
||||
|
||||
// test_main exercises the zerohub client against the live hub; most calls
// are kept commented out as usage examples so the test stays cheap.
// Requires HUB_JWT in the environment for the authenticated upload at the end.
// Fixed: the function propagated results with `!` (e.g. new()!, upload_archive()!)
// while being declared with the option return `?` — it must return `!`.
fn test_main() ! {
	mut cl := new(secret: secret)!

	// flists := cl.get_flists()!
	// console.print_debug(flists)

	// repos := cl.get_repos()!
	// console.print_debug(repos)

	// files := cl.get_files()!
	// console.print_debug(files)

	// flists := cl.get_repo_flists('omarabdulaziz.3bot')!
	// console.print_debug(flists)

	// flist_data := cl.get_flist_dump('omarabdulaziz.3bot', 'omarabdul3ziz-obuntu-zinit.flist')!
	// console.print_debug(flist_data)

	// switch to the JWT from the environment for the authenticated calls below
	hub_token := os.getenv('HUB_JWT')
	header_config := http.HeaderConfig{
		key: http.CommonHeader.authorization
		value: 'bearer ${hub_token}'
	}

	cl.header = http.new_header(header_config)
	cl.secret = hub_token

	// mine := cl.get_me()!
	// console.print_debug(mine.as_map()["status"])

	// flist := cl.get_my_flist("omarabdul3ziz-forum-docker-v3.1.flist")!
	// console.print_debug(flist)

	// resp := cl.remove_my_flist("threefolddev-presearch-v2.3.flist")!
	// console.print_debug(resp)

	// res := cl.symlink("mahmoudemmad-mastodon_after_update-test3.flist", "testsymlink")!
	// console.print_debug(res)

	// res := cl.cross_symlink("abdelrad", "0-hub.flist", "testcrosssymlink")!
	// console.print_debug(res)

	// res := cl.rename("omarabdul3ziz-forum-docker-v3.1.flist", "renamed")!
	// console.print_debug(res)

	// res := cl.promote("abdelrad", "0-hub.flist", "promoted")!
	// console.print_debug(res)

	// res := cl.convert("alpine")!
	// console.print_debug(res)

	// res := cl.merge_flists( ["omarabdulaziz.3bot/omarabdul3ziz-obuntu-zinit.flist", "omarabdulaziz.3bot/omarabdul3ziz-peertube-v3.1.1.flist"], "merged")!
	// console.print_debug(res)

	// res := cl.upload_flist("./testup.flist")!
	// console.print_debug(res)

	res := cl.upload_archive('./alpine.tar.gz')!
	console.print_debug(res)
}
|
||||
Reference in New Issue
Block a user