feat(tfgrid3deployer): add delete method for deployments

- add a delete method for deployments which deletes all deployment
  contracts
- improve logging messages
- update examples according to changes
This commit is contained in:
2025-01-15 13:00:04 +02:00
parent dc47c81b0c
commit 1da8a2d319
7 changed files with 61 additions and 31 deletions

View File

@@ -12,13 +12,4 @@ v := tfgrid3deployer.get()!
println('cred: ${v}') println('cred: ${v}')
deployment_name := 'vm_caddy1' deployment_name := 'vm_caddy1'
mut deployment := tfgrid3deployer.get_deployment(deployment_name)! tfgrid3deployer.delete_deployment(deployment_name)!
deployment.remove_machine('vm_caddy1')!
deployment.deploy()!
os.rm('${os.home_dir()}/hero/db/0/session_deployer/${deployment_name}')!
deployment_name2 := 'vm_caddy_gw'
mut deployment2 := tfgrid3deployer.get_deployment(deployment_name2)!
deployment2.remove_webname('gwnamecaddy')!
deployment2.deploy()!
os.rm('${os.home_dir()}/hero/db/0/session_deployer/${deployment_name2}')!

View File

@@ -29,12 +29,9 @@ println('vm1 info: ${vm1}')
vm1_public_ip4 := vm1.public_ip4.all_before('/') vm1_public_ip4 := vm1.public_ip4.all_before('/')
deployment_name2 := 'vm_caddy_gw' deployment.add_webname(name: 'gwnamecaddy', backend: 'http://${vm1_public_ip4}:80')
mut deployment2 := tfgrid3deployer.new_deployment(deployment_name2)! deployment.deploy()!
deployment2.add_webname(name: 'gwnamecaddy', backend: 'http://${vm1_public_ip4}:80') gw1 := deployment.webname_get('gwnamecaddy')!
deployment2.deploy()!
gw1 := deployment2.webname_get('gwnamecaddy')!
println('gw info: ${gw1}') println('gw info: ${gw1}')
// Retry logic to wait for the SSH server to be up // Retry logic to wait for the SSH server to be up

View File

@@ -146,7 +146,7 @@ pub fn (mut db DB) set(args_ SetArgs) !u32 {
args.id = db.parent.incr()! args.id = db.parent.incr()!
pathsrc = db.path_get(args.id)! pathsrc = db.path_get(args.id)!
} }
console.print_debug('keydb ${pathsrc}')
if db.config.encrypted { if db.config.encrypted {
args.valueb = aes_symmetric.encrypt(args.valueb, db.secret()!) args.valueb = aes_symmetric.encrypt(args.valueb, db.secret()!)
pathsrc.write(base64.encode(args.valueb))! pathsrc.write(base64.encode(args.valueb))!

View File

@@ -75,12 +75,35 @@ pub fn get_deployment(name string) !TFDeployment {
return dl return dl
} }
// delete_deployment cancels all grid contracts belonging to the named
// deployment (name, node and rent contracts) in a single batch call,
// then removes the deployment record from the local KV store.
//
// Returns an error if the deployment cannot be loaded, if the batch
// cancel fails, or if the local record cannot be deleted.
pub fn delete_deployment(name string) ! {
	mut deployer := get_deployer()!
	mut dl := TFDeployment{
		name:     name
		kvstore:  KVStoreFS{}
		deployer: &deployer
	}
	dl.load() or { return error('Failed to load the deployment due to: ${err}') }
	console.print_header('Current deployment contracts: ${dl.contracts}')
	mut contracts := []u64{}
	// A deployment without a gateway has no name contract; skip the zero id
	// so we do not ask the chain to cancel a non-existent contract.
	if dl.contracts.name != 0 {
		contracts << dl.contracts.name
	}
	contracts << dl.contracts.node.values()
	contracts << dl.contracts.rent.values()
	dl.deployer.client.batch_cancel_contracts(contracts)!
	console.print_header('Deployment contracts are canceled successfully.')
	// Only drop the local record once the on-chain contracts are gone,
	// so a failed cancel leaves the deployment recoverable.
	dl.kvstore.delete(dl.name)!
	console.print_header('Deployment is deleted successfully.')
}
pub fn (mut self TFDeployment) deploy() ! { pub fn (mut self TFDeployment) deploy() ! {
console.print_header('Starting deployment process.') console.print_header('Starting deployment process.')
self.set_nodes()! self.set_nodes()!
old_deployment := self.list_deployments()! old_deployment := self.list_deployments()!
println('old_deployment ${old_deployment}') console.print_header('old contract ids: ${old_deployment.keys()}')
mut setup := new_deployment_setup(self.network, self.vms, self.zdbs, self.webnames, mut setup := new_deployment_setup(self.network, self.vms, self.zdbs, self.webnames,
old_deployment, mut self.deployer)! old_deployment, mut self.deployer)!
@@ -92,6 +115,10 @@ pub fn (mut self TFDeployment) deploy() ! {
fn (mut self TFDeployment) set_nodes() ! { fn (mut self TFDeployment) set_nodes() ! {
for mut vm in self.vms { for mut vm in self.vms {
if vm.node_id != 0 {
continue
}
mut node_ids := []u64{} mut node_ids := []u64{}
for node_id in vm.requirements.nodes { for node_id in vm.requirements.nodes {
@@ -122,6 +149,10 @@ fn (mut self TFDeployment) set_nodes() ! {
} }
for mut zdb in self.zdbs { for mut zdb in self.zdbs {
if zdb.node_id != 0 {
continue
}
nodes := filter_nodes( nodes := filter_nodes(
free_sru: convert_to_gigabytes(u64(zdb.requirements.size)) free_sru: convert_to_gigabytes(u64(zdb.requirements.size))
status: 'up' status: 'up'
@@ -138,6 +169,10 @@ fn (mut self TFDeployment) set_nodes() ! {
} }
for mut webname in self.webnames { for mut webname in self.webnames {
if webname.node_id != 0 {
continue
}
nodes := filter_nodes( nodes := filter_nodes(
domain: true domain: true
status: 'up' status: 'up'
@@ -205,7 +240,6 @@ fn (mut self TFDeployment) finalize_deployment(setup DeploymentSetup) ! {
} }
if create_name_contracts.len > 0 || create_deployments.len > 0 { if create_name_contracts.len > 0 || create_deployments.len > 0 {
console.print_header('Attempting batch deployment')
created_name_contracts_map, ret_dls := self.deployer.batch_deploy(create_name_contracts, mut created_name_contracts_map, ret_dls := self.deployer.batch_deploy(create_name_contracts, mut
create_deployments, none)! create_deployments, none)!

View File

@@ -90,6 +90,11 @@ fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, old_deployme
// Returns: // Returns:
// - None // - None
fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! { fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! {
if machines.len == 0 {
return
}
console.print_header('Preparing Zmachine workloads.')
mut used_ip_octets := map[u32][]u8{} mut used_ip_octets := map[u32][]u8{}
for machine in machines { for machine in machines {
mut req := machine.requirements mut req := machine.requirements
@@ -100,7 +105,6 @@ fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! {
self.set_public_ip_workload(machine.node_id, public_ip_name, req)! self.set_public_ip_workload(machine.node_id, public_ip_name, req)!
} }
console.print_header('Creating Zmachine workload.')
self.set_zmachine_workload(machine, public_ip_name, mut used_ip_octets)! self.set_zmachine_workload(machine, public_ip_name, mut used_ip_octets)!
} }
} }
@@ -114,10 +118,14 @@ fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! {
// //
// Each ZDB is processed to convert the requirements into a grid workload and associated with a healthy node. // Each ZDB is processed to convert the requirements into a grid workload and associated with a healthy node.
fn (mut self DeploymentSetup) setup_zdb_workloads(zdbs []ZDB) ! { fn (mut self DeploymentSetup) setup_zdb_workloads(zdbs []ZDB) ! {
if zdbs.len == 0 {
return
}
console.print_header('Preparing ZDB workloads.')
for zdb in zdbs { for zdb in zdbs {
// Retrieve ZDB requirements from the result // Retrieve ZDB requirements from the result
mut req := zdb.requirements mut req := zdb.requirements
console.print_header('Creating a ZDB workload for `${req.name}` DB.')
// Create the Zdb model with the size converted to bytes // Create the Zdb model with the size converted to bytes
zdb_model := grid_models.Zdb{ zdb_model := grid_models.Zdb{
@@ -150,6 +158,11 @@ fn (mut self DeploymentSetup) setup_zdb_workloads(zdbs []ZDB) ! {
// Returns: // Returns:
// - None // - None
fn (mut self DeploymentSetup) setup_webname_workloads(webnames []WebName) ! { fn (mut self DeploymentSetup) setup_webname_workloads(webnames []WebName) ! {
if webnames.len == 0 {
return
}
console.print_header('Preparing WebName workloads.')
for wn in webnames { for wn in webnames {
req := wn.requirements req := wn.requirements
@@ -238,7 +251,7 @@ fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip
// - public_ip_name: Name of the public IP to assign to the workload // - public_ip_name: Name of the public IP to assign to the workload
fn (mut self DeploymentSetup) set_public_ip_workload(node_id u32, public_ip_name string, vm VMRequirements) ! { fn (mut self DeploymentSetup) set_public_ip_workload(node_id u32, public_ip_name string, vm VMRequirements) ! {
// Add the public IP workload // Add the public IP workload
console.print_header('Creating Public IP workload.') console.print_header('Preparing Public IP workload for node ${node_id}.')
public_ip_workload := grid_models.PublicIP{ public_ip_workload := grid_models.PublicIP{
v4: vm.public_ip4 v4: vm.public_ip4
v6: vm.public_ip6 v6: vm.public_ip6
@@ -257,7 +270,6 @@ fn (mut self DeploymentSetup) set_public_ip_workload(node_id u32, public_ip_name
// Throws: // Throws:
// - Error if failed to assign a private IP in the subnet // - Error if failed to assign a private IP in the subnet
fn (mut self DeploymentSetup) assign_private_ip(node_id u32, mut used_ip_octets map[u32][]u8) !string { fn (mut self DeploymentSetup) assign_private_ip(node_id u32, mut used_ip_octets map[u32][]u8) !string {
console.print_header('Assign private IP to node ${node_id}.')
ip := self.network_handler.wg_subnet[node_id].split('/')[0] ip := self.network_handler.wg_subnet[node_id].split('/')[0]
mut split_ip := ip.split('.') mut split_ip := ip.split('.')
last_octet := ip.split('.').last().u8() last_octet := ip.split('.').last().u8()
@@ -268,7 +280,6 @@ fn (mut self DeploymentSetup) assign_private_ip(node_id u32, mut used_ip_octets
split_ip[3] = '${candidate}' split_ip[3] = '${candidate}'
used_ip_octets[node_id] << candidate used_ip_octets[node_id] << candidate
ip_ := split_ip.join('.') ip_ := split_ip.join('.')
console.print_header('Private IP Assigned: ${ip_}.')
return ip_ return ip_
} }
return error('failed to assign private IP in subnet: ${self.network_handler.wg_subnet[node_id]}') return error('failed to assign private IP in subnet: ${self.network_handler.wg_subnet[node_id]}')

View File

@@ -26,4 +26,8 @@ fn (kvs KVStoreFS) get(key string) ![]u8 {
} }
// delete removes the value stored under `key` from the session-scoped
// key-value database backing the deployer state.
//
// Returns an error if the context/session/db cannot be opened or if the
// key cannot be removed.
fn (kvs KVStoreFS) delete(key string) ! {
	mut mycontext := context.context_new()!
	mut session := mycontext.session_new(name: 'deployer')!
	mut db := session.db_get()!
	// Message fixed: this is the delete path, not the set path.
	db.delete(key: key) or { return error('Cannot delete the key due to: ${err}') }
}

View File

@@ -169,17 +169,10 @@ fn (mut self NetworkHandler) setup_wireguard_data() ! {
} }
self.wg_ports[node_id] = self.deployer.assign_wg_port(node_id)! self.wg_ports[node_id] = self.deployer.assign_wg_port(node_id)!
console.print_header('Assign Wireguard port for node ${node_id}.')
console.print_header('Generate Wireguard keys for node ${node_id}.')
self.wg_keys[node_id] = self.deployer.client.generate_wg_priv_key()! self.wg_keys[node_id] = self.deployer.client.generate_wg_priv_key()!
console.print_header('Wireguard keys for node ${node_id} are ${self.wg_keys[node_id]}.')
console.print_header('Calculate subnet for node ${node_id}.')
self.wg_subnet[node_id] = self.calculate_subnet()! self.wg_subnet[node_id] = self.calculate_subnet()!
console.print_header('Node ${node_id} subnet is ${self.wg_subnet[node_id]}.')
console.print_header('Node ${node_id} public config ${public_config}.')
if public_config.ipv4.len != 0 { if public_config.ipv4.len != 0 {
self.endpoints[node_id] = public_config.ipv4.split('/')[0] self.endpoints[node_id] = public_config.ipv4.split('/')[0]