Merge branch 'development_grid_deploy' into development_actions007
@@ -1,5 +1,5 @@
-#!/usr/bin/env -S v -n -w -parallel-cc -enable-globals run
+#!/usr/bin/env -S v -n -cg -w -parallel-cc -enable-globals run
 // #!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

 import os
 import flag
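Note (hedged): the only change in this hunk is the added -cg flag in the shebang, which in V produces a build with C-level debug info; the other flags (-n, -w, -parallel-cc, -enable-globals) are unchanged.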
examples/aitest/atest.vsh         (new file, 0 lines)
examples/aitest/dir_listing.vsh   (new executable file, 86 lines)
@@ -0,0 +1,86 @@
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
+
+import os
+import freeflowuniverse.herolib.core.pathlib
+
+// Helper function to format file sizes
+fn format_size(size i64) string {
+	if size < 1024 {
+		return '${size} B'
+	} else if size < 1024 * 1024 {
+		kb := f64(size) / 1024.0
+		return '${kb:.1f} KB'
+	} else if size < 1024 * 1024 * 1024 {
+		mb := f64(size) / (1024.0 * 1024.0)
+		return '${mb:.1f} MB'
+	} else {
+		gb := f64(size) / (1024.0 * 1024.0 * 1024.0)
+		return '${gb:.1f} GB'
+	}
+}
+
+// Set parameters directly in the script
+// Change these values as needed
+target_dir := '/tmp' // Directory to list ('/tmp' by default)
+show_hidden := false // Set to true to show hidden files
+recursive := false // Set to true for recursive listing
+
+// Create a Path object for the target directory
+mut path := pathlib.get(target_dir)
+
+// Ensure the directory exists and is a directory
+if path.exist == .no {
+	eprintln('Error: Directory "${target_dir}" does not exist')
+	exit(1)
+}
+
+if path.cat != .dir && path.cat != .linkdir {
+	eprintln('Error: "${target_dir}" is not a directory')
+	exit(1)
+}
+
+// Main execution
+println('Listing contents of: ${path.absolute()}')
+println('----------------------------')
+
+// Define list arguments
+mut list_args := pathlib.ListArgs{
+	recursive: recursive
+	ignoredefault: !show_hidden
+}
+
+// Use pathlib to list the directory contents
+mut list_result := path.list(list_args) or {
+	eprintln('Error listing directory: ${err}')
+	exit(1)
+}
+
+// Print each file/directory
+for p in list_result.paths {
+	// Skip the root directory itself
+	if p.path == path.path {
+		continue
+	}
+
+	// Calculate the level based on the path depth relative to the root
+	rel_path := p.path.replace(list_result.root, '')
+	level := rel_path.count('/') - if rel_path.starts_with('/') { 1 } else { 0 }
+
+	// Print indentation based on level
+	if level > 0 {
+		print(' '.repeat(level))
+	}
+
+	// Print file/directory info
+	name := p.name()
+	if p.cat == .dir || p.cat == .linkdir {
+		println('📁 ${name}/')
+	} else {
+		// Get file size
+		file_size := os.file_size(p.path)
+		println('📄 ${name} (${format_size(file_size)})')
+	}
+}
+
+println('----------------------------')
+println('Done!')
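A quick sanity check (hedged; standalone V assertions, not part of the commit) of what format_size above returns at each boundary:

    assert format_size(512) == '512 B'
    assert format_size(2048) == '2.0 KB'
    assert format_size(5 * 1024 * 1024) == '5.0 MB'
    assert format_size(i64(3) * 1024 * 1024 * 1024) == '3.0 GB'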
@@ -49,7 +49,7 @@ pub mut:
 // GitRepoConfig holds repository-specific configuration options.
 pub struct GitRepoConfig {
 pub mut:
-	remote_check_period int = 3600 * 24 * 3 // Seconds to wait between remote checks (0 = check every time), default 3 days
+	remote_check_period int = 3600 * 24 * 7 // Seconds to wait between remote checks (0 = check every time), default 7 days
 }

 // just some initialization mechanism
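For reference, simple arithmetic on the changed default: 3600 * 24 * 3 = 259200 s (3 days) becomes 3600 * 24 * 7 = 604800 s (7 days) between remote checks.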
@@ -83,7 +83,8 @@ pub fn (mut repo GitRepo) need_pull() !bool {
 			// Therefore we need to pull
 			return true
 		}
-		return error('Failed to check merge-base: ${err}')
+		return true
+		// return error('Failed to check merge-base: ${err}')
 	}
 	// If we get here, the remote commit is in our history
 	// Therefore we don't need to pull
@@ -13,6 +13,9 @@ pub fn (mut repo GitRepo) status_update(args StatusUpdateArgs) ! {
 	// Check current time vs last check, if needed (check period) then load
 	repo.cache_get() or { return error('Failed to get cache for repo ${repo.name}: ${err}') } // Ensure we have the situation from redis
 	repo.init() or { return error('Failed to initialize repo ${repo.name}: ${err}') }
+	if 'OFFLINE' !in os.environ() {
+		return
+	}
 	current_time := int(time.now().unix())
 	if args.reload || repo.last_load == 0
 		|| current_time - repo.last_load >= repo.config.remote_check_period {
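A minimal sketch (standalone; not part of the commit) of what the added guard does — os.environ() returns the process environment as a map, so 'OFFLINE' !in os.environ() is true whenever the OFFLINE variable is unset, and status_update then returns before the remote check:

    import os

    fn main() {
    	if 'OFFLINE' !in os.environ() {
    		println('OFFLINE unset: status_update would return here')
    		return
    	}
    	println('OFFLINE set: the remote status check would proceed')
    }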
@@ -129,17 +129,20 @@ pub fn (mut d Deployment) add_signature(twin u32, signature string) {
 		signature_type: 'sr25519'
 	}
 }

 pub fn (mut d Deployment) json_encode() string {
-	mut encoded_workloads := []string{}
-	for mut w in d.workloads {
-		encoded_workloads << w.json_encode()
-	}
-
-	workloads := '[${encoded_workloads.join(',')}]'
-	return '{"version":${d.version},"twin_id":${d.twin_id},"contract_id":${d.contract_id},"expiration":${d.expiration},"metadata":"${d.metadata}","description":"${d.description}","workloads":${workloads},"signature_requirement":${json.encode(d.signature_requirement)}}'
+	return json.encode(d)
 }

+// pub fn (mut d Deployment) json_encode() string {
+// 	mut encoded_workloads := []string{}
+// 	for mut w in d.workloads {
+// 		encoded_workloads << w.json_encode()
+// 	}
+
+// 	workloads := '[${encoded_workloads.join(',')}]'
+// 	return '{"version":${d.version},"twin_id":${d.twin_id},"contract_id":${d.contract_id},"expiration":${d.expiration},"metadata":"${d.metadata}","description":"${d.description}","workloads":${workloads},"signature_requirement":${json.encode(d.signature_requirement)}}'
+// }
+
 pub fn (dl Deployment) count_public_ips() u8 {
 	mut count := u8(0)
 	for wl in dl.workloads {
@@ -170,8 +173,12 @@ pub:
 	project_name string @[json: 'projectName']
 }

-pub fn (data DeploymentData) json_encode() string {
-	return "{\"type\":\"${data.type_}\",\"name\":\"${data.name}\",\"projectName\":\"${data.project_name}\"}"
-}
+// pub fn (data DeploymentData) json_encode() string {
+// 	return "{\"type\":\"${data.type_}\",\"name\":\"${data.name}\",\"projectName\":\"${data.project_name}\"}"
+// }

+pub fn (mut d Deployment) json_encode() string {
+	return json.encode(d)
+}
+
 pub fn (mut dl Deployment) add_metadata(type_ string, project_name string) {
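A hedged sketch (standalone; struct and values invented for illustration) of why the handwritten encoders can be dropped: V's json module already honours @[json: '...'] renames such as projectName when encoding:

    import json

    struct Meta {
    	type_        string @[json: 'type']
    	name         string
    	project_name string @[json: 'projectName']
    }

    fn main() {
    	m := Meta{
    		type_: 'vm'
    		name: 'demo'
    		project_name: 'proj1'
    	}
    	println(json.encode(m)) // {"type":"vm","name":"demo","projectName":"proj1"}
    }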
@@ -1,52 +0,0 @@
-module models
-
-pub struct QuantumSafeFS {
-	cache  u64
-	config QuantumSafeFSConfig
-}
-
-pub struct QuantumSafeFSConfig {
-	minimal_shards        u32
-	expected_shards       u32
-	redundant_groups      u32
-	redundant_nodes       u32
-	max_zdb_data_dir_size u32
-	encryption            Encryption
-	meta                  QuantumSafeMeta
-	goups                 []ZDBGroup
-	compression           QuantumCompression
-}
-
-pub struct Encryption {
-	algorithm string = 'AES' // configuration to use for the encryption stage. Currently only AES is supported.
-	key       []u8 // 64 long hex encoded encryption key (e.g. 0000000000000000000000000000000000000000000000000000000000000000).
-}
-
-pub struct QuantumSafeMeta {
-	type_  string = 'ZDB' @[json: 'type'] // configuration for the metadata store to use, currently only ZDB is supported.
-	config QuantumSafeConfig
-}
-
-pub struct ZDBGroup {
-	backends []ZDBBackend
-}
-
-pub struct ZDBBackend {
-	address   string // Address of backend ZDB (e.g. [300:a582:c60c:df75:f6da:8a92:d5ed:71ad]:9900 or 60.60.60.60:9900).
-	namespace string // ZDB namespace.
-	password  string // Namespace password.
-}
-
-pub struct QuantumCompression {
-	algorithm string = 'snappy' // configuration to use for the compression stage. Currently only snappy is supported.
-}
-
-pub struct QuantumSafeConfig {
-	prefix     string // Data stored on the remote metadata is prefixed with.
-	encryption Encryption
-	backends   []ZDBBackend
-}
-
-pub fn (qsfs QuantumSafeFS) challenge() string {
-	return ''
-}
lib/threefold/grid3/models/qsfs_notimplemented.v   (new file, 52 lines)
@@ -0,0 +1,52 @@
+module models
+
+// pub struct QuantumSafeFS {
+// 	cache  u64
+// 	config QuantumSafeFSConfig
+// }
+
+// pub struct QuantumSafeFSConfig {
+// 	minimal_shards        u32
+// 	expected_shards       u32
+// 	redundant_groups      u32
+// 	redundant_nodes       u32
+// 	max_zdb_data_dir_size u32
+// 	encryption            Encryption
+// 	meta                  QuantumSafeMeta
+// 	goups                 []ZDBGroup
+// 	compression           QuantumCompression
+// }
+
+// pub struct Encryption {
+// 	algorithm string = 'AES' // configuration to use for the encryption stage. Currently only AES is supported.
+// 	key       []u8 // 64 long hex encoded encryption key (e.g. 0000000000000000000000000000000000000000000000000000000000000000).
+// }
+
+// pub struct QuantumSafeMeta {
+// 	type_  string = 'ZDB' @[json: 'type'] // configuration for the metadata store to use, currently only ZDB is supported.
+// 	config QuantumSafeConfig
+// }
+
+// pub struct ZDBGroup {
+// 	backends []ZDBBackend
+// }
+
+// pub struct ZDBBackend {
+// 	address   string // Address of backend ZDB (e.g. [300:a582:c60c:df75:f6da:8a92:d5ed:71ad]:9900 or 60.60.60.60:9900).
+// 	namespace string // ZDB namespace.
+// 	password  string // Namespace password.
+// }
+
+// pub struct QuantumCompression {
+// 	algorithm string = 'snappy' // configuration to use for the compression stage. Currently only snappy is supported.
+// }
+
+// pub struct QuantumSafeConfig {
+// 	prefix     string // Data stored on the remote metadata is prefixed with.
+// 	encryption Encryption
+// 	backends   []ZDBBackend
+// }
+
+// pub fn (qsfs QuantumSafeFS) challenge() string {
+// 	return ''
+// }
@@ -49,10 +49,10 @@ pub fn challenge(data string, type_ string) !string {
 			mut w := json.decode(Zmachine, data)!
 			return w.challenge()
 		}
-		workload_types.qsfs {
-			mut w := json.decode(QuantumSafeFS, data)!
-			return w.challenge()
-		}
+		// workload_types.qsfs {
+		// 	mut w := json.decode(QuantumSafeFS, data)!
+		// 	return w.challenge()
+		// }
 		workload_types.public_ip {
 			mut w := json.decode(PublicIP, data)!
 			return w.challenge()
@@ -129,11 +129,15 @@ pub:
 	rootfs_size int
 }

-pub fn (vm VM) json_encode() string {
-	mut env_vars := []string{}
-	for k, v in vm.env_vars {
-		env_vars << '"${k}": "${v}"'
-	}
-
-	return '{"name":"${vm.name}","flist":"${vm.flist}","entrypoint":"${vm.entrypoint}","env_vars":{${env_vars.join(',')}},"cpu":${vm.cpu},"memory":${vm.memory}, "rootfs_size": ${vm.rootfs_size}}'
-}
+// pub fn (vm VM) json_encode() string {
+// 	mut env_vars := []string{}
+// 	for k, v in vm.env_vars {
+// 		env_vars << '"${k}": "${v}"'
+// 	}
+
+// 	return '{"name":"${vm.name}","flist":"${vm.flist}","entrypoint":"${vm.entrypoint}","env_vars":{${env_vars.join(',')}},"cpu":${vm.cpu},"memory":${vm.memory}, "rootfs_size": ${vm.rootfs_size}}'
+// }
+
+pub fn (vm VM) json_encode() string {
+	return json.encode(vm)
+}
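A hedged sketch (standalone; struct invented) showing that json.encode also covers the map field the removed hand-rolled encoder iterated over:

    import json

    struct Demo {
    	name     string
    	env_vars map[string]string
    }

    fn main() {
    	d := Demo{
    		name: 'vm1'
    		env_vars: {
    			'SSH_KEY': 'ssh-ed25519 AAAAexample'
    		}
    	}
    	println(json.encode(d)) // {"name":"vm1","env_vars":{"SSH_KEY":"ssh-ed25519 AAAAexample"}}
    }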
@@ -108,10 +108,29 @@ pub fn (z Znet) to_workload(args WorkloadArgs) Workload {
 	}
 }

 pub fn rand_port(takenPorts []u16) !u16 {
-	mut port := u16(rand.u32n(u32(6000))! + 2000)
-
-	for takenPorts.any(it == port) {
-		port = u16(rand.u32n(u32(6000))! + 2000)
+	// Define the port range
+	const min_port = u16(2000)
+	const max_port = u16(8000)
+	const port_range = u32(max_port - min_port)
+
+	// Set a maximum number of attempts to avoid infinite loop
+	const max_attempts = 100
+
+	// Check if there are too many taken ports
+	if takenPorts.len >= int(port_range) {
+		return error('All ports in range ${min_port}-${max_port} are taken')
+	}
+
+	mut attempts := 0
+	mut port := u16(rand.u32n(port_range)! + min_port)
+
+	for takenPorts.any(it == port) {
+		attempts++
+		if attempts >= max_attempts {
+			return error('Failed to find an available port after ${max_attempts} attempts')
+		}
+		port = u16(rand.u32n(port_range)! + min_port)
+	}
+
 	}

 	return port
 }
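A hedged usage sketch of the reworked rand_port (standalone, e.g. in a .vsh script importing this module; values invented):

    taken := [u16(2500), 3000, 3001]
    port := rand_port(taken) or { panic(err) }
    assert port >= 2000 && port < 8000
    assert port !in taken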
lib/threefold/grid4/cloudslices/cloudbox.v   (new file, 41 lines)
@@ -0,0 +1,41 @@
+module cloudslices
+
+import time
+
+pub struct CloudBox {
+pub mut:
+	amount           int
+	description      string
+	storage_gb       f64
+	passmark         int
+	vcores           int
+	mem_gb           f64
+	price_range      []f64 = [0.0, 0.0]
+	price_simulation f64
+	ssd_nr           int
+}
+
+pub struct AIBox {
+pub mut:
+	amount           int
+	gpu_brand        string
+	gpu_version      string
+	description      string
+	storage_gb       f64
+	passmark         int
+	vcores           int
+	mem_gb           f64
+	mem_gb_gpu       f64
+	price_range      []f64 = [0.0, 0.0]
+	price_simulation f64
+	hdd_nr           int
+	ssd_nr           int
+}
+
+pub struct StorageBox {
+pub mut:
+	amount           int
+	description      string
+	price_range      []f64 = [0.0, 0.0]
+	price_simulation f64
+}
@@ -1,95 +0,0 @@
-module cloudslices
-
-import time
-
-pub struct Node {
-pub mut:
-	id           int
-	name         string
-	cost         f64
-	deliverytime time.Time
-	description  string
-	cpu_brand    string
-	cpu_version  string
-	inca_reward  int
-	image        string
-	mem          string
-	hdd          string
-	ssd          string
-	url          string
-	reputation   int
-	uptime       int // 0..100
-	continent    string
-	country      string
-	passmark     int
-	cloudbox     []CloudBox
-	aibox        []AIBox
-	storagebox   []StorageBox
-	vendor       string
-	grant        NodeGrant
-}
-
-pub struct NodeGrant {
-pub mut:
-	grant_month_usd   string
-	grant_month_inca  string
-	grant_max_nrnodes int
-}
-
-pub struct CloudBox {
-pub mut:
-	amount           int
-	description      string
-	storage_gb       f64
-	passmark         int
-	vcores           int
-	mem_gb           f64
-	price_range      []f64 = [0.0, 0.0]
-	price_simulation f64
-	ssd_nr           int
-}
-
-pub struct AIBox {
-pub mut:
-	amount           int
-	gpu_brand        string
-	gpu_version      string
-	description      string
-	storage_gb       f64
-	passmark         int
-	vcores           int
-	mem_gb           f64
-	mem_gb_gpu       f64
-	price_range      []f64 = [0.0, 0.0]
-	price_simulation f64
-	hdd_nr           int
-	ssd_nr           int
-}
-
-pub struct StorageBox {
-pub mut:
-	amount           int
-	description      string
-	price_range      []f64 = [0.0, 0.0]
-	price_simulation f64
-}
-
-fn (mut n Node) validate_percentage(v int) ! {
-	if v < 0 || v > 100 {
-		return error('Value must be between 0 and 100')
-	}
-}
-
-pub fn preprocess_value(v string) string {
-	// Implement the preprocessing logic here
-	return v
-}
-
-pub fn (mut n Node) preprocess_location(v string) ! {
-	n.continent = preprocess_value(v)
-	n.country = preprocess_value(v)
-}
-
-// pub fn (mut n Node) parse_deliverytime(v string) ! {
-// 	n.deliverytime = time.parse(v, 'YYYY-MM-DD')!
-// }
@@ -2,74 +2,64 @@ module cloudslices

 import time

 // NodeTotal represents the aggregated data for a node, including hardware specifications, pricing, and location details.
 pub struct NodeTotal {
 pub mut:
-	id               int
-	name             string
-	cost             f64
-	deliverytime     time.Time
-	description      string
-	cpu_brand        string
-	cpu_version      string
-	inca_reward      int
-	image            string
-	mem              string
-	hdd              string
-	ssd              string
-	url              string
-	reputation       int
-	uptime           int
-	continent        string
-	country          string
-
-	storage_gb       f64
-	mem_gb           f64
-	mem_gb_gpu       f64
-	price_simulation f64
-	passmark         int
-	vcores           int
+	id               int          // Unique identifier for the node
+	cost             f64          // Total cost of the node
+	deliverytime     time.Time    // Expected delivery time
+	inca_reward      int          // Incentive reward for the node
+	reputation       int          // Reputation score of the node
+	uptime           int          // Uptime percentage
+	price_simulation f64          // Simulated price for the node
+	info             NodeInfo     // Descriptive information about the node
+	capacity         NodeCapacity // Hardware capacity details
 }

 // node_total calculates the total values for storage, memory, price simulation, passmark, and vcores by summing up the contributions from different types of boxes.
 pub fn (n Node) node_total() NodeTotal {
-	mut total := NodeTotal{
-		id: n.id
-		name: n.name
-		cost: n.cost
-		deliverytime: n.deliverytime
-		description: n.description
-		cpu_brand: n.cpu_brand
-		cpu_version: n.cpu_version
-		inca_reward: n.inca_reward
-		image: n.image
-		mem: n.mem
-		hdd: n.hdd
-		ssd: n.ssd
-		url: n.url
-		reputation: n.reputation
-		uptime: n.uptime
-		continent: n.continent
-		country: n.country
-	}
-	for box in n.cloudbox {
-		total.storage_gb += box.storage_gb * f64(box.amount)
-		total.mem_gb += box.mem_gb * f64(box.amount)
-		total.price_simulation += box.price_simulation * f64(box.amount)
-		total.passmark += box.passmark * box.amount
-		total.vcores += box.vcores * box.amount
-	}
+	mut total := NodeTotal{
+		id: n.id
+		cost: n.cost
+		deliverytime: n.deliverytime
+		inca_reward: n.inca_reward
+		reputation: n.reputation
+		uptime: n.uptime
+		info: NodeInfo{
+			name: n.name
+			description: n.description
+			cpu_brand: n.cpu_brand
+			cpu_version: n.cpu_version
+			image: n.image
+			mem: n.mem
+			hdd: n.hdd
+			ssd: n.ssd
+			url: n.url
+			continent: n.continent
+			country: n.country
+		},
+		capacity: NodeCapacity{}
+	}
+	for box in n.cloudbox {
+		total.capacity.storage_gb += box.storage_gb * f64(box.amount)
+		total.capacity.mem_gb += box.mem_gb * f64(box.amount)
+		total.price_simulation += box.price_simulation * f64(box.amount)
+		total.capacity.passmark += box.passmark * box.amount
+		total.capacity.vcores += box.vcores * box.amount
+	}

-	for box in n.aibox {
-		total.storage_gb += box.storage_gb * f64(box.amount)
-		total.mem_gb += box.mem_gb * f64(box.amount)
-		total.mem_gb_gpu += box.mem_gb_gpu * f64(box.amount)
-		total.price_simulation += box.price_simulation * f64(box.amount)
-		total.passmark += box.passmark * box.amount
-		total.vcores += box.vcores * box.amount
-	}
+	for box in n.aibox {
+		total.capacity.storage_gb += box.storage_gb * f64(box.amount)
+		total.capacity.mem_gb += box.mem_gb * f64(box.amount)
+		total.capacity.mem_gb_gpu += box.mem_gb_gpu * f64(box.amount)
+		total.price_simulation += box.price_simulation * f64(box.amount)
+		total.capacity.passmark += box.passmark * box.amount
+		total.capacity.vcores += box.vcores * box.amount
+	}

 	for box in n.storagebox {
 		total.price_simulation += box.price_simulation * f64(box.amount)
 	}

 	return total
 }
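A hedged usage sketch (standalone; values invented) of node_total() with the new nested layout — per-box amounts multiply into the capacity sub-struct:

    n := Node{
    	cloudbox: [
    		CloudBox{
    			amount: 2
    			storage_gb: 100.0
    			mem_gb: 8.0
    			vcores: 4
    			passmark: 5000
    			price_simulation: 10.0
    		},
    	]
    }
    t := n.node_total()
    assert t.capacity.storage_gb == 200.0
    assert t.capacity.vcores == 8
    assert t.price_simulation == 20.0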
lib/threefold/grid4/cloudslices/model_node.v   (new file, 73 lines)
@@ -0,0 +1,73 @@
+module cloudslices
+
+import time
+
+pub struct Node {
+pub mut:
+	id           int
+	cost         f64
+	deliverytime time.Time
+	inca_reward  int
+	reputation   int
+	uptime       int // 0..100
+	cloudbox     []CloudBox
+	aibox        []AIBox
+	storagebox   []StorageBox
+	vendor       string
+	grant        NodeGrant
+	info         NodeInfo     // Descriptive information about the node
+	capacity     NodeCapacity // Hardware capacity details
+}
+
+// NodeInfo represents the descriptive information about a node.
+pub struct NodeInfo {
+pub mut:
+	cpu_brand   string // Brand of the CPU
+	cpu_version string // Version of the CPU
+	mem         string // Memory specification
+	hdd         string // HDD specification
+	ssd         string // SSD specification
+	url         string // URL for more information
+	continent   string // Continent where the node is located
+	country     string // Country where the node is located
+}
+
+// NodeCapacity represents the hardware capacity details of a node.
+pub struct NodeCapacity {
+pub mut:
+	storage_gb f64 // Total storage in gigabytes
+	mem_gb     f64 // Total memory in gigabytes
+	mem_gb_gpu f64 // Total GPU memory in gigabytes
+	passmark   int // Passmark score for the node
+	vcores     int // Total virtual cores
+}
+
+pub struct NodeGrant {
+pub mut:
+	grant_month_usd   string
+	grant_month_inca  string
+	grant_max_nrnodes int
+}
+
+fn (mut n Node) validate_percentage(v int) ! {
+	if v < 0 || v > 100 {
+		return error('Value must be between 0 and 100')
+	}
+}
+
+pub fn preprocess_value(v string) string {
+	// Implement the preprocessing logic here
+	return v
+}
+
+pub fn (mut n Node) preprocess_location(v string) ! {
+	n.info.continent = preprocess_value(v)
+	n.info.country = preprocess_value(v)
+}
+
+// pub fn (mut n Node) parse_deliverytime(v string) ! {
+// 	n.deliverytime = time.parse(v, 'YYYY-MM-DD')!
+// }
lib/threefold/grid4/cloudslices/model_node_template.v   (new file, 12 lines)
@@ -0,0 +1,12 @@
+module cloudslices
+
+import time
+
+pub struct NodeTemplate {
+	Node
+pub mut:
+	name        string
+	description string // Description of the node
+	image_url   string // Image url associated with the node
+
+}
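A hedged sketch (standalone; values invented) of what embedding Node in NodeTemplate gives you in V — the embedded struct's fields are promoted onto the outer type:

    mut t := NodeTemplate{
    	name: 'starter-node'
    }
    t.uptime = 99 // promoted from the embedded Node
    t.info.country = 'BE' // nested structs reachable the same way
    println(t.name)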
@@ -30,7 +30,6 @@ fn obj_init(mycfg_ CaddyServer) !CaddyServer {
 // user needs to us switch to make sure we get the right object
 fn configure() ! {
 	mut cfg := get()!
-
 	if !os.exists('/etc/caddy/Caddyfile') {
 		// set the default caddyfile
 		configure_examples(path: cfg.path)!