Merge branch 'development' of https://github.com/freeflowuniverse/herolib into development
@@ -7,7 +7,7 @@ pub struct BuilderFactory {
}

pub fn new() !BuilderFactory {
    mut c := base.context()!
    _ := base.context()!
    mut bf := BuilderFactory{}
    return bf
}

@@ -4,7 +4,10 @@ import freeflowuniverse.herolib.data.ipaddress

// get node connection to local machine
pub fn (mut bldr BuilderFactory) node_local() !&Node {
    return bldr.node_new(name: 'localhost')
    return bldr.node_new(
        name:   'localhost'
        ipaddr: '127.0.0.1'
    )
}

// format ipaddr: localhost:7777 .
@@ -64,7 +67,6 @@ pub fn (mut bldr BuilderFactory) node_new(args_ NodeArguments) !&Node {
    mut iadd := ipaddress.new(args.ipaddr)!
    node.name = iadd.toname()!
}

wasincache := node.load()!

if wasincache && args.reload {

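The second hunk above only renames an unused variable; the substantive change is node_local() now passing an explicit ipaddr. A minimal usage sketch, assuming the factory is imported as freeflowuniverse.herolib.builder (the import path is not shown in this diff):

```vlang
import freeflowuniverse.herolib.builder

mut bf := builder.new()!
// connects to the local machine over the loopback address configured above
mut node := bf.node_local()!
println(node.name)
```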
8   lib/clients/ipapi/.heroscript   Normal file
@@ -0,0 +1,8 @@

!!hero_code.generate_client
    name:'ipapi'
    classname:'IPApi'
    singleton:0
    default:1
    hasconfig:1
    reset:0
29   lib/clients/ipapi/client.v   Normal file
@@ -0,0 +1,29 @@
module ipapi

import json

pub struct IPInfo {
pub:
    query        string
    status       string
    country      string
    country_code string @[json: 'countryCode']
    region       string
    region_name  string @[json: 'regionName']
    city         string
    zip          string
    lat          f32
    lon          f32
    timezone     string
    isp          string
    org          string
    as           string
}

pub fn (mut a IPApi) get_ip_info(ip string) !IPInfo {
    mut conn := a.connection()!
    res := conn.get_json(prefix: 'json/${ip}')!
    info := json.decode(IPInfo, res)!

    return info
}
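get_ip_info() queries the ip-api.com JSON endpoint and decodes the result into IPInfo. A short sketch of the intended call pattern, assuming the get() factory from the next file returns a configured instance; the IP is only an example:

```vlang
import freeflowuniverse.herolib.clients.ipapi

mut client := ipapi.get()!
info := client.get_ip_info('8.8.8.8')!
println('${info.query}: ${info.country}, ${info.city} (${info.isp})')
```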
102   lib/clients/ipapi/ipapi_factory_.v   Normal file
@@ -0,0 +1,102 @@
module ipapi

import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.ui.console

__global (
    ipapi_global  map[string]&IPApi
    ipapi_default string
)

/////////FACTORY

@[params]
pub struct ArgsGet {
pub mut:
    name string
}

fn args_get(args_ ArgsGet) ArgsGet {
    mut args := args_
    if args.name == '' {
        args.name = ipapi_default
    }
    if args.name == '' {
        args.name = 'default'
    }
    return args
}

pub fn get(args_ ArgsGet) !&IPApi {
    mut args := args_get(args_)
    if args.name !in ipapi_global {
        if args.name == 'default' {
            if !config_exists(args) {
                if default {
                    config_save(args)!
                }
            }
            config_load(args)!
        }
    }
    return ipapi_global[args.name] or {
        println(ipapi_global)
        panic('could not get config for ipapi with name:${args.name}')
    }
}

fn config_exists(args_ ArgsGet) bool {
    mut args := args_get(args_)
    mut context := base.context() or { panic('bug') }
    return context.hero_config_exists('ipapi', args.name)
}

fn config_load(args_ ArgsGet) ! {
    mut args := args_get(args_)
    mut context := base.context()!
    mut heroscript := context.hero_config_get('ipapi', args.name)!
    play(heroscript: heroscript)!
}

fn config_save(args_ ArgsGet) ! {
    mut args := args_get(args_)
    mut context := base.context()!
    context.hero_config_set('ipapi', args.name, heroscript_default()!)!
}

fn set(o IPApi) ! {
    mut o2 := obj_init(o)!
    ipapi_global[o.name] = &o2
    ipapi_default = o.name
}

@[params]
pub struct PlayArgs {
pub mut:
    heroscript string // if filled in then plbook will be made out of it
    plbook     ?playbook.PlayBook
    reset      bool
}

pub fn play(args_ PlayArgs) ! {
    mut args := args_

    if args.heroscript == '' {
        args.heroscript = heroscript_default()!
    }
    mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }

    mut install_actions := plbook.find(filter: 'ipapi.configure')!
    if install_actions.len > 0 {
        for install_action in install_actions {
            mut p := install_action.params
            cfg_play(p)!
        }
    }
}

// switch instance to be used for ipapi
pub fn switch(name string) {
    ipapi_default = name
}
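The generated factory follows the usual herolib pattern: configuration is declared as heroscript, applied via play(), and instances are fetched with get(). A hedged sketch of that flow, with the configure parameters mirroring heroscript_default() in the next file:

```vlang
import freeflowuniverse.herolib.clients.ipapi

// register a named configuration, then fetch it from the global factory
ipapi.play(heroscript: "
!!ipapi.configure
    name:'default'
")!
mut client := ipapi.get(name: 'default')!
```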
58   lib/clients/ipapi/ipapi_model.v   Normal file
@@ -0,0 +1,58 @@
module ipapi

import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.core.httpconnection
import os

pub const version = '1.14.3'
const singleton = false
const default = true

// TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH THE STRUCT BELOW, IS STRUCTURED AS HEROSCRIPT
pub fn heroscript_default() !string {
    heroscript := "
    !!ipapi.configure
        name:'default'
        "

    return heroscript
}

// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED

@[heap]
pub struct IPApi {
pub mut:
    name string = 'default'

    conn ?&httpconnection.HTTPConnection @[str: skip]
}

fn cfg_play(p paramsparser.Params) ! {
    // THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH THE struct above
    mut mycfg := IPApi{
        name: p.get_default('name', 'default')!
    }
    set(mycfg)!
}

fn obj_init(obj_ IPApi) !IPApi {
    // never call get here, the only thing we can do here is work on the object itself
    mut obj := obj_
    return obj
}

pub fn (mut client IPApi) connection() !&httpconnection.HTTPConnection {
    mut c := client.conn or {
        mut c2 := httpconnection.new(
            name:  'ipapi_${client.name}'
            url:   'http://ip-api.com'
            cache: false
            retry: 20
        )!
        c2
    }

    client.conn = c
    return c
}
30   lib/clients/ipapi/readme.md   Normal file
@@ -0,0 +1,30 @@

# ipapi

To get started:

```vlang
import freeflowuniverse.herolib.clients.ipapi

mut client := ipapi.get()!

client...
```

## example heroscript

```hero
!!ipapi.configure
    secret: '...'
    host: 'localhost'
    port: 8888
```
@@ -3,7 +3,7 @@ module livekit
// App struct with `livekit.Client`, API keys, and other shared data
pub struct Client {
pub:
    url string @[required]
    api_key string @[required]
    api_secret string @[required]
    url        string @[required]
    api_key    string @[required]
    api_secret string @[required]
}

@@ -1,6 +1,7 @@

module livekit

pub fn new(client Client) Client {
    return Client{...client}
}
    return Client{
        ...client
    }
}

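new() simply copies the supplied Client value. A sketch of a typical flow with this client, assuming the module path freeflowuniverse.herolib.clients.livekit and the env variables used by the test further down:

```vlang
import freeflowuniverse.herolib.clients.livekit
import os

client := livekit.new(
    url:        os.getenv('LIVEKIT_URL')
    api_key:    os.getenv('LIVEKIT_API_KEY')
    api_secret: os.getenv('LIVEKIT_API_SECRET')
)
// list_rooms() below mints a short-lived access token internally
resp := client.list_rooms()!
for room in resp.rooms {
    println('${room.name}: ${room.num_participants} participants')
}
```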
@@ -5,47 +5,46 @@ import json
|
||||
|
||||
@[params]
|
||||
pub struct ListRoomsParams {
|
||||
names []string
|
||||
names []string
|
||||
}
|
||||
|
||||
pub struct ListRoomsResponse {
|
||||
pub:
|
||||
rooms []Room
|
||||
rooms []Room
|
||||
}
|
||||
|
||||
pub fn (c Client) list_rooms(params ListRoomsParams) !ListRoomsResponse {
|
||||
// Prepare request body
|
||||
request := params
|
||||
request_json := json.encode(request)
|
||||
// Prepare request body
|
||||
request := params
|
||||
request_json := json.encode(request)
|
||||
|
||||
|
||||
// create token and give grant to list rooms
|
||||
// create token and give grant to list rooms
|
||||
mut token := c.new_access_token()!
|
||||
token.grants.video.room_list = true
|
||||
token.grants.video.room_list = true
|
||||
|
||||
// make POST request
|
||||
url := '${c.url}/twirp/livekit.RoomService/ListRooms'
|
||||
// Configure HTTP request
|
||||
mut headers := http.new_header_from_map({
|
||||
http.CommonHeader.authorization: 'Bearer ${token.to_jwt()!}',
|
||||
http.CommonHeader.content_type: 'application/json'
|
||||
})
|
||||
// make POST request
|
||||
url := '${c.url}/twirp/livekit.RoomService/ListRooms'
|
||||
// Configure HTTP request
|
||||
mut headers := http.new_header_from_map({
|
||||
http.CommonHeader.authorization: 'Bearer ${token.to_jwt()!}'
|
||||
http.CommonHeader.content_type: 'application/json'
|
||||
})
|
||||
|
||||
response := http.fetch(http.FetchConfig{
|
||||
url: url
|
||||
method: .post
|
||||
header: headers
|
||||
data: request_json
|
||||
})!
|
||||
response := http.fetch(http.FetchConfig{
|
||||
url: url
|
||||
method: .post
|
||||
header: headers
|
||||
data: request_json
|
||||
})!
|
||||
|
||||
if response.status_code != 200 {
|
||||
return error('Failed to list rooms: $response.status_code')
|
||||
}
|
||||
if response.status_code != 200 {
|
||||
return error('Failed to list rooms: ${response.status_code}')
|
||||
}
|
||||
|
||||
// Parse response
|
||||
rooms_response := json.decode(ListRoomsResponse, response.body) or {
|
||||
return error('Failed to parse response: $err')
|
||||
}
|
||||
|
||||
return rooms_response
|
||||
// Parse response
|
||||
rooms_response := json.decode(ListRoomsResponse, response.body) or {
|
||||
return error('Failed to parse response: ${err}')
|
||||
}
|
||||
|
||||
return rooms_response
|
||||
}
|
||||
|
||||
@@ -5,29 +5,29 @@ import json
|
||||
|
||||
pub struct Codec {
|
||||
pub:
|
||||
fmtp_line string
|
||||
mime string
|
||||
fmtp_line string
|
||||
mime string
|
||||
}
|
||||
|
||||
pub struct Version {
|
||||
pub:
|
||||
ticks u64
|
||||
unix_micro string
|
||||
ticks u64
|
||||
unix_micro string
|
||||
}
|
||||
|
||||
pub struct Room {
|
||||
pub:
|
||||
active_recording bool
|
||||
creation_time string
|
||||
departure_timeout int
|
||||
empty_timeout int
|
||||
enabled_codecs []Codec
|
||||
max_participants int
|
||||
metadata string
|
||||
name string
|
||||
num_participants int
|
||||
num_publishers int
|
||||
sid string
|
||||
turn_password string
|
||||
version Version
|
||||
}
|
||||
active_recording bool
|
||||
creation_time string
|
||||
departure_timeout int
|
||||
empty_timeout int
|
||||
enabled_codecs []Codec
|
||||
max_participants int
|
||||
metadata string
|
||||
name string
|
||||
num_participants int
|
||||
num_publishers int
|
||||
sid string
|
||||
turn_password string
|
||||
version Version
|
||||
}
|
||||
|
||||
@@ -6,20 +6,20 @@ import freeflowuniverse.herolib.osal
|
||||
const env_file = '${os.dir(@FILE)}/.env'
|
||||
|
||||
fn testsuite_begin() ! {
|
||||
if os.exists(env_file) {
|
||||
osal.load_env_file(env_file)!
|
||||
}
|
||||
if os.exists(env_file) {
|
||||
osal.load_env_file(env_file)!
|
||||
}
|
||||
}
|
||||
|
||||
fn new_test_client() Client {
|
||||
return new(
|
||||
url: os.getenv('LIVEKIT_URL')
|
||||
api_key: os.getenv('LIVEKIT_API_KEY')
|
||||
api_secret: os.getenv('LIVEKIT_API_SECRET')
|
||||
)
|
||||
return new(
|
||||
url: os.getenv('LIVEKIT_URL')
|
||||
api_key: os.getenv('LIVEKIT_API_KEY')
|
||||
api_secret: os.getenv('LIVEKIT_API_SECRET')
|
||||
)
|
||||
}
|
||||
|
||||
fn test_client_list_rooms() ! {
|
||||
client := new_test_client()
|
||||
rooms := client.list_rooms()!
|
||||
client := new_test_client()
|
||||
rooms := client.list_rooms()!
|
||||
}
|
||||
|
||||
@@ -10,25 +10,25 @@ import json
|
||||
// Define AccessTokenOptions struct
|
||||
@[params]
|
||||
pub struct AccessTokenOptions {
|
||||
pub mut:
|
||||
ttl int = 21600// TTL in seconds
|
||||
name string // Display name for the participant
|
||||
identity string // Identity of the user
|
||||
metadata string // Custom metadata to be passed to participants
|
||||
pub mut:
|
||||
ttl int = 21600 // TTL in seconds
|
||||
name string // Display name for the participant
|
||||
identity string // Identity of the user
|
||||
metadata string // Custom metadata to be passed to participants
|
||||
}
|
||||
|
||||
// Constructor for AccessToken
|
||||
pub fn (client Client) new_access_token(options AccessTokenOptions) !AccessToken {
|
||||
return AccessToken{
|
||||
api_key: client.api_key
|
||||
api_key: client.api_key
|
||||
api_secret: client.api_secret
|
||||
identity: options.identity
|
||||
ttl: options.ttl
|
||||
grants: ClaimGrants{
|
||||
exp: time.now().unix()+ options.ttl
|
||||
iss: client.api_key
|
||||
sub: options.name
|
||||
identity: options.identity
|
||||
ttl: options.ttl
|
||||
grants: ClaimGrants{
|
||||
exp: time.now().unix() + options.ttl
|
||||
iss: client.api_key
|
||||
sub: options.name
|
||||
name: options.name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,23 +10,23 @@ import json
|
||||
// Struct representing grants
|
||||
pub struct ClaimGrants {
|
||||
pub mut:
|
||||
video VideoGrant
|
||||
iss string
|
||||
exp i64
|
||||
nbf int
|
||||
sub string
|
||||
name string
|
||||
video VideoGrant
|
||||
iss string
|
||||
exp i64
|
||||
nbf int
|
||||
sub string
|
||||
name string
|
||||
}
|
||||
|
||||
// VideoGrant struct placeholder
|
||||
pub struct VideoGrant {
|
||||
pub mut:
|
||||
room string
|
||||
room_join bool @[json: 'roomJoin']
|
||||
room_list bool @[json: 'roomList']
|
||||
can_publish bool @[json: 'canPublish']
|
||||
can_publish_data bool @[json: 'canPublishData']
|
||||
can_subscribe bool @[json: 'canSubscribe']
|
||||
room string
|
||||
room_join bool @[json: 'roomJoin']
|
||||
room_list bool @[json: 'roomList']
|
||||
can_publish bool @[json: 'canPublish']
|
||||
can_publish_data bool @[json: 'canPublishData']
|
||||
can_subscribe bool @[json: 'canSubscribe']
|
||||
}
|
||||
|
||||
// SIPGrant struct placeholder
|
||||
@@ -34,12 +34,12 @@ struct SIPGrant {}
|
||||
|
||||
// AccessToken class
|
||||
pub struct AccessToken {
|
||||
mut:
|
||||
api_key string
|
||||
api_secret string
|
||||
grants ClaimGrants
|
||||
identity string
|
||||
ttl int
|
||||
mut:
|
||||
api_key string
|
||||
api_secret string
|
||||
grants ClaimGrants
|
||||
identity string
|
||||
ttl int
|
||||
}
|
||||
|
||||
// Method to add a video grant to the token
|
||||
@@ -65,7 +65,8 @@ pub fn (token AccessToken) to_jwt() !string {
|
||||
unsigned_token := '${header_encoded}.${payload_encoded}'
|
||||
|
||||
// Create the HMAC-SHA256 signature
|
||||
signature := hmac.new(token.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, sha256.block_size)
|
||||
signature := hmac.new(token.api_secret.bytes(), unsigned_token.bytes(), sha256.sum,
|
||||
sha256.block_size)
|
||||
|
||||
// Encode the signature in base64
|
||||
signature_encoded := base64.url_encode(signature)
|
||||
@@ -73,4 +74,4 @@ pub fn (token AccessToken) to_jwt() !string {
|
||||
// Create the final JWT
|
||||
jwt := '${unsigned_token}.${signature_encoded}'
|
||||
return jwt
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
module mailclient
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
// import freeflowuniverse.herolib.core.playbook
|
||||
|
||||
// __global (
|
||||
// mailclient_global map[string]&MailClient
|
||||
// mailclient_default string
|
||||
// )
|
||||
|
||||
// /////////FACTORY
|
||||
|
||||
// @[params]
|
||||
// pub struct ArgsGet {
|
||||
// pub mut:
|
||||
// name string = 'default'
|
||||
// }
|
||||
|
||||
// fn args_get(args_ ArgsGet) ArgsGet {
|
||||
// mut args := args_
|
||||
// if args.name == '' {
|
||||
// args.name = mailclient_default
|
||||
// }
|
||||
// if args.name == '' {
|
||||
// args.name = 'default'
|
||||
// }
|
||||
// return args
|
||||
// }
|
||||
|
||||
// pub fn get(args_ ArgsGet) !&MailClient {
|
||||
// mut args := args_get(args_)
|
||||
// if args.name !in mailclient_global {
|
||||
// if !config_exists() {
|
||||
// if default {
|
||||
// config_save()!
|
||||
// }
|
||||
// }
|
||||
// config_load()!
|
||||
// }
|
||||
// return mailclient_global[args.name] or { panic('bug') }
|
||||
// }
|
||||
|
||||
// // switch instance to be used for mailclient
|
||||
// pub fn switch(name string) {
|
||||
// mailclient_default = name
|
||||
// }
|
||||
|
||||
fn config_exists(args_ ArgsGet) bool {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('mailclient', args.name)
|
||||
}
|
||||
|
||||
// fn config_load(args_ ArgsGet) ! {
|
||||
// mut args := args_get(args_)
|
||||
// mut context := base.context()!
|
||||
// mut heroscript := context.hero_config_get('mailclient', args.name)!
|
||||
// play(heroscript: heroscript)!
|
||||
// }
|
||||
|
||||
// fn config_save(args_ ArgsGet) ! {
|
||||
// mut args := args_get(args_)
|
||||
// mut context := base.context()!
|
||||
// context.hero_config_set('mailclient', args.name, heroscript_default())!
|
||||
// }
|
||||
|
||||
// fn set(o MailClient) ! {
|
||||
// mut o2 := obj_init(o)!
|
||||
// mailclient_global['default'] = &o2
|
||||
// }
|
||||
|
||||
// @[params]
|
||||
// pub struct InstallPlayArgs {
|
||||
// pub mut:
|
||||
// name string = 'default'
|
||||
// heroscript string // if filled in then plbook will be made out of it
|
||||
// plbook ?playbook.PlayBook
|
||||
// reset bool
|
||||
// start bool
|
||||
// stop bool
|
||||
// restart bool
|
||||
// delete bool
|
||||
// configure bool // make sure there is at least one installed
|
||||
// }
|
||||
|
||||
// pub fn play(args_ InstallPlayArgs) ! {
|
||||
// mut args := args_
|
||||
// println('debguzo1')
|
||||
// mut plbook := args.plbook or {
|
||||
// println('debguzo2')
|
||||
// heroscript := if args.heroscript == '' {
|
||||
// heroscript_default()
|
||||
// } else {
|
||||
// args.heroscript
|
||||
// }
|
||||
// playbook.new(text: heroscript)!
|
||||
// }
|
||||
|
||||
// mut install_actions := plbook.find(filter: 'mailclient.configure')!
|
||||
// println('debguzo3 ${install_actions}')
|
||||
// if install_actions.len > 0 {
|
||||
// for install_action in install_actions {
|
||||
// mut p := install_action.params
|
||||
// cfg_play(p)!
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
@@ -3,7 +3,6 @@ module mailclient
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
|
||||
__global (
|
||||
mailclient_global map[string]&MailClient
|
||||
@@ -19,64 +18,62 @@ pub mut:
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut model := args_
|
||||
if model.name == '' {
|
||||
model.name = mailclient_default
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = mailclient_default
|
||||
}
|
||||
if model.name == '' {
|
||||
model.name = 'default'
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return model
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&MailClient {
|
||||
mut args := args_get(args_)
|
||||
if args.name !in mailclient_global {
|
||||
if args.name == 'default' {
|
||||
if !config_exists(args) {
|
||||
if default {
|
||||
mut context := base.context() or { panic('bug') }
|
||||
context.hero_config_set('mailclient', args.name, heroscript_default())!
|
||||
}
|
||||
}
|
||||
load(args)!
|
||||
if !config_exists(args) {
|
||||
config_save(args)!
|
||||
}
|
||||
config_load(args)!
|
||||
}
|
||||
return mailclient_global[args.name] or {
|
||||
println(mailclient_global)
|
||||
panic('could not get config for ${args.name} with name:${args.name}')
|
||||
// bug if we get here because should be in globals
|
||||
panic('could not get config for mailclient with name, is bug:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
// set the model in mem and the config on the filesystem
|
||||
pub fn set(o MailClient) ! {
|
||||
pub fn config_exists(args_ ArgsGet) bool {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('mailclient', args.name)
|
||||
}
|
||||
|
||||
pub fn config_load(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
mut heroscript := context.hero_config_get('mailclient', args.name)!
|
||||
play(heroscript: heroscript)!
|
||||
}
|
||||
|
||||
pub fn config_save(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_set('mailclient', args.name, heroscript_default(instance: args.name)!)!
|
||||
}
|
||||
|
||||
pub fn config_delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('mailclient', args.name)!
|
||||
}
|
||||
|
||||
fn set(o MailClient) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
mailclient_global[o.name] = &o2
|
||||
mailclient_default = o.name
|
||||
}
|
||||
|
||||
// check we find the config on the filesystem
|
||||
pub fn exists(args_ ArgsGet) bool {
|
||||
mut model := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('mailclient', model.name)
|
||||
}
|
||||
|
||||
// load the config error if it doesn't exist
|
||||
pub fn load(args_ ArgsGet) ! {
|
||||
mut model := args_get(args_)
|
||||
mut context := base.context()!
|
||||
mut heroscript := context.hero_config_get('mailclient', model.name)!
|
||||
play(heroscript: heroscript)!
|
||||
}
|
||||
|
||||
// // save the config to the filesystem in the context
|
||||
// pub fn save(o MailClient) ! {
|
||||
// mut context := base.context()!
|
||||
// heroscript := encoderhero.encode[MailClient](o)!
|
||||
// context.hero_config_set('mailclient', model.name, heroscript)!
|
||||
// }
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
@@ -86,18 +83,30 @@ pub mut:
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut model := args_
|
||||
mut args := args_
|
||||
|
||||
if model.heroscript == '' {
|
||||
model.heroscript = heroscript_default()
|
||||
if args.heroscript == '' {
|
||||
args.heroscript = heroscript_default()!
|
||||
}
|
||||
mut plbook := model.plbook or { playbook.new(text: model.heroscript)! }
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut configure_actions := plbook.find(filter: 'mailclient.configure')!
|
||||
if configure_actions.len > 0 {
|
||||
for config_action in configure_actions {
|
||||
mut p := config_action.params
|
||||
mut install_actions := plbook.find(filter: 'mailclient.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
mut p := install_action.params
|
||||
cfg_play(p)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// switch instance to be used for mailclient
|
||||
pub fn switch(name string) {
|
||||
mailclient_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
|
||||
@@ -3,19 +3,19 @@ module mailclient
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import os
|
||||
|
||||
pub const version = '1.0.0'
|
||||
pub const version = '0.0.0'
|
||||
const singleton = false
|
||||
const default = true
|
||||
|
||||
pub fn heroscript_default() string {
|
||||
pub fn heroscript_default(args DefaultConfigArgs) !string {
|
||||
mail_from := os.getenv_opt('MAIL_FROM') or { 'info@example.com' }
|
||||
mail_password := os.getenv_opt('MAIL_PASSWORD') or { 'secretpassword' }
|
||||
mail_port := (os.getenv_opt('MAIL_PORT') or { '465' }).int()
|
||||
mail_server := os.getenv_opt('MAIL_SERVER') or { 'smtp-relay.brevo.com' }
|
||||
mail_username := os.getenv_opt('MAIL_USERNAME') or { 'kristof@incubaid.com' }
|
||||
mail_username := os.getenv_opt('MAIL_USERNAME') or { 'mail@incubaid.com' }
|
||||
|
||||
heroscript := "
|
||||
!!mailclient.configure name:'default'
|
||||
!!mailclient.configure name:'${args.instance}'
|
||||
mail_from: '${mail_from}'
|
||||
mail_password: '${mail_password}'
|
||||
mail_port: ${mail_port}
|
||||
@@ -26,6 +26,7 @@ pub fn heroscript_default() string {
|
||||
return heroscript
|
||||
}
|
||||
|
||||
@[heap]
|
||||
pub struct MailClient {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
@@ -51,19 +52,6 @@ fn cfg_play(p paramsparser.Params) ! {
|
||||
}
|
||||
|
||||
fn obj_init(obj_ MailClient) !MailClient {
|
||||
// never call get here, only thing we can do here is work on object itself
|
||||
mut obj := obj_
|
||||
return obj
|
||||
}
|
||||
|
||||
// user needs to us switch to make sure we get the right object
|
||||
pub fn configure(config MailClient) !MailClient {
|
||||
client := MailClient{
|
||||
...config
|
||||
}
|
||||
set(client)!
|
||||
return client
|
||||
// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
|
||||
|
||||
// implement if steps need to be done for configuration
|
||||
}
|
||||
|
||||
@@ -1,16 +1,29 @@
# mailclient

To get started

```vlang
import freeflowuniverse.herolib.clients. mailclient
import freeflowuniverse.herolib.clients.mailclient

mut client:= mailclient.get()!

client.send(subject:'this is a test',to:'kds@something.com,kds2@else.com',body:'
// remove the previous one, otherwise the env variables are not read
mailclient.config_delete(name:"test")!

// env variables which need to be set are:
// - MAIL_FROM=...
// - MAIL_PASSWORD=...
// - MAIL_PORT=465
// - MAIL_SERVER=...
// - MAIL_USERNAME=...

mut client:= mailclient.get(name:"test")!

println(client)

client.send(subject:'this is a test',to:'kristof@incubaid.com',body:'
this is my email content
')!

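The same setup can also be driven purely from heroscript instead of environment variables; a hedged sketch, with parameter names taken from heroscript_default() earlier in this diff and placeholder values:

```vlang
import freeflowuniverse.herolib.clients.mailclient

// register a named configuration through heroscript
mailclient.play(heroscript: "
!!mailclient.configure name:'mymail'
    mail_from: 'info@example.com'
    mail_password: 'secretpassword'
    mail_port: 465
    mail_server: 'smtp-relay.brevo.com'
    mail_username: 'mail@example.com'
")!

mut client := mailclient.get(name: 'mymail')!
client.send(subject: 'hello', to: 'someone@example.com', body: 'test body')!
```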
@@ -1,39 +1,65 @@
|
||||
module mycelium
|
||||
|
||||
import json
|
||||
import encoding.base64
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
|
||||
// Represents a destination for a message, can be either IP or public key
|
||||
pub struct MessageDestination {
|
||||
pub:
|
||||
pk string
|
||||
ip string @[omitempty] // IP in the subnet of the receiver node
|
||||
pk string @[omitempty] // hex encoded public key of the receiver node
|
||||
}
|
||||
|
||||
// Body of a message to be sent
|
||||
pub struct PushMessageBody {
|
||||
pub:
|
||||
dst MessageDestination
|
||||
payload string
|
||||
topic ?string // optional message topic
|
||||
payload string // base64 encoded message
|
||||
}
|
||||
|
||||
// Response containing message ID after pushing
|
||||
pub struct PushMessageResponseId {
|
||||
pub:
|
||||
id string // hex encoded message ID
|
||||
}
|
||||
|
||||
// A message received by the system
|
||||
pub struct InboundMessage {
|
||||
pub:
|
||||
id string
|
||||
src_ip string @[json: 'srcIP']
|
||||
src_pk string @[json: 'srcPk']
|
||||
dst_ip string @[json: 'dstIp']
|
||||
dst_pk string @[json: 'dstPk']
|
||||
payload string
|
||||
src_ip string @[json: 'srcIp'] // Sender overlay IP address
|
||||
src_pk string @[json: 'srcPk'] // Sender public key, hex encoded
|
||||
dst_ip string @[json: 'dstIp'] // Receiver overlay IP address
|
||||
dst_pk string @[json: 'dstPk'] // Receiver public key, hex encoded
|
||||
topic string // Optional message topic
|
||||
payload string // Message payload, base64 encoded
|
||||
}
|
||||
|
||||
// Information about an outbound message
|
||||
pub struct MessageStatusResponse {
|
||||
pub:
|
||||
id string
|
||||
dst string
|
||||
state string
|
||||
created string
|
||||
deadline string
|
||||
msg_len string @[json: 'msgLen']
|
||||
dst string // IP address of receiving node
|
||||
state string // pending, received, read, aborted or sending object
|
||||
created i64 // Unix timestamp of creation
|
||||
deadline i64 // Unix timestamp of expiry
|
||||
msg_len int @[json: 'msgLen'] // Length in bytes
|
||||
}
|
||||
|
||||
// General information about a node
|
||||
pub struct Info {
|
||||
pub:
|
||||
node_subnet string @[json: 'nodeSubnet'] // subnet owned by node
|
||||
}
|
||||
|
||||
// Response containing public key for a node IP
|
||||
pub struct PublicKeyResponse {
|
||||
pub:
|
||||
node_pub_key string @[json: 'NodePubKey'] // hex encoded public key
|
||||
}
|
||||
|
||||
// Get connection to mycelium server
|
||||
pub fn (mut self Mycelium) connection() !&httpconnection.HTTPConnection {
|
||||
mut c := self.conn or {
|
||||
mut c2 := httpconnection.new(
|
||||
@@ -47,30 +73,63 @@ pub fn (mut self Mycelium) connection() !&httpconnection.HTTPConnection {
|
||||
return c
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) send_msg(pk string, payload string, wait bool) !InboundMessage {
|
||||
@[params]
|
||||
pub struct SendMessageArgs {
|
||||
pub mut:
|
||||
public_key string @[required]
|
||||
payload string @[required]
|
||||
topic ?string
|
||||
wait bool
|
||||
}
|
||||
|
||||
// Send a message to a node identified by public key
|
||||
pub fn (mut self Mycelium) send_msg(args SendMessageArgs) !InboundMessage {
|
||||
mut conn := self.connection()!
|
||||
mut params := {
|
||||
'dst': json.encode(MessageDestination{ pk: pk })
|
||||
'payload': payload
|
||||
mut body := PushMessageBody{
|
||||
dst: MessageDestination{
|
||||
pk: args.public_key
|
||||
ip: ''
|
||||
}
|
||||
payload: base64.encode_str(args.payload)
|
||||
topic: if v := args.topic {
|
||||
base64.encode_str(v)
|
||||
} else {
|
||||
none
|
||||
}
|
||||
}
|
||||
mut prefix := ''
|
||||
if wait {
|
||||
prefix = '?reply_timeout=120'
|
||||
mut prefix := '/api/v1/messages'
|
||||
if args.wait {
|
||||
prefix += '?reply_timeout=120'
|
||||
}
|
||||
return conn.post_json_generic[InboundMessage](
|
||||
method: .post
|
||||
prefix: prefix
|
||||
params: params
|
||||
data: json.encode(body)
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) receive_msg(wait bool) !InboundMessage {
|
||||
@[params]
|
||||
pub struct ReceiveMessageArgs {
|
||||
pub mut:
|
||||
topic ?string
|
||||
wait bool
|
||||
peek bool
|
||||
}
|
||||
|
||||
// Receive a message from the queue
|
||||
pub fn (mut self Mycelium) receive_msg(args ReceiveMessageArgs) !InboundMessage {
|
||||
mut conn := self.connection()!
|
||||
mut prefix := ''
|
||||
if wait {
|
||||
prefix = '?timeout=60'
|
||||
mut prefix := '/api/v1/messages?peek=${args.peek}&'
|
||||
|
||||
if args.wait {
|
||||
prefix += 'timeout=120&'
|
||||
}
|
||||
|
||||
if v := args.topic {
|
||||
prefix += 'topic=${base64.encode_str(v)}'
|
||||
}
|
||||
|
||||
return conn.get_json_generic[InboundMessage](
|
||||
method: .get
|
||||
prefix: prefix
|
||||
@@ -78,17 +137,9 @@ pub fn (mut self Mycelium) receive_msg(wait bool) !InboundMessage {
|
||||
)!
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) receive_msg_opt(wait bool) ?InboundMessage {
|
||||
mut conn := self.connection()!
|
||||
mut prefix := ''
|
||||
if wait {
|
||||
prefix = '?timeout=60'
|
||||
}
|
||||
res := conn.get_json_generic[InboundMessage](
|
||||
method: .get
|
||||
prefix: prefix
|
||||
dataformat: .json
|
||||
) or {
|
||||
// Optional version of receive_msg that returns none on 204
|
||||
pub fn (mut self Mycelium) receive_msg_opt(args ReceiveMessageArgs) ?InboundMessage {
|
||||
res := self.receive_msg(args) or {
|
||||
if err.msg().contains('204') {
|
||||
return none
|
||||
}
|
||||
@@ -97,25 +148,62 @@ pub fn (mut self Mycelium) receive_msg_opt(wait bool) ?InboundMessage {
|
||||
return res
|
||||
}
|
||||
|
||||
// Get status of a message by ID
|
||||
pub fn (mut self Mycelium) get_msg_status(id string) !MessageStatusResponse {
|
||||
mut conn := self.connection()!
|
||||
return conn.get_json_generic[MessageStatusResponse](
|
||||
method: .get
|
||||
prefix: 'status/${id}'
|
||||
prefix: '/api/v1/messages/status/${id}'
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) reply_msg(id string, pk string, payload string) ! {
|
||||
@[params]
|
||||
pub struct ReplyMessageArgs {
|
||||
pub mut:
|
||||
id string @[required]
|
||||
public_key string @[required]
|
||||
payload string @[required]
|
||||
topic ?string
|
||||
}
|
||||
|
||||
// Reply to a message
|
||||
pub fn (mut self Mycelium) reply_msg(args ReplyMessageArgs) ! {
|
||||
mut conn := self.connection()!
|
||||
mut params := {
|
||||
'dst': json.encode(MessageDestination{ pk: pk })
|
||||
'payload': payload
|
||||
mut body := PushMessageBody{
|
||||
dst: MessageDestination{
|
||||
pk: args.public_key
|
||||
ip: ''
|
||||
}
|
||||
payload: base64.encode_str(args.payload)
|
||||
topic: if v := args.topic { base64.encode_str(v) } else { none }
|
||||
}
|
||||
conn.post_json_generic[json.Any](
|
||||
_ := conn.post_json_str(
|
||||
method: .post
|
||||
prefix: 'reply/${id}'
|
||||
params: params
|
||||
prefix: '/api/v1/messages/reply/${args.id}'
|
||||
data: json.encode(body)
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
// curl -v -H 'Content-Type: application/json' -d '{"dst": {"pk": "be4bf135d60b7e43a46be1ad68f955cdc1209a3c55dc30d00c4463b1dace4377"}, "payload": "xuV+"}' http://localhost:8989/api/v1/messages\
|
||||
|
||||
// Get node info
|
||||
pub fn (mut self Mycelium) get_info() !Info {
|
||||
mut conn := self.connection()!
|
||||
return conn.get_json_generic[Info](
|
||||
method: .get
|
||||
prefix: '/api/v1/admin'
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
// Get public key for a node IP
|
||||
pub fn (mut self Mycelium) get_pubkey_from_ip(ip string) !PublicKeyResponse {
|
||||
mut conn := self.connection()!
|
||||
return conn.get_json_generic[PublicKeyResponse](
|
||||
method: .get
|
||||
prefix: '/api/v1/pubkey/${ip}'
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
71   lib/clients/mycelium/mycelium_check.v   Normal file
@@ -0,0 +1,71 @@
module mycelium

import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.core
import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.osal.screen
import freeflowuniverse.herolib.ui
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
import json

pub fn check() bool {
    // if core.is_osx()! {
    //     mut scr := screen.new(reset: false) or { return false }
    //     name := 'mycelium'
    //     if !scr.exists(name) {
    //         return false
    //     }
    // }

    // if !(osal.process_exists_byname('mycelium') or { return false }) {
    //     return false
    // }

    // TODO: might be dangerous if that one goes out
    ping_result := osal.ping(address: '40a:152c:b85b:9646:5b71:d03a:eb27:2462', retry: 2) or {
        return false
    }
    if ping_result == .ok {
        console.print_debug('could reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
        return true
    }
    console.print_stderr('could not reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
    return false
}

pub struct MyceliumInspectResult {
pub:
    public_key string @[json: publicKey]
    address    string
}

@[params]
pub struct MyceliumInspectArgs {
pub:
    key_file_path string = '/root/hero/cfg/priv_key.bin'
}

pub fn inspect(args MyceliumInspectArgs) !MyceliumInspectResult {
    command := 'mycelium inspect --key-file ${args.key_file_path} --json'
    result := os.execute(command)

    if result.exit_code != 0 {
        return error('Command failed: ${result.output}')
    }

    inspect_result := json.decode(MyceliumInspectResult, result.output) or {
        return error('Failed to parse JSON: ${err}')
    }

    return inspect_result
}

// if this returns empty then mycelium is probably not installed
pub fn ipaddr() string {
    r := inspect() or { MyceliumInspectResult{} }
    return r.address
}
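check() pings a known overlay address and inspect() shells out to the locally installed mycelium binary. A short usage sketch, assuming the module path freeflowuniverse.herolib.clients.mycelium:

```vlang
import freeflowuniverse.herolib.clients.mycelium

// is the overlay network reachable from this node?
if mycelium.check() {
    println('mycelium overlay reachable')
}

// read this node's overlay address and public key from the default key file
res := mycelium.inspect()!
println('address: ${res.address} pubkey: ${res.public_key}')

// ipaddr() returns an empty string when mycelium is not installed
println(mycelium.ipaddr())
```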
@@ -2,8 +2,6 @@ module mycelium
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
|
||||
__global (
|
||||
mycelium_global map[string]&Mycelium
|
||||
@@ -12,35 +10,71 @@ __global (
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
// set the model in mem and the config on the filesystem
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&Mycelium {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := Mycelium{}
|
||||
if args.name !in mycelium_global {
|
||||
if !exists(args)! {
|
||||
set(obj)!
|
||||
} else {
|
||||
heroscript := context.hero_config_get('mycelium', args.name)!
|
||||
mut obj_ := heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return mycelium_global[args.name] or {
|
||||
println(mycelium_global)
|
||||
// bug if we get here because should be in globals
|
||||
panic('could not get config for mycelium with name, is bug:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
// register the config for the future
|
||||
pub fn set(o Mycelium) ! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set('mycelium', o.name, heroscript)!
|
||||
}
|
||||
|
||||
// does the config exists?
|
||||
pub fn exists(args_ ArgsGet) !bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists('mycelium', args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('mycelium', args.name)!
|
||||
if args.name in mycelium_global {
|
||||
// del mycelium_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
fn set_in_mem(o Mycelium) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
mycelium_global[o.name] = &o2
|
||||
mycelium_default = o.name
|
||||
}
|
||||
|
||||
// check we find the config on the filesystem
|
||||
pub fn exists(args_ ArgsGet) bool {
|
||||
mut model := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('mycelium', model.name)
|
||||
}
|
||||
|
||||
// load the config error if it doesn't exist
|
||||
pub fn load(args_ ArgsGet) ! {
|
||||
mut model := args_get(args_)
|
||||
mut context := base.context()!
|
||||
mut heroscript := context.hero_config_get('mycelium', model.name)!
|
||||
play(heroscript: heroscript)!
|
||||
}
|
||||
|
||||
// save the config to the filesystem in the context
|
||||
pub fn save(o Mycelium) ! {
|
||||
mut context := base.context()!
|
||||
heroscript := encoderhero.encode[Mycelium](o)!
|
||||
context.hero_config_set('mycelium', model.name, heroscript)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
@@ -50,21 +84,28 @@ pub mut:
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut model := args_
|
||||
mut args := args_
|
||||
|
||||
if model.heroscript == '' {
|
||||
model.heroscript = heroscript_default()!
|
||||
}
|
||||
mut plbook := model.plbook or { playbook.new(text: model.heroscript)! }
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut configure_actions := plbook.find(filter: 'mycelium.configure')!
|
||||
if configure_actions.len > 0 {
|
||||
for config_action in configure_actions {
|
||||
mut p := config_action.params
|
||||
mycfg := cfg_play(p)!
|
||||
console.print_debug('install action mycelium.configure\n${mycfg}')
|
||||
set(mycfg)!
|
||||
save(mycfg)!
|
||||
mut install_actions := plbook.find(filter: 'mycelium.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
heroscript := install_action.heroscript()
|
||||
mut obj2 := heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// switch instance to be used for mycelium
|
||||
pub fn switch(name string) {
|
||||
mycelium_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
|
||||
@@ -1,39 +1,33 @@
|
||||
module mycelium
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import os
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
|
||||
pub const version = '0.0.0'
|
||||
const singleton = true
|
||||
const default = true
|
||||
|
||||
pub fn heroscript_default() !string {
|
||||
heroscript := "
|
||||
!!mycelium.configure
|
||||
name:'mycelium'
|
||||
"
|
||||
return heroscript
|
||||
}
|
||||
|
||||
@[heap]
|
||||
pub struct Mycelium {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
server_url string
|
||||
conn ?&httpconnection.HTTPConnection
|
||||
server_url string = 'http://localhost:8989'
|
||||
conn ?&httpconnection.HTTPConnection @[skip; str: skip]
|
||||
}
|
||||
|
||||
fn cfg_play(p paramsparser.Params) ! {
|
||||
mut mycfg := Mycelium{
|
||||
name: p.get_default('name', 'default')!
|
||||
server_url: p.get_default('server_url', 'http://localhost:8989/api/v1/messages')!
|
||||
}
|
||||
set(mycfg)!
|
||||
// your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ Mycelium) !Mycelium {
|
||||
mut mycfg := mycfg_
|
||||
return mycfg
|
||||
}
|
||||
|
||||
fn obj_init(obj_ Mycelium) !Mycelium {
|
||||
// never call get here, only thing we can do here is work on object itself
|
||||
mut obj := obj_
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj Mycelium) !string {
|
||||
return encoderhero.encode[Mycelium](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !Mycelium {
|
||||
mut obj := encoderhero.decode[Mycelium](heroscript)!
|
||||
return obj
|
||||
}
|
||||
|
||||
602
lib/clients/mycelium/openapi.yaml
Normal file
602
lib/clients/mycelium/openapi.yaml
Normal file
@@ -0,0 +1,602 @@
|
||||
openapi: 3.0.2
|
||||
info:
|
||||
version: '1.0.0'
|
||||
|
||||
title: Mycelium management
|
||||
contact:
|
||||
url: 'https://github.com/threefoldtech/mycelium'
|
||||
license:
|
||||
name: Apache 2.0
|
||||
url: 'https://github.com/threefoldtech/mycelium/blob/master/LICENSE'
|
||||
|
||||
description: |
|
||||
This is the specification of the **mycelium** management API. It is used to perform admin tasks on the system, and
|
||||
to perform administrative duties.
|
||||
|
||||
externalDocs:
|
||||
description: For full documentation, check out the mycelium github repo.
|
||||
url: 'https://github.com/threefoldtech/mycelium'
|
||||
|
||||
tags:
|
||||
- name: Admin
|
||||
description: Administrative operations
|
||||
- name: Peer
|
||||
description: Operations related to peer management
|
||||
- name: Route
|
||||
description: Operations related to network routes
|
||||
- name: Message
|
||||
description: Operations on the embedded message subsystem
|
||||
|
||||
servers:
|
||||
- url: 'http://localhost:8989'
|
||||
|
||||
paths:
|
||||
'/api/v1/admin':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
summary: Get general info about the node
|
||||
description: |
|
||||
Get general info about the node, which is not related to other more specific functionality
|
||||
operationId: getInfo
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Info'
|
||||
|
||||
'/api/v1/admin/peers':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
- Peer
|
||||
summary: List known peers
|
||||
description: |
|
||||
List all peers known in the system, and info about their connection.
|
||||
This includes the endpoint, how we know about the peer, the connection state, and if the connection is alive the amount
|
||||
of bytes we've sent to and received from the peer.
|
||||
operationId: getPeers
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/PeerStats'
|
||||
post:
|
||||
tags:
|
||||
- Admin
|
||||
- Peer
|
||||
summary: Add a new peer
|
||||
description: |
|
||||
Add a new peer identified by the provided endpoint.
|
||||
The peer is added to the list of known peers. It will eventually be connected
|
||||
to by the standard connection loop of the peer manager. This means that a peer
|
||||
which can't be connected to will stay in the system, as it might be reachable
|
||||
later on.
|
||||
operationId: addPeer
|
||||
responses:
|
||||
'204':
|
||||
description: Peer added
|
||||
'400':
|
||||
description: Malformed endpoint
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: Details about why the endpoint is not valid
|
||||
'409':
|
||||
description: Peer already exists
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: message saying we already know this peer
|
||||
|
||||
'/api/v1/admin/peers/{endpoint}':
|
||||
delete:
|
||||
tags:
|
||||
- Admin
|
||||
- Peer
|
||||
summary: Remove an existing peer
|
||||
description: |
|
||||
Remove an existing peer identified by the provided endpoint.
|
||||
The peer is removed from the list of known peers. If a connection to it
|
||||
is currently active, it will be closed.
|
||||
operationId: deletePeer
|
||||
responses:
|
||||
'204':
|
||||
description: Peer removed
|
||||
'400':
|
||||
description: Malformed endpoint
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: Details about why the endpoint is not valid
|
||||
'404':
|
||||
description: Peer doesn't exist
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: message saying we don't know this peer
|
||||
|
||||
'/api/v1/admin/routes/selected':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
- Route
|
||||
summary: List all selected routes
|
||||
description: |
|
||||
List all selected routes in the system, and their next hop identifier, metric and sequence number.
|
||||
It is possible for a route to be selected and have an infinite metric. This route will however not forward packets.
|
||||
operationId: getSelectedRoutes
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Route'
|
||||
|
||||
'/api/v1/admin/routes/fallback':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
- Route
|
||||
summary: List all active fallback routes
|
||||
description: |
|
||||
List all fallback routes in the system, and their next hop identifier, metric and sequence number.
|
||||
These routes are available to be selected in case the selected route for a destination suddenly fails, or gets retracted.
|
||||
operationId: getSelectedRoutes
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Route'
|
||||
|
||||
'/api/v1/messages':
|
||||
get:
|
||||
tags:
|
||||
- Message
|
||||
summary: Get a message from the inbound message queue
|
||||
description: |
|
||||
Get a message from the inbound message queue. By default, the message is removed from the queue and won't be shown again.
|
||||
If the peek query parameter is set to true, the message will be peeked, and the next call to this endpoint will show the same message.
|
||||
This method returns immediately by default: a message is returned if one is ready, and if there isn't nothing is returned. If the timeout
|
||||
query parameter is set, this call won't return for the given amount of seconds, unless a message is received
|
||||
operationId: popMessage
|
||||
parameters:
|
||||
- in: query
|
||||
name: peek
|
||||
required: false
|
||||
schema:
|
||||
type: boolean
|
||||
description: Whether to peek the message or not. If this is true, the message won't be removed from the inbound queue when it is read
|
||||
example: true
|
||||
- in: query
|
||||
name: timeout
|
||||
required: false
|
||||
schema:
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
description: |
|
||||
Amount of seconds to wait for a message to arrive if one is not available. Setting this to 0 is valid and will return
|
||||
a message if present, or return immediately if there isn't
|
||||
example: 60
|
||||
- in: query
|
||||
name: topic
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
format: byte
|
||||
minLength: 0
|
||||
maxLength: 340
|
||||
description: |
|
||||
Optional filter for loading messages. If set, the system checks if the message has the given string at the start. This way
|
||||
a topic can be encoded.
|
||||
example: example.topic
|
||||
responses:
|
||||
'200':
|
||||
description: Message retrieved
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/InboundMessage'
|
||||
'204':
|
||||
description: No message ready
|
||||
post:
|
||||
tags:
|
||||
- Message
|
||||
summary: Submit a new message to the system.
|
||||
description: |
|
||||
Push a new message to the systems outbound message queue. The system will continuously attempt to send the message until
|
||||
it is either fully transmitted, or the send deadline is expired.
|
||||
operationId: pushMessage
|
||||
parameters:
|
||||
- in: query
|
||||
name: reply_timeout
|
||||
required: false
|
||||
schema:
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
description: |
|
||||
Amount of seconds to wait for a reply to this message to come in. If not set, the system won't wait for a reply and return
|
||||
the ID of the message, which can be used later. If set, the system will wait for at most the given amount of seconds for a reply
|
||||
to come in. If a reply arrives, it is returned to the client. If not, the message ID is returned for later use.
|
||||
example: 120
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageBody'
|
||||
responses:
|
||||
'200':
|
||||
description: We received a reply within the specified timeout
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/InboundMessage'
|
||||
|
||||
'201':
|
||||
description: Message pushed successfully, and not waiting for a reply
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageResponseId'
|
||||
'408':
|
||||
description: The system timed out waiting for a reply to the message
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageResponseId'
|
||||
|
||||
'/api/v1/messsages/reply/{id}':
|
||||
post:
|
||||
tags:
|
||||
- Message
|
||||
summary: Reply to a message with the given ID
|
||||
description: |
|
||||
Submits a reply message to the system, where ID is an id of a previously received message. If the sender is waiting
|
||||
for a reply, it will bypass the queue of open messages.
|
||||
operationId: pushMessageReply
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: abcdef0123456789
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageBody'
|
||||
responses:
|
||||
'204':
|
||||
description: successfully submitted the reply
|
||||
|
||||
'/api/v1/messages/status/{id}':
|
||||
get:
|
||||
tags:
|
||||
- Message
|
||||
summary: Get the status of an outbound message
|
||||
description: |
|
||||
Get information about the current state of an outbound message. This can be used to check the transmission
|
||||
state, size and destination of the message.
|
||||
operationId: getMessageInfo
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: abcdef0123456789
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/MessageStatusResponse'
|
||||
'404':
|
||||
description: Message not found
|
||||
|
||||
'/api/v1/pubkey/{mycelium_ip}':
|
||||
get:
|
||||
summary: Get the pubkey from node ip
|
||||
description: |
|
||||
Get the node's public key from its IP address.
|
||||
operationId: getPublicKeyFromIp
|
||||
parameters:
|
||||
- in: path
|
||||
name: mycelium_ip
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 5fd:7636:b80:9ad0::1
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PublicKeyResponse'
|
||||
'404':
|
||||
description: Public key not found
|
||||
|
||||
|
||||
components:
|
||||
schemas:
|
||||
Info:
|
||||
description: General information about a node
|
||||
type: object
|
||||
properties:
|
||||
nodeSubnet:
|
||||
description: The subnet owned by the node and advertised to peers
|
||||
type: string
|
||||
example: 54f:b680:ba6e:7ced::/64
|
||||
|
||||
Endpoint:
|
||||
description: Identification to connect to a peer
|
||||
type: object
|
||||
properties:
|
||||
proto:
|
||||
description: Protocol used
|
||||
type: string
|
||||
enum:
|
||||
- 'tcp'
|
||||
- 'quic'
|
||||
example: tcp
|
||||
socketAddr:
|
||||
description: The socket address used
|
||||
type: string
|
||||
example: 192.0.2.6:9651
|
||||
|
||||
PeerStats:
|
||||
description: Info about a peer
|
||||
type: object
|
||||
properties:
|
||||
endpoint:
|
||||
$ref: '#/components/schemas/Endpoint'
|
||||
type:
|
||||
description: How we know about this peer
|
||||
type: string
|
||||
enum:
|
||||
- 'static'
|
||||
- 'inbound'
|
||||
- 'linkLocalDiscovery'
|
||||
example: static
|
||||
connectionState:
|
||||
description: The current state of the connection to the peer
|
||||
type: string
|
||||
enum:
|
||||
- 'alive'
|
||||
- 'connecting'
|
||||
- 'dead'
|
||||
example: alive
|
||||
txBytes:
|
||||
description: The amount of bytes transmitted to this peer
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
example: 464531564
|
||||
rxBytes:
|
||||
description: The amount of bytes received from this peer
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
example: 64645089
|
||||
|
||||
Route:
|
||||
description: Information about a route
|
||||
type: object
|
||||
properties:
|
||||
subnet:
|
||||
description: The overlay subnet for which this is the route
|
||||
type: string
|
||||
example: 469:1348:ab0c:a1d8::/64
|
||||
nextHop:
|
||||
description: A way to identify the next hop of the route, where forwarded packets will be sent
|
||||
type: string
|
||||
example: TCP 203.0.113.2:60128 <-> 198.51.100.27:9651
|
||||
metric:
|
||||
description: The metric of the route, an estimation of how long the packet will take to arrive at its final destination
|
||||
oneOf:
|
||||
- description: A finite metric value
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 0
|
||||
maximum: 65534
|
||||
example: 13
|
||||
- description: An infinite (unreachable) metric. This is always `infinite`
|
||||
type: string
|
||||
example: infinite
|
||||
seqno:
|
||||
description: the sequence number advertised with this route by the source
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 0
|
||||
maximum: 65535
|
||||
example: 1
|
||||
|
||||
InboundMessage:
|
||||
description: A message received by the system
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
description: Id of the message, hex encoded
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: 0123456789abcdef
|
||||
srcIp:
|
||||
description: Sender overlay IP address
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 449:abcd:0123:defa::1
|
||||
srcPk:
|
||||
description: Sender public key, hex encoded
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: fedbca9876543210fedbca9876543210fedbca9876543210fedbca9876543210
|
||||
dstIp:
|
||||
description: Receiver overlay IP address
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 34f:b680:ba6e:7ced:355f:346f:d97b:eecb
|
||||
dstPk:
|
||||
description: Receiver public key, hex encoded. This is the public key of the system
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: 02468ace13579bdf02468ace13579bdf02468ace13579bdf02468ace13579bdf
|
||||
topic:
|
||||
description: An optional message topic
|
||||
type: string
|
||||
format: byte
|
||||
minLength: 0
|
||||
maxLength: 340
|
||||
example: hpV+
|
||||
payload:
|
||||
description: The message payload, encoded in standard alphabet base64
|
||||
type: string
|
||||
format: byte
|
||||
example: xuV+
|
||||
|
||||
PushMessageBody:
|
||||
description: A message to send to a given receiver
|
||||
type: object
|
||||
properties:
|
||||
dst:
|
||||
$ref: '#/components/schemas/MessageDestination'
|
||||
topic:
|
||||
description: An optional message topic
|
||||
type: string
|
||||
format: byte
|
||||
minLength: 0
|
||||
maxLength: 340
|
||||
example: hpV+
|
||||
payload:
|
||||
description: The message to send, base64 encoded
|
||||
type: string
|
||||
format: byte
|
||||
example: xuV+
|
||||
|
||||
MessageDestination:
|
||||
oneOf:
|
||||
- description: An IP in the subnet of the receiver node
|
||||
type: object
|
||||
properties:
|
||||
ip:
|
||||
description: The target IP of the message
|
||||
format: ipv6
|
||||
example: 449:abcd:0123:defa::1
|
||||
- description: The hex encoded public key of the receiver node
|
||||
type: object
|
||||
properties:
|
||||
pk:
|
||||
description: The hex encoded public key of the target node
|
||||
type: string
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32
|
||||
|
||||
PushMessageResponseId:
|
||||
description: The ID generated for a message after pushing it to the system
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
description: Id of the message, hex encoded
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: 0123456789abcdef
|
||||
|
||||
MessageStatusResponse:
|
||||
description: Information about an outbound message
|
||||
type: object
|
||||
properties:
|
||||
dst:
|
||||
description: IP address of the receiving node
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 449:abcd:0123:defa::1
|
||||
state:
|
||||
$ref: '#/components/schemas/TransmissionState'
|
||||
created:
|
||||
description: Unix timestamp of when this message was created
|
||||
type: integer
|
||||
format: int64
|
||||
example: 1649512789
|
||||
deadline:
|
||||
description: Unix timestamp of when this message will expire. If the message is not received before this, the system will give up
|
||||
type: integer
|
||||
format: int64
|
||||
example: 1649513089
|
||||
msgLen:
|
||||
description: Length of the message in bytes
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 27
|
||||
|
||||
TransmissionState:
|
||||
description: The state of an outbound message in its lifetime
|
||||
oneOf:
|
||||
- type: string
|
||||
enum: ['pending', 'received', 'read', 'aborted']
|
||||
example: 'received'
|
||||
- type: object
|
||||
properties:
|
||||
sending:
|
||||
type: object
|
||||
properties:
|
||||
pending:
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 5
|
||||
sent:
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 17
|
||||
acked:
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 3
|
||||
example: 'received'
|
||||
|
||||
PublicKeyResponse:
|
||||
description: Public key requested based on a node's IP
|
||||
type: object
|
||||
properties:
|
||||
NodePubKey:
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: 02468ace13579bdf02468ace13579bdf02468ace13579bdf02468ace13579bdf
|
||||
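As a quick illustration of the `/api/v1/pubkey/{mycelium_ip}` endpoint above, a minimal V sketch (this is an assumption-laden example: it assumes a node exposing its API on `localhost:8989`, and the address is just the example value from the spec):

```v
import net.http
import json

struct PublicKeyResponse {
	node_pub_key string @[json: 'NodePubKey']
}

ip := '5fd:7636:b80:9ad0::1' // example address from the spec
resp := http.get('http://localhost:8989/api/v1/pubkey/${ip}') or { panic(err) }
if resp.status_code == 200 {
	pk := json.decode(PublicKeyResponse, resp.body) or { panic(err) }
	println('public key: ${pk.node_pub_key}')
} else {
	println('public key not found (HTTP ${resp.status_code})')
}
```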
@@ -1,6 +1,13 @@
|
||||
# Mycelium Client
|
||||
|
||||
A V client library for interacting with the Mycelium messaging system. This client provides functionality for sending, receiving, and managing messages through a Mycelium server.
|
||||
A V client library for interacting with the Mycelium messaging system. This client provides functionality for configuring and inspecting a Mycelium node.
|
||||
|
||||
## Components
|
||||
|
||||
The Mycelium integration consists of two main components:
|
||||
|
||||
1. **Mycelium Client** (this package) - For interacting with a running Mycelium node
|
||||
2. **Mycelium Installer** (in `installers/net/mycelium/`) - For installing and managing Mycelium nodes
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -11,131 +18,101 @@ The client can be configured either through V code or using heroscript.
|
||||
```v
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
// Get default client instance
|
||||
mut client := mycelium.get()!
|
||||
|
||||
// By default connects to http://localhost:8989/api/v1/messages
|
||||
// To use a different server:
|
||||
mut client := mycelium.get(name: "custom", server_url: "http://myserver:8989/api/v1/messages")!
|
||||
// Get named client instance
|
||||
mut client := mycelium.get(name: "custom")!
|
||||
```
|
||||
|
||||
### Heroscript Configuration
|
||||
## Core Functions
|
||||
|
||||
```hero
|
||||
!!mycelium.configure
|
||||
name:'custom' # optional, defaults to 'default'
|
||||
server_url:'http://myserver:8989/api/v1/messages' # optional, defaults to localhost:8989
|
||||
```
|
||||
### Inspect Node
|
||||
|
||||
Note: Configuration is not needed if using a locally running Mycelium server with default settings.
|
||||
|
||||
## Example Script
|
||||
|
||||
Save as `mycelium_example.vsh`:
|
||||
Get information about the local Mycelium node:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
// Get node info including public key and address
|
||||
result := mycelium.inspect()!
|
||||
println('Public Key: ${result.public_key}')
|
||||
println('Address: ${result.address}')
|
||||
|
||||
// Get just the IP address
|
||||
addr := mycelium.ipaddr()
|
||||
println('IP Address: ${addr}')
|
||||
```
|
||||
|
||||
### Check Node Status
|
||||
|
||||
Check if the Mycelium node is running and reachable:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
is_running := mycelium.check()
|
||||
if is_running {
|
||||
println('Mycelium node is running and reachable')
|
||||
} else {
|
||||
println('Mycelium node is not running or unreachable')
|
||||
}
|
||||
```
|
||||
|
||||
### Sending and Receiving Messages
|
||||
|
||||
The client provides several functions for sending and receiving messages between nodes:
|
||||
|
||||
```v
|
||||
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
// Initialize client
|
||||
mut client := mycelium.get()!
|
||||
|
||||
// Send a message and wait for reply
|
||||
// Send a message to a node by public key
|
||||
// Parameters: public_key, payload, topic, wait_for_reply
|
||||
msg := client.send_msg(
|
||||
pk: "recipient_public_key"
|
||||
payload: "Hello!"
|
||||
wait: true // wait for reply (timeout 120s)
|
||||
'abc123...', // destination public key
|
||||
'Hello World', // message payload
|
||||
'greetings', // optional topic
|
||||
true // wait for reply
|
||||
)!
|
||||
println('Sent message ID: ${msg.id}')
|
||||
|
||||
// Receive messages
|
||||
// Parameters: wait_for_message, peek_only, topic_filter
|
||||
received := client.receive_msg(true, false, 'greetings')!
|
||||
println('Received message from: ${received.src_pk}')
|
||||
println('Message payload: ${received.payload}')
|
||||
|
||||
// Reply to a message
|
||||
client.reply_msg(
|
||||
received.id, // original message ID
|
||||
received.src_pk, // sender's public key
|
||||
'Got your message!', // reply payload
|
||||
'greetings' // topic
|
||||
)!
|
||||
println('Message sent with ID: ${msg.id}')
|
||||
|
||||
// Check message status
|
||||
status := client.get_msg_status(msg.id)!
|
||||
println('Message status: ${status.state}')
|
||||
|
||||
// Receive messages with timeout
|
||||
if incoming := client.receive_msg_opt(wait: true) {
|
||||
println('Received message: ${incoming.payload}')
|
||||
println('From: ${incoming.src_pk}')
|
||||
|
||||
// Reply to the message
|
||||
client.reply_msg(
|
||||
id: incoming.id
|
||||
pk: incoming.src_pk
|
||||
payload: "Got your message!"
|
||||
)!
|
||||
}
|
||||
println('Created at: ${status.created}')
|
||||
println('Expires at: ${status.deadline}')
|
||||
```
|
||||
|
||||
## API Reference
|
||||
The messaging API supports:
|
||||
- Sending messages to nodes identified by public key
|
||||
- Optional message topics for filtering
|
||||
- Waiting for replies when sending messages
|
||||
- Peeking at messages without removing them from the queue
|
||||
- Tracking message delivery status
|
||||
- Base64 encoded message payloads for binary data
|
||||
|
||||
### Sending Messages
|
||||
## Installation and Management
|
||||
|
||||
```v
|
||||
// Send a message to a specific public key
|
||||
// wait=true means wait for reply (timeout 120s)
|
||||
msg := client.send_msg(pk: "recipient_public_key", payload: "Hello!", wait: true)!
|
||||
For installing and managing Mycelium nodes, use the Mycelium Installer package located in `installers/net/mycelium/`. The installer provides functionality for:
|
||||
|
||||
// Get status of a sent message
|
||||
status := client.get_msg_status(id: "message_id")!
|
||||
```
|
||||
|
||||
### Receiving Messages
|
||||
|
||||
```v
|
||||
// Receive a message (non-blocking)
|
||||
msg := client.receive_msg(wait: false)!
|
||||
|
||||
// Receive a message with timeout (blocking for 60s)
|
||||
msg := client.receive_msg(wait: true)!
|
||||
|
||||
// Receive a message (returns none if no message available)
|
||||
if msg := client.receive_msg_opt(wait: false) {
|
||||
println('Received: ${msg.payload}')
|
||||
}
|
||||
```
|
||||
|
||||
### Replying to Messages
|
||||
|
||||
```v
|
||||
// Reply to a specific message
|
||||
client.reply_msg(
|
||||
id: "original_message_id",
|
||||
pk: "sender_public_key",
|
||||
payload: "Reply message"
|
||||
)!
|
||||
```
|
||||
|
||||
## Message Types
|
||||
|
||||
### InboundMessage
|
||||
```v
|
||||
struct InboundMessage {
|
||||
id string
|
||||
src_ip string
|
||||
src_pk string
|
||||
dst_ip string
|
||||
dst_pk string
|
||||
payload string
|
||||
}
|
||||
```
|
||||
|
||||
### MessageStatusResponse
|
||||
```v
|
||||
struct MessageStatusResponse {
|
||||
id string
|
||||
dst string
|
||||
state string
|
||||
created string
|
||||
deadline string
|
||||
msg_len string
|
||||
}
|
||||
```
|
||||
|
||||
## Heroscript Complete Example
|
||||
|
||||
```hero
|
||||
!!mycelium.configure
|
||||
name:'mycelium'
|
||||
server_url:'http://localhost:8989/api/v1/messages'
|
||||
|
||||
# More heroscript commands can be added here as the API expands
|
||||
- Installing Mycelium nodes
|
||||
- Starting/stopping nodes
|
||||
- Managing node configuration
|
||||
- Setting up TUN interfaces
|
||||
- Configuring peer connections
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
module postgresql_client
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import db.pg
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
// pub struct PostgresClient {
|
||||
// base.BaseConfig
|
||||
// pub mut:
|
||||
// config Config
|
||||
// db pg.DB
|
||||
// }
|
||||
|
||||
// @[params]
|
||||
// pub struct ClientArgs {
|
||||
// pub mut:
|
||||
// instance string @[required]
|
||||
// // playargs ?play.PlayArgs
|
||||
// }
|
||||
|
||||
// pub fn get(clientargs ClientArgs) !PostgresClient {
|
||||
// // mut plargs := clientargs.playargs or {
|
||||
// // // play.PlayArgs
|
||||
// // // {
|
||||
// // // }
|
||||
// // }
|
||||
|
||||
// // mut cfg := configurator(clientargs.instance, plargs)!
|
||||
// // mut args := cfg.get()!
|
||||
|
||||
// args.instance = texttools.name_fix(args.instance)
|
||||
// if args.instance == '' {
|
||||
// args.instance = 'default'
|
||||
// }
|
||||
// // console.print_debug(args)
|
||||
// mut db := pg.connect(
|
||||
// host: args.host
|
||||
// user: args.user
|
||||
// port: args.port
|
||||
// password: args.password
|
||||
// dbname: args.dbname
|
||||
// )!
|
||||
// // console.print_debug(postgres_client)
|
||||
// return PostgresClient{
|
||||
// instance: args.instance
|
||||
// db: db
|
||||
// config: args
|
||||
// }
|
||||
// }
|
||||
|
||||
// struct LocalConfig {
|
||||
// name string
|
||||
// path string
|
||||
// passwd string
|
||||
// }
|
||||
@@ -55,7 +55,7 @@ fn obj_init(obj_ PostgresClient) !PostgresClient {
|
||||
return obj
|
||||
}
|
||||
|
||||
fn (mut self PostgresClient) db() !pg.DB {
|
||||
pub fn (mut self PostgresClient) db() !pg.DB {
|
||||
// console.print_debug(args)
|
||||
mut db := self.db_ or {
|
||||
mut db_ := pg.connect(
|
||||
|
||||
@@ -9,29 +9,52 @@ The PostgreSQL client can be configured using HeroScript. Configuration settings
|
||||
### Basic Configuration Example
|
||||
|
||||
```v
|
||||
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.core
|
||||
import os
|
||||
import freeflowuniverse.herolib.clients.postgresql_client
|
||||
|
||||
|
||||
// Configure PostgreSQL client
|
||||
heroscript := "
|
||||
!!postgresql_client.configure
|
||||
name:'test'
|
||||
user: 'root'
|
||||
port: 5432
|
||||
host: 'localhost'
|
||||
password: '1234'
|
||||
dbname: 'postgres'
|
||||
name:'test'
|
||||
user: 'postgres'
|
||||
port: 5432
|
||||
host: 'localhost'
|
||||
password: '1234'
|
||||
dbname: 'postgres'
|
||||
"
|
||||
|
||||
// Process the heroscript
|
||||
postgresql_client.play(heroscript:heroscript)!
|
||||
// Process the heroscript configuration
|
||||
postgresql_client.play(heroscript: heroscript)!
|
||||
|
||||
// Get the configured client
|
||||
mut db_client := postgresql_client.get(name:"test")!
|
||||
mut db_client := postgresql_client.get(name: "test")!
|
||||
|
||||
// Check if test database exists, create if not
|
||||
if !db_client.db_exists('test')! {
|
||||
println('Creating database test...')
|
||||
db_client.db_create('test')!
|
||||
}
|
||||
|
||||
// Switch to test database
|
||||
db_client.dbname = 'test'
|
||||
|
||||
// Create table if not exists
|
||||
create_table_sql := "CREATE TABLE IF NOT EXISTS users (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(100) NOT NULL,
|
||||
email VARCHAR(255) UNIQUE NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
)"
|
||||
|
||||
println('Creating table users if not exists...')
|
||||
db_client.exec(create_table_sql)!
|
||||
|
||||
println('Database and table setup completed successfully!')
|
||||
|
||||
|
||||
println(db_client)
|
||||
```
|
||||
|
||||
### Configuration Parameters
|
||||
@@ -94,20 +117,3 @@ db_client.backup(dest: '/path/to/backup/dir')!
|
||||
|
||||
Backups are created in custom PostgreSQL format (.bak files) which can be restored using pg_restore.
|
||||
|
||||
## Default Configuration
|
||||
|
||||
If no configuration is provided, the client uses these default settings:
|
||||
|
||||
```v
|
||||
heroscript := "
|
||||
!!postgresql_client.configure
|
||||
name:'default'
|
||||
user: 'root'
|
||||
port: 5432
|
||||
host: 'localhost'
|
||||
password: ''
|
||||
dbname: 'postgres'
|
||||
"
|
||||
```
|
||||
|
||||
You can override these defaults by providing your own configuration using the HeroScript configure command.
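For example, a minimal sketch that relies on these defaults (assumes a local PostgreSQL server reachable with the default credentials above):

```v
import freeflowuniverse.herolib.clients.postgresql_client

// no configure step needed; the 'default' instance is created from the defaults above
mut db_client := postgresql_client.get()!

if !db_client.db_exists('test')! {
	db_client.db_create('test')!
}
db_client.dbname = 'test'
db_client.exec('SELECT 1')!
```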
|
||||
|
||||
8
lib/clients/runpod/.heroscript
Normal file
@@ -0,0 +1,8 @@
|
||||
|
||||
!!hero_code.generate_client
|
||||
name:'runpod'
|
||||
classname:'RunPod'
|
||||
singleton:0
|
||||
default:1
|
||||
hasconfig:1
|
||||
reset:0
|
||||
169
lib/clients/runpod/client.v
Normal file
@@ -0,0 +1,169 @@
|
||||
module runpod
|
||||
|
||||
import json
|
||||
|
||||
pub struct EnvironmentVariableInput {
|
||||
pub mut:
|
||||
key string
|
||||
value string
|
||||
}
|
||||
|
||||
// Represents the nested machine structure in the response
|
||||
pub struct Machine {
|
||||
pub:
|
||||
pod_host_id string @[json: 'podHostId']
|
||||
}
|
||||
|
||||
// Response structure for the mutation
|
||||
pub struct PodResult {
|
||||
pub:
|
||||
id string @[json: 'id']
|
||||
image_name string @[json: 'imageName']
|
||||
env []string @[json: 'env']
|
||||
machine_id int @[json: 'machineId']
|
||||
machine Machine @[json: 'machine']
|
||||
desired_status string @[json: 'desiredStatus']
|
||||
}
|
||||
|
||||
// Input structure for the mutation
|
||||
@[params]
|
||||
pub struct PodFindAndDeployOnDemandRequest {
|
||||
pub mut:
|
||||
cloud_type string @[json: 'cloudType']
|
||||
gpu_count int @[json: 'gpuCount']
|
||||
volume_in_gb int @[json: 'volumeInGb']
|
||||
container_disk_in_gb int @[json: 'containerDiskInGb']
|
||||
min_vcpu_count int @[json: 'minVcpuCount']
|
||||
min_memory_in_gb int @[json: 'minMemoryInGb']
|
||||
gpu_type_id string @[json: 'gpuTypeId']
|
||||
name string @[json: 'name']
|
||||
image_name string @[json: 'imageName']
|
||||
docker_args string @[json: 'dockerArgs']
|
||||
ports string @[json: 'ports']
|
||||
volume_mount_path string @[json: 'volumeMountPath']
|
||||
env []EnvironmentVariableInput @[json: 'env']
|
||||
}
|
||||
|
||||
pub fn (p PodFindAndDeployOnDemandRequest) json_str() string {
|
||||
return json.encode(p)
|
||||
}
|
||||
|
||||
// Create On-Demand Pod
|
||||
pub fn (mut rp RunPod) create_on_demand_pod(input PodFindAndDeployOnDemandRequest) !PodResult {
|
||||
return rp.create_on_demand_pod_request(input)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PodRentInterruptableInput {
|
||||
pub mut:
|
||||
port int @[json: 'port']
|
||||
network_volume_id string @[json: 'networkVolumeId'; omitempty]
|
||||
start_jupyter bool @[json: 'startJupyter']
|
||||
start_ssh bool @[json: 'startSsh']
|
||||
bid_per_gpu f32 @[json: 'bidPerGpu']
|
||||
cloud_type string @[json: 'cloudType']
|
||||
container_disk_in_gb int @[json: 'containerDiskInGb']
|
||||
country_code string @[json: 'countryCode'; omitempty]
|
||||
docker_args string @[json: 'dockerArgs'; omitempty]
|
||||
env []EnvironmentVariableInput @[json: 'env']
|
||||
gpu_count int @[json: 'gpuCount']
|
||||
gpu_type_id string @[json: 'gpuTypeId'; omitempty]
|
||||
image_name string @[json: 'imageName'; omitempty]
|
||||
min_disk int @[json: 'minDisk']
|
||||
min_download int @[json: 'minDownload']
|
||||
min_memory_in_gb int @[json: 'minMemoryInGb']
|
||||
min_upload int @[json: 'minUpload']
|
||||
min_vcpu_count int @[json: 'minVcpuCount']
|
||||
name string @[json: 'name'; omitempty]
|
||||
ports string @[json: 'ports'; omitempty]
|
||||
stop_after string @[json: 'stopAfter'; omitempty]
|
||||
support_public_ip bool @[json: 'supportPublicIp']
|
||||
template_id string @[json: 'templateId'; omitempty]
|
||||
terminate_after string @[json: 'terminateAfter'; omitempty]
|
||||
volume_in_gb int @[json: 'volumeInGb']
|
||||
volume_key string @[json: 'volumeKey'; omitempty]
|
||||
volume_mount_path string @[json: 'volumeMountPath'; omitempty]
|
||||
data_center_id string @[json: 'dataCenterId'; omitempty]
|
||||
cuda_version string @[json: 'cudaVersion'; omitempty]
|
||||
allowed_cuda_versions []string @[json: 'allowedCudaVersions']
|
||||
}
|
||||
|
||||
pub fn (p PodRentInterruptableInput) json_str() string {
|
||||
return json.encode(p)
|
||||
}
|
||||
|
||||
// Create Spot Pod
|
||||
pub fn (mut rp RunPod) create_spot_pod(input PodRentInterruptableInput) !PodResult {
|
||||
return rp.create_spot_pod_request(input)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PodResumeInput {
|
||||
pub mut:
|
||||
pod_id string @[json: 'podId'; required]
|
||||
gpu_count int @[json: 'gpuCount']
|
||||
sync_machine bool @[json: 'syncMachine']
|
||||
compute_type string @[json: 'computeType'; omitempty]
|
||||
}
|
||||
|
||||
pub fn (p PodResumeInput) json_str() string {
|
||||
return json.encode(p)
|
||||
}
|
||||
|
||||
// Start On-Demand Pod
|
||||
pub fn (mut rp RunPod) start_on_demand_pod(input PodResumeInput) !PodResult {
|
||||
return rp.start_on_demand_pod_request(input)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PodBidResumeInput {
|
||||
pub mut:
|
||||
pod_id string @[json: 'podId'; required]
|
||||
gpu_count int @[json: 'gpuCount']
|
||||
bid_per_gpu f32 @[json: 'bidPerGpu']
|
||||
}
|
||||
|
||||
pub fn (p PodBidResumeInput) json_str() string {
|
||||
return json.encode(p)
|
||||
}
|
||||
|
||||
// Start Spot Pod
|
||||
pub fn (mut rp RunPod) start_spot_pod(input PodBidResumeInput) !PodResult {
|
||||
return rp.start_spot_pod_request(input)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PodStopInput {
|
||||
pub:
|
||||
pod_id string @[json: 'podId']
|
||||
increment_version bool @[json: 'incrementVersion']
|
||||
}
|
||||
|
||||
pub fn (p PodStopInput) json_str() string {
|
||||
return json.encode(p)
|
||||
}
|
||||
|
||||
// Stop Pod
|
||||
pub fn (mut rp RunPod) stop_pod(input PodStopInput) !PodResult {
|
||||
return rp.stop_pod_request(input)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PodTerminateInput {
|
||||
pub:
|
||||
pod_id string @[json: 'podId']
|
||||
}
|
||||
|
||||
pub fn (mut rp RunPod) terminate_pod(input PodTerminateInput) ! {
|
||||
rp.terminate_pod_request(input)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PodFilter {
|
||||
pub:
|
||||
pod_id string @[json: 'podId'; required]
|
||||
}
|
||||
|
||||
pub fn (mut rp RunPod) get_pod(input PodFilter) !PodResult {
|
||||
return rp.get_pod_request(input)!
|
||||
}
|
||||
42
lib/clients/runpod/readme.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# runpod

To get started

```v
import freeflowuniverse.herolib.clients.runpod

mut client := runpod.get()!

// client...
```

## Example heroscript

```hero
!!runpod.configure
    name:'default'
    api_key:'...'
    base_url:'https://api.runpod.io/'
```

**RunPod API Example**

This script demonstrates creating, stopping, starting, and terminating RunPod pods using the RunPod API. It creates both on-demand and spot pods.

**Requirements**

* Environment variable `RUNPOD_API_KEY` set with your RunPod API key

**How to Run**

- See the example in: examples/develop/runpod/runpod_example.vsh
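As a quick end-to-end sketch of the client API (the cloud type, image name and GPU type below are illustrative placeholders, not recommendations; assumes `RUNPOD_API_KEY` is configured as above):

```v
import freeflowuniverse.herolib.clients.runpod

mut client := runpod.get()!

// create an on-demand pod; the fields mirror PodFindAndDeployOnDemandRequest in client.v
pod := client.create_on_demand_pod(
	cloud_type:           'ALL'
	name:                 'example-pod'
	image_name:           'runpod/pytorch'
	gpu_type_id:          'NVIDIA GeForce RTX 4090'
	gpu_count:            1
	volume_in_gb:         40
	container_disk_in_gb: 40
	min_vcpu_count:       2
	min_memory_in_gb:     15
	volume_mount_path:    '/workspace'
)!
println('created pod ${pod.id} on host ${pod.machine.pod_host_id}')

// stop it again and terminate it when no longer needed
client.stop_pod(pod_id: pod.id)!
client.terminate_pod(pod_id: pod.id)!
```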
|
||||
102
lib/clients/runpod/runpod_factory_.v
Normal file
@@ -0,0 +1,102 @@
|
||||
module runpod
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
__global (
|
||||
runpod_global map[string]&RunPod
|
||||
runpod_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = runpod_default
|
||||
}
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&RunPod {
|
||||
mut args := args_get(args_)
|
||||
if args.name !in runpod_global {
|
||||
if args.name == 'default' {
|
||||
if !config_exists(args) {
|
||||
if default {
|
||||
config_save(args)!
|
||||
}
|
||||
}
|
||||
config_load(args)!
|
||||
}
|
||||
}
|
||||
return runpod_global[args.name] or {
|
||||
println(runpod_global)
|
||||
panic('could not get config for runpod with name:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
fn config_exists(args_ ArgsGet) bool {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('runpod', args.name)
|
||||
}
|
||||
|
||||
fn config_load(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
mut heroscript := context.hero_config_get('runpod', args.name)!
|
||||
play(heroscript: heroscript)!
|
||||
}
|
||||
|
||||
fn config_save(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_set('runpod', args.name, heroscript_default()!)!
|
||||
}
|
||||
|
||||
fn set(o RunPod) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
runpod_global[o.name] = &o2
|
||||
runpod_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
if args.heroscript == '' {
|
||||
args.heroscript = heroscript_default()!
|
||||
}
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'runpod.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
mut p := install_action.params
|
||||
cfg_play(p)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// switch instance to be used for runpod
|
||||
pub fn switch(name string) {
|
||||
runpod_default = name
|
||||
}
|
||||
354
lib/clients/runpod/runpod_http.v
Normal file
@@ -0,0 +1,354 @@
|
||||
module runpod
|
||||
|
||||
import x.json2
|
||||
import net.http { Method }
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
|
||||
// GraphQL response wrapper
|
||||
struct GqlResponse[T] {
|
||||
pub mut:
|
||||
data map[string]T
|
||||
errors []map[string]string
|
||||
}
|
||||
|
||||
// #### Internal method doing a network call to create a new on-demand pod.
// - Build the required query based on the input sent by the user and send the request.
// - Decode the response received from the API into two objects `Data` and `Error`.
// - The data field should contain the pod details, matching the `PodResult` struct.
// - The error field should contain the error message.
|
||||
fn (mut rp RunPod) create_on_demand_pod_request(input PodFindAndDeployOnDemandRequest) !PodResult {
|
||||
mut fields := []Field{}
|
||||
mut machine_fields := []Field{}
|
||||
mut output_fields := []Field{}
|
||||
mut builder := QueryBuilder{}
|
||||
|
||||
machine_fields << new_field(name: 'podHostId')
|
||||
output_fields << new_field(name: 'id')
|
||||
output_fields << new_field(name: 'imageName')
|
||||
output_fields << new_field(name: 'env')
|
||||
output_fields << new_field(name: 'machineId')
|
||||
output_fields << new_field(name: 'desiredStatus')
|
||||
output_fields << new_field(name: 'machine', sub_fields: machine_fields)
|
||||
fields << new_field(
|
||||
name: 'podFindAndDeployOnDemand'
|
||||
arguments: {
|
||||
'input': '\$arguments'
|
||||
}
|
||||
sub_fields: output_fields
|
||||
)
|
||||
|
||||
builder.add_operation(
|
||||
operation: .mutation
|
||||
fields: fields
|
||||
variables: {
|
||||
'\$arguments': 'PodFindAndDeployOnDemandInput'
|
||||
}
|
||||
)
|
||||
mut variables := {
|
||||
'arguments': json2.Any(type_to_map(input)!)
|
||||
}
|
||||
query := builder.build_query(variables: variables)
|
||||
|
||||
response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', query)!
|
||||
return response.data['podFindAndDeployOnDemand'] or {
|
||||
return error('Could not find "podFindAndDeployOnDemand" in response data: ${response.data}')
|
||||
}
|
||||
}
|
||||
|
||||
// #### Internal method doing a network call to create a new spot pod.
// - Build the required query based on the input sent by the user and send the request.
// - Decode the response received from the API into two objects `Data` and `Error`.
// - The data field should contain the pod details, matching the `PodResult` struct.
// - The error field should contain the error message.
|
||||
fn (mut rp RunPod) create_spot_pod_request(input PodRentInterruptableInput) !PodResult {
|
||||
mut fields := []Field{}
|
||||
mut machine_fields := []Field{}
|
||||
mut output_fields := []Field{}
|
||||
mut builder := QueryBuilder{}
|
||||
|
||||
machine_fields << new_field(name: 'podHostId')
|
||||
output_fields << new_field(name: 'id')
|
||||
output_fields << new_field(name: 'imageName')
|
||||
output_fields << new_field(name: 'env')
|
||||
output_fields << new_field(name: 'machineId')
|
||||
output_fields << new_field(name: 'desiredStatus')
|
||||
output_fields << new_field(name: 'machine', sub_fields: machine_fields)
|
||||
fields << new_field(
|
||||
name: 'podRentInterruptable'
|
||||
arguments: {
|
||||
'input': '\$arguments'
|
||||
}
|
||||
sub_fields: output_fields
|
||||
)
|
||||
|
||||
builder.add_operation(
|
||||
operation: .mutation
|
||||
fields: fields
|
||||
variables: {
|
||||
'\$arguments': 'PodRentInterruptableInput!'
|
||||
}
|
||||
)
|
||||
mut variables := {
|
||||
'arguments': json2.Any(type_to_map(input)!)
|
||||
}
|
||||
query := builder.build_query(variables: variables)
|
||||
|
||||
response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', query)!
|
||||
return response.data['podRentInterruptable'] or {
|
||||
return error('Could not find "podRentInterruptable" in response data: ${response.data}')
|
||||
}
|
||||
}
|
||||
|
||||
// #### Internal method doing a network call to start an on-demand pod.
// - Build the required query based on the input sent by the user and send the request.
// - Decode the response received from the API into two objects `Data` and `Error`.
// - The data field should contain the pod details, matching the `PodResult` struct.
// - The error field should contain the error message.
|
||||
fn (mut rp RunPod) start_on_demand_pod_request(input PodResumeInput) !PodResult {
|
||||
mut fields := []Field{}
|
||||
mut machine_fields := []Field{}
|
||||
mut output_fields := []Field{}
|
||||
mut builder := QueryBuilder{}
|
||||
|
||||
machine_fields << new_field(name: 'podHostId')
|
||||
output_fields << new_field(name: 'id')
|
||||
output_fields << new_field(name: 'imageName')
|
||||
output_fields << new_field(name: 'env')
|
||||
output_fields << new_field(name: 'machineId')
|
||||
output_fields << new_field(name: 'desiredStatus')
|
||||
output_fields << new_field(name: 'machine', sub_fields: machine_fields)
|
||||
fields << new_field(
|
||||
name: 'podResume'
|
||||
arguments: {
|
||||
'input': '\$arguments'
|
||||
}
|
||||
sub_fields: output_fields
|
||||
)
|
||||
|
||||
builder.add_operation(
|
||||
operation: .mutation
|
||||
fields: fields
|
||||
variables: {
|
||||
'\$arguments': 'PodResumeInput!'
|
||||
}
|
||||
)
|
||||
mut variables := {
|
||||
'arguments': json2.Any(type_to_map(input)!)
|
||||
}
|
||||
query := builder.build_query(variables: variables)
|
||||
|
||||
response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', query)!
|
||||
return response.data['podResume'] or {
|
||||
return error('Could not find "podResume" in response data: ${response.data}')
|
||||
}
|
||||
}
|
||||
|
||||
// #### Internal method doing a network call to start a spot pod.
// - Build the required query based on the input sent by the user and send the request.
// - Decode the response received from the API into two objects `Data` and `Error`.
// - The data field should contain the pod details, matching the `PodResult` struct.
// - The error field should contain the error message.
|
||||
fn (mut rp RunPod) start_spot_pod_request(input PodBidResumeInput) !PodResult {
|
||||
mut fields := []Field{}
|
||||
mut machine_fields := []Field{}
|
||||
mut output_fields := []Field{}
|
||||
mut builder := QueryBuilder{}
|
||||
|
||||
machine_fields << new_field(name: 'podHostId')
|
||||
output_fields << new_field(name: 'id')
|
||||
output_fields << new_field(name: 'imageName')
|
||||
output_fields << new_field(name: 'env')
|
||||
output_fields << new_field(name: 'machineId')
|
||||
output_fields << new_field(name: 'desiredStatus')
|
||||
output_fields << new_field(name: 'machine', sub_fields: machine_fields)
|
||||
fields << new_field(
|
||||
name: 'podBidResume'
|
||||
arguments: {
|
||||
'input': '\$arguments'
|
||||
}
|
||||
sub_fields: output_fields
|
||||
)
|
||||
|
||||
builder.add_operation(
|
||||
operation: .mutation
|
||||
fields: fields
|
||||
variables: {
|
||||
'\$arguments': 'PodBidResumeInput!'
|
||||
}
|
||||
)
|
||||
mut variables := {
|
||||
'arguments': json2.Any(type_to_map(input)!)
|
||||
}
|
||||
query := builder.build_query(variables: variables)
|
||||
|
||||
response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', query)!
|
||||
return response.data['podBidResume'] or {
|
||||
return error('Could not find "podBidResume" in response data: ${response.data}')
|
||||
}
|
||||
}
|
||||
|
||||
// #### Internal method doing a network call to stop a pod.
// - Build the required query based on the input sent by the user and send the request.
// - Decode the response received from the API into two objects `Data` and `Error`.
// - The data field should contain the pod details, matching the `PodResult` struct.
// - The error field should contain the error message.
|
||||
fn (mut rp RunPod) stop_pod_request(input PodStopInput) !PodResult {
|
||||
mut fields := []Field{}
|
||||
mut machine_fields := []Field{}
|
||||
mut output_fields := []Field{}
|
||||
mut builder := QueryBuilder{}
|
||||
|
||||
machine_fields << new_field(name: 'podHostId')
|
||||
output_fields << new_field(name: 'id')
|
||||
output_fields << new_field(name: 'imageName')
|
||||
output_fields << new_field(name: 'env')
|
||||
output_fields << new_field(name: 'machineId')
|
||||
output_fields << new_field(name: 'desiredStatus')
|
||||
output_fields << new_field(name: 'machine', sub_fields: machine_fields)
|
||||
fields << new_field(
|
||||
name: 'podStop'
|
||||
arguments: {
|
||||
'input': '\$arguments'
|
||||
}
|
||||
sub_fields: output_fields
|
||||
)
|
||||
|
||||
builder.add_operation(
|
||||
operation: .mutation
|
||||
fields: fields
|
||||
variables: {
|
||||
'\$arguments': 'PodStopInput!'
|
||||
}
|
||||
)
|
||||
mut variables := {
|
||||
'arguments': json2.Any(type_to_map(input)!)
|
||||
}
|
||||
query := builder.build_query(variables: variables)
|
||||
|
||||
response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', query)!
|
||||
return response.data['podStop'] or {
|
||||
return error('Could not find "podStop" in response data: ${response.data}')
|
||||
}
|
||||
}
|
||||
|
||||
fn (mut rp RunPod) terminate_pod_request(input PodTerminateInput) ! {
|
||||
mut fields := []Field{}
|
||||
mut builder := QueryBuilder{}
|
||||
|
||||
fields << new_field(
|
||||
name: 'podTerminate'
|
||||
arguments: {
|
||||
'input': '\$arguments'
|
||||
}
|
||||
)
|
||||
|
||||
builder.add_operation(
|
||||
operation: .mutation
|
||||
fields: fields
|
||||
variables: {
|
||||
'\$arguments': 'PodTerminateInput!'
|
||||
}
|
||||
)
|
||||
mut variables := {
|
||||
'arguments': json2.Any(type_to_map(input)!)
|
||||
}
|
||||
query := builder.build_query(variables: variables)
|
||||
|
||||
response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', query)!
|
||||
_ := response.data['podTerminate'] or {
|
||||
return error('Could not find "podTerminate" in response data: ${response.data}')
|
||||
}
|
||||
}
|
||||
|
||||
fn (mut rp RunPod) get_pod_request(input PodFilter) !PodResult {
|
||||
mut fields := []Field{}
|
||||
mut machine_fields := []Field{}
|
||||
mut output_fields := []Field{}
|
||||
mut builder := QueryBuilder{}
|
||||
|
||||
machine_fields << new_field(name: 'podHostId')
|
||||
output_fields << new_field(name: 'id')
|
||||
output_fields << new_field(name: 'imageName')
|
||||
output_fields << new_field(name: 'env')
|
||||
output_fields << new_field(name: 'machineId')
|
||||
output_fields << new_field(name: 'desiredStatus')
|
||||
output_fields << new_field(name: 'machine', sub_fields: machine_fields)
|
||||
fields << new_field(
|
||||
name: 'pod'
|
||||
arguments: {
|
||||
'input': '\$arguments'
|
||||
}
|
||||
sub_fields: output_fields
|
||||
)
|
||||
|
||||
builder.add_operation(
|
||||
operation: .query
|
||||
fields: fields
|
||||
variables: {
|
||||
'\$arguments': 'PodFilter'
|
||||
}
|
||||
)
|
||||
mut variables := {
|
||||
'arguments': json2.Any(type_to_map(input)!)
|
||||
}
|
||||
query := builder.build_query(variables: variables)
|
||||
|
||||
response := rp.make_request[GqlResponse[PodResult]](.post, '/graphql', query)!
|
||||
return response.data['pod'] or {
|
||||
return error('Could not find "pod" in response data: ${response.data}')
|
||||
}
|
||||
}
|
||||
|
||||
// Creates the HTTP connection used to talk to the RunPod API; GraphQL queries are sent over this connection with the configured bearer token.
|
||||
fn (mut rp RunPod) httpclient() !&httpconnection.HTTPConnection {
|
||||
mut http_conn := httpconnection.new(
|
||||
name: 'runpod_vclient_${rp.name}'
|
||||
url: rp.base_url
|
||||
cache: true
|
||||
retry: 3
|
||||
)!
|
||||
http_conn.default_header.add(.authorization, 'Bearer ${rp.api_key}')
|
||||
return http_conn
|
||||
}
|
||||
|
||||
// Sends an HTTP request to the RunPod API with the specified method, path, and data.
|
||||
fn (mut rp RunPod) make_request[T](method Method, path string, data string) !T {
|
||||
mut request := httpconnection.Request{
|
||||
prefix: path
|
||||
data: data
|
||||
debug: true
|
||||
dataformat: .json
|
||||
}
|
||||
|
||||
mut http_client := rp.httpclient()!
|
||||
mut response := T{}
|
||||
|
||||
match method {
|
||||
.get {
|
||||
request.method = .get
|
||||
response = http_client.get_json_generic[T](request)!
|
||||
}
|
||||
.post {
|
||||
request.method = .post
|
||||
response = http_client.post_json_generic[T](request)!
|
||||
}
|
||||
.put {
|
||||
request.method = .put
|
||||
response = http_client.put_json_generic[T](request)!
|
||||
}
|
||||
.delete {
|
||||
request.method = .delete
|
||||
response = http_client.delete_json_generic[T](request)!
|
||||
}
|
||||
else {
|
||||
return error('unsupported method: ${method}')
|
||||
}
|
||||
}
|
||||
|
||||
if response.errors.len > 0 {
|
||||
return error('Error while sending the request due to: ${response.errors[0]['message']}')
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
43
lib/clients/runpod/runpod_model.v
Normal file
@@ -0,0 +1,43 @@
|
||||
module runpod
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import os
|
||||
|
||||
pub const version = '1.14.3'
|
||||
const singleton = false
|
||||
const default = true
|
||||
|
||||
// heroscript_default returns the default heroscript configuration for RunPod
|
||||
pub fn heroscript_default() !string {
|
||||
return "
|
||||
!!runpod.configure
|
||||
name:'default'
|
||||
api_key:'${os.getenv('RUNPOD_API_KEY')}'
|
||||
base_url:'https://api.runpod.io/'
|
||||
"
|
||||
}
|
||||
|
||||
// RunPod represents a RunPod client instance
|
||||
@[heap]
|
||||
pub struct RunPod {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
api_key string
|
||||
base_url string = 'https://api.runpod.io/'
|
||||
}
|
||||
|
||||
fn cfg_play(p paramsparser.Params) ! {
|
||||
// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
|
||||
mut mycfg := RunPod{
|
||||
name: p.get_default('name', 'default')!
|
||||
api_key: p.get_default('api_key', os.getenv('RUNPOD_API_KEY'))!
|
||||
base_url: p.get_default('base_url', 'https://api.runpod.io/')!
|
||||
}
|
||||
set(mycfg)!
|
||||
}
|
||||
|
||||
fn obj_init(obj_ RunPod) !RunPod {
|
||||
// never call get here, only thing we can do here is work on object itself
|
||||
mut obj := obj_
|
||||
return obj
|
||||
}
|
||||
118
lib/clients/runpod/utils.v
Normal file
@@ -0,0 +1,118 @@
|
||||
module runpod
|
||||
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import x.json2
|
||||
|
||||
enum OperationType {
|
||||
query
|
||||
mutation
|
||||
}
|
||||
|
||||
struct QueryBuilder {
|
||||
pub mut:
|
||||
operation OperationType
|
||||
fields []Field
|
||||
variables map[string]string
|
||||
}
|
||||
|
||||
struct Field {
|
||||
name string
|
||||
arguments map[string]string
|
||||
sub_fields []Field
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct NewFieldArgs {
|
||||
pub:
|
||||
name string
|
||||
arguments map[string]string
|
||||
sub_fields []Field
|
||||
}
|
||||
|
||||
fn new_field(args NewFieldArgs) Field {
|
||||
return Field{
|
||||
name: args.name
|
||||
arguments: args.arguments
|
||||
sub_fields: args.sub_fields
|
||||
}
|
||||
}
|
||||
|
||||
fn build_arguments(args map[string]string) string {
|
||||
if args.len == 0 {
|
||||
return ''
|
||||
}
|
||||
|
||||
mut sb := ''
|
||||
sb += '('
|
||||
|
||||
for key, value in args {
|
||||
if value.len == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
sb += '${key}: ${value}, '
|
||||
}
|
||||
|
||||
return sb.trim_right(', ') + ')'
|
||||
}
|
||||
|
||||
fn build_fields(fields []Field) string {
|
||||
mut sb := ' { '
|
||||
for field in fields {
|
||||
sb += field.name
|
||||
if field.arguments.len > 0 {
|
||||
sb += build_arguments(field.arguments)
|
||||
}
|
||||
|
||||
if field.sub_fields.len > 0 {
|
||||
sb += build_fields(field.sub_fields)
|
||||
}
|
||||
|
||||
sb += ' '
|
||||
}
|
||||
sb += ' } '
|
||||
return sb
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct AddOperationArgs {
|
||||
pub:
|
||||
operation OperationType
|
||||
fields []Field
|
||||
variables map[string]string
|
||||
}
|
||||
|
||||
fn (mut q QueryBuilder) add_operation(args AddOperationArgs) {
|
||||
q.operation = args.operation
|
||||
q.fields = args.fields
|
||||
q.variables = args.variables.clone()
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct BuildQueryArgs {
|
||||
pub:
|
||||
variables map[string]json2.Any
|
||||
}
|
||||
|
||||
fn (q QueryBuilder) build_query(args BuildQueryArgs) string {
|
||||
mut query := ''
|
||||
query += '${q.operation}' + ' myOperation'
|
||||
|
||||
if q.variables.len > 0 {
|
||||
query += build_arguments(q.variables)
|
||||
}
|
||||
|
||||
query += build_fields(q.fields)
|
||||
|
||||
mut q_map := {
|
||||
'query': json2.Any(query)
|
||||
'variables': json2.Any(args.variables)
|
||||
}
|
||||
|
||||
return json2.encode(q_map)
|
||||
}
|
||||
|
||||
fn type_to_map[T](t T) !map[string]json2.Any {
|
||||
encoded_input := json2.encode(t)
|
||||
return json2.raw_decode(encoded_input)!.as_map()
|
||||
}
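To make the builder above concrete, this is approximately the request body that `build_query` assembles for the `pod` query in `get_pod_request` (shown as a plain V string purely for illustration; the real output differs in whitespace, and the `podId` value is a placeholder):

```v
// illustrative only: approximate JSON body posted to /graphql for the `pod` query
body := '{
	"query": "query myOperation(\$arguments: PodFilter) { pod(input: \$arguments) { id imageName env machineId desiredStatus machine { podHostId } } }",
	"variables": { "arguments": { "podId": "..." } }
}'
println(body)
```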
|
||||
8
lib/clients/vastai/.heroscript
Normal file
@@ -0,0 +1,8 @@
|
||||
|
||||
!!hero_code.generate_client
|
||||
name:'vastai'
|
||||
classname:'VastAI'
|
||||
singleton:1
|
||||
default:1
|
||||
hasconfig:1
|
||||
reset:0
|
||||
360
lib/clients/vastai/client.v
Normal file
@@ -0,0 +1,360 @@
|
||||
module vastai
|
||||
|
||||
import json
|
||||
|
||||
// Represents a GPU offer from Vast.ai
|
||||
pub struct GPUOffer {
|
||||
pub:
|
||||
id int // Unique instance ID
|
||||
cuda_max_good int // Maximum reliable CUDA version
|
||||
gpu_name string // Name of the GPU
|
||||
gpu_ram int // GPU RAM in MB
|
||||
num_gpus int // Number of GPUs
|
||||
dlperf f64 // Deep Learning Performance score
|
||||
dlperf_per_dphtotal f64 // Performance per dollar per hour
|
||||
reliability f64 // Instance reliability score
|
||||
total_flops f64 // Total FLOPS
|
||||
credit_discount f64 // Credit discount
|
||||
rented bool // Whether instance is currently rented
|
||||
rentable bool // Whether instance can be rented
|
||||
verification string // Verification status
|
||||
external bool // Whether instance is external
|
||||
dph_total f64 // Total dollars per hour
|
||||
storage_total int // Total storage in GB
|
||||
inet_up f64 // Upload bandwidth in Mbps
|
||||
inet_down f64 // Download bandwidth in Mbps
|
||||
}
|
||||
|
||||
// Search parameters for filtering GPU offers
|
||||
@[params]
|
||||
pub struct SearchParams {
|
||||
pub mut:
|
||||
order ?string // Sort order (default: score descending)
|
||||
query ?string // Search query string
|
||||
min_gpu_ram ?int // Minimum GPU RAM in MB
|
||||
min_num_gpus ?int // Minimum number of GPUs
|
||||
min_dlperf ?f64 // Minimum deep learning performance score
|
||||
max_dph ?f64 // Maximum dollars per hour
|
||||
min_reliability ?f64 // Minimum reliability score
|
||||
verified_only ?bool // Only show verified instances
|
||||
external ?bool // Include external instances
|
||||
rentable ?bool // Show only rentable instances
|
||||
rented ?bool // Show only rented instances
|
||||
}
|
||||
|
||||
// Response from the search API
|
||||
struct SearchResponse {
|
||||
success bool
|
||||
offers []GPUOffer
|
||||
}
|
||||
|
||||
// Searches for GPU offers based on the provided parameters
|
||||
pub fn (mut va VastAI) search_offers(params SearchParams) ![]GPUOffer {
|
||||
// Get HTTP client
|
||||
mut http_client := va.httpclient()!
|
||||
|
||||
// Make request
|
||||
resp := http_client.send(method: .put, prefix: '/search/asks/?', data: json.encode(params))!
|
||||
|
||||
if resp.code != 200 {
|
||||
return error('request failed with code ${resp.code}: ${resp.data}')
|
||||
}
|
||||
// Parse response
|
||||
search_resp := json.decode(SearchResponse, resp.data)!
|
||||
|
||||
return search_resp.offers
|
||||
}
|
||||
|
||||
// Helper method to get top N offers sorted by performance/price
|
||||
pub fn (mut v VastAI) get_top_offers(count int) ![]GPUOffer {
|
||||
params := SearchParams{
|
||||
order: 'dlperf_per_dphtotal-' // Sort by performance per dollar (descending)
|
||||
rentable: true // Only show available instances
|
||||
min_reliability: 0.98 // High reliability
|
||||
}
|
||||
|
||||
offers := v.search_offers(params)!
|
||||
|
||||
if offers.len <= count {
|
||||
return offers
|
||||
}
|
||||
return offers[..count]
|
||||
}
|
||||
|
||||
// Helper method to find cheapest offers meeting minimum requirements
|
||||
pub fn (mut va VastAI) find_cheapest_offers(min_gpu_ram int, min_gpus int, count int) ![]GPUOffer {
|
||||
params := SearchParams{
|
||||
order: 'dph_total' // Sort by price (ascending)
|
||||
min_gpu_ram: min_gpu_ram
|
||||
min_num_gpus: min_gpus
|
||||
rentable: true // Only show available instances
|
||||
min_reliability: 0.95 // Reasonable reliability threshold
|
||||
}
|
||||
|
||||
offers := va.search_offers(params)!
|
||||
|
||||
if offers.len <= count {
|
||||
return offers
|
||||
}
|
||||
return offers[..count]
|
||||
}
|
||||
|
||||
// Helper method to find most powerful GPUs available
|
||||
pub fn (mut va VastAI) find_most_powerful(count int) ![]GPUOffer {
|
||||
params := SearchParams{
|
||||
order: 'dlperf-' // Sort by deep learning performance (descending)
|
||||
rentable: true // Only show available instances
|
||||
min_reliability: 0.95 // Reasonable reliability threshold
|
||||
}
|
||||
|
||||
offers := va.search_offers(params)!
|
||||
|
||||
if offers.len <= count {
|
||||
return offers
|
||||
}
|
||||
return offers[..count]
|
||||
}
|
||||
|
||||
// CreateInstanceConfig represents the configuration for creating a new instance from an offer
|
||||
@[params]
|
||||
pub struct CreateInstanceConfig {
|
||||
pub:
|
||||
template_id ?string
|
||||
template_hash_id ?string
|
||||
image ?string // Docker image name
|
||||
disk ?int
|
||||
extra_env ?map[string]string // Environment variables
|
||||
runtype ?string // "args" or "ssh"
|
||||
onstart ?string
|
||||
label ?string
|
||||
image_login ?string
|
||||
price ?f32
|
||||
target_state ?string // "running" or "stopped"
|
||||
cancel_unavail ?bool
|
||||
vm ?bool
|
||||
client_id ?string
|
||||
apikey_id ?string
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct CreateInstanceArgs {
|
||||
pub:
|
||||
id int
|
||||
config CreateInstanceConfig
|
||||
}
|
||||
|
||||
// CreateInstanceResponse represents the response from creating a new instance
|
||||
pub struct CreateInstanceResponse {
|
||||
pub:
|
||||
success bool
|
||||
new_contract int
|
||||
}
|
||||
|
||||
// Creates a new instance by accepting a provider offer
|
||||
pub fn (mut va VastAI) create_instance(args CreateInstanceArgs) !CreateInstanceResponse {
|
||||
// Get HTTP client
|
||||
mut http_client := va.httpclient()!
|
||||
|
||||
// Make request
|
||||
resp := http_client.send(
|
||||
method: .put
|
||||
prefix: '/asks/${args.id}/?'
|
||||
data: json.encode(args.config)
|
||||
)!
|
||||
|
||||
if resp.code != 200 {
|
||||
return error('request failed with code ${resp.code}: ${resp.data}')
|
||||
}
|
||||
|
||||
// Parse response
|
||||
instance_resp := json.decode(CreateInstanceResponse, resp.data)!
|
||||
|
||||
return instance_resp
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct StopInstanceArgs {
|
||||
pub:
|
||||
id int @[required]
|
||||
state string
|
||||
}
|
||||
|
||||
pub struct StopInstanceResponse {
|
||||
pub:
|
||||
success bool
|
||||
msg string
|
||||
}
|
||||
|
||||
// Stops a running container and updates its status to 'stopped'.
|
||||
pub fn (mut va VastAI) stop_instance(args StopInstanceArgs) !StopInstanceResponse {
|
||||
// Get HTTP client
|
||||
mut http_client := va.httpclient()!
|
||||
payload := {
|
||||
'state': args.state
|
||||
}
|
||||
|
||||
// Make request
|
||||
resp := http_client.send(
|
||||
method: .put
|
||||
prefix: '/instances/${args.id}/?'
|
||||
data: json.encode(payload)
|
||||
)!
|
||||
|
||||
if resp.code != 200 {
|
||||
return error('request failed with code ${resp.code}: ${resp.data}')
|
||||
}
|
||||
|
||||
// Parse response
|
||||
instance_resp := json.decode(StopInstanceResponse, resp.data)!
|
||||
|
||||
return instance_resp
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct DestroyInstanceArgs {
|
||||
pub:
|
||||
id int @[required]
|
||||
}
|
||||
|
||||
pub struct DestroyInstanceResponse {
|
||||
pub:
|
||||
success bool
|
||||
msg string
|
||||
}
|
||||
|
||||
// Destroys an instance.
|
||||
pub fn (mut va VastAI) destroy_instance(args DestroyInstanceArgs) !DestroyInstanceResponse {
|
||||
// Get HTTP client
|
||||
mut http_client := va.httpclient()!
|
||||
|
||||
// Make request
|
||||
resp := http_client.send(
|
||||
method: .delete
|
||||
prefix: '/instances/${args.id}/?'
|
||||
)!
|
||||
|
||||
if resp.code != 200 {
|
||||
return error('request failed with code ${resp.code}: ${resp.data}')
|
||||
}
|
||||
|
||||
// Parse response
|
||||
instance_resp := json.decode(DestroyInstanceResponse, resp.data)!
|
||||
|
||||
return instance_resp
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct AttachSshKeyToInstanceArgs {
|
||||
pub:
|
||||
id int @[required]
|
||||
ssh_key string
|
||||
}
|
||||
|
||||
pub struct AttachSshKeyToInstanceResponse {
|
||||
pub:
|
||||
success bool
|
||||
msg string
|
||||
}
|
||||
|
||||
// Attach SSH Key to Instance
|
||||
pub fn (mut va VastAI) attach_sshkey_to_instance(args AttachSshKeyToInstanceArgs) !AttachSshKeyToInstanceResponse {
|
||||
// Get HTTP client
|
||||
mut http_client := va.httpclient()!
|
||||
payload := {
|
||||
'ssh_key': args.ssh_key
|
||||
}
|
||||
|
||||
// Make request
|
||||
resp := http_client.send(
|
||||
method: .post
|
||||
prefix: '/instances/${args.id}/ssh/?'
|
||||
data: json.encode(payload)
|
||||
)!
|
||||
|
||||
if resp.code != 200 {
|
||||
return error('request failed with code ${resp.code}: ${resp.data}')
|
||||
}
|
||||
|
||||
// Parse response
|
||||
instance_resp := json.decode(AttachSshKeyToInstanceResponse, resp.data)!
|
||||
|
||||
return instance_resp
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct LaunchInstanceArgs {
|
||||
pub:
|
||||
num_gpus int @[required]
|
||||
gpu_name string @[required]
|
||||
region string @[required]
|
||||
image string @[required]
|
||||
disk int @[required]
|
||||
env ?string
|
||||
args ?[]string
|
||||
}
|
||||
|
||||
// Launch an instance. This endpoint launches an instance based on the specified parameters, selecting the first available offer that meets the criteria.
|
||||
pub fn (mut va VastAI) launch_instance(args LaunchInstanceArgs) !CreateInstanceResponse {
|
||||
// Get HTTP client
|
||||
mut http_client := va.httpclient()!
|
||||
|
||||
// Make request
|
||||
resp := http_client.send(
|
||||
method: .put
|
||||
prefix: '/launch_instance/?'
|
||||
data: json.encode(args)
|
||||
)!
|
||||
|
||||
if resp.code != 200 {
|
||||
return error('request failed with code ${resp.code}: ${resp.data}')
|
||||
}
|
||||
|
||||
// Parse response
|
||||
instance_resp := json.decode(CreateInstanceResponse, resp.data)!
|
||||
|
||||
return instance_resp
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct StartInstancesArgs {
|
||||
pub:
|
||||
ids []int @[json: 'IDs'; required]
|
||||
}
|
||||
|
||||
pub struct StartInstancesResponse {
|
||||
pub:
|
||||
success bool
|
||||
msg string
|
||||
}
|
||||
|
||||
// Start Instances: starts a list of instances specified by their IDs.
|
||||
pub fn (mut va VastAI) start_instances(args StartInstancesArgs) !StartInstancesResponse {
|
||||
// Get HTTP client
|
||||
mut http_client := va.httpclient()!
|
||||
// Make request
|
||||
resp := http_client.send(
|
||||
method: .post
|
||||
prefix: '/instances/start'
|
||||
data: json.encode(args)
|
||||
)!
|
||||
|
||||
if resp.code != 200 {
|
||||
return error('request failed with code ${resp.code}: ${resp.data}')
|
||||
}
|
||||
|
||||
// Parse response
|
||||
instance_resp := json.decode(StartInstancesResponse, resp.data)!
|
||||
|
||||
return instance_resp
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct StartInstanceArgs {
|
||||
pub:
|
||||
id int @[required]
|
||||
}
|
||||
|
||||
// Start Instance: starts an instance specified by its ID.
|
||||
pub fn (mut va VastAI) start_instance(args StartInstanceArgs) !StartInstancesResponse {
|
||||
return va.start_instances(StartInstancesArgs{ ids: [args.id] })
|
||||
}
|
||||
30
lib/clients/vastai/readme.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# vastai

To get started

```v
import freeflowuniverse.herolib.clients.vastai

mut client := vastai.get()!

// client...
```

## Example heroscript

```hero
!!vastai.configure
    name:'default'
    api_key:'...'
    base_url:'https://console.vast.ai/api/v0/'
```
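A short usage sketch built on the client functions above (the image name and disk size are placeholders; assumes `VASTAI_API_KEY` is configured):

```v
import freeflowuniverse.herolib.clients.vastai

mut client := vastai.get()!

// the three cheapest rentable offers with at least 1 GPU and 16 GB of GPU RAM
offers := client.find_cheapest_offers(16000, 1, 3)!
for offer in offers {
	println('${offer.gpu_name} x${offer.num_gpus} at \$${offer.dph_total}/h')
}

// accept the first offer
if offers.len > 0 {
	resp := client.create_instance(
		id:     offers[0].id
		config: vastai.CreateInstanceConfig{
			image:   'pytorch/pytorch'
			disk:    40
			runtype: 'ssh'
		}
	)!
	println('new contract: ${resp.new_contract}')
}
```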
|
||||
|
||||
|
||||
102
lib/clients/vastai/vastai_factory_.v
Normal file
@@ -0,0 +1,102 @@
|
||||
module vastai
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
__global (
|
||||
vastai_global map[string]&VastAI
|
||||
vastai_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = vastai_default
|
||||
}
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&VastAI {
|
||||
mut args := args_get(args_)
|
||||
if args.name !in vastai_global {
|
||||
if args.name == 'default' {
|
||||
if !config_exists(args) {
|
||||
if default {
|
||||
config_save(args)!
|
||||
}
|
||||
}
|
||||
config_load(args)!
|
||||
}
|
||||
}
|
||||
return vastai_global[args.name] or {
|
||||
println(vastai_global)
|
||||
panic('could not get config for vastai with name:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
fn config_exists(args_ ArgsGet) bool {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('vastai', args.name)
|
||||
}
|
||||
|
||||
fn config_load(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
mut heroscript := context.hero_config_get('vastai', args.name)!
|
||||
play(heroscript: heroscript)!
|
||||
}
|
||||
|
||||
fn config_save(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_set('vastai', args.name, heroscript_default()!)!
|
||||
}
|
||||
|
||||
fn set(o VastAI) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
vastai_global[o.name] = &o2
|
||||
vastai_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
if args.heroscript == '' {
|
||||
args.heroscript = heroscript_default()!
|
||||
}
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'vastai.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
mut p := install_action.params
|
||||
cfg_play(p)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// switch instance to be used for vastai
|
||||
pub fn switch(name string) {
|
||||
vastai_default = name
|
||||
}
59
lib/clients/vastai/vastai_model.v
Normal file
@@ -0,0 +1,59 @@
module vastai

import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.core.httpconnection
import os

pub const version = '1.14.3'
const singleton = true
const default = true

// TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH THE STRUCT BELOW; IT IS STRUCTURED AS HEROSCRIPT
pub fn heroscript_default() !string {
    heroscript := "
    !!vastai.configure
        name:'default'
        api_key:'${os.getenv('VASTAI_API_KEY')}'
        base_url:'https://console.vast.ai/api/v0/'
    "
    return heroscript
}

// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED

@[heap]
pub struct VastAI {
pub mut:
    name     string = 'default'
    api_key  string
    base_url string
}

fn cfg_play(p paramsparser.Params) ! {
    // THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
    mut mycfg := VastAI{
        name:     p.get_default('name', 'default')!
        api_key:  p.get_default('api_key', '${os.getenv('VASTAI_API_KEY')}')!
        base_url: p.get_default('base_url', 'https://console.vast.ai/api/v0/')!
    }
    set(mycfg)!
}

fn obj_init(obj_ VastAI) !VastAI {
    // never call get here, only thing we can do here is work on the object itself
    mut obj := obj_
    return obj
}

fn (mut v VastAI) httpclient() !&httpconnection.HTTPConnection {
    mut http_conn := httpconnection.new(
        name:  'vastai_client_${v.name}'
        url:   v.base_url
        cache: true
        retry: 3
    )!
    http_conn.default_header.add(.authorization, 'Bearer ${v.api_key}')
    http_conn.default_header.add(.accept, 'application/json')

    return http_conn
}
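
Because `httpclient()` already injects the bearer token and the JSON `accept` header, endpoint wrappers only need to pick a path and decode the body. A minimal sketch under that assumption (the `/instances` path below is a hypothetical placeholder, not an endpoint wrapped by this commit):

```v
// hypothetical example of a raw GET through the shared connection
fn (mut va VastAI) instances_raw() !string {
    mut http_client := va.httpclient()!
    resp := http_client.send(
        method: .get
        prefix: '/instances' // hypothetical endpoint path
    )!
    if resp.code != 200 {
        return error('request failed with code ${resp.code}: ${resp.data}')
    }
    return resp.data
}
```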
8
lib/clients/wireguard/.heroscript
Normal file
@@ -0,0 +1,8 @@

!!hero_code.generate_client
    name:'wireguard'
    classname:'WireGuard'
    singleton:0
    default:1
    hasconfig:1
    reset:0
114
lib/clients/wireguard/client.v
Normal file
@@ -0,0 +1,114 @@
module wireguard

import os

pub struct WGPeer {
pub mut:
    endpoint             string
    allowed_ips          string
    latest_handshake     string
    transfer             string
    persistent_keepalive string
}

pub struct WGInterface {
pub mut:
    name           string
    public_key     string
    listening_port int
}

pub struct WGInfo {
pub mut:
    interface_ WGInterface
    peers      map[string]WGPeer
}

pub struct WGShow {
pub mut:
    configs map[string]WGInfo
}

pub fn (wg WireGuard) show() !WGShow {
    cmd := 'sudo wg show'
    res := os.execute(cmd)
    if res.exit_code != 0 {
        return error('failed to execute show command due to: ${res.output}')
    }

    return wg.parse_show_command_output(res.output)
}

@[params]
pub struct ShowConfigArgs {
pub:
    interface_name string @[required]
}

pub fn (wg WireGuard) show_config(args ShowConfigArgs) !WGInfo {
    configs := wg.show()!.configs
    config := configs[args.interface_name] or {
        return error('key ${args.interface_name} does not exist.')
    }
    return config
}

@[params]
pub struct StartArgs {
pub:
    config_file_path string @[required]
}

pub fn (wg WireGuard) start(args StartArgs) ! {
    if !os.exists(args.config_file_path) {
        return error('File ${args.config_file_path} does not exist.')
    }

    cmd := 'sudo wg-quick up ${args.config_file_path}'
    res := os.execute(cmd)
    if res.exit_code != 0 {
        return error('failed to execute start command due to: ${res.output}')
    }
}

@[params]
pub struct DownArgs {
pub:
    config_file_path string @[required]
}

pub fn (wg WireGuard) down(args DownArgs) ! {
    if !os.exists(args.config_file_path) {
        return error('File ${args.config_file_path} does not exist.')
    }

    cmd := 'sudo wg-quick down ${args.config_file_path}'
    res := os.execute(cmd)
    if res.exit_code != 0 {
        return error('failed to execute down command due to: ${res.output}')
    }
}

pub fn (wg WireGuard) generate_private_key() !string {
    cmd := 'wg genkey'
    res := os.execute(cmd)
    if res.exit_code != 0 {
        return error('failed to execute genkey command due to: ${res.output}')
    }
    return res.output.trim_space()
}

@[params]
pub struct GetPublicKeyArgs {
pub:
    private_key string @[required]
}

pub fn (wg WireGuard) get_public_key(args GetPublicKeyArgs) !string {
    cmd := 'echo ${args.private_key} | wg pubkey'
    res := os.execute(cmd)
    if res.exit_code != 0 {
        return error('failed to execute pubkey command due to: ${res.output}')
    }
    return res.output.trim_space()
}
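
The two key helpers above compose naturally into a key-pair generator; a short sketch (error handling kept minimal, `wg` must be installed):

```v
// generate a WireGuard key pair using the helpers above
fn example_keypair(wg WireGuard) !(string, string) {
    private_key := wg.generate_private_key()!
    public_key := wg.get_public_key(private_key: private_key)!
    return private_key, public_key
}
```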
30
lib/clients/wireguard/readme.md
Normal file
@@ -0,0 +1,30 @@
# wireguard

To get started:

```vlang
import freeflowuniverse.herolib.clients.wireguard

mut client := wireguard.get()!

client...
```

## example heroscript

```hero
!!wireguard.configure
    name: 'wireguard'
```
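
A short usage sketch based on the methods added in `client.v` in this commit (the config path and interface name `wg0` are hypothetical placeholders, and `wg`/`wg-quick` must be installed since the client shells out to them with sudo):

```vlang
import freeflowuniverse.herolib.clients.wireguard

mut client := wireguard.get()!

// bring an interface up from a config file (path is a placeholder)
client.start(config_file_path: '/etc/wireguard/wg0.conf')!

// inspect the running interface and its peers
info := client.show_config(interface_name: 'wg0')!
println('listening on port ${info.interface_.listening_port}')

// bring it down again
client.down(config_file_path: '/etc/wireguard/wg0.conf')!
```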
72
lib/clients/wireguard/utils.v
Normal file
@@ -0,0 +1,72 @@
|
||||
module wireguard
|
||||
|
||||
fn (wg WireGuard) parse_show_command_output(res string) !WGShow {
|
||||
mut configs := map[string]WGInfo{}
|
||||
mut lines := res.split('\n')
|
||||
mut current_interface := ''
|
||||
mut current_peers := map[string]WGPeer{}
|
||||
mut iface := WGInterface{}
|
||||
mut peer_key := ''
|
||||
|
||||
for line in lines {
|
||||
mut parts := line.trim_space().split(': ')
|
||||
if parts.len < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := parts[0]
|
||||
value := parts[1]
|
||||
|
||||
if key.starts_with('interface') {
|
||||
if current_interface != '' {
|
||||
configs[current_interface] = WGInfo{
|
||||
interface_: iface
|
||||
peers: current_peers.clone()
|
||||
}
|
||||
current_peers.clear()
|
||||
}
|
||||
|
||||
current_interface = value
|
||||
iface = WGInterface{
|
||||
name: current_interface
|
||||
public_key: ''
|
||||
listening_port: 0
|
||||
}
|
||||
} else if key == 'public key' {
|
||||
iface.public_key = value
|
||||
} else if key == 'listening port' {
|
||||
iface.listening_port = value.int()
|
||||
} else if key.starts_with('peer') {
|
||||
peer_key = value
|
||||
mut peer := WGPeer{
|
||||
endpoint: ''
|
||||
allowed_ips: ''
|
||||
latest_handshake: ''
|
||||
transfer: ''
|
||||
persistent_keepalive: ''
|
||||
}
|
||||
current_peers[peer_key] = peer
|
||||
} else if key == 'endpoint' {
|
||||
current_peers[peer_key].endpoint = value
|
||||
} else if key == 'allowed ips' {
|
||||
current_peers[peer_key].allowed_ips = value
|
||||
} else if key == 'latest handshake' {
|
||||
current_peers[peer_key].latest_handshake = value
|
||||
} else if key == 'transfer' {
|
||||
current_peers[peer_key].transfer = value
|
||||
} else if key == 'persistent keepalive' {
|
||||
current_peers[peer_key].persistent_keepalive = value
|
||||
}
|
||||
}
|
||||
|
||||
if current_interface != '' {
|
||||
configs[current_interface] = WGInfo{
|
||||
interface_: iface
|
||||
peers: current_peers.clone()
|
||||
}
|
||||
}
|
||||
|
||||
return WGShow{
|
||||
configs: configs
|
||||
}
|
||||
}
102
lib/clients/wireguard/wireguard_factory_.v
Normal file
@@ -0,0 +1,102 @@
|
||||
module wireguard
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
|
||||
__global (
|
||||
wireguard_global map[string]&WireGuard
|
||||
wireguard_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = wireguard_default
|
||||
}
|
||||
if args.name == '' {
|
||||
args.name = 'wireguard'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&WireGuard {
|
||||
mut args := args_get(args_)
|
||||
if args.name !in wireguard_global {
|
||||
if args.name == 'wireguard' {
|
||||
if !config_exists(args) {
|
||||
if default {
|
||||
println('When saving')
|
||||
config_save(args)!
|
||||
}
|
||||
}
|
||||
config_load(args)!
|
||||
}
|
||||
}
|
||||
return wireguard_global[args.name] or {
|
||||
println(wireguard_global)
|
||||
panic('could not get config for wireguard with name:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
fn config_exists(args_ ArgsGet) bool {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('wireguard', args.name)
|
||||
}
|
||||
|
||||
fn config_load(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
mut heroscript := context.hero_config_get('wireguard', args.name)!
|
||||
play(heroscript: heroscript)!
|
||||
}
|
||||
|
||||
fn config_save(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_set('wireguard', args.name, heroscript_default()!)!
|
||||
}
|
||||
|
||||
fn set(o WireGuard) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
wireguard_global[o.name] = &o2
|
||||
wireguard_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
if args.heroscript == '' {
|
||||
args.heroscript = heroscript_default()!
|
||||
}
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'wireguard.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
mut p := install_action.params
|
||||
cfg_play(p)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// switch instance to be used for wireguard
|
||||
pub fn switch(name string) {
|
||||
wireguard_default = name
|
||||
}
38
lib/clients/wireguard/wireguard_model.v
Normal file
@@ -0,0 +1,38 @@
module wireguard

import freeflowuniverse.herolib.data.paramsparser

pub const version = '1.14.3'
const singleton = false
const default = true

// TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH THE STRUCT BELOW; IT IS STRUCTURED AS HEROSCRIPT
pub fn heroscript_default() !string {
    heroscript := "
    !!wireguard.configure
        name:'wireguard'
    "
    return heroscript
}

// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED

@[heap]
pub struct WireGuard {
pub mut:
    name string = 'wireguard'
}

fn cfg_play(p paramsparser.Params) ! {
    // THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
    mut mycfg := WireGuard{
        name: p.get_default('name', 'wireguard')!
    }
    set(mycfg)!
}

fn obj_init(obj_ WireGuard) !WireGuard {
    // never call get here, only thing we can do here is work on the object itself
    mut obj := obj_
    return obj
}
@@ -52,7 +52,7 @@ this is to make distinction between processing at compile time (pre-compile) or
|
||||
to call in code
|
||||
|
||||
```v
|
||||
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
|
||||
#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.code.generator.generic
|
||||
|
||||
|
||||
@@ -152,11 +152,16 @@ fn build_() ! {
|
||||
// if core.platform()!= .ubuntu {
|
||||
// return error('only support ubuntu for now')
|
||||
// }
|
||||
// golang.install()!
|
||||
|
||||
//mut g:=golang.get()!
|
||||
//g.install()!
|
||||
|
||||
//console.print_header('build coredns')
|
||||
|
||||
//mut gs := gittools.new(coderoot: '~/code')!
|
||||
// console.print_header('build ${model.name}')
|
||||
|
||||
// gitpath := gittools.get_repo(coderoot: '/tmp/builder', url: url, reset: true, pull: true)!
|
||||
// gitpath := gittools.get_repo(url: url, reset: true, pull: true)!
|
||||
|
||||
// cmd := '
|
||||
// cd ??{gitpath}
|
||||
|
||||
@@ -10,7 +10,7 @@ How to use
|
||||
## example
|
||||
|
||||
```v
|
||||
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
|
||||
#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.conversiontools.docsorter
|
||||
|
||||
@@ -138,6 +138,12 @@ pub fn (mut self Context) hero_config_set(cat string, name string, content_ stri
|
||||
config_file.write(content)!
|
||||
}
|
||||
|
||||
pub fn (mut self Context) hero_config_delete(cat string, name string) ! {
|
||||
path := '${self.path()!.path}/${cat}__${name}.yaml'
|
||||
mut config_file := pathlib.get_file(path: path)!
|
||||
config_file.delete()!
|
||||
}
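
A hedged usage sketch for the new delete helper (the `'vastai'`/`'default'` category and name are just illustrative values matching the clients added in this commit):

```v
import freeflowuniverse.herolib.core.base

mut context := base.context()!
if context.hero_config_exists('vastai', 'default') {
    context.hero_config_delete('vastai', 'default')!
}
```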
|
||||
|
||||
pub fn (mut self Context) hero_config_exists(cat string, name string) bool {
|
||||
path := '${os.home_dir()}/hero/context/${self.config.name}/${cat}__${name}.yaml'
|
||||
return os.exists(path)
|
||||
|
||||
@@ -69,7 +69,7 @@ this is to make distinction between processing at compile time (pre-compile) or
|
||||
to call in code
|
||||
|
||||
```v
|
||||
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
|
||||
#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.core.generator.generic
|
||||
|
||||
|
||||
@@ -9,9 +9,9 @@ import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.osal.systemd
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
@end
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
|
||||
@if args.build
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
import freeflowuniverse.herolib.installers.lang.golang
|
||||
import freeflowuniverse.herolib.installers.lang.rust
|
||||
import freeflowuniverse.herolib.installers.lang.python
|
||||
@@ -82,7 +82,7 @@ fn stop_post()!{
|
||||
// checks if a certain version or above is installed
|
||||
fn installed() !bool {
|
||||
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
|
||||
// res := os.execute('??{osal.profile_path_source_and()} ${args.name} version')
|
||||
// res := os.execute('??{osal.profile_path_source_and()!} ${args.name} version')
|
||||
// if res.exit_code != 0 {
|
||||
// return false
|
||||
// }
|
||||
@@ -115,11 +115,11 @@ fn install() ! {
|
||||
console.print_header('install ${args.name}')
|
||||
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
|
||||
// mut url := ''
|
||||
// if osal.is_linux_arm() {
|
||||
// if core.is_linux_arm() {
|
||||
// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_linux_arm64.tar.gz'
|
||||
// } else if osal.is_linux_intel() {
|
||||
// } else if core.is_linux_intel() {
|
||||
// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_linux_amd64.tar.gz'
|
||||
// } else if osal.is_osx_arm() {
|
||||
// } else if core.is_osx_arm() {
|
||||
// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_darwin_arm64.tar.gz'
|
||||
// } else if osal.is_osx_intel() {
|
||||
// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_darwin_amd64.tar.gz'
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
module ${args.name}
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
|
||||
@if args.cat == .installer
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
@@ -27,9 +27,6 @@ pub mut:
|
||||
@if args.hasconfig
|
||||
fn args_get (args_ ArgsGet) ArgsGet {
|
||||
mut args:=args_
|
||||
if args.name == ""{
|
||||
args.name = ${args.name}_default
|
||||
}
|
||||
if args.name == ""{
|
||||
args.name = "default"
|
||||
}
|
||||
@@ -37,23 +34,55 @@ fn args_get (args_ ArgsGet) ArgsGet {
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&${args.classname} {
|
||||
mut context:=base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := ${args.classname}{}
|
||||
if !(args.name in ${args.name}_global) {
|
||||
if args.name=="default"{
|
||||
if ! config_exists(args){
|
||||
if default{
|
||||
config_save(args)!
|
||||
}
|
||||
}
|
||||
config_load(args)!
|
||||
}
|
||||
if ! exists(args)!{
|
||||
set(obj)!
|
||||
}else{
|
||||
heroscript := context.hero_config_get("${args.name}",args.name)!
|
||||
mut obj_:=heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return ${args.name}_global[args.name] or {
|
||||
return ${args.name}_global[args.name] or {
|
||||
println(${args.name}_global)
|
||||
panic("could not get config for ${args.name} with name:??{args.name}")
|
||||
//bug if we get here because should be in globals
|
||||
panic("could not get config for ${args.name} with name, is bug:??{args.name}")
|
||||
}
|
||||
}
|
||||
|
||||
//register the config for the future
|
||||
pub fn set(o ${args.classname})! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set("${args.name}", o.name, heroscript)!
|
||||
}
|
||||
|
||||
//does the config exists?
|
||||
pub fn exists(args_ ArgsGet)! bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists("${args.name}", args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet)! {
|
||||
mut args := args_get(args_)
|
||||
mut context:=base.context()!
|
||||
context.hero_config_delete("${args.name}",args.name)!
|
||||
if args.name in ${args.name}_global {
|
||||
//del ${args.name}_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
//only sets in mem, does not set as config
|
||||
fn set_in_mem(o ${args.classname})! {
|
||||
mut o2:=obj_init(o)!
|
||||
${args.name}_global[o.name] = &o2
|
||||
${args.name}_default = o.name
|
||||
}
|
||||
|
||||
@else
|
||||
pub fn get(args_ ArgsGet) !&${args.classname} {
|
||||
@@ -61,34 +90,6 @@ pub fn get(args_ ArgsGet) !&${args.classname} {
|
||||
}
|
||||
@end
|
||||
|
||||
@if args.hasconfig
|
||||
fn config_exists(args_ ArgsGet) bool {
|
||||
mut args := args_get(args_)
|
||||
mut context:=base.context() or { panic("bug") }
|
||||
return context.hero_config_exists("${args.name}",args.name)
|
||||
}
|
||||
|
||||
fn config_load(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context:=base.context()!
|
||||
mut heroscript := context.hero_config_get("${args.name}",args.name)!
|
||||
play(heroscript:heroscript)!
|
||||
}
|
||||
|
||||
fn config_save(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context:=base.context()!
|
||||
context.hero_config_set("${args.name}",args.name,heroscript_default()!)!
|
||||
}
|
||||
|
||||
|
||||
fn set(o ${args.classname})! {
|
||||
mut o2:=obj_init(o)!
|
||||
${args.name}_global[o.name] = &o2
|
||||
${args.name}_default = o.name
|
||||
}
|
||||
|
||||
|
||||
^^[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
@@ -102,9 +103,7 @@ pub fn play(args_ PlayArgs) ! {
|
||||
mut args:=args_
|
||||
|
||||
@if args.hasconfig
|
||||
if args.heroscript == "" {
|
||||
args.heroscript = heroscript_default()!
|
||||
}
|
||||
|
||||
@end
|
||||
mut plbook := args.plbook or {
|
||||
playbook.new(text: args.heroscript)!
|
||||
@@ -114,8 +113,9 @@ pub fn play(args_ PlayArgs) ! {
|
||||
mut install_actions := plbook.find(filter: '${args.name}.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
mut p := install_action.params
|
||||
cfg_play(p)!
|
||||
heroscript:=install_action.heroscript()
|
||||
mut obj2:=heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
@end
|
||||
@@ -161,8 +161,6 @@ pub fn play(args_ PlayArgs) ! {
|
||||
|
||||
}
|
||||
|
||||
@end
|
||||
|
||||
@if args.cat == .installer
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
@@ -309,3 +307,11 @@ pub fn (mut self ${args.classname}) destroy() ! {
|
||||
pub fn switch(name string) {
|
||||
${args.name}_default = name
|
||||
}
|
||||
|
||||
|
||||
//helpers
|
||||
|
||||
^^[params]
|
||||
pub struct DefaultConfigArgs{
|
||||
instance string = 'default'
|
||||
}
|
||||
@@ -1,47 +1,12 @@
|
||||
module ${args.name}
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
import os
|
||||
|
||||
pub const version = '1.14.3'
|
||||
pub const version = '0.0.0'
|
||||
const singleton = ${args.singleton}
|
||||
const default = ${args.default}
|
||||
|
||||
@if args.hasconfig
|
||||
//TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE TO STRUCT BELOW, IS STRUCTURED AS HEROSCRIPT
|
||||
pub fn heroscript_default() !string {
|
||||
@if args.cat == .installer
|
||||
heroscript:="
|
||||
!!${args.name}.configure
|
||||
name:'${args.name}'
|
||||
homedir: '{HOME}/hero/var/${args.name}'
|
||||
configpath: '{HOME}/.config/${args.name}/admin.yaml'
|
||||
username: 'admin'
|
||||
password: 'secretpassword'
|
||||
secret: ''
|
||||
title: 'My Hero DAG'
|
||||
host: 'localhost'
|
||||
port: 8888
|
||||
|
||||
"
|
||||
@else
|
||||
heroscript:="
|
||||
!!${args.name}.configure
|
||||
name:'${args.name}'
|
||||
mail_from: 'info@@example.com'
|
||||
mail_password: 'secretpassword'
|
||||
mail_port: 587
|
||||
mail_server: 'smtp-relay.brevo.com'
|
||||
mail_username: 'kristof@@incubaid.com'
|
||||
|
||||
"
|
||||
|
||||
@end
|
||||
|
||||
return heroscript
|
||||
|
||||
}
|
||||
@end
|
||||
|
||||
//THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
|
||||
@if args.cat == .installer
|
||||
^^[heap]
|
||||
@@ -53,33 +18,11 @@ pub mut:
|
||||
configpath string
|
||||
username string
|
||||
password string @@[secret]
|
||||
secret string @@[secret]
|
||||
title string
|
||||
host string
|
||||
port int
|
||||
@end
|
||||
}
|
||||
@if args.hasconfig
|
||||
fn cfg_play(p paramsparser.Params) !${args.classname} {
|
||||
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
|
||||
mut mycfg := ${args.classname}{
|
||||
name: p.get_default('name', 'default')!
|
||||
homedir: p.get_default('homedir', '{HOME}/hero/var/${args.name}')!
|
||||
configpath: p.get_default('configpath', '{HOME}/hero/var/${args.name}/admin.yaml')!
|
||||
username: p.get_default('username', 'admin')!
|
||||
password: p.get_default('password', '')!
|
||||
secret: p.get_default('secret', '')!
|
||||
title: p.get_default('title', 'HERO DAG')!
|
||||
host: p.get_default('host', 'localhost')!
|
||||
port: p.get_int_default('port', 8888)!
|
||||
}
|
||||
|
||||
if mycfg.password == '' && mycfg.secret == '' {
|
||||
return error('password or secret needs to be filled in for ${args.name}')
|
||||
}
|
||||
return mycfg
|
||||
}
|
||||
@end
|
||||
|
||||
@else
|
||||
|
||||
@@ -94,27 +37,16 @@ pub mut:
|
||||
mail_username string
|
||||
}
|
||||
|
||||
@if args.hasconfig
|
||||
fn cfg_play(p paramsparser.Params) ! {
|
||||
//THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
|
||||
mut mycfg := ${args.classname}{
|
||||
name: p.get_default('name', 'default')!
|
||||
mail_from: p.get('mail_from')!
|
||||
mail_password: p.get('mail_password')!
|
||||
mail_port: p.get_int_default('mail_port', 8888)!
|
||||
mail_server: p.get('mail_server')!
|
||||
mail_username: p.get('mail_username')!
|
||||
}
|
||||
set(mycfg)!
|
||||
}
|
||||
@end
|
||||
|
||||
@end
|
||||
|
||||
fn obj_init(obj_ ${args.classname})!${args.classname}{
|
||||
//never call get here, only thing we can do here is work on object itself
|
||||
mut obj:=obj_
|
||||
return obj
|
||||
//your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ ${args.classname})!${args.classname}{
|
||||
mut mycfg:=mycfg_
|
||||
if mycfg.password == '' && mycfg.secret == '' {
|
||||
return error('password or secret needs to be filled in for ??{mycfg.name}')
|
||||
}
|
||||
return mycfg
|
||||
}
|
||||
|
||||
@if args.cat == .installer
|
||||
@@ -135,3 +67,13 @@ fn configure() ! {
|
||||
@end
|
||||
|
||||
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj ${args.classname}) !string {
|
||||
return encoderhero.encode[${args.classname} ](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !${args.classname} {
|
||||
mut obj := encoderhero.decode[${args.classname}](heroscript)!
|
||||
return obj
|
||||
}
|
||||
|
||||
111
lib/core/herocmds/docusaurus.v
Normal file
@@ -0,0 +1,111 @@
|
||||
module herocmds
|
||||
|
||||
import freeflowuniverse.herolib.web.docusaurus
|
||||
import os
|
||||
import cli { Command, Flag }
|
||||
|
||||
pub fn cmd_docusaurus(mut cmdroot Command) {
|
||||
mut cmd_run := Command{
|
||||
name: 'docusaurus'
|
||||
description: 'Generate, build, run docusaurus sites.'
|
||||
required_args: 0
|
||||
execute: cmd_docusaurus_execute
|
||||
}
|
||||
|
||||
// cmd_run.add_flag(Flag{
|
||||
// flag: .bool
|
||||
// required: false
|
||||
// name: 'reset'
|
||||
// abbrev: 'r'
|
||||
// description: 'will reset.'
|
||||
// })
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .string
|
||||
required: false
|
||||
name: 'url'
|
||||
abbrev: 'u'
|
||||
// default: ''
|
||||
description: 'Url where docusaurus source is.'
|
||||
})
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'build'
|
||||
abbrev: 'b'
|
||||
description: 'build and publish.'
|
||||
})
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'builddev'
|
||||
abbrev: 'bd'
|
||||
description: 'build dev version and publish.'
|
||||
})
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'update'
|
||||
abbrev: 'p'
|
||||
description: 'update your environment the template and the repo you are working on (git pull).'
|
||||
})
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'dev'
|
||||
abbrev: 'd'
|
||||
description: 'Run your dev environment on local browser.'
|
||||
})
|
||||
|
||||
cmdroot.add_command(cmd_run)
|
||||
}
|
||||
|
||||
fn cmd_docusaurus_execute(cmd Command) ! {
|
||||
mut update := cmd.flags.get_bool('update') or { false }
|
||||
mut url := cmd.flags.get_string('url') or { '' }
|
||||
|
||||
// mut path := cmd.flags.get_string('path') or { '' }
|
||||
// if path == '' {
|
||||
// path = os.getwd()
|
||||
// }
|
||||
// path = path.replace('~', os.home_dir())
|
||||
|
||||
mut build := cmd.flags.get_bool('build') or { false }
|
||||
mut builddev := cmd.flags.get_bool('builddev') or { false }
|
||||
mut dev := cmd.flags.get_bool('dev') or { false }
|
||||
|
||||
// if build== false && build== false && build== false {
|
||||
// eprintln("specify build, builddev or dev")
|
||||
// exit(1)
|
||||
// }
|
||||
|
||||
mut docs := docusaurus.new(update: update)!
|
||||
|
||||
if build {
|
||||
// Create a new docusaurus site
|
||||
_ := docs.build(
|
||||
url: url
|
||||
update: update
|
||||
)!
|
||||
}
|
||||
|
||||
if builddev {
|
||||
// Create a new docusaurus site
|
||||
_ := docs.build_dev(
|
||||
url: url
|
||||
update: update
|
||||
)!
|
||||
}
|
||||
|
||||
if dev {
|
||||
// Create a new docusaurus site
|
||||
_ := docs.dev(
|
||||
url: url
|
||||
update: update
|
||||
)!
|
||||
}
|
||||
}
|
||||
@@ -104,6 +104,14 @@ pub fn cmd_git(mut cmdroot Command) {
|
||||
abbrev: 's'
|
||||
description: 'be silent.'
|
||||
})
|
||||
|
||||
c.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'load'
|
||||
abbrev: 'l'
|
||||
description: 'reload the data in cache.'
|
||||
})
|
||||
}
|
||||
|
||||
mut allcmdscommit := [&push_command, &pull_command, &commit_command]
|
||||
@@ -217,8 +225,10 @@ pub fn cmd_git(mut cmdroot Command) {
|
||||
}
|
||||
|
||||
fn cmd_git_execute(cmd Command) ! {
|
||||
mut silent := cmd.flags.get_bool('silent') or { false }
|
||||
if silent || cmd.name == 'cd' {
|
||||
mut is_silent := cmd.flags.get_bool('silent') or { false }
|
||||
mut reload := cmd.flags.get_bool('load') or { false }
|
||||
|
||||
if is_silent || cmd.name == 'cd' {
|
||||
console.silent_set()
|
||||
}
|
||||
mut coderoot := cmd.flags.get_string('coderoot') or { '' }
|
||||
@@ -262,6 +272,7 @@ fn cmd_git_execute(cmd Command) ! {
|
||||
mypath := gs.do(
|
||||
filter: filter
|
||||
repo: repo
|
||||
reload: reload
|
||||
account: account
|
||||
provider: provider
|
||||
branch: branch
|
||||
|
||||
@@ -189,6 +189,7 @@ pub fn (mut h HTTPConnection) get(req_ Request) !string {
|
||||
req.debug = true
|
||||
req.method = .get
|
||||
result := h.send(req)!
|
||||
println(result)
|
||||
return result.data
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,20 @@ pub fn (mut h HTTPConnection) post_json_generic[T](req Request) !T {
|
||||
return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") }
|
||||
}
|
||||
|
||||
// TODO
|
||||
pub fn (mut h HTTPConnection) put_json_generic[T](req Request) !T {
|
||||
// data := h.put_json_str(req)!
|
||||
// return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") }
|
||||
return T{}
|
||||
}
|
||||
|
||||
// TODO
|
||||
pub fn (mut h HTTPConnection) delete_json_generic[T](req Request) !T {
|
||||
// data := h.delete_json_str(req)!
|
||||
// return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") }
|
||||
return T{}
|
||||
}
|
||||
|
||||
pub fn (mut h HTTPConnection) get_json_list_generic[T](req Request) ![]T {
|
||||
mut r := []T{}
|
||||
for item in h.get_json_list(req)! {
|
||||
|
||||
60
lib/core/jobs/model/agent.v
Normal file
@@ -0,0 +1,60 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.data.ourtime
|
||||
|
||||
// Agent represents a service provider that can execute jobs
|
||||
pub struct Agent {
|
||||
pub mut:
|
||||
pubkey string // pubkey using ed25519
|
||||
address string // where we can find the agent
|
||||
port int // default 9999
|
||||
description string // optional
|
||||
status AgentStatus
|
||||
services []AgentService // these are the public services
|
||||
signature string // signature as done by private key of $address+$port+$description+$status
|
||||
}
|
||||
|
||||
// AgentStatus represents the current state of an agent
|
||||
pub struct AgentStatus {
|
||||
pub mut:
|
||||
guid string // unique id for the job
|
||||
timestamp_first ourtime.OurTime // when agent came online
|
||||
timestamp_last ourtime.OurTime // last time the agent let us know that it is working
|
||||
status AgentState // current state of the agent
|
||||
}
|
||||
|
||||
// AgentService represents a service provided by an agent
|
||||
pub struct AgentService {
|
||||
pub mut:
|
||||
actor string // name of the actor providing the service
|
||||
actions []AgentServiceAction // available actions for this service
|
||||
description string // optional description
|
||||
status AgentServiceState // current state of the service
|
||||
}
|
||||
|
||||
// AgentServiceAction represents an action that can be performed by a service
|
||||
pub struct AgentServiceAction {
|
||||
pub mut:
|
||||
action string // which action
|
||||
description string // optional description
|
||||
params map[string]string // e.g. name:'name of the vm' ...
|
||||
params_example map[string]string // e.g. name:'myvm'
|
||||
status AgentServiceState // current state of the action
|
||||
public bool // if everyone can use then true, if restricted means only certain people can use
|
||||
}
|
||||
|
||||
// AgentState represents the possible states of an agent
|
||||
pub enum AgentState {
|
||||
ok // agent is functioning normally
|
||||
down // agent is not responding
|
||||
error // agent encountered an error
|
||||
halted // agent has been manually stopped
|
||||
}
|
||||
|
||||
// AgentServiceState represents the possible states of an agent service or action
|
||||
pub enum AgentServiceState {
|
||||
ok // service/action is functioning normally
|
||||
down // service/action is not available
|
||||
error // service/action encountered an error
|
||||
halted // service/action has been manually stopped
|
||||
}
|
||||
91
lib/core/jobs/model/agent_manager.v
Normal file
@@ -0,0 +1,91 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import freeflowuniverse.herolib.data.ourtime
|
||||
import json
|
||||
|
||||
const agents_key = 'herorunner:agents' // Redis key for storing agents
|
||||
|
||||
// AgentManager handles all agent-related operations
|
||||
pub struct AgentManager {
|
||||
mut:
|
||||
redis &redisclient.Redis
|
||||
}
|
||||
|
||||
// new creates a new Agent instance
|
||||
pub fn (mut m AgentManager) new() Agent {
|
||||
return Agent{
|
||||
pubkey: '' // Empty pubkey to be filled by caller
|
||||
port: 9999 // Default port
|
||||
status: AgentStatus{
|
||||
guid: ''
|
||||
timestamp_first: ourtime.now()
|
||||
timestamp_last: ourtime.OurTime{}
|
||||
status: .ok
|
||||
}
|
||||
services: []AgentService{}
|
||||
}
|
||||
}
|
||||
|
||||
// add adds a new agent to Redis
|
||||
pub fn (mut m AgentManager) set(agent Agent) ! {
|
||||
// Store agent in Redis hash where key is agent.pubkey and value is JSON of agent
|
||||
agent_json := json.encode(agent)
|
||||
m.redis.hset(agents_key, agent.pubkey, agent_json)!
|
||||
}
|
||||
|
||||
// get retrieves an agent by its public key
|
||||
pub fn (mut m AgentManager) get(pubkey string) !Agent {
|
||||
agent_json := m.redis.hget(agents_key, pubkey)!
|
||||
return json.decode(Agent, agent_json)
|
||||
}
|
||||
|
||||
// list returns all agents
|
||||
pub fn (mut m AgentManager) list() ![]Agent {
|
||||
mut agents := []Agent{}
|
||||
|
||||
// Get all agents from Redis hash
|
||||
agents_map := m.redis.hgetall(agents_key)!
|
||||
|
||||
// Convert each JSON value to Agent struct
|
||||
for _, agent_json in agents_map {
|
||||
agent := json.decode(Agent, agent_json)!
|
||||
agents << agent
|
||||
}
|
||||
|
||||
return agents
|
||||
}
|
||||
|
||||
// delete removes an agent by its public key
|
||||
pub fn (mut m AgentManager) delete(pubkey string) ! {
|
||||
m.redis.hdel(agents_key, pubkey)!
|
||||
}
|
||||
|
||||
// update_status updates just the status of an agent
|
||||
pub fn (mut m AgentManager) update_status(pubkey string, status AgentState) ! {
|
||||
mut agent := m.get(pubkey)!
|
||||
agent.status.status = status
|
||||
m.set(agent)!
|
||||
}
|
||||
|
||||
// get_by_service returns all agents that provide a specific service
|
||||
pub fn (mut m AgentManager) get_by_service(actor string, action string) ![]Agent {
|
||||
mut matching_agents := []Agent{}
|
||||
|
||||
agents := m.list()!
|
||||
for agent in agents {
|
||||
for service in agent.services {
|
||||
if service.actor != actor {
|
||||
continue
|
||||
}
|
||||
for act in service.actions {
|
||||
if act.action == action {
|
||||
matching_agents << agent
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return matching_agents
|
||||
}
|
||||
74
lib/core/jobs/model/agent_manager_test.v
Normal file
@@ -0,0 +1,74 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import freeflowuniverse.herolib.data.ourtime
|
||||
|
||||
fn test_agents_model() {
|
||||
mut runner := new()!
|
||||
|
||||
// Create a new agent using the manager
|
||||
mut agent := runner.agents.new()
|
||||
agent.pubkey = 'test-agent-1'
|
||||
agent.address = '127.0.0.1'
|
||||
agent.description = 'Test Agent'
|
||||
|
||||
// Create a service action
|
||||
mut action := AgentServiceAction{
|
||||
action: 'start'
|
||||
description: 'Start a VM'
|
||||
params: {
|
||||
'name': 'string'
|
||||
}
|
||||
params_example: {
|
||||
'name': 'myvm'
|
||||
}
|
||||
status: .ok
|
||||
public: true
|
||||
}
|
||||
|
||||
// Create a service
|
||||
mut service := AgentService{
|
||||
actor: 'vm_manager'
|
||||
actions: [action]
|
||||
description: 'VM Management Service'
|
||||
status: .ok
|
||||
}
|
||||
|
||||
agent.services = [service]
|
||||
|
||||
// Add the agent
|
||||
runner.agents.set(agent)!
|
||||
|
||||
// Get the agent and verify fields
|
||||
retrieved_agent := runner.agents.get(agent.pubkey)!
|
||||
assert retrieved_agent.pubkey == agent.pubkey
|
||||
assert retrieved_agent.address == agent.address
|
||||
assert retrieved_agent.description == agent.description
|
||||
assert retrieved_agent.services.len == 1
|
||||
assert retrieved_agent.services[0].actor == 'vm_manager'
|
||||
assert retrieved_agent.status.status == .ok
|
||||
|
||||
// Update agent status
|
||||
runner.agents.update_status(agent.pubkey, .down)!
|
||||
updated_agent := runner.agents.get(agent.pubkey)!
|
||||
assert updated_agent.status.status == .down
|
||||
|
||||
// Test get_by_service
|
||||
agents := runner.agents.get_by_service('vm_manager', 'start')!
|
||||
assert agents.len > 0
|
||||
assert agents[0].pubkey == agent.pubkey
|
||||
|
||||
// List all agents
|
||||
all_agents := runner.agents.list()!
|
||||
assert all_agents.len > 0
|
||||
assert all_agents[0].pubkey == agent.pubkey
|
||||
|
||||
// Delete the agent
|
||||
runner.agents.delete(agent.pubkey)!
|
||||
|
||||
// Verify deletion
|
||||
agents_after := runner.agents.list()!
|
||||
for a in agents_after {
|
||||
assert a.pubkey != agent.pubkey
|
||||
}
|
||||
}
|
||||
37
lib/core/jobs/model/factory.v
Normal file
@@ -0,0 +1,37 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
// HeroRunner is the main factory for managing jobs, agents, services and groups
|
||||
pub struct HeroRunner {
|
||||
mut:
|
||||
redis &redisclient.Redis
|
||||
pub mut:
|
||||
jobs &JobManager
|
||||
agents &AgentManager
|
||||
services &ServiceManager
|
||||
groups &GroupManager
|
||||
}
|
||||
|
||||
// new creates a new HeroRunner instance
|
||||
pub fn new() !&HeroRunner {
|
||||
mut redis := redisclient.core_get()!
|
||||
|
||||
mut hr := &HeroRunner{
|
||||
redis: redis
|
||||
jobs: &JobManager{
|
||||
redis: redis
|
||||
}
|
||||
agents: &AgentManager{
|
||||
redis: redis
|
||||
}
|
||||
services: &ServiceManager{
|
||||
redis: redis
|
||||
}
|
||||
groups: &GroupManager{
|
||||
redis: redis
|
||||
}
|
||||
}
|
||||
|
||||
return hr
|
||||
}
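
The managers hang off the factory, so typical use is to create one `HeroRunner` and go through its fields. A compact sketch mirroring the tests in this commit (Redis must be reachable; the import path is assumed from the file location, and the GUID is illustrative):

```v
import freeflowuniverse.herolib.core.jobs.model

mut runner := model.new()!

mut job := runner.jobs.new()
job.guid = 'example-job-1' // illustrative GUID
job.actor = 'vm_manager'
job.action = 'start'
job.params = {
    'id': '10'
}
runner.jobs.set(job)!
runner.jobs.update_status(job.guid, .running)!
```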
|
||||
10
lib/core/jobs/model/group.v
Normal file
@@ -0,0 +1,10 @@
|
||||
module model
|
||||
|
||||
// Group represents a collection of members (users or other groups)
|
||||
pub struct Group {
|
||||
pub mut:
|
||||
guid string // unique id
|
||||
name string // name of the group
|
||||
description string // optional description
|
||||
members []string // can be other group or member which is defined by pubkey
|
||||
}
|
||||
99
lib/core/jobs/model/group_manager.v
Normal file
@@ -0,0 +1,99 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import json
|
||||
|
||||
const groups_key = 'herorunner:groups' // Redis key for storing groups
|
||||
|
||||
// GroupManager handles all group-related operations
|
||||
pub struct GroupManager {
|
||||
mut:
|
||||
redis &redisclient.Redis
|
||||
}
|
||||
|
||||
// new creates a new Group instance
|
||||
pub fn (mut m GroupManager) new() Group {
|
||||
return Group{
|
||||
guid: '' // Empty GUID to be filled by caller
|
||||
members: []string{}
|
||||
}
|
||||
}
|
||||
|
||||
// add adds a new group to Redis
|
||||
pub fn (mut m GroupManager) set(group Group) ! {
|
||||
// Store group in Redis hash where key is group.guid and value is JSON of group
|
||||
group_json := json.encode(group)
|
||||
m.redis.hset(groups_key, group.guid, group_json)!
|
||||
}
|
||||
|
||||
// get retrieves a group by its GUID
|
||||
pub fn (mut m GroupManager) get(guid string) !Group {
|
||||
group_json := m.redis.hget(groups_key, guid)!
|
||||
return json.decode(Group, group_json)
|
||||
}
|
||||
|
||||
// list returns all groups
|
||||
pub fn (mut m GroupManager) list() ![]Group {
|
||||
mut groups := []Group{}
|
||||
|
||||
// Get all groups from Redis hash
|
||||
groups_map := m.redis.hgetall(groups_key)!
|
||||
|
||||
// Convert each JSON value to Group struct
|
||||
for _, group_json in groups_map {
|
||||
group := json.decode(Group, group_json)!
|
||||
groups << group
|
||||
}
|
||||
|
||||
return groups
|
||||
}
|
||||
|
||||
// delete removes a group by its GUID
|
||||
pub fn (mut m GroupManager) delete(guid string) ! {
|
||||
m.redis.hdel(groups_key, guid)!
|
||||
}
|
||||
|
||||
// add_member adds a member (user pubkey or group GUID) to a group
|
||||
pub fn (mut m GroupManager) add_member(guid string, member string) ! {
|
||||
mut group := m.get(guid)!
|
||||
if member !in group.members {
|
||||
group.members << member
|
||||
m.set(group)!
|
||||
}
|
||||
}
|
||||
|
||||
// remove_member removes a member from a group
|
||||
pub fn (mut m GroupManager) remove_member(guid string, member string) ! {
|
||||
mut group := m.get(guid)!
|
||||
group.members = group.members.filter(it != member)
|
||||
m.set(group)!
|
||||
}
|
||||
|
||||
pub fn (mut m GroupManager) get_user_groups(user_pubkey string) ![]Group {
|
||||
mut user_groups := []Group{}
|
||||
mut checked_groups := map[string]bool{}
|
||||
groups := m.list()!
|
||||
// Check each group
|
||||
for group in groups {
|
||||
check_group_membership(group, user_pubkey, groups, mut checked_groups, mut user_groups)
|
||||
}
|
||||
return user_groups
|
||||
}
|
||||
|
||||
// Recursive function to check group membership
|
||||
fn check_group_membership(group Group, user string, groups []Group, mut checked map[string]bool, mut result []Group) {
|
||||
if group.guid in checked {
|
||||
return
|
||||
}
|
||||
checked[group.guid] = true
|
||||
|
||||
if user in group.members {
|
||||
result << group
|
||||
// Check parent groups
|
||||
for parent_group in groups {
|
||||
if group.guid in parent_group.members {
|
||||
check_group_membership(parent_group, user, groups, mut checked, mut result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
67
lib/core/jobs/model/group_manager_test.v
Normal file
@@ -0,0 +1,67 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
fn test_groups() {
|
||||
mut runner := new()!
|
||||
|
||||
// Create a new group using the manager
|
||||
mut group := runner.groups.new()
|
||||
group.guid = 'admin-group'
|
||||
group.name = 'Administrators'
|
||||
group.description = 'Administrator group with full access'
|
||||
|
||||
// Add the group
|
||||
runner.groups.set(group)!
|
||||
|
||||
// Create a subgroup
|
||||
mut subgroup := runner.groups.new()
|
||||
subgroup.guid = 'vm-admins'
|
||||
subgroup.name = 'VM Administrators'
|
||||
subgroup.description = 'VM management administrators'
|
||||
|
||||
runner.groups.set(subgroup)!
|
||||
|
||||
// Add subgroup to main group
|
||||
runner.groups.add_member(group.guid, subgroup.guid)!
|
||||
|
||||
// Add a user to the subgroup
|
||||
runner.groups.add_member(subgroup.guid, 'user-1-pubkey')!
|
||||
|
||||
// Get the groups and verify fields
|
||||
retrieved_group := runner.groups.get(group.guid)!
|
||||
assert retrieved_group.guid == group.guid
|
||||
assert retrieved_group.name == group.name
|
||||
assert retrieved_group.description == group.description
|
||||
assert retrieved_group.members.len == 1
|
||||
assert retrieved_group.members[0] == subgroup.guid
|
||||
|
||||
retrieved_subgroup := runner.groups.get(subgroup.guid)!
|
||||
assert retrieved_subgroup.members.len == 1
|
||||
assert retrieved_subgroup.members[0] == 'user-1-pubkey'
|
||||
|
||||
// Test recursive group membership
|
||||
user_groups := runner.groups.get_user_groups('user-1-pubkey')!
|
||||
assert user_groups.len == 1
|
||||
assert user_groups[0].guid == subgroup.guid
|
||||
|
||||
// Remove member from subgroup
|
||||
runner.groups.remove_member(subgroup.guid, 'user-1-pubkey')!
|
||||
updated_subgroup := runner.groups.get(subgroup.guid)!
|
||||
assert updated_subgroup.members.len == 0
|
||||
|
||||
// List all groups
|
||||
groups := runner.groups.list()!
|
||||
assert groups.len == 2
|
||||
|
||||
// Delete the groups
|
||||
runner.groups.delete(subgroup.guid)!
|
||||
runner.groups.delete(group.guid)!
|
||||
|
||||
// Verify deletion
|
||||
groups_after := runner.groups.list()!
|
||||
for g in groups_after {
|
||||
assert g.guid != group.guid
|
||||
assert g.guid != subgroup.guid
|
||||
}
|
||||
}
|
||||
52
lib/core/jobs/model/job.v
Normal file
@@ -0,0 +1,52 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.data.ourtime
|
||||
|
||||
// Job represents a task to be executed by an agent
|
||||
pub struct Job {
|
||||
pub mut:
|
||||
guid string // unique id for the job
|
||||
agents []string // the pub key of the agent(s) which will execute the command, only 1 will execute
|
||||
source string // pubkey from the agent who asked for the job
|
||||
circle string = 'default' // our digital life is organized in circles
|
||||
context string = 'default' // is the high level context in which actors will execute the work inside a circle
|
||||
actor string // e.g. vm_manager
|
||||
action string // e.g. start
|
||||
params map[string]string // e.g. id:10
|
||||
timeout_schedule u16 = 60 // timeout before its picked up
|
||||
timeout u16 = 3600 // timeout in sec
|
||||
log bool = true
|
||||
ignore_error bool // means if error will just exit and not raise, there will be no error reporting
|
||||
ignore_error_codes []int // of we want to ignore certain error codes
|
||||
debug bool // if debug will get more context
|
||||
retry int // by default there is no retry
|
||||
status JobStatus
|
||||
dependencies []JobDependency // will not execute until other jobs are done
|
||||
}
|
||||
|
||||
// JobStatus represents the current state of a job
|
||||
pub struct JobStatus {
|
||||
pub mut:
|
||||
guid string // unique id for the job
|
||||
created ourtime.OurTime // when we created the job
|
||||
start ourtime.OurTime // when the job needs to start
|
||||
end ourtime.OurTime // when the job ended, can be in error
|
||||
status Status // current status of the job
|
||||
}
|
||||
|
||||
// JobDependency represents a dependency on another job
|
||||
pub struct JobDependency {
|
||||
pub mut:
|
||||
guid string // unique id for the job
|
||||
agents []string // the pub key of the agent(s) which can execute the command
|
||||
}
|
||||
|
||||
// Status represents the possible states of a job
|
||||
pub enum Status {
|
||||
created // initial state
|
||||
scheduled // job has been scheduled
|
||||
planned // arrived where actor will execute the job
|
||||
running // job is currently running
|
||||
error // job encountered an error
|
||||
ok // job completed successfully
|
||||
}
|
||||
68
lib/core/jobs/model/job_manager.v
Normal file
@@ -0,0 +1,68 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import freeflowuniverse.herolib.data.ourtime
|
||||
import json
|
||||
|
||||
const jobs_key = 'herorunner:jobs' // Redis key for storing jobs
|
||||
|
||||
// JobManager handles all job-related operations
|
||||
pub struct JobManager {
|
||||
mut:
|
||||
redis &redisclient.Redis
|
||||
}
|
||||
|
||||
// new creates a new Job instance
|
||||
pub fn (mut m JobManager) new() Job {
|
||||
return Job{
|
||||
guid: '' // Empty GUID to be filled by caller
|
||||
status: JobStatus{
|
||||
guid: ''
|
||||
created: ourtime.now()
|
||||
start: ourtime.OurTime{}
|
||||
end: ourtime.OurTime{}
|
||||
status: .created
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add adds a new job to Redis
|
||||
pub fn (mut m JobManager) set(job Job) ! {
|
||||
// Store job in Redis hash where key is job.guid and value is JSON of job
|
||||
job_json := json.encode(job)
|
||||
m.redis.hset(jobs_key, job.guid, job_json)!
|
||||
}
|
||||
|
||||
// get retrieves a job by its GUID
|
||||
pub fn (mut m JobManager) get(guid string) !Job {
|
||||
job_json := m.redis.hget(jobs_key, guid)!
|
||||
return json.decode(Job, job_json)
|
||||
}
|
||||
|
||||
// list returns all jobs
|
||||
pub fn (mut m JobManager) list() ![]Job {
|
||||
mut jobs := []Job{}
|
||||
|
||||
// Get all jobs from Redis hash
|
||||
jobs_map := m.redis.hgetall(jobs_key)!
|
||||
|
||||
// Convert each JSON value to Job struct
|
||||
for _, job_json in jobs_map {
|
||||
job := json.decode(Job, job_json)!
|
||||
jobs << job
|
||||
}
|
||||
|
||||
return jobs
|
||||
}
|
||||
|
||||
// delete removes a job by its GUID
|
||||
pub fn (mut m JobManager) delete(guid string) ! {
|
||||
m.redis.hdel(jobs_key, guid)!
|
||||
}
|
||||
|
||||
// update_status updates just the status of a job
|
||||
pub fn (mut m JobManager) update_status(guid string, status Status) ! {
|
||||
mut job := m.get(guid)!
|
||||
job.status.status = status
|
||||
m.set(job)!
|
||||
}
|
||||
47
lib/core/jobs/model/job_manager_test.v
Normal file
@@ -0,0 +1,47 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import freeflowuniverse.herolib.data.ourtime
|
||||
|
||||
fn test_jobs() {
|
||||
mut runner := new()!
|
||||
|
||||
// Create a new job using the manager
|
||||
mut job := runner.jobs.new()
|
||||
job.guid = 'test-job-1'
|
||||
job.actor = 'vm_manager'
|
||||
job.action = 'start'
|
||||
job.params = {
|
||||
'id': '10'
|
||||
}
|
||||
|
||||
// Add the job
|
||||
runner.jobs.set(job)!
|
||||
|
||||
// Get the job and verify fields
|
||||
retrieved_job := runner.jobs.get(job.guid)!
|
||||
assert retrieved_job.guid == job.guid
|
||||
assert retrieved_job.actor == job.actor
|
||||
assert retrieved_job.action == job.action
|
||||
assert retrieved_job.params['id'] == job.params['id']
|
||||
assert retrieved_job.status.status == .created
|
||||
|
||||
// Update job status
|
||||
runner.jobs.update_status(job.guid, .running)!
|
||||
updated_job := runner.jobs.get(job.guid)!
|
||||
assert updated_job.status.status == .running
|
||||
|
||||
// List all jobs
|
||||
jobs := runner.jobs.list()!
|
||||
assert jobs.len > 0
|
||||
assert jobs[0].guid == job.guid
|
||||
|
||||
// Delete the job
|
||||
runner.jobs.delete(job.guid)!
|
||||
|
||||
// Verify deletion
|
||||
jobs_after := runner.jobs.list()!
|
||||
for j in jobs_after {
|
||||
assert j.guid != job.guid
|
||||
}
|
||||
}
|
||||
44
lib/core/jobs/model/service.v
Normal file
@@ -0,0 +1,44 @@
|
||||
module model
|
||||
|
||||
// Service represents a service that can be provided by agents
|
||||
pub struct Service {
|
||||
pub mut:
|
||||
actor string // name of the actor providing the service
|
||||
actions []ServiceAction // available actions for this service
|
||||
description string // optional description
|
||||
status ServiceState // current state of the service
|
||||
acl ?ACL // access control list for the service
|
||||
}
|
||||
|
||||
// ServiceAction represents an action that can be performed by a service
|
||||
pub struct ServiceAction {
|
||||
pub mut:
|
||||
action string // which action
|
||||
description string // optional description
|
||||
params map[string]string // e.g. name:'name of the vm' ...
|
||||
params_example map[string]string // e.g. name:'myvm'
|
||||
acl ?ACL // if not used then everyone can use
|
||||
}
|
||||
|
||||
// ACL represents an access control list
|
||||
pub struct ACL {
|
||||
pub mut:
|
||||
name string
|
||||
ace []ACE
|
||||
}
|
||||
|
||||
// ACE represents an access control entry
|
||||
pub struct ACE {
|
||||
pub mut:
|
||||
    groups []string // guids of the groups which have access
    users  []string // in case groups are not used, the users are listed here directly
|
||||
right string // e.g. read, write, admin, block
|
||||
}
|
||||
|
||||
// ServiceState represents the possible states of a service
|
||||
pub enum ServiceState {
|
||||
ok // service is functioning normally
|
||||
down // service is not available
|
||||
error // service encountered an error
|
||||
halted // service has been manually stopped
|
||||
}
|
||||
122
lib/core/jobs/model/service_manager.v
Normal file
@@ -0,0 +1,122 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import json
|
||||
|
||||
const services_key = 'herorunner:services' // Redis key for storing services
|
||||
|
||||
// ServiceManager handles all service-related operations
|
||||
pub struct ServiceManager {
|
||||
mut:
|
||||
redis &redisclient.Redis
|
||||
}
|
||||
|
||||
// new creates a new Service instance
|
||||
pub fn (mut m ServiceManager) new() Service {
|
||||
return Service{
|
||||
actor: '' // Empty actor name to be filled by caller
|
||||
actions: []ServiceAction{}
|
||||
status: .ok
|
||||
}
|
||||
}
|
||||
|
||||
// add adds a new service to Redis
|
||||
pub fn (mut m ServiceManager) set(service Service) ! {
|
||||
// Store service in Redis hash where key is service.actor and value is JSON of service
|
||||
service_json := json.encode(service)
|
||||
m.redis.hset(services_key, service.actor, service_json)!
|
||||
}
|
||||
|
||||
// get retrieves a service by its actor name
|
||||
pub fn (mut m ServiceManager) get(actor string) !Service {
|
||||
service_json := m.redis.hget(services_key, actor)!
|
||||
return json.decode(Service, service_json)
|
||||
}
|
||||
|
||||
// list returns all services
|
||||
pub fn (mut m ServiceManager) list() ![]Service {
|
||||
mut services := []Service{}
|
||||
|
||||
// Get all services from Redis hash
|
||||
services_map := m.redis.hgetall(services_key)!
|
||||
|
||||
// Convert each JSON value to Service struct
|
||||
for _, service_json in services_map {
|
||||
service := json.decode(Service, service_json)!
|
||||
services << service
|
||||
}
|
||||
|
||||
return services
|
||||
}
|
||||
|
||||
// delete removes a service by its actor name
|
||||
pub fn (mut m ServiceManager) delete(actor string) ! {
|
||||
m.redis.hdel(services_key, actor)!
|
||||
}
|
||||
|
||||
// update_status updates just the status of a service
|
||||
pub fn (mut m ServiceManager) update_status(actor string, status ServiceState) ! {
|
||||
mut service := m.get(actor)!
|
||||
service.status = status
|
||||
m.set(service)!
|
||||
}
|
||||
|
||||
// get_by_action returns all services that provide a specific action
|
||||
pub fn (mut m ServiceManager) get_by_action(action string) ![]Service {
|
||||
mut matching_services := []Service{}
|
||||
|
||||
services := m.list()!
|
||||
for service in services {
|
||||
for act in service.actions {
|
||||
if act.action == action {
|
||||
matching_services << service
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return matching_services
|
||||
}
|
||||
|
||||
// check_access verifies if a user has access to a service action
|
||||
pub fn (mut m ServiceManager) check_access(actor string, action string, user_pubkey string, groups []string) !bool {
|
||||
service := m.get(actor)!
|
||||
|
||||
// Find the specific action
|
||||
mut service_action := ServiceAction{}
|
||||
mut found := false
|
||||
for act in service.actions {
|
||||
if act.action == action {
|
||||
service_action = act
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return error('Action ${action} not found in service ${actor}')
|
||||
}
|
||||
|
||||
// If no ACL is defined, access is granted
|
||||
if service_action.acl == none {
|
||||
return true
|
||||
}
|
||||
|
||||
acl := service_action.acl or { return true }
|
||||
|
||||
// Check each ACE in the ACL
|
||||
for ace in acl.ace {
|
||||
// Check if user is directly listed
|
||||
if user_pubkey in ace.users {
|
||||
return ace.right != 'block'
|
||||
}
|
||||
|
||||
// Check if any of user's groups are listed
|
||||
for group in groups {
|
||||
if group in ace.groups {
|
||||
return ace.right != 'block'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
87
lib/core/jobs/model/service_manager_test.v
Normal file
87
lib/core/jobs/model/service_manager_test.v
Normal file
@@ -0,0 +1,87 @@
|
||||
module model
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
fn test_services() {
|
||||
mut runner := new()!
|
||||
|
||||
// Create a new service using the manager
|
||||
mut service := runner.services.new()
|
||||
service.actor = 'vm_manager'
|
||||
service.description = 'VM Management Service'
|
||||
|
||||
// Create an ACL
|
||||
mut ace := ACE{
|
||||
groups: ['admin-group']
|
||||
users: ['user-1-pubkey']
|
||||
right: 'write'
|
||||
}
|
||||
|
||||
mut acl := ACL{
|
||||
name: 'vm-acl'
|
||||
ace: [ace]
|
||||
}
|
||||
|
||||
// Create a service action
|
||||
mut action := ServiceAction{
|
||||
action: 'start'
|
||||
description: 'Start a VM'
|
||||
params: {
|
||||
'name': 'string'
|
||||
}
|
||||
params_example: {
|
||||
'name': 'myvm'
|
||||
}
|
||||
acl: acl
|
||||
}
|
||||
|
||||
service.actions = [action]
|
||||
|
||||
// Add the service
|
||||
runner.services.set(service)!
|
||||
|
||||
// Get the service and verify fields
|
||||
retrieved_service := runner.services.get(service.actor)!
|
||||
assert retrieved_service.actor == service.actor
|
||||
assert retrieved_service.description == service.description
|
||||
assert retrieved_service.actions.len == 1
|
||||
assert retrieved_service.actions[0].action == 'start'
|
||||
assert retrieved_service.status == .ok
|
||||
|
||||
// Update service status
|
||||
runner.services.update_status(service.actor, .down)!
|
||||
updated_service := runner.services.get(service.actor)!
|
||||
assert updated_service.status == .down
|
||||
|
||||
// Test get_by_action
|
||||
services := runner.services.get_by_action('start')!
|
||||
assert services.len > 0
|
||||
assert services[0].actor == service.actor
|
||||
|
||||
// Test access control
|
||||
has_access := runner.services.check_access(service.actor, 'start', 'user-1-pubkey',
|
||||
[])!
|
||||
assert has_access == true
|
||||
|
||||
has_group_access := runner.services.check_access(service.actor, 'start', 'user-2-pubkey',
|
||||
['admin-group'])!
|
||||
assert has_group_access == true
|
||||
|
||||
no_access := runner.services.check_access(service.actor, 'start', 'user-3-pubkey',
|
||||
[])!
|
||||
assert no_access == false
|
||||
|
||||
// List all services
|
||||
all_services := runner.services.list()!
|
||||
assert all_services.len > 0
|
||||
assert all_services[0].actor == service.actor
|
||||
|
||||
// Delete the service
|
||||
runner.services.delete(service.actor)!
|
||||
|
||||
// Verify deletion
|
||||
services_after := runner.services.list()!
|
||||
for s in services_after {
|
||||
assert s.actor != service.actor
|
||||
}
|
||||
}
|
||||
186
lib/core/jobs/model/specs.md
Normal file
186
lib/core/jobs/model/specs.md
Normal file
@@ -0,0 +1,186 @@
create a job manager in

lib/core/jobs


## some definitions

- agent: a self-contained set of processes which can execute actions, or ask others to execute actions
- action: what needs to be executed
- circle: each action happens in a circle
- context: a context inside a circle is optional
- job: what gets executed by an agent; it is one action and can depend on other actions
- herorunner: the process which uses redis to manage all open jobs, checks for timeouts, and does the forwarding if needed (e.g. when a remote agent needs to schedule, ...)

## jobs

Jobs are executed by processes which can be written in different languages; they are identified by the pub key of the agent who executes them.
As part of heroscript we know what to execute on which actor inside the agent, defined by the method and its arguments.

```v

//the description of what needs to be executed
pub struct Job {
pub mut:
	guid               string // unique id for the job
	agents             []string // pub key of the agent(s) which can execute the command; only 1 will execute, the herorunner tries the different agents if needed until it has success
	source             string // pubkey of the agent who asked for the job
	circle             string = 'default' // our digital life is organized in circles
	context            string = 'default' // the high level context in which actors will execute the work inside a circle
	actor              string // e.g. vm_manager
	action             string // e.g. start
	params             map[string]string // e.g. id:10
	timeout_schedule   u16 = 60 // timeout in sec before the job is picked up
	timeout            u16 = 3600 // timeout in sec for the job to complete
	log                bool = true
	ignore_error       bool // if error, just exit and do not raise; there will be no error reporting
	ignore_error_codes []int // if we want to ignore certain error codes
	debug              bool // if debug we get more context
	retry              int // nr of retries allowed on error, default 0 (no retry)
	status             JobStatus
	dependencies       []JobDependency // will not execute until the other jobs are done
}

pub struct JobStatus {
pub mut:
	guid    string // unique id for the job
	created u32 // epoch when we created the job
	start   u32 // epoch when the job needs to start
	end     u32 // epoch when the job ended, can be in error
	status  Status // ENUM: created, scheduled, planned (arrived where the actor will execute the job), running, error, ok
}

pub struct JobDependency {
pub mut:
	guid   string // unique id for the job
	agents []string // the pub key of the agent(s) which can execute the command
}

```

The Job object is stored in redis in the hash `herorunner:jobs`, where the key is the job guid and the value is the JSON of the Job.
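
To make the storage model concrete, here is a minimal sketch (not part of the spec) of how a Job could be written and read back, assuming a connected `redisclient.Redis` instance called `redis`, following the same hset/hget pattern the managers in this commit use:

```v
import freeflowuniverse.herolib.core.redisclient
import json

// hedged sketch: store a Job under its guid and load it back again
fn job_roundtrip(mut redis redisclient.Redis, job Job) !Job {
	redis.hset('herorunner:jobs', job.guid, json.encode(job))!
	job_json := redis.hget('herorunner:jobs', job.guid)!
	return json.decode(Job, job_json)
}
```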

## Agent Registration Services

Each agent (the one which hosts the different actors that execute the methods with params) registers itself with all participants.

The structs below are available to everyone and are public.

```v

pub struct Agent {
pub mut:
	pubkey      string // pubkey using ed25519
	address     string // where we can find the agent
	port        int // default 9999
	description string // optional
	status      AgentStatus
	services    []AgentService // these are the public services
	signature   string // signature made with the private key over $address+$port+$description+$status (this allows everyone to verify that the data is ok)
}

pub struct AgentStatus {
pub mut:
	guid            string // unique id for the job
	timestamp_first u32 // when the agent came online
	timestamp_last  u32 // last time the agent let us know that it is working
	status          AgentState // ENUM: ok, down, error, halted
}

pub struct AgentService {
pub mut:
	actor       string
	actions     []AgentServiceAction
	description string
	status      AgentServiceState // ENUM: ok, down, error, halted
}

pub struct AgentServiceAction {
pub mut:
	action         string // which action
	description    string // optional description
	params         map[string]string // e.g. name:'name of the vm' ...
	params_example map[string]string // e.g. name:'myvm'
	status         AgentServiceState // ENUM: ok, down, error, halted
	public         bool // true if everyone can use it; if restricted, only certain people can use it
}

```

The Agent object is stored in redis in the hash `herorunner:agents`, where the key is the agent pubkey and the value is the JSON of the Agent.
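
As an illustration only (assuming a connected `redisclient.Redis` instance called `redis`), listing all registered agents would follow the same hgetall/decode pattern the managers in this commit already use:

```v
import freeflowuniverse.herolib.core.redisclient
import json

// hedged sketch: decode every registered Agent out of the herorunner:agents hash
fn agents_all(mut redis redisclient.Redis) ![]Agent {
	mut agents := []Agent{}
	for _, agent_json in redis.hgetall('herorunner:agents')! {
		agents << json.decode(Agent, agent_json)!
	}
	return agents
}
```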

### Services Info

The agent and its actors register their capabilities with the herorunner.

We have a mechanism to be specific about who can execute what; this is a sort of ACL system, and for now it is quite rough.

```v

pub struct Group {
pub mut:
	guid        string // unique id
	name        string
	description string
	members     []string // can be another group (guid) or a member defined by pubkey
}

```

This info is stored in redis under `herorunner:groups`.

```v

pub struct Service {
pub mut:
	actor       string
	actions     []ServiceAction
	description string
	status      ServiceState // ENUM: ok, down, error, halted
	acl         ?ACL
}

pub struct ServiceAction {
pub mut:
	action         string // which action
	description    string // optional description
	params         map[string]string // e.g. name:'name of the vm' ...
	params_example map[string]string // e.g. name:'myvm'
	acl            ?ACL // if not used then everyone can use
}

pub struct ACL {
pub mut:
	name string
	ace  []ACE
}

pub struct ACE {
pub mut:
	groups []string // guid's of the groups who have access
	users  []string // used when groups are not used; individual user pubkeys
	right  string // e.g. read, write, admin, block
}

```

The info the herorunner needs to function is stored in redis under `herorunner:services`.
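
A short usage sketch of how this ACL is meant to be consulted, reusing the `ServiceManager.check_access` method introduced in this commit (`user_pubkey` and `user_groups` below are placeholder values, not part of the spec):

```v
import freeflowuniverse.herolib.core.jobs.model

mut runner := model.new()!
user_pubkey := 'user-1-pubkey' // placeholder
user_groups := ['admin-group'] // placeholder
// refuse the action if the caller has no matching ACE (or hits a 'block' right)
if !runner.services.check_access('vm_manager', 'start', user_pubkey, user_groups)! {
	eprintln('user is not allowed to start VMs')
}
```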
2
lib/core/jobs/openrpc/.gitignore
vendored
Normal file
2
lib/core/jobs/openrpc/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
server
|
||||
job_client
|
||||
179
lib/core/jobs/openrpc/examples/job_client.vsh
Executable file
179
lib/core/jobs/openrpc/examples/job_client.vsh
Executable file
@@ -0,0 +1,179 @@
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.core.jobs.model
|
||||
import net.websocket
|
||||
import json
|
||||
import rand
|
||||
import time
|
||||
import term
|
||||
|
||||
const ws_url = 'ws://localhost:8080'
|
||||
|
||||
// Helper function to send request and receive response
|
||||
fn send_request(mut ws websocket.Client, request OpenRPCRequest) !OpenRPCResponse {
|
||||
// Send request
|
||||
request_json := json.encode(request)
|
||||
println(request_json)
|
||||
ws.write_string(request_json) or {
|
||||
eprintln(term.red('Failed to send request: ${err}'))
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for response
|
||||
mut msg := ws.read_next_message() or {
|
||||
eprintln(term.red('Failed to read response: ${err}'))
|
||||
return err
|
||||
}
|
||||
|
||||
if msg.opcode != websocket.OPCode.text_frame {
|
||||
return error('Invalid response type: expected text frame')
|
||||
}
|
||||
|
||||
response_text := msg.payload.bytestr()
|
||||
|
||||
// Parse response
|
||||
response := json.decode(OpenRPCResponse, response_text) or {
|
||||
eprintln(term.red('Failed to decode response: ${err}'))
|
||||
return err
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
// OpenRPC request/response structures (copied from handler.v)
|
||||
struct OpenRPCRequest {
|
||||
jsonrpc string @[required]
|
||||
method string @[required]
|
||||
params []string
|
||||
id int @[required]
|
||||
}
|
||||
|
||||
struct OpenRPCResponse {
|
||||
jsonrpc string @[required]
|
||||
result string
|
||||
error string
|
||||
id int @[required]
|
||||
}
|
||||
|
||||
// Initialize and configure WebSocket client
|
||||
fn init_client() !&websocket.Client {
|
||||
mut ws := websocket.new_client(ws_url)!
|
||||
|
||||
ws.on_open(fn (mut ws websocket.Client) ! {
|
||||
println(term.green('Connected to WebSocket server and ready...'))
|
||||
})
|
||||
|
||||
ws.on_error(fn (mut ws websocket.Client, err string) ! {
|
||||
eprintln(term.red('WebSocket error: ${err}'))
|
||||
})
|
||||
|
||||
ws.on_close(fn (mut ws websocket.Client, code int, reason string) ! {
|
||||
println(term.yellow('WebSocket connection closed: ${reason}'))
|
||||
})
|
||||
|
||||
ws.on_message(fn (mut ws websocket.Client, msg &websocket.Message) ! {
|
||||
if msg.payload.len > 0 {
|
||||
println(term.blue('Received message: ${msg.payload.bytestr()}'))
|
||||
}
|
||||
})
|
||||
|
||||
ws.connect() or {
|
||||
eprintln(term.red('Failed to connect: ${err}'))
|
||||
return err
|
||||
}
|
||||
|
||||
spawn ws.listen()
|
||||
return ws
|
||||
}
|
||||
|
||||
// Main client logic
|
||||
mut ws := init_client()!
|
||||
defer {
|
||||
ws.close(1000, 'normal') or { eprintln(term.red('Error closing connection: ${err}')) }
|
||||
}
|
||||
println(term.green('Connected to ${ws_url}'))
|
||||
|
||||
// Create a new job
|
||||
println(term.blue('\nCreating new job...'))
|
||||
new_job := send_request(mut ws, OpenRPCRequest{
|
||||
jsonrpc: '2.0'
|
||||
method: 'job.new'
|
||||
params: []string{}
|
||||
id: rand.i32_in_range(1, 10000000)!
|
||||
}) or {
|
||||
eprintln(term.red('Failed to create new job: ${err}'))
|
||||
exit(1)
|
||||
}
|
||||
println(term.green('Created new job:'))
|
||||
println(json.encode_pretty(new_job))
|
||||
|
||||
// Parse job from response
|
||||
job := json.decode(model.Job, new_job.result) or {
|
||||
eprintln(term.red('Failed to parse job: ${err}'))
|
||||
exit(1)
|
||||
}
|
||||
|
||||
// Set job properties
|
||||
println(term.blue('\nSetting job properties...'))
|
||||
mut updated_job := job
|
||||
updated_job.guid = 'test-job-1'
|
||||
updated_job.actor = 'vm_manager'
|
||||
updated_job.action = 'start'
|
||||
updated_job.params = {
|
||||
'name': 'test-vm'
|
||||
'memory': '2048'
|
||||
}
|
||||
|
||||
// Save job
|
||||
set_response := send_request(mut ws, OpenRPCRequest{
|
||||
jsonrpc: '2.0'
|
||||
method: 'job.set'
|
||||
params: [json.encode(updated_job)]
|
||||
id: rand.int()
|
||||
}) or {
|
||||
eprintln(term.red('Failed to save job: ${err}'))
|
||||
exit(1)
|
||||
}
|
||||
println(term.green('Saved job:'))
|
||||
println(json.encode_pretty(set_response))
|
||||
|
||||
// Update job status to running
|
||||
println(term.blue('\nUpdating job status...'))
|
||||
update_response := send_request(mut ws, OpenRPCRequest{
|
||||
jsonrpc: '2.0'
|
||||
method: 'job.update_status'
|
||||
params: ['test-job-1', 'running']
|
||||
id: rand.int()
|
||||
}) or {
|
||||
eprintln(term.red('Failed to update job status: ${err}'))
|
||||
exit(1)
|
||||
}
|
||||
println(term.green('Updated job status:'))
|
||||
println(json.encode_pretty(update_response))
|
||||
|
||||
// Get job to verify changes
|
||||
println(term.blue('\nRetrieving job...'))
|
||||
get_response := send_request(mut ws, OpenRPCRequest{
|
||||
jsonrpc: '2.0'
|
||||
method: 'job.get'
|
||||
params: ['test-job-1']
|
||||
id: rand.int()
|
||||
}) or {
|
||||
eprintln(term.red('Failed to retrieve job: ${err}'))
|
||||
exit(1)
|
||||
}
|
||||
println(term.green('Retrieved job:'))
|
||||
println(json.encode_pretty(get_response))
|
||||
|
||||
// List all jobs
|
||||
println(term.blue('\nListing all jobs...'))
|
||||
list_response := send_request(mut ws, OpenRPCRequest{
|
||||
jsonrpc: '2.0'
|
||||
method: 'job.list'
|
||||
params: []string{}
|
||||
id: rand.int()
|
||||
}) or {
|
||||
eprintln(term.red('Failed to list jobs: ${err}'))
|
||||
exit(1)
|
||||
}
|
||||
println(term.green('All jobs:'))
|
||||
println(json.encode_pretty(list_response))
|
||||
40
lib/core/jobs/openrpc/examples/server.vsh
Executable file
40
lib/core/jobs/openrpc/examples/server.vsh
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.core.jobs.openrpc
|
||||
import freeflowuniverse.herolib.core.jobs.model
|
||||
import time
|
||||
import sync
|
||||
import os
|
||||
|
||||
fn start_rpc_server(mut wg sync.WaitGroup) ! {
|
||||
defer { wg.done() }
|
||||
|
||||
// Create OpenRPC server
|
||||
openrpc.server_start()!
|
||||
}
|
||||
|
||||
fn start_ws_server(mut wg sync.WaitGroup) ! {
|
||||
defer { wg.done() }
|
||||
|
||||
// Get port from environment variable or use default
|
||||
port := if ws_port := os.getenv_opt('WS_PORT') {
|
||||
ws_port.int()
|
||||
} else {
|
||||
8080
|
||||
}
|
||||
|
||||
// Create and start WebSocket server
|
||||
mut ws_server := openrpc.new_ws_server(port)!
|
||||
ws_server.start()!
|
||||
}
|
||||
|
||||
// Create wait group for servers
|
||||
mut wg := sync.new_waitgroup()
|
||||
wg.add(2)
|
||||
|
||||
// Start servers in separate threads
|
||||
spawn start_rpc_server(mut wg)
|
||||
spawn start_ws_server(mut wg)
|
||||
|
||||
// Wait for servers to finish (they run forever)
|
||||
wg.wait()
|
||||
27
lib/core/jobs/openrpc/factory.v
Normal file
27
lib/core/jobs/openrpc/factory.v
Normal file
@@ -0,0 +1,27 @@
|
||||
module openrpc
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import freeflowuniverse.herolib.core.jobs.model
|
||||
|
||||
// Generic OpenRPC server that handles all managers
|
||||
pub struct OpenRPCServer {
|
||||
mut:
|
||||
redis &redisclient.Redis
|
||||
queue &redisclient.RedisQueue
|
||||
runner &model.HeroRunner
|
||||
}
|
||||
|
||||
// Create new OpenRPC server with Redis connection
|
||||
pub fn server_start() ! {
|
||||
redis := redisclient.core_get()!
|
||||
mut runner := model.new()!
|
||||
mut s := &OpenRPCServer{
|
||||
redis: redis
|
||||
queue: &redisclient.RedisQueue{
|
||||
key: rpc_queue
|
||||
redis: redis
|
||||
}
|
||||
runner: runner
|
||||
}
|
||||
s.start()!
|
||||
}
|
||||
68
lib/core/jobs/openrpc/handler.v
Normal file
68
lib/core/jobs/openrpc/handler.v
Normal file
@@ -0,0 +1,68 @@
|
||||
module openrpc
|
||||
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import json
|
||||
|
||||
// Start the server and listen for requests
|
||||
pub fn (mut s OpenRPCServer) start() ! {
|
||||
println('Starting OpenRPC server.')
|
||||
|
||||
for {
|
||||
// Get message from queue
|
||||
msg := s.queue.get(5000)!
|
||||
|
||||
if msg.len == 0 {
|
||||
println("queue '${rpc_queue}' empty")
|
||||
continue
|
||||
}
|
||||
|
||||
println("process '${msg}'")
|
||||
|
||||
// Parse OpenRPC request
|
||||
request := json.decode(OpenRPCRequest, msg) or {
|
||||
println('Error decoding request: ${err}')
|
||||
continue
|
||||
}
|
||||
|
||||
// Process request with appropriate handler
|
||||
response := s.handle_request(request)!
|
||||
|
||||
// Send response back to Redis using response queue
|
||||
response_json := json.encode(response)
|
||||
key := '${rpc_queue}:${request.id}'
|
||||
println('response: \n${response}\n put on return queue ${key} ')
|
||||
mut response_queue := &redisclient.RedisQueue{
|
||||
key: key
|
||||
redis: s.redis
|
||||
}
|
||||
response_queue.add(response_json)!
|
||||
}
|
||||
}
|
||||
|
||||
// Get the handler for a specific method based on its prefix
|
||||
fn (mut s OpenRPCServer) handle_request(request OpenRPCRequest) !OpenRPCResponse {
|
||||
method := request.method.to_lower()
|
||||
println("process: method: '${method}'")
|
||||
if method.starts_with('job.') {
|
||||
return s.handle_request_job(request) or {
|
||||
return rpc_response_error(request.id, 'error in request job:\n${err}')
|
||||
}
|
||||
}
|
||||
if method.starts_with('agent.') {
|
||||
return s.handle_request_agent(request) or {
|
||||
return rpc_response_error(request.id, 'error in request agent:\n${err}')
|
||||
}
|
||||
}
|
||||
if method.starts_with('group.') {
|
||||
return s.handle_request_group(request) or {
|
||||
return rpc_response_error(request.id, 'error in request group:\n${err}')
|
||||
}
|
||||
}
|
||||
if method.starts_with('service.') {
|
||||
return s.handle_request_service(request) or {
|
||||
return rpc_response_error(request.id, 'error in request service:\n${err}')
|
||||
}
|
||||
}
|
||||
|
||||
return rpc_response_error(request.id, 'Could not find handler for ${method}')
|
||||
}
|
||||
71
lib/core/jobs/openrpc/handler_agent_manager.v
Normal file
71
lib/core/jobs/openrpc/handler_agent_manager.v
Normal file
@@ -0,0 +1,71 @@
|
||||
module openrpc
|
||||
|
||||
import freeflowuniverse.herolib.core.jobs.model
|
||||
import json
|
||||
|
||||
pub fn (mut h OpenRPCServer) handle_request_agent(request OpenRPCRequest) !OpenRPCResponse {
|
||||
mut response := rpc_response_new(request.id)
|
||||
|
||||
method := request.method.all_after_first('agent.')
|
||||
|
||||
println("request agent:'${method}'")
|
||||
|
||||
match method {
|
||||
'new' {
|
||||
agent := h.runner.agents.new()
|
||||
response.result = json.encode(agent)
|
||||
}
|
||||
'set' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing agent parameter')
|
||||
}
|
||||
agent := json.decode(model.Agent, request.params[0])!
|
||||
h.runner.agents.set(agent)!
|
||||
response.result = 'true'
|
||||
}
|
||||
'get' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing pubkey parameter')
|
||||
}
|
||||
agent := h.runner.agents.get(request.params[0])!
|
||||
response.result = json.encode(agent)
|
||||
}
|
||||
'list' {
|
||||
agents := h.runner.agents.list()!
|
||||
response.result = json.encode(agents)
|
||||
}
|
||||
'delete' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing pubkey parameter')
|
||||
}
|
||||
h.runner.agents.delete(request.params[0])!
|
||||
response.result = 'true'
|
||||
}
|
||||
'update_status' {
|
||||
if request.params.len < 2 {
|
||||
return error('Missing pubkey or status parameters')
|
||||
}
|
||||
status := match request.params[1] {
|
||||
'ok' { model.AgentState.ok }
|
||||
'down' { model.AgentState.down }
|
||||
'error' { model.AgentState.error }
|
||||
'halted' { model.AgentState.halted }
|
||||
else { return error('Invalid status: ${request.params[1]}') }
|
||||
}
|
||||
h.runner.agents.update_status(request.params[0], status)!
|
||||
response.result = 'true'
|
||||
}
|
||||
'get_by_service' {
|
||||
if request.params.len < 2 {
|
||||
return error('Missing actor or action parameters')
|
||||
}
|
||||
agents := h.runner.agents.get_by_service(request.params[0], request.params[1])!
|
||||
response.result = json.encode(agents)
|
||||
}
|
||||
else {
|
||||
return error('Unknown method: ${request.method}')
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
68
lib/core/jobs/openrpc/handler_group_manager.v
Normal file
68
lib/core/jobs/openrpc/handler_group_manager.v
Normal file
@@ -0,0 +1,68 @@
|
||||
module openrpc
|
||||
|
||||
import freeflowuniverse.herolib.core.jobs.model
|
||||
import json
|
||||
|
||||
pub fn (mut h OpenRPCServer) handle_request_group(request OpenRPCRequest) !OpenRPCResponse {
|
||||
mut response := rpc_response_new(request.id)
|
||||
method := request.method.all_after_first('group.')
|
||||
println("request group:'${method}'")
|
||||
match method {
|
||||
'new' {
|
||||
group := h.runner.groups.new()
|
||||
response.result = json.encode(group)
|
||||
}
|
||||
'set' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing group parameter')
|
||||
}
|
||||
group := json.decode(model.Group, request.params[0])!
|
||||
h.runner.groups.set(group)!
|
||||
response.result = 'true'
|
||||
}
|
||||
'get' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing guid parameter')
|
||||
}
|
||||
group := h.runner.groups.get(request.params[0])!
|
||||
response.result = json.encode(group)
|
||||
}
|
||||
'list' {
|
||||
groups := h.runner.groups.list()!
|
||||
response.result = json.encode(groups)
|
||||
}
|
||||
'delete' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing guid parameter')
|
||||
}
|
||||
h.runner.groups.delete(request.params[0])!
|
||||
response.result = 'true'
|
||||
}
|
||||
'add_member' {
|
||||
if request.params.len < 2 {
|
||||
return error('Missing guid or member parameters')
|
||||
}
|
||||
h.runner.groups.add_member(request.params[0], request.params[1])!
|
||||
response.result = 'true'
|
||||
}
|
||||
'remove_member' {
|
||||
if request.params.len < 2 {
|
||||
return error('Missing guid or member parameters')
|
||||
}
|
||||
h.runner.groups.remove_member(request.params[0], request.params[1])!
|
||||
response.result = 'true'
|
||||
}
|
||||
'get_user_groups' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing user_pubkey parameter')
|
||||
}
|
||||
groups := h.runner.groups.get_user_groups(request.params[0])!
|
||||
response.result = json.encode(groups)
|
||||
}
|
||||
else {
|
||||
return error('Unknown method: ${request.method}')
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
66
lib/core/jobs/openrpc/handler_job_manager.v
Normal file
66
lib/core/jobs/openrpc/handler_job_manager.v
Normal file
@@ -0,0 +1,66 @@
|
||||
module openrpc
|
||||
|
||||
import freeflowuniverse.herolib.core.jobs.model
|
||||
import json
|
||||
|
||||
pub fn (mut h OpenRPCServer) handle_request_job(request OpenRPCRequest) !OpenRPCResponse {
|
||||
mut response := rpc_response_new(request.id)
|
||||
|
||||
method := request.method.all_after_first('job.')
|
||||
println("request job:'${method}'")
|
||||
println(request)
|
||||
match method {
|
||||
'new' {
|
||||
job := h.runner.jobs.new()
|
||||
response.result = json.encode(job)
|
||||
}
|
||||
'set' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing job parameter')
|
||||
}
|
||||
job := json.decode(model.Job, request.params[0])!
|
||||
h.runner.jobs.set(job)!
|
||||
response.result = 'true'
|
||||
}
|
||||
'get' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing guid parameter')
|
||||
}
|
||||
job := h.runner.jobs.get(request.params[0])!
|
||||
response.result = json.encode(job)
|
||||
}
|
||||
'list' {
|
||||
jobs := h.runner.jobs.list()!
|
||||
response.result = json.encode(jobs)
|
||||
}
|
||||
'delete' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing guid parameter')
|
||||
}
|
||||
h.runner.jobs.delete(request.params[0])!
|
||||
response.result = 'true'
|
||||
}
|
||||
'update_status' {
|
||||
if request.params.len < 2 {
|
||||
return error('Missing guid or status parameters')
|
||||
}
|
||||
status := match request.params[1] {
|
||||
'created' { model.Status.created }
|
||||
'scheduled' { model.Status.scheduled }
|
||||
'planned' { model.Status.planned }
|
||||
'running' { model.Status.running }
|
||||
'error' { model.Status.error }
|
||||
'ok' { model.Status.ok }
|
||||
else { return error('Invalid status: ${request.params[1]}') }
|
||||
}
|
||||
h.runner.jobs.update_status(request.params[0], status)!
|
||||
job := h.runner.jobs.get(request.params[0])! // Get updated job to return
|
||||
response.result = json.encode(job)
|
||||
}
|
||||
else {
|
||||
return error('Unknown method: ${request.method}')
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
80
lib/core/jobs/openrpc/handler_service_manager.v
Normal file
80
lib/core/jobs/openrpc/handler_service_manager.v
Normal file
@@ -0,0 +1,80 @@
|
||||
module openrpc
|
||||
|
||||
import freeflowuniverse.herolib.core.jobs.model
|
||||
import json
|
||||
|
||||
pub fn (mut h OpenRPCServer) handle_request_service(request OpenRPCRequest) !OpenRPCResponse {
|
||||
mut response := rpc_response_new(request.id)
|
||||
method := request.method.all_after_first('service.')
|
||||
println("request service:'${method}'")
|
||||
match method {
|
||||
'new' {
|
||||
service := h.runner.services.new()
|
||||
response.result = json.encode(service)
|
||||
}
|
||||
'set' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing service parameter')
|
||||
}
|
||||
service := json.decode(model.Service, request.params[0])!
|
||||
h.runner.services.set(service)!
|
||||
response.result = 'true'
|
||||
}
|
||||
'get' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing actor parameter')
|
||||
}
|
||||
service := h.runner.services.get(request.params[0])!
|
||||
response.result = json.encode(service)
|
||||
}
|
||||
'list' {
|
||||
services := h.runner.services.list()!
|
||||
response.result = json.encode(services)
|
||||
}
|
||||
'delete' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing actor parameter')
|
||||
}
|
||||
h.runner.services.delete(request.params[0])!
|
||||
response.result = 'true'
|
||||
}
|
||||
'update_status' {
|
||||
if request.params.len < 2 {
|
||||
return error('Missing actor or status parameters')
|
||||
}
|
||||
status := match request.params[1] {
|
||||
'ok' { model.ServiceState.ok }
|
||||
'down' { model.ServiceState.down }
|
||||
'error' { model.ServiceState.error }
|
||||
'halted' { model.ServiceState.halted }
|
||||
else { return error('Invalid status: ${request.params[1]}') }
|
||||
}
|
||||
h.runner.services.update_status(request.params[0], status)!
|
||||
response.result = 'true'
|
||||
}
|
||||
'get_by_action' {
|
||||
if request.params.len < 1 {
|
||||
return error('Missing action parameter')
|
||||
}
|
||||
services := h.runner.services.get_by_action(request.params[0])!
|
||||
response.result = json.encode(services)
|
||||
}
|
||||
'check_access' {
|
||||
if request.params.len < 4 {
|
||||
return error('Missing parameters: requires actor, action, user_pubkey, and groups')
|
||||
}
|
||||
// Parse groups array from JSON string
|
||||
groups := json.decode([]string, request.params[3])!
|
||||
has_access := h.runner.services.check_access(request.params[0], // actor
|
||||
request.params[1], // action
|
||||
request.params[2], // user_pubkey
|
||||
groups)!
|
||||
response.result = json.encode(has_access)
|
||||
}
|
||||
else {
|
||||
return error('Unknown method: ${request.method}')
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
37
lib/core/jobs/openrpc/model.v
Normal file
37
lib/core/jobs/openrpc/model.v
Normal file
@@ -0,0 +1,37 @@
|
||||
module openrpc
|
||||
|
||||
// Generic OpenRPC request/response structures
|
||||
pub struct OpenRPCRequest {
|
||||
pub mut:
|
||||
jsonrpc string @[required]
|
||||
method string @[required]
|
||||
params []string
|
||||
id int @[required]
|
||||
}
|
||||
|
||||
pub struct OpenRPCResponse {
|
||||
pub mut:
|
||||
jsonrpc string @[required]
|
||||
result string
|
||||
error string
|
||||
id int @[required]
|
||||
}
|
||||
|
||||
fn rpc_response_new(id int) OpenRPCResponse {
|
||||
mut response := OpenRPCResponse{
|
||||
jsonrpc: '2.0'
|
||||
id: id
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
fn rpc_response_error(id int, errormsg string) OpenRPCResponse {
|
||||
mut response := OpenRPCResponse{
|
||||
jsonrpc: '2.0'
|
||||
id: id
|
||||
error: errormsg
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
const rpc_queue = 'herorunner:q:rpc'
|
||||
302
lib/core/jobs/openrpc/specs/agent_manager_openrpc.json
Normal file
302
lib/core/jobs/openrpc/specs/agent_manager_openrpc.json
Normal file
@@ -0,0 +1,302 @@
|
||||
{
|
||||
"openrpc": "1.2.6",
|
||||
"info": {
|
||||
"title": "AgentManager Service",
|
||||
"version": "1.0.0",
|
||||
"description": "OpenRPC specification for the AgentManager module and its methods."
|
||||
},
|
||||
"methods": [
|
||||
{
|
||||
"name": "new",
|
||||
"summary": "Create a new Agent instance",
|
||||
"description": "Returns a new Agent with default or empty fields set. Caller can then fill in details.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "Agent",
|
||||
"description": "A freshly created Agent object.",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Agent"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "set",
|
||||
"summary": "Add or update an Agent in the system",
|
||||
"description": "Stores an Agent in Redis by pubkey. Overwrites any previous entry with the same pubkey.",
|
||||
"params": [
|
||||
{
|
||||
"name": "agent",
|
||||
"description": "The Agent instance to be added or updated.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Agent"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"description": "Indicates success. No data returned on success.",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get",
|
||||
"summary": "Retrieve an Agent by its public key",
|
||||
"description": "Looks up a single Agent using its pubkey.",
|
||||
"params": [
|
||||
{
|
||||
"name": "pubkey",
|
||||
"description": "The public key to look up.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "Agent",
|
||||
"description": "The Agent that was requested, if found.",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Agent"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "list",
|
||||
"summary": "List all Agents",
|
||||
"description": "Returns an array of all known Agents.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "Agents",
|
||||
"description": "A list of all Agents in the system.",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Agent"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "delete",
|
||||
"summary": "Delete an Agent by its public key",
|
||||
"description": "Removes an Agent from the system by pubkey.",
|
||||
"params": [
|
||||
{
|
||||
"name": "pubkey",
|
||||
"description": "The public key of the Agent to be deleted.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"description": "Indicates success. No data returned on success.",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "update_status",
|
||||
"summary": "Update the status of an Agent",
|
||||
"description": "Updates only the status field of the specified Agent.",
|
||||
"params": [
|
||||
{
|
||||
"name": "pubkey",
|
||||
"description": "Public key of the Agent to update.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "status",
|
||||
"description": "The new status to set for the Agent.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/AgentState"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"description": "Indicates success. No data returned on success.",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_by_service",
|
||||
"summary": "Retrieve all Agents that provide a specific service action",
|
||||
"description": "Filters Agents by matching actor and action in any of their declared services.",
|
||||
"params": [
|
||||
{
|
||||
"name": "actor",
|
||||
"description": "The actor name to match.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "action",
|
||||
"description": "The action name to match.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "Agents",
|
||||
"description": "A list of Agents that match the specified service actor and action.",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Agent"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"components": {
|
||||
"schemas": {
|
||||
"Agent": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"pubkey": {
|
||||
"type": "string",
|
||||
"description": "Public key (ed25519) of the Agent."
|
||||
},
|
||||
"address": {
|
||||
"type": "string",
|
||||
"description": "Network address or domain where the Agent can be reached."
|
||||
},
|
||||
"port": {
|
||||
"type": "integer",
|
||||
"description": "Network port for the Agent (default: 9999)."
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Optional human-readable description of the Agent."
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/components/schemas/AgentStatus"
|
||||
},
|
||||
"services": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/AgentService"
|
||||
},
|
||||
"description": "List of public services provided by the Agent."
|
||||
},
|
||||
"signature": {
|
||||
"type": "string",
|
||||
"description": "Signature (by the Agent's private key) of address+port+description+status."
|
||||
}
|
||||
},
|
||||
"required": ["pubkey", "status", "services"]
|
||||
},
|
||||
"AgentStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"guid": {
|
||||
"type": "string",
|
||||
"description": "Unique ID for the job or session."
|
||||
},
|
||||
"timestamp_first": {
|
||||
"$ref": "#/components/schemas/OurTime",
|
||||
"description": "Timestamp when this Agent first came online."
|
||||
},
|
||||
"timestamp_last": {
|
||||
"$ref": "#/components/schemas/OurTime",
|
||||
"description": "Timestamp of the last heartbeat or update from the Agent."
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/components/schemas/AgentState"
|
||||
}
|
||||
}
|
||||
},
|
||||
"AgentService": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"actor": {
|
||||
"type": "string",
|
||||
"description": "The actor name providing the service."
|
||||
},
|
||||
"actions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/AgentServiceAction"
|
||||
},
|
||||
"description": "List of actions available for this service."
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Optional human-readable description for the service."
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/components/schemas/AgentServiceState"
|
||||
}
|
||||
},
|
||||
"required": ["actor", "actions", "status"]
|
||||
},
|
||||
"AgentServiceAction": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"type": "string",
|
||||
"description": "Action name."
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Optional description of this action."
|
||||
},
|
||||
"params": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "Dictionary of parameter names to parameter descriptions."
|
||||
},
|
||||
"params_example": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "Example values for the parameters."
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/components/schemas/AgentServiceState"
|
||||
},
|
||||
"public": {
|
||||
"type": "boolean",
|
||||
"description": "Indicates if the action is publicly accessible to all or restricted."
|
||||
}
|
||||
},
|
||||
"required": ["action", "status", "public"]
|
||||
},
|
||||
"AgentState": {
|
||||
"type": "string",
|
||||
"enum": ["ok", "down", "error", "halted"],
|
||||
"description": "Possible states of an Agent."
|
||||
},
|
||||
"AgentServiceState": {
|
||||
"type": "string",
|
||||
"enum": ["ok", "down", "error", "halted"],
|
||||
"description": "Possible states of an Agent service or action."
|
||||
},
|
||||
"OurTime": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Represents a date/time or timestamp value."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
218
lib/core/jobs/openrpc/specs/group_manager_openrpc.json
Normal file
218
lib/core/jobs/openrpc/specs/group_manager_openrpc.json
Normal file
@@ -0,0 +1,218 @@
|
||||
{
|
||||
"openrpc": "1.2.6",
|
||||
"info": {
|
||||
"title": "Group Manager API",
|
||||
"version": "1.0.0",
|
||||
"description": "An OpenRPC specification for Group Manager methods"
|
||||
},
|
||||
"servers": [
|
||||
{
|
||||
"name": "Local",
|
||||
"url": "http://localhost:8080"
|
||||
}
|
||||
],
|
||||
"methods": [
|
||||
{
|
||||
"name": "GroupManager.new",
|
||||
"summary": "Create a new (in-memory) Group instance",
|
||||
"description": "Creates a new group object. Note that this does NOT store it in Redis. The caller must set the group’s GUID and then call `GroupManager.set` if they wish to persist it.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "group",
|
||||
"description": "The newly-created group instance",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Group"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "GroupManager.set",
|
||||
"summary": "Add or update a Group in Redis",
|
||||
"description": "Stores the specified group in Redis using the group’s GUID as the key.",
|
||||
"params": [
|
||||
{
|
||||
"name": "group",
|
||||
"description": "The group object to store",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Group"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "result",
|
||||
"description": "No return value",
|
||||
"schema": {
|
||||
"type": "null"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "GroupManager.get",
|
||||
"summary": "Retrieve a Group by GUID",
|
||||
"description": "Fetches the group from Redis using the provided GUID.",
|
||||
"params": [
|
||||
{
|
||||
"name": "guid",
|
||||
"description": "The group’s unique identifier",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "group",
|
||||
"description": "The requested group",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Group"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "GroupManager.list",
|
||||
"summary": "List all Groups",
|
||||
"description": "Returns an array containing all groups stored in Redis.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "groups",
|
||||
"description": "All currently stored groups",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/GroupList"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "GroupManager.delete",
|
||||
"summary": "Delete a Group by GUID",
|
||||
"description": "Removes the specified group from Redis by its GUID.",
|
||||
"params": [
|
||||
{
|
||||
"name": "guid",
|
||||
"description": "The group’s unique identifier",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "result",
|
||||
"description": "No return value",
|
||||
"schema": {
|
||||
"type": "null"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "GroupManager.add_member",
|
||||
"summary": "Add a member to a Group",
|
||||
"description": "Adds a user pubkey or another group’s GUID to the member list of the specified group. Does not add duplicates.",
|
||||
"params": [
|
||||
{
|
||||
"name": "guid",
|
||||
"description": "The target group’s unique identifier",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "member",
|
||||
"description": "Pubkey or group GUID to be added to the group",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "result",
|
||||
"description": "No return value",
|
||||
"schema": {
|
||||
"type": "null"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "GroupManager.remove_member",
|
||||
"summary": "Remove a member from a Group",
|
||||
"description": "Removes a user pubkey or another group’s GUID from the member list of the specified group.",
|
||||
"params": [
|
||||
{
|
||||
"name": "guid",
|
||||
"description": "The target group’s unique identifier",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "member",
|
||||
"description": "Pubkey or group GUID to be removed from the group",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "result",
|
||||
"description": "No return value",
|
||||
"schema": {
|
||||
"type": "null"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "GroupManager.get_user_groups",
|
||||
"summary": "List Groups that a user belongs to (directly or indirectly)",
|
||||
"description": "Checks each group (and nested groups) to see if the user pubkey is a member, returning all groups in which the user is included (including membership through nested groups).",
|
||||
"params": [
|
||||
{
|
||||
"name": "user_pubkey",
|
||||
"description": "The pubkey of the user to check",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "groups",
|
||||
"description": "A list of groups to which the user belongs",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/GroupList"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"components": {
|
||||
"schemas": {
|
||||
"Group": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"guid": {
|
||||
"type": "string",
|
||||
"description": "Unique ID for the group"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Name of the group"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Optional description of the group"
|
||||
},
|
||||
"members": {
|
||||
"type": "array",
|
||||
"description": "List of user pubkeys or other group GUIDs",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["guid", "members"]
|
||||
},
|
||||
"GroupList": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Group"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
304
lib/core/jobs/openrpc/specs/job_manager_openrpc.json
Normal file
304
lib/core/jobs/openrpc/specs/job_manager_openrpc.json
Normal file
@@ -0,0 +1,304 @@
|
||||
{
|
||||
"openrpc": "1.2.6",
|
||||
"info": {
|
||||
"title": "JobManager OpenRPC Specification",
|
||||
"version": "1.0.0",
|
||||
"description": "OpenRPC specification for the JobManager module which handles job operations."
|
||||
},
|
||||
"servers": [
|
||||
{
|
||||
"name": "Local",
|
||||
"url": "http://localhost:8080/rpc"
|
||||
}
|
||||
],
|
||||
"methods": [
|
||||
{
|
||||
"name": "newJob",
|
||||
"summary": "Create a new Job instance",
|
||||
"description": "Creates a new Job with default/empty values. The GUID is left empty for the caller to fill.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "job",
|
||||
"description": "A newly created Job object, not yet persisted.",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Job"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "setJob",
|
||||
"summary": "Add or update a Job in the system (Redis)",
|
||||
"description": "Persists the given Job into the data store. If the GUID already exists, the existing job is overwritten.",
|
||||
"params": [
|
||||
{
|
||||
"name": "job",
|
||||
"description": "The Job object to store or update.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Job"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"description": "Indicates if the operation was successful.",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "getJob",
|
||||
"summary": "Retrieve a Job by its GUID",
|
||||
"description": "Fetches an existing Job from the data store using its unique GUID.",
|
||||
"params": [
|
||||
{
|
||||
"name": "guid",
|
||||
"description": "The GUID of the Job to retrieve.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "job",
|
||||
"description": "The retrieved Job object.",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Job"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "listJobs",
|
||||
"summary": "List all Jobs",
|
||||
"description": "Returns an array of all Jobs present in the data store.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "jobs",
|
||||
"description": "Array of all Job objects found.",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Job"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "deleteJob",
|
||||
"summary": "Remove a Job by its GUID",
|
||||
"description": "Deletes a specific Job from the data store by its GUID.",
|
||||
"params": [
|
||||
{
|
||||
"name": "guid",
|
||||
"description": "The GUID of the Job to delete.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"description": "Indicates if the job was successfully deleted.",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "updateJobStatus",
|
||||
"summary": "Update the status of a Job",
|
||||
"description": "Sets the status field of a Job in the data store.",
|
||||
"params": [
|
||||
{
|
||||
"name": "guid",
|
||||
"description": "The GUID of the Job to update.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "status",
|
||||
"description": "The new status for the Job.",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Status"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "job",
|
||||
"description": "The updated Job object with new status applied.",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Job"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"components": {
|
||||
"schemas": {
|
||||
"Job": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"guid": {
|
||||
"type": "string",
|
||||
"description": "Unique ID for the Job."
|
||||
},
|
||||
"agents": {
|
||||
"type": "array",
|
||||
"description": "Public keys of the agent(s) which will execute the command.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"type": "string",
|
||||
"description": "Pubkey of the agent who requested the job."
|
||||
},
|
||||
"circle": {
|
||||
"type": "string",
|
||||
"description": "Digital-life circle name this Job belongs to.",
|
||||
"default": "default"
|
||||
},
|
||||
"context": {
|
||||
"type": "string",
|
||||
"description": "High-level context for the Job inside a circle.",
|
||||
"default": "default"
|
||||
},
|
||||
"actor": {
|
||||
"type": "string",
|
||||
"description": "Actor name that will handle the Job (e.g. `vm_manager`)."
|
||||
},
|
||||
"action": {
|
||||
"type": "string",
|
||||
"description": "Action to be taken by the actor (e.g. `start`)."
|
||||
},
|
||||
"params": {
|
||||
"type": "object",
|
||||
"description": "Key-value parameters for the action to be performed.",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"timeout_schedule": {
|
||||
"type": "integer",
|
||||
"description": "Timeout (in seconds) before the job is picked up by an agent.",
|
||||
"default": 60
|
||||
},
|
||||
"timeout": {
|
||||
"type": "integer",
|
||||
"description": "Timeout (in seconds) for the job to complete.",
|
||||
"default": 3600
|
||||
},
|
||||
"log": {
|
||||
"type": "boolean",
|
||||
"description": "Whether to log job details.",
|
||||
"default": true
|
||||
},
|
||||
"ignore_error": {
|
||||
"type": "boolean",
|
||||
"description": "If true, job errors do not cause an exception to be raised."
|
||||
},
|
||||
"ignore_error_codes": {
|
||||
"type": "array",
|
||||
"description": "Array of error codes to ignore.",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"debug": {
|
||||
"type": "boolean",
|
||||
"description": "If true, additional debug information is provided.",
|
||||
"default": false
|
||||
},
|
||||
"retry": {
|
||||
"type": "integer",
|
||||
"description": "Number of retries allowed on error.",
|
||||
"default": 0
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/components/schemas/JobStatus"
|
||||
},
|
||||
"dependencies": {
|
||||
"type": "array",
|
||||
"description": "List of job dependencies that must complete before this job executes.",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/JobDependency"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"guid",
|
||||
"status"
|
||||
]
|
||||
},
|
||||
"JobStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"guid": {
|
||||
"type": "string",
|
||||
"description": "Unique ID for the Job (mirrors the parent job GUID)."
|
||||
},
|
||||
"created": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "When the job was created."
|
||||
},
|
||||
"start": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "When the job was picked up to start."
|
||||
},
|
||||
"end": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "When the job ended."
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/components/schemas/Status"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"guid",
|
||||
"created",
|
||||
"status"
|
||||
]
|
||||
},
|
||||
"JobDependency": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"guid": {
|
||||
"type": "string",
|
||||
"description": "Unique ID of the Job this dependency points to."
|
||||
},
|
||||
"agents": {
|
||||
"type": "array",
|
||||
"description": "Possible agent(s) who can execute the dependency.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"guid"
|
||||
]
|
||||
},
|
||||
"Status": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"created",
|
||||
"scheduled",
|
||||
"planned",
|
||||
"running",
|
||||
"error",
|
||||
"ok"
|
||||
],
|
||||
"description": "Enumerates the possible states of a Job."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
301
lib/core/jobs/openrpc/specs/service_manager_openrpc.json
Normal file
301
lib/core/jobs/openrpc/specs/service_manager_openrpc.json
Normal file
@@ -0,0 +1,301 @@
|
||||
{
|
||||
"openrpc": "1.2.6",
|
||||
"info": {
|
||||
"title": "ServiceManager API",
|
||||
"version": "1.0.0",
|
||||
"description": "OpenRPC 2.0 spec for managing services with ServiceManager."
|
||||
},
|
||||
"servers": [
|
||||
{
|
||||
"name": "Local",
|
||||
"url": "http://localhost:8080"
|
||||
}
|
||||
],
|
||||
"methods": [
|
||||
{
|
||||
"name": "ServiceManager_new",
|
||||
"summary": "Create a new Service instance (not saved to Redis yet).",
|
||||
"description": "Creates and returns a new empty Service object with default values. The `actor` field remains empty until the caller sets it.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "service",
|
||||
"$ref": "#/components/schemas/Service"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ServiceManager_set",
|
||||
"summary": "Add or update a Service in Redis.",
|
||||
"description": "Stores the Service in Redis, identified by its `actor` property.",
|
||||
"params": [
|
||||
{
|
||||
"name": "service",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/Service"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"schema": {
|
||||
"type": "boolean",
|
||||
"description": "True if operation succeeds."
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ServiceManager_get",
|
||||
"summary": "Retrieve a Service by actor name.",
|
||||
"description": "Gets the Service object from Redis using the given actor name.",
|
||||
"params": [
|
||||
{
|
||||
"name": "actor",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "service",
|
||||
"$ref": "#/components/schemas/Service"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ServiceManager_list",
|
||||
"summary": "List all Services.",
|
||||
"description": "Returns an array of all Services stored in Redis.",
|
||||
"params": [],
|
||||
"result": {
|
||||
"name": "services",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Service"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ServiceManager_delete",
|
||||
"summary": "Delete a Service by actor name.",
|
||||
"description": "Removes the Service from Redis using the given actor name.",
|
||||
"params": [
|
||||
{
|
||||
"name": "actor",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ServiceManager_update_status",
|
||||
"summary": "Update the status of a given Service.",
|
||||
"description": "Updates only the `status` field of a Service specified by its actor name.",
|
||||
"params": [
|
||||
{
|
||||
"name": "actor",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "status",
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/ServiceState"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "success",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ServiceManager_get_by_action",
|
||||
"summary": "Retrieve Services by action name.",
|
||||
"description": "Returns all Services that provide the specified action.",
|
||||
"params": [
|
||||
{
|
||||
"name": "action",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "services",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/Service"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "ServiceManager_check_access",
|
||||
"summary": "Check if a user has access to a Service action.",
|
||||
"description": "Verifies if a user (and any groups they belong to) has the right to invoke a specified action on a given Service.",
|
||||
"params": [
|
||||
{
|
||||
"name": "actor",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "action",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "user_pubkey",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "groups",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"result": {
|
||||
"name": "hasAccess",
|
||||
"schema": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"components": {
|
||||
"schemas": {
|
||||
"Service": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"actor": {
|
||||
"type": "string",
|
||||
"description": "The actor (unique name) providing the service."
|
||||
},
|
||||
"actions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/ServiceAction"
|
||||
},
|
||||
"description": "A list of actions available in this service."
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Optional description of the service."
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/components/schemas/ServiceState",
|
||||
"description": "The current state of the service."
|
||||
},
|
||||
"acl": {
|
||||
"$ref": "#/components/schemas/ACL",
|
||||
"description": "An optional access control list for the entire service."
|
||||
}
|
||||
},
|
||||
"required": ["actor", "actions", "status"]
|
||||
},
|
||||
"ServiceAction": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"type": "string",
|
||||
"description": "A unique identifier for the action."
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Optional description of this action."
|
||||
},
|
||||
"params": {
|
||||
"type": "object",
|
||||
"description": "Parameter definitions for this action.",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"params_example": {
|
||||
"type": "object",
|
||||
"description": "Example parameters for this action.",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"acl": {
|
||||
"$ref": "#/components/schemas/ACL",
|
||||
"description": "Optional ACL specifically for this action."
|
||||
}
|
||||
},
|
||||
"required": ["action"]
|
||||
},
|
||||
"ACL": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "A friendly name for the ACL."
|
||||
},
|
||||
"ace": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/ACE"
|
||||
},
|
||||
"description": "A list of Access Control Entries."
|
||||
}
|
||||
},
|
||||
"required": ["ace"]
|
||||
},
|
||||
"ACE": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"groups": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "List of group IDs that have this permission."
|
||||
},
|
||||
"users": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "List of user public keys that have this permission."
|
||||
},
|
||||
"right": {
|
||||
"type": "string",
|
||||
"description": "Permission type (e.g. 'read', 'write', 'admin', 'block')."
|
||||
}
|
||||
},
|
||||
"required": ["right"]
|
||||
},
|
||||
"ServiceState": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"ok",
|
||||
"down",
|
||||
"error",
|
||||
"halted"
|
||||
],
|
||||
"description": "Possible states of a service."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
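As a wire-level illustration of the spec above, a JSON-RPC call to `ServiceManager_check_access` might look like the following. The actor, action, public key, and group names are hypothetical, and positional params (in the order defined above: actor, action, user_pubkey, groups) are assumed.

```v
// hypothetical JSON-RPC 2.0 payload for ServiceManager_check_access
request := '{
	"jsonrpc": "2.0",
	"id": 1,
	"method": "ServiceManager_check_access",
	"params": ["vdc_deployer", "start", "ed25519:abc...", ["sysadmins"]]
}'
println(request)
```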
|
||||
|
||||
93
lib/core/jobs/openrpc/ws_server.v
Normal file
@@ -0,0 +1,93 @@
|
||||
module openrpc
|
||||
|
||||
import net.websocket
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
import json
|
||||
import rand
|
||||
|
||||
// WebSocket server that receives RPC requests
|
||||
pub struct WSServer {
|
||||
mut:
|
||||
redis &redisclient.Redis
|
||||
queue &redisclient.RedisQueue
|
||||
port int = 8080 // Default port, can be configured
|
||||
}
|
||||
|
||||
// Create new WebSocket server
|
||||
pub fn new_ws_server(port int) !&WSServer {
|
||||
mut redis := redisclient.core_get()!
|
||||
return &WSServer{
|
||||
redis: redis
|
||||
queue: &redisclient.RedisQueue{
|
||||
key: rpc_queue
|
||||
redis: redis
|
||||
}
|
||||
port: port
|
||||
}
|
||||
}
|
||||
|
||||
// Start the WebSocket server
|
||||
pub fn (mut s WSServer) start() ! {
|
||||
mut ws_server := websocket.new_server(.ip, s.port, '')
|
||||
|
||||
// Handle new WebSocket connections
|
||||
ws_server.on_connect(fn (mut ws websocket.ServerClient) !bool {
|
||||
println('New WebSocket client connected')
|
||||
return true
|
||||
})!
|
||||
|
||||
// Handle client disconnections
|
||||
ws_server.on_close(fn (mut ws websocket.Client, code int, reason string) ! {
|
||||
println('WebSocket client disconnected (code: ${code}, reason: ${reason})')
|
||||
})
|
||||
|
||||
// Handle incoming messages
|
||||
ws_server.on_message(fn [mut s] (mut ws websocket.Client, msg &websocket.Message) ! {
|
||||
if msg.opcode != .text_frame {
|
||||
println('WebSocket unknown msg opcode (code: ${msg.opcode})')
|
||||
return
|
||||
}
|
||||
|
||||
// Parse request
|
||||
request := json.decode(OpenRPCRequest, msg.payload.bytestr()) or {
|
||||
error_msg := '{"jsonrpc":"2.0","error":"Invalid JSON-RPC request","id":null}'
|
||||
println(error_msg)
|
||||
ws.write(error_msg.bytes(), websocket.OPCode.text_frame) or { panic(err) }
|
||||
return
|
||||
}
|
||||
|
||||
// Generate unique request ID if not provided
|
||||
mut req_id := request.id
|
||||
if req_id == 0 {
|
||||
req_id = rand.i32_in_range(1, 10000000)!
|
||||
}
|
||||
|
||||
println('WebSocket put on queue: \'${rpc_queue}\' (msg: ${msg.payload.bytestr()})')
|
||||
// Send request to Redis queue
|
||||
s.queue.add(msg.payload.bytestr())!
|
||||
|
||||
returnkey := '${rpc_queue}:${req_id}'
|
||||
mut queue_return := &redisclient.RedisQueue{
|
||||
key: returnkey
|
||||
redis: s.redis
|
||||
}
|
||||
|
||||
// Wait for response
|
||||
response := queue_return.get(30)!
|
||||
if response.len < 2 {
|
||||
error_msg := '{"jsonrpc":"2.0","error":"Timeout waiting for response","id":${req_id}}'
|
||||
println('WebSocket error response (err: ${response})')
|
||||
ws.write(error_msg.bytes(), websocket.OPCode.text_frame) or { panic(err) }
|
||||
return
|
||||
}
|
||||
|
||||
println('WebSocket ok response (msg: ${response[1]})')
|
||||
// Send response back to WebSocket client
|
||||
response_str := response[1].str()
|
||||
ws.write(response_str.bytes(), websocket.OPCode.text_frame) or { panic(err) }
|
||||
})
|
||||
|
||||
// Start server
|
||||
println('WebSocket server listening on port ${s.port}')
|
||||
ws_server.listen() or { return error('Failed to start WebSocket server: ${err}') }
|
||||
}
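A minimal sketch of wiring this server up is shown below. It assumes the module is importable as `freeflowuniverse.herolib.core.jobs.openrpc` (derived from the file path above) and that a separate worker pops requests from `rpc_queue` and pushes each JSON-RPC reply onto `<rpc_queue>:<id>`, as the handler expects.

```v
// assumed import path, based on lib/core/jobs/openrpc/ws_server.v
import freeflowuniverse.herolib.core.jobs.openrpc

fn main() {
	// start the WebSocket front-end; start() blocks in listen()
	mut server := openrpc.new_ws_server(8080) or { panic(err) }
	server.start() or { panic(err) }
}
```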
|
||||
@@ -1,25 +0,0 @@
|
||||
module log
|
||||
|
||||
import db.sqlite
|
||||
|
||||
pub struct DBBackend {
|
||||
pub:
|
||||
db sqlite.DB
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct DBBackendConfig {
|
||||
pub:
|
||||
db sqlite.DB
|
||||
}
|
||||
|
||||
// factory for
|
||||
pub fn new_backend(config DBBackendConfig) !DBBackend {
|
||||
sql config.db {
|
||||
create table Log
|
||||
} or { panic(err) }
|
||||
|
||||
return DBBackend{
|
||||
db: config.db
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
module log
|
||||
|
||||
import time
|
||||
|
||||
@[params]
|
||||
pub struct ViewEvent {
|
||||
pub mut:
|
||||
page string
|
||||
duration time.Duration
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
module log
|
||||
|
||||
import db.sqlite
|
||||
|
||||
pub struct Logger {
|
||||
db_path string
|
||||
// DBBackend
|
||||
}
|
||||
|
||||
pub fn new(db_path string) !Logger {
|
||||
db := sqlite.connect(db_path)!
|
||||
sql db {
|
||||
create table Log
|
||||
} or { panic(err) }
|
||||
return Logger{
|
||||
db_path: db_path
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
module log
|
||||
|
||||
import db.sqlite
|
||||
|
||||
pub fn (logger Logger) new_log(log Log) ! {
|
||||
db := sqlite.connect(logger.db_path)!
|
||||
|
||||
sql db {
|
||||
insert log into Log
|
||||
}!
|
||||
}
|
||||
|
||||
pub struct LogFilter {
|
||||
Log
|
||||
matches_all bool
|
||||
limit int
|
||||
}
|
||||
|
||||
pub fn (logger Logger) filter_logs(filter LogFilter) ![]Log {
|
||||
db := sqlite.connect(logger.db_path)!
|
||||
mut select_stmt := 'select * from Log'
|
||||
|
||||
mut matchers := []string{}
|
||||
if filter.event != '' {
|
||||
matchers << "event == '${filter.event}'"
|
||||
}
|
||||
|
||||
if filter.subject != '' {
|
||||
matchers << "subject == '${filter.subject}'"
|
||||
}
|
||||
|
||||
if filter.object != '' {
|
||||
matchers << "object == '${filter.object}'"
|
||||
}
|
||||
|
||||
if matchers.len > 0 {
|
||||
matchers_str := if filter.matches_all {
|
||||
matchers.join(' AND ')
|
||||
} else {
|
||||
matchers.join(' OR ')
|
||||
}
|
||||
select_stmt += ' where ${matchers_str}'
|
||||
}
|
||||
|
||||
responses := db.exec(select_stmt)!
|
||||
|
||||
mut logs := []Log{}
|
||||
for response in responses {
|
||||
logs << sql db {
|
||||
select from Log where id == response.vals[0].int()
|
||||
}!
|
||||
}
|
||||
|
||||
return logs
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
module log
|
||||
|
||||
import time
|
||||
|
||||
pub struct Log {
|
||||
id int @[primary; sql: serial]
|
||||
pub:
|
||||
timestamp time.Time
|
||||
pub mut:
|
||||
event string
|
||||
subject string
|
||||
object string
|
||||
message string // a custom message that can be attached to a log
|
||||
}
|
||||
|
||||
// pub struct Event {
|
||||
// name string
|
||||
// description string
|
||||
// }
|
||||
|
||||
// // log_request logs http requests
|
||||
// pub fn create_log(log Log) Log {
|
||||
// return Log{
|
||||
// ...log
|
||||
// timestamp: time.now()
|
||||
// })
|
||||
// }
|
||||
|
||||
// // log_request logs http requests
|
||||
// pub fn (mut a Analyzer) get_logs(subject string) []Log {
|
||||
// return []Log{}
|
||||
// }
|
||||
@@ -76,7 +76,7 @@ pub fn (mut path Path) expand(dest string) !Path {
|
||||
|
||||
if path.name().to_lower().ends_with('.tar.gz') || path.name().to_lower().ends_with('.tgz') {
|
||||
cmd := 'tar -xzvf ${path.path} -C ${desto.path}'
|
||||
console.print_debug(cmd)
|
||||
// console.print_debug(cmd)
|
||||
res := os.execute(cmd)
|
||||
if res.exit_code > 0 {
|
||||
return error('Could not expand.\n${res}')
|
||||
@@ -136,7 +136,7 @@ pub fn find_common_ancestor(paths_ []string) string {
|
||||
}
|
||||
}
|
||||
paths := paths_.map(os.abs_path(os.real_path(it))) // get the real path (symlinks... resolved)
|
||||
console.print_debug(paths.str())
|
||||
// console.print_debug(paths.str())
|
||||
parts := paths[0].split('/')
|
||||
mut totest_prev := '/'
|
||||
for i in 1 .. parts.len {
|
||||
@@ -223,7 +223,7 @@ pub fn (mut path Path) move(args MoveArgs) ! {
|
||||
// that last dir needs to move 1 up
|
||||
pub fn (mut path Path) moveup_single_subdir() ! {
|
||||
mut plist := path.list(recursive: false, ignoredefault: true, dirs_only: true)!
|
||||
console.print_debug(plist.str())
|
||||
// console.print_debug(plist.str())
|
||||
if plist.paths.len != 1 {
|
||||
return error('could not find one subdir in ${path.path} , so cannot move up')
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ pub fn (mut plbook PlayBook) add(args_ PlayBookNewArgs) ! {
|
||||
pull: args.git_pull
|
||||
reset: args.git_reset
|
||||
)!
|
||||
args.path = repo.get_path()!
|
||||
args.path = repo.path()
|
||||
}
|
||||
|
||||
// walk over directory
|
||||
|
||||
@@ -20,16 +20,15 @@ pub fn play_core(mut plbook playbook.PlayBook) ! {
|
||||
|
||||
if p.exists('coderoot') {
|
||||
panic('implement')
|
||||
mut coderoot := p.get_path_create('coderoot')!
|
||||
|
||||
mut gs := gittools.get()!
|
||||
// mut coderoot := p.get_path_create('coderoot')!
|
||||
// mut gs := gittools.get()!
|
||||
}
|
||||
action.done = true
|
||||
}
|
||||
|
||||
for mut action in plbook.find(filter: 'session.')! {
|
||||
mut p := action.params
|
||||
mut session := plbook.session
|
||||
// mut p := action.params
|
||||
// mut session := plbook.session
|
||||
|
||||
//!!session.env_set key:'JWT_SHARED_KEY' val:'...'
|
||||
|
||||
|
||||
@@ -28,11 +28,11 @@ pub fn play_doctree(mut plbook playbook.PlayBook) ! {
|
||||
|
||||
for mut action in plbook.find(filter: 'doctree:add')! {
|
||||
mut p := action.params
|
||||
url := p.get_default('url', '')!
|
||||
path := p.get_default('path', '')!
|
||||
// url := p.get_default('url', '')!
|
||||
// path := p.get_default('path', '')!
|
||||
name := p.get('name')!
|
||||
|
||||
mut tree := trees[name] or { return error('tree ${name} not found') }
|
||||
_ := trees[name] or { return error('tree ${name} not found') }
|
||||
|
||||
// tree.scan(
|
||||
// path: path
|
||||
@@ -63,7 +63,7 @@ pub fn play_doctree(mut plbook playbook.PlayBook) ! {
|
||||
for mut action in plbook.find(filter: 'doctree:export')! {
|
||||
panic('implement')
|
||||
mut p := action.params
|
||||
name := p.get('name')!
|
||||
_ := p.get('name')!
|
||||
action.done = true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,12 +5,13 @@ import freeflowuniverse.herolib.core.playbook
|
||||
import os
|
||||
|
||||
pub fn play_luadns(mut plbook playbook.PlayBook) ! {
|
||||
mut buildroot := '${os.home_dir()}/hero/var/mdbuild'
|
||||
mut publishroot := '${os.home_dir()}/hero/www/info'
|
||||
mut coderoot := ''
|
||||
// Variables below are not used, commenting them out
|
||||
// mut buildroot := '${os.home_dir()}/hero/var/mdbuild'
|
||||
// mut publishroot := '${os.home_dir()}/hero/www/info'
|
||||
// mut coderoot := ''
|
||||
// mut install := false
|
||||
mut reset := false
|
||||
mut pull := false
|
||||
// mut reset := false
|
||||
// mut pull := false
|
||||
|
||||
for mut action in plbook.find(filter: 'luadns.set_domain')! {
|
||||
mut p := action.params
|
||||
|
||||
@@ -5,7 +5,7 @@ fn setup() !&redisclient.Redis {
|
||||
mut redis := redisclient.core_get()!
|
||||
// Select db 10 to be away from default one '0'
|
||||
redis.selectdb(10) or { panic(err) }
|
||||
return &redis
|
||||
return redis
|
||||
}
|
||||
|
||||
fn cleanup(mut redis redisclient.Redis) ! {
|
||||
@@ -25,7 +25,8 @@ fn test_rpc() {
|
||||
mut r := redis.rpc_get('testrpc')
|
||||
|
||||
r.call(cmd: 'test.cmd', data: 'this is my data, normally json', wait: false)!
|
||||
returnqueue := r.process(10000, process_test)!
|
||||
|
||||
returnqueue := r.process(process_test, timeout: 10000)!
|
||||
mut res := r.result(10000, returnqueue)!
|
||||
console.print_debug(res)
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import json
|
||||
|
||||
@[params]
|
||||
pub struct OpenSSLGenerateArgs {
|
||||
pub:
|
||||
name string = 'default'
|
||||
domain string = 'myregistry.domain.com'
|
||||
reset bool
|
||||
@@ -22,6 +23,7 @@ pub fn (mut ossl OpenSSL) generate(args OpenSSLGenerateArgs) !OpenSSLKey {
|
||||
'
|
||||
|
||||
mut b := builder.new()!
|
||||
println('b: ${b}')
|
||||
mut node := b.node_local()!
|
||||
|
||||
node.exec(cmd: cmd)!
|
||||
|
||||
139
lib/data/cache/README.md
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
# HeroLib Cache System
|
||||
|
||||
A high-performance, generic in-memory caching system for V with support for TTL, size limits, and LRU eviction.
|
||||
|
||||
## Features
|
||||
|
||||
- Generic type support (can cache any type)
|
||||
- Configurable maximum entries and memory size limits
|
||||
- Time-To-Live (TTL) support
|
||||
- Least Recently Used (LRU) eviction policy
|
||||
- Memory-aware caching with size-based eviction
|
||||
- Thread-safe operations
|
||||
- Optional persistence support (configurable)
|
||||
|
||||
## Configuration
|
||||
|
||||
The cache system is highly configurable through the `CacheConfig` struct:
|
||||
|
||||
```v
|
||||
pub struct CacheConfig {
|
||||
pub mut:
|
||||
max_entries u32 = 1000 // Maximum number of entries
|
||||
max_size_mb f64 = 100.0 // Maximum cache size in MB
|
||||
ttl_seconds i64 = 3600 // Time-to-live in seconds (0 = no TTL)
|
||||
eviction_ratio f64 = 0.05 // Percentage of entries to evict when full (5%)
|
||||
persist bool // Whether to persist cache to disk
|
||||
}
|
||||
```
|
||||
|
||||
## Basic Usage
|
||||
|
||||
Here's a simple example of using the cache:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.data.cache
|
||||
|
||||
// Define your struct type
|
||||
@[heap]
|
||||
struct User {
|
||||
id u32
|
||||
name string
|
||||
age int
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Create a cache with default configuration
|
||||
mut user_cache := cache.new_cache[User](cache.CacheConfig{})
|
||||
|
||||
// Create a user
|
||||
user := &User{
|
||||
id: 1
|
||||
name: 'Alice'
|
||||
age: 30
|
||||
}
|
||||
|
||||
// Add to cache
|
||||
user_cache.set(user.id, user)
|
||||
|
||||
// Retrieve from cache
|
||||
if cached_user := user_cache.get(1) {
|
||||
println('Found user: ${cached_user.name}')
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Custom Configuration
|
||||
|
||||
```v
|
||||
mut user_cache := cache.new_cache[User](
|
||||
max_entries: 1000 // Maximum number of entries
|
||||
max_size_mb: 10.0 // Maximum cache size in MB
|
||||
ttl_seconds: 300 // Items expire after 5 minutes
|
||||
eviction_ratio: 0.2 // Evict 20% of entries when full
|
||||
)
|
||||
```
|
||||
|
||||
### Memory Management
|
||||
|
||||
The cache automatically manages memory using two mechanisms:
|
||||
|
||||
1. **Entry Count Limit**: When `max_entries` is reached, least recently used items are evicted.
|
||||
2. **Memory Size Limit**: When `max_size_mb` is reached, items are evicted based on the `eviction_ratio`.
|
||||
|
||||
```v
|
||||
// Create a cache with strict memory limits
|
||||
config := cache.CacheConfig{
|
||||
max_entries: 100 // Only keep 100 entries maximum
|
||||
max_size_mb: 1.0 // Limit cache to 1MB
|
||||
eviction_ratio: 0.1 // Remove 10% of entries when full
|
||||
}
|
||||
```
|
||||
|
||||
### Cache Operations
|
||||
|
||||
```v
|
||||
mut user_cache := cache.new_cache[User](cache.CacheConfig{})
|
||||
|
||||
// Add/update items
|
||||
user_cache.set(1, user1)
|
||||
user_cache.set(2, user2)
|
||||
|
||||
// Get items
|
||||
if user := user_cache.get(1) {
|
||||
// Use cached user
|
||||
}
|
||||
|
||||
// Check cache size
|
||||
println('Cache entries: ${user_cache.len()}')
|
||||
|
||||
// Clear the cache
|
||||
user_cache.clear()
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Choose Appropriate TTL**: Set TTL based on how frequently your data changes and how critical freshness is.
|
||||
|
||||
2. **Memory Management**:
|
||||
- Set reasonable `max_entries` and `max_size_mb` limits based on your application's memory constraints
|
||||
- Monitor cache size using `len()`
|
||||
- Use appropriate `eviction_ratio` (typically 0.05-0.2) to balance performance and memory usage
|
||||
|
||||
3. **Type Safety**:
|
||||
- Always use the `@[heap]` attribute for structs stored in the cache
|
||||
- Ensure cached types are properly memory managed
|
||||
|
||||
4. **Error Handling**:
|
||||
- Always use option types when retrieving items (`if value := cache.get(key) {`)
|
||||
- Handle cache misses gracefully
|
||||
|
||||
5. **Performance**:
|
||||
- Consider the trade-off between cache size and hit rate
|
||||
- Monitor and adjust TTL and eviction settings based on usage patterns
|
||||
|
||||
## Thread Safety
|
||||
|
||||
The cache implementation is thread-safe for concurrent access. However, when using the cache in a multi-threaded environment, ensure proper synchronization when accessing cached objects.
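If several threads share one cache instance, a thin wrapper that serializes access is one option. The sketch below assumes V's `sync.Mutex`; the `SafeUserCache` type is illustrative and not part of this module.

```v
import sync
import freeflowuniverse.herolib.data.cache

@[heap]
struct User {
	id   u32
	name string
}

// hypothetical wrapper that guards a shared cache with a mutex
struct SafeUserCache {
mut:
	mu &sync.Mutex = sync.new_mutex()
	c  &cache.Cache[User]
}

fn (mut s SafeUserCache) get(id u32) ?&User {
	s.mu.lock()
	defer {
		s.mu.unlock()
	}
	return s.c.get(id)
}

fn (mut s SafeUserCache) set(id u32, u &User) {
	s.mu.lock()
	defer {
		s.mu.unlock()
	}
	s.c.set(id, u)
}
```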
|
||||
167
lib/data/cache/cache.v
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
module cache
|
||||
|
||||
import time
|
||||
import math
|
||||
|
||||
// CacheConfig holds cache configuration parameters
|
||||
pub struct CacheConfig {
|
||||
pub mut:
|
||||
max_entries u32 = 1000 // Maximum number of entries
|
||||
max_size_mb f64 = 100.0 // Maximum cache size in MB
|
||||
ttl_seconds i64 = 3600 // Time-to-live in seconds (0 = no TTL)
|
||||
eviction_ratio f64 = 0.05 // Percentage of entries to evict when full (5%)
|
||||
}
|
||||
|
||||
// CacheEntry represents a cached object with its metadata
|
||||
@[heap]
|
||||
struct CacheEntry[T] {
|
||||
mut:
|
||||
obj T // Reference to the cached object
|
||||
last_access i64 // Unix timestamp of last access
|
||||
created_at i64 // Unix timestamp of creation
|
||||
size u32 // Approximate size in bytes
|
||||
}
|
||||
|
||||
// Cache manages the in-memory caching of objects
|
||||
pub struct Cache[T] {
|
||||
mut:
|
||||
entries map[u32]&CacheEntry[T] // Map of object ID to cache entry
|
||||
config CacheConfig // Cache configuration
|
||||
access_log []u32 // Ordered list of object IDs by access time
|
||||
total_size u64 // Total size of cached entries in bytes
|
||||
}
|
||||
|
||||
// new_cache creates a new cache instance with the given configuration
|
||||
pub fn new_cache[T](config CacheConfig) &Cache[T] {
|
||||
return &Cache[T]{
|
||||
entries: map[u32]&CacheEntry[T]{}
|
||||
config: config
|
||||
access_log: []u32{cap: int(config.max_entries)}
|
||||
total_size: 0
|
||||
}
|
||||
}
|
||||
|
||||
// get retrieves an object from the cache if it exists
|
||||
pub fn (mut c Cache[T]) get(id u32) ?&T {
|
||||
if entry := c.entries[id] {
|
||||
now := time.now().unix()
|
||||
|
||||
// Check TTL
|
||||
if c.config.ttl_seconds > 0 {
|
||||
if (now - entry.created_at) > c.config.ttl_seconds {
|
||||
c.remove(id)
|
||||
return none
|
||||
}
|
||||
}
|
||||
|
||||
// Update access time
|
||||
unsafe {
|
||||
entry.last_access = now
|
||||
}
|
||||
// Move ID to end of access log
|
||||
idx := c.access_log.index(id)
|
||||
if idx >= 0 {
|
||||
c.access_log.delete(idx)
|
||||
}
|
||||
c.access_log << id
|
||||
|
||||
return &entry.obj
|
||||
}
|
||||
return none
|
||||
}
|
||||
|
||||
// set adds or updates an object in the cache
|
||||
pub fn (mut c Cache[T]) set(id u32, obj &T) {
|
||||
now := time.now().unix()
|
||||
|
||||
// Calculate entry size (approximate)
|
||||
entry_size := sizeof(T) + sizeof(CacheEntry[T])
|
||||
|
||||
// Check memory and entry count limits
|
||||
new_total := c.total_size + u64(entry_size)
|
||||
max_bytes := u64(c.config.max_size_mb * 1024 * 1024)
|
||||
|
||||
// Always evict if we're at or above max_entries
|
||||
if c.entries.len >= int(c.config.max_entries) {
|
||||
c.evict()
|
||||
} else if new_total > max_bytes {
|
||||
// Otherwise evict only if we're over memory limit
|
||||
c.evict()
|
||||
}
|
||||
|
||||
// Create new entry
|
||||
entry := &CacheEntry[T]{
|
||||
obj: *obj
|
||||
last_access: now
|
||||
created_at: now
|
||||
size: u32(entry_size)
|
||||
}
|
||||
|
||||
// Update total size
|
||||
if old := c.entries[id] {
|
||||
c.total_size -= u64(old.size)
|
||||
}
|
||||
c.total_size += u64(entry_size)
|
||||
|
||||
// Add to entries map
|
||||
c.entries[id] = entry
|
||||
|
||||
// Update access log
|
||||
idx := c.access_log.index(id)
|
||||
if idx >= 0 {
|
||||
c.access_log.delete(idx)
|
||||
}
|
||||
c.access_log << id
|
||||
|
||||
// Ensure access_log stays in sync with entries
|
||||
if c.access_log.len > c.entries.len {
|
||||
c.access_log = c.access_log[c.access_log.len - c.entries.len..]
|
||||
}
|
||||
}
|
||||
|
||||
// evict removes entries based on configured eviction ratio
|
||||
fn (mut c Cache[T]) evict() {
|
||||
// If we're at max entries, remove enough to get to 80% capacity
|
||||
target_size := int(c.config.max_entries) * 8 / 10 // 80%
|
||||
num_to_evict := if c.entries.len >= int(c.config.max_entries) {
|
||||
c.entries.len - target_size
|
||||
} else {
|
||||
math.max(1, int(f64(c.entries.len) * c.config.eviction_ratio))
|
||||
}
|
||||
|
||||
if num_to_evict > 0 {
|
||||
// Remove oldest entries
|
||||
mut evicted_size := u64(0)
|
||||
for i := 0; i < num_to_evict && i < c.access_log.len; i++ {
|
||||
id := c.access_log[i]
|
||||
if entry := c.entries[id] {
|
||||
evicted_size += u64(entry.size)
|
||||
c.entries.delete(id)
|
||||
}
|
||||
}
|
||||
|
||||
// Update total size and access log
|
||||
c.total_size -= evicted_size
|
||||
c.access_log = c.access_log[num_to_evict..]
|
||||
}
|
||||
}
|
||||
|
||||
// remove deletes a single entry from the cache
|
||||
pub fn (mut c Cache[T]) remove(id u32) {
|
||||
if entry := c.entries[id] {
|
||||
c.total_size -= u64(entry.size)
|
||||
}
|
||||
c.entries.delete(id)
|
||||
}
|
||||
|
||||
// clear empties the cache
|
||||
pub fn (mut c Cache[T]) clear() {
|
||||
c.entries.clear()
|
||||
c.access_log.clear()
|
||||
c.total_size = 0
|
||||
}
|
||||
|
||||
// len returns the number of entries in the cache
|
||||
pub fn (c &Cache[T]) len() int {
|
||||
return c.entries.len
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.