Compare commits

22 commits

| SHA1 |
|---|
| 299f6dea06 |
| ed025f9acb |
| c4ea066927 |
| 5f9c6ff2bb |
| 8965f7ae89 |
| 9a931b65e2 |
| 2c149507f6 |
| 34dea39c52 |
| f1a4547961 |
| 8ae56a8df6 |
| b731c4c388 |
| e929ce029d |
| 5160096a1a |
| f219a4041a |
| 674eae1c11 |
| f62369bd01 |
| 7a6660ebd8 |
| e20d1bdcc5 |
| 3e309b6379 |
| ae4e92e090 |
| 7b69719f0e |
| 1d631fec21 |

aiprompts/reflection.md — new file, 187 lines

@@ -0,0 +1,187 @@
## Compile time reflection

`$` is used as a prefix for compile-time (also referred to as 'comptime') operations.

Having built-in JSON support is nice, but V also allows you to create efficient serializers for any data format. V has compile-time `if` and `for` constructs:

### .fields

You can iterate over struct fields using `.fields`. It also works with generic types (e.g. `T.fields`) and generic arguments (e.g. `param.fields`, where `fn gen[T](param T) {`).

```v
struct User {
    name string
    age  int
}

fn main() {
    $for field in User.fields {
        $if field.typ is string {
            println('${field.name} is of type string')
        }
    }
}

// Output:
// name is of type string
```
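
The same loop also works inside a generic function, which is how serializers are usually written. Below is a minimal sketch (not part of the file above; `Point` and `field_names` are made-up names for illustration) that collects the field names of whatever struct type is passed in:

```v
struct Point {
    x int
    y int
}

// collect the field names of any struct type T at compile time
fn field_names[T]() []string {
    mut names := []string{}
    $for field in T.fields {
        names << field.name
    }
    return names
}

fn main() {
    println(field_names[Point]()) // ['x', 'y']
}
```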

### .values

You can read enum values and their attributes.

```v
enum Color {
    red  @[RED]  // first attribute
    blue @[BLUE] // second attribute
}

fn main() {
    $for e in Color.values {
        println(e.name)
        println(e.attrs)
    }
}

// Output:
// red
// ['RED']
// blue
// ['BLUE']
```

### .attributes

You can read struct attributes.

```v
@[COLOR]
struct Foo {
    a int
}

fn main() {
    $for e in Foo.attributes {
        println(e)
    }
}

// Output:
// StructAttribute{
//     name: 'COLOR'
//     has_arg: false
//     arg: ''
//     kind: plain
// }
```

### .variants

You can read the variant types of a sum type.

```v
type MySum = int | string

fn main() {
    $for v in MySum.variants {
        $if v.typ is int {
            println('has int type')
        } $else $if v.typ is string {
            println('has string type')
        }
    }
}

// Output:
// has int type
// has string type
```

### .methods

You can retrieve information about struct methods.

```v
struct Foo {
}

fn (f Foo) test() int {
    return 123
}

fn (f Foo) test2() string {
    return 'foo'
}

fn main() {
    foo := Foo{}
    $for m in Foo.methods {
        $if m.return_type is int {
            print('${m.name} returns int: ')
            println(foo.$method())
        } $else $if m.return_type is string {
            print('${m.name} returns string: ')
            println(foo.$method())
        }
    }
}

// Output:
// test returns int: 123
// test2 returns string: foo
```

### .params

You can retrieve information about struct method parameters.

```v
struct Test {
}

fn (t Test) foo(arg1 int, arg2 string) {
}

fn main() {
    $for m in Test.methods {
        $for param in m.params {
            println('${typeof(param.typ).name}: ${param.name}')
        }
    }
}

// Output:
// int: arg1
// string: arg2
```
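
`.methods` and `.params` can be combined to print rough method signatures at compile time. A small sketch (not part of the file above; the `Calc` struct is a made-up example):

```v
struct Calc {}

fn (c Calc) add(a int, b int) int {
    return a + b
}

fn main() {
    // print a rough signature for every method of Calc
    $for m in Calc.methods {
        print(m.name + '(')
        $for p in m.params {
            print('${p.name} ${typeof(p.typ).name} ')
        }
        println(')')
    }
}

// Output:
// add(a int b int )
```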

## Example

```v
// An example deserializer implementation

struct User {
    name string
    age  int
}

fn main() {
    data := 'name=Alice\nage=18'
    user := decode[User](data)
    println(user)
}

fn decode[T](data string) T {
    mut result := T{}
    // compile-time `for` loop
    // T.fields gives an array of a field metadata type
    $for field in T.fields {
        $if field.typ is string {
            // $(string_expr) produces an identifier
            result.$(field.name) = get_string(data, field.name)
        } $else $if field.typ is int {
            result.$(field.name) = get_int(data, field.name)
        }
    }
    return result
}

fn get_string(data string, field_name string) string {
    for line in data.split_into_lines() {
        key_val := line.split('=')
        if key_val[0] == field_name {
            return key_val[1]
        }
    }
    return ''
}

fn get_int(data string, field string) int {
    return get_string(data, field).int()
}

// `decode[User]` generates:
// fn decode_User(data string) User {
//     mut result := User{}
//     result.name = get_string(data, 'name')
//     result.age = get_int(data, 'age')
//     return result
// }
```
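
The same `.fields` loop can drive the encoder side as well. Below is a minimal sketch (not part of the file above), assuming the same `name=value` line format that `decode` parses:

```v
fn encode[T](obj T) string {
    mut lines := []string{}
    $for field in T.fields {
        $if field.typ is string {
            // comptime field access: read the field selected by name
            lines << field.name + '=' + obj.$(field.name)
        } $else $if field.typ is int {
            lines << field.name + '=' + obj.$(field.name).str()
        }
    }
    return lines.join('\n')
}

// encode(User{ name: 'Alice', age: 18 }) == 'name=Alice\nage=18'
```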

cli/.gitignore — vendored, +2

@@ -1 +1,3 @@
hero
compile
compile_upload
cli/hero.v — 22 changed lines

@@ -19,6 +19,26 @@ fn playcmds_do(path string) ! {
}

fn do() ! {
    if ! core.is_osx()! {
        if os.getenv('SUDO_COMMAND') != '' || os.getenv('SUDO_USER') != '' {
            println('Error: Please do not run this program with sudo!')
            exit(1) // Exit with error code
        }
    }

    if os.getuid() == 0 {
        if core.is_osx()! {
            eprintln("please do not run hero as root in osx.")
            exit(1)
        }
    } else {
        if ! core.is_osx()! {
            eprintln("please do run hero as root, don't use sudo.")
            exit(1)
        }
    }

    if os.args.len == 2 {
        mypath := os.args[1]
        if mypath.to_lower().ends_with('.hero') {

@@ -31,7 +51,7 @@ fn do() ! {
    mut cmd := Command{
        name:        'hero'
        description: 'Your HERO toolset.'
-       version:     '1.0.7'
+       version:     '1.0.13'
    }

    // herocmds.cmd_run_add_flags(mut cmd)
examples/clients/mycelium.vsh — new executable file, 108 lines

@@ -0,0 +1,108 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.mycelium
import freeflowuniverse.herolib.installers.net.mycelium as mycelium_installer
import freeflowuniverse.herolib.osal
import time
import os
import encoding.base64

const server1_port = 9001
const server2_port = 9002

fn terminate(port int) ! {
    // Step 1: Run lsof to get process details
    res := os.execute('lsof -i:${port}')
    if res.exit_code != 0 {
        return error('no service running at port ${port} due to: ${res.output}')
    }

    // Step 2: Parse the output to extract the PID
    lines := res.output.split('\n')
    if lines.len < 2 {
        return error('no process found running on port ${port}')
    }

    // The PID is the second column in the output
    fields := lines[1].split(' ')
    if fields.len < 2 {
        return error('failed to parse lsof output')
    }
    pid := fields[1]

    // Step 3: Kill the process using the PID
    kill_res := os.execute('kill ${pid}')
    if kill_res.exit_code != 0 {
        return error('failed to kill process ${pid}: ${kill_res.output}')
    }

    println('Successfully terminated process ${pid} running on port ${port}')
}

// Check if not installed, install it.
mut installer := mycelium_installer.get()!
installer.install()!

mycelium.delete()!

spawn fn () {
    os.execute('mkdir -p /tmp/mycelium_server1 && cd /tmp/mycelium_server1 && mycelium --peers tcp://188.40.132.242:9651 quic://[2a01:4f8:212:fa6::2]:9651 tcp://185.69.166.7:9651 quic://[2a02:1802:5e:0:ec4:7aff:fe51:e36b]:9651 tcp://65.21.231.58:9651 quic://[2a01:4f9:5a:1042::2]:9651 tcp://[2604:a00:50:17b:9e6b:ff:fe1f:e054]:9651 quic://5.78.122.16:9651 tcp://[2a01:4ff:2f0:3621::1]:9651 quic://142.93.217.194:9651 --tun-name tun2 --tcp-listen-port 9652 --quic-listen-port 9653 --api-addr 127.0.0.1:${server1_port}')
}()

spawn fn () {
    os.execute('mkdir -p /tmp/mycelium_server2 && cd /tmp/mycelium_server2 && mycelium --peers tcp://188.40.132.242:9651 quic://[2a01:4f8:212:fa6::2]:9651 tcp://185.69.166.7:9651 quic://[2a02:1802:5e:0:ec4:7aff:fe51:e36b]:9651 tcp://65.21.231.58:9651 quic://[2a01:4f9:5a:1042::2]:9651 tcp://[2604:a00:50:17b:9e6b:ff:fe1f:e054]:9651 quic://5.78.122.16:9651 tcp://[2a01:4ff:2f0:3621::1]:9651 quic://142.93.217.194:9651 --tun-name tun3 --tcp-listen-port 9654 --quic-listen-port 9655 --api-addr 127.0.0.1:${server2_port}')
}()

defer {
    terminate(server1_port) or {}
    terminate(server2_port) or {}
}

time.sleep(2 * time.second)

mut client1 := mycelium.get()!
client1.server_url = 'http://localhost:${server1_port}'
client1.name = 'client1'
println(client1)

mut client2 := mycelium.get()!
client2.server_url = 'http://localhost:${server2_port}'
client2.name = 'client2'
println(client2)

inspect1 := mycelium.inspect(key_file_path: '/tmp/mycelium_server1/priv_key.bin')!
inspect2 := mycelium.inspect(key_file_path: '/tmp/mycelium_server2/priv_key.bin')!

println('Server 1 public key: ${inspect1.public_key}')
println('Server 2 public key: ${inspect2.public_key}')

// Send a message to a node by public key
// Parameters: public_key, payload, topic, wait_for_reply
msg := client1.send_msg(
    public_key: inspect2.public_key // destination public key
    payload:    'Sending a message from the client 1 to the client 2' // message payload
    topic:      'testing' // optional topic
)!

println('Sent message ID: ${msg.id}')
println('send succeeded')

// Receive messages
// Parameters: wait_for_message, peek_only, topic_filter
received := client2.receive_msg(wait: true, peek: false, topic: 'testing')!
println('Received message from: ${received.src_pk}')
println('Message payload: ${base64.decode_str(received.payload)}')

// Reply to a message
// client1.reply_msg(
//     id: received.id
//     public_key: received.src_pk
//     payload: 'Got your message!'
//     topic: 'greetings'
// )!

// // // Check message status
// // status := client.get_msg_status(msg.id)!
// // println('Message status: ${status.state}')
// // println('Created at: ${status.created}')
// // println('Expires at: ${status.deadline}')
(deleted file, path unknown)

@@ -1,18 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import time

mut gs_default := gittools.new()!

println(gs_default)

// // Initializes the Git structure with the coderoot path.
// coderoot := '/tmp/code'
// mut gs_tmo := gittools.new(coderoot: coderoot)!

// // Retrieve the specified repository.
// mut repo := gs_default.get_repo(name: 'herolib')!

// println(repo)
examples/develop/gittools/gittools_path_get.vsh — new executable file, 14 lines

@@ -0,0 +1,14 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import time

mut gs := gittools.new()!
mydocs_path := gs.get_path(
    pull:  true
    reset: false
    url:   'https://git.ourworld.tf/tfgrid/info_docs_depin/src/branch/main/docs'
)!

println(mydocs_path)
(modified file, path unknown)

@@ -1,5 +1,8 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer
import freeflowuniverse.herolib.osal

coredns_installer.install()!
// coredns_installer.delete()!
mut installer := coredns_installer.get()!
installer.build()!
(modified file, path unknown)

@@ -1,5 +1,40 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.net.mycelium as mycelium_installer
import freeflowuniverse.herolib.clients.mycelium

-mycelium_installer.start()!
+mut installer := mycelium_installer.get()!
+installer.start()!

mut r := mycelium.inspect()!
println(r)

mut client := mycelium.get()!

// Send a message to a node by public key
// Parameters: public_key, payload, topic, wait_for_reply
msg := client.send_msg('abc123...', // destination public key
    'Hello World', // message payload
    'greetings', // optional topic
    true // wait for reply
)!
println('Sent message ID: ${msg.id}')

// Receive messages
// Parameters: wait_for_message, peek_only, topic_filter
received := client.receive_msg(true, false, 'greetings')!
println('Received message from: ${received.src_pk}')
println('Message payload: ${received.payload}')

// Reply to a message
client.reply_msg(received.id, // original message ID
    received.src_pk, // sender's public key
    'Got your message!', // reply payload
    'greetings' // topic
)!

// Check message status
status := client.get_msg_status(msg.id)!
println('Message status: ${status.state}')
println('Created at: ${status.created}')
println('Expires at: ${status.deadline}')
examples/installers/traefik.vsh — new executable file, 12 lines

@@ -0,0 +1,12 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import os
import freeflowuniverse.herolib.installers.web.traefik as traefik_installer

traefik_installer.delete()!
mut installer := traefik_installer.get()!

installer.password = 'planet'
traefik_installer.set(installer)!

installer.start()!
examples/installers/zinit_installer.vsh — new executable file, 6 lines

@@ -0,0 +1,6 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.sysadmintools.zinit as zinit_installer

mut installer := zinit_installer.get()!
installer.start()!
examples/osal/tun.vsh — new executable file, 24 lines

@@ -0,0 +1,24 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.osal.tun

// Check if TUN is available
if available := tun.available() {
    if available {
        println('TUN is available on this system')

        // Get a free TUN interface name
        if interface_name := tun.free() {
            println('Found free TUN interface: ${interface_name}')

            // Example: Now you could use this interface name
            // to set up your tunnel
        } else {
            println('Error finding free interface: ${err}')
        }
    } else {
        println('TUN is not available on this system')
    }
} else {
    println('Error checking TUN availability: ${err}')
}
examples/virt/podman_buildah/.gitignore — vendored, +3

@@ -1 +1,4 @@
buildah_example
buildah_run_clean
buildah_run_mdbook
buildah_run
@@ -16,13 +16,13 @@ podman_installer0.install()!

mut engine := herocontainers.new(install: true, herocompile: false)!

-engine.reset_all()!
+// engine.reset_all()!

-mut builder_gorust := engine.builder_go_rust()!
+// mut builder_gorust := engine.builder_go_rust()!

// will build nodejs, python build & herolib, hero
// mut builder_hero := engine.builder_hero(reset:true)!

// mut builder_web := engine.builder_heroweb(reset:true)!

-builder_gorust.shell()!
+// builder_gorust.shell()!
@@ -7,14 +7,22 @@ import freeflowuniverse.herolib.core.base
import time
import os

-mut pm := herocontainers.new(herocompile: true, install: false)!
+// herocompile means we do it for the host system
+mut pm := herocontainers.new(herocompile: false, install: false)!

mut mybuildcontainer := pm.builder_get('builder_heroweb')!
// pm.builder_base(reset:true)!

mut builder := pm.builder_get('base')!
builder.shell()!

println(builder)

// builder.install_zinit()!

// bash & python can be executed directly in build container

// any of the herocommands can be executed like this
-mybuildcontainer.run(cmd: 'installers -n heroweb', runtime: .herocmd)!
+// mybuildcontainer.run(cmd: 'installers -n heroweb', runtime: .herocmd)!

// //following will execute heroscript in the buildcontainer
// mybuildcontainer.run(
@@ -5,16 +5,15 @@ import freeflowuniverse.herolib.web.docusaurus

// Create a new docusaurus factory
mut docs := docusaurus.new(
-   // build_path: '/tmp/docusaurus_build'
+   build_path: '/tmp/docusaurus_build'
)!

// Create a new docusaurus site
mut site := docs.dev(
-   url:'https://git.ourworld.tf/despiegk/docs_kristof'
+   url: 'https://git.ourworld.tf/despiegk/docs_kristof'
)!

-//FOR FUTURE TO ADD CONTENT FROM DOCTREE
+// FOR FUTURE TO ADD CONTENT FROM DOCTREE

// Create a doctree for content
// mut tree := doctree.new(name: 'content')!

@@ -34,10 +33,10 @@ mut site := docs.dev(
// )!

// Build the docusaurus site
-//site.build()!
+// site.build()!

// Generate the static site
-//site.generate()!
+// site.generate()!

// Optionally open the site in a browser
// site.open()!
@@ -4,7 +4,7 @@ set -e

os_name="$(uname -s)"
arch_name="$(uname -m)"
-version='1.0.7'
+version='1.0.13'


# Base URL for GitHub releases
lib/clients/livekit/access_token.v — new file, 153 lines

@@ -0,0 +1,153 @@
|
||||
module livekit
|
||||
|
||||
// import time
|
||||
// import rand
|
||||
// import crypto.hmac
|
||||
// import crypto.sha256
|
||||
// import encoding.base64
|
||||
// import json
|
||||
|
||||
// // Define AccessTokenOptions struct
|
||||
// pub struct AccessTokenOptions {
|
||||
// pub mut:
|
||||
// ttl int | string // TTL in seconds or a time span (e.g., '2d', '5h')
|
||||
// name string // Display name for the participant
|
||||
// identity string // Identity of the user
|
||||
// metadata string // Custom metadata to be passed to participants
|
||||
// }
|
||||
|
||||
// // Struct representing grants
|
||||
// pub struct ClaimGrants {
|
||||
// pub mut:
|
||||
// video VideoGrant
|
||||
// iss string
|
||||
// exp i64
|
||||
// nbf int
|
||||
// sub string
|
||||
// name string
|
||||
// }
|
||||
|
||||
// // VideoGrant struct placeholder
|
||||
// pub struct VideoGrant {
|
||||
// pub mut:
|
||||
// room string
|
||||
// room_join bool @[json: 'roomJoin']
|
||||
// can_publish bool @[json: 'canPublish']
|
||||
// can_publish_data bool @[json: 'canPublishData']
|
||||
// can_subscribe bool @[json: 'canSubscribe']
|
||||
// }
|
||||
|
||||
// // SIPGrant struct placeholder
|
||||
// struct SIPGrant {}
|
||||
|
||||
// // AccessToken class
|
||||
// pub struct AccessToken {
|
||||
// mut:
|
||||
// api_key string
|
||||
// api_secret string
|
||||
// grants ClaimGrants
|
||||
// identity string
|
||||
// ttl int | string
|
||||
// }
|
||||
|
||||
// // Constructor for AccessToken
|
||||
// pub fn new_access_token(api_key string, api_secret string, options AccessTokenOptions) !AccessToken {
|
||||
// if api_key == '' || api_secret == '' {
|
||||
// return error('API key and API secret must be set')
|
||||
// }
|
||||
|
||||
// ttl := if options.ttl is int { options.ttl } else { 21600 } // Default TTL of 6 hours (21600 seconds)
|
||||
|
||||
// return AccessToken{
|
||||
// api_key: api_key
|
||||
// api_secret: api_secret
|
||||
// identity: options.identity
|
||||
// ttl: ttl
|
||||
// grants: ClaimGrants{
|
||||
// exp: time.now().unix()+ttl
|
||||
// iss: api_key
|
||||
// sub: options.name
|
||||
// name: options.name
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Method to add a video grant to the token
|
||||
// pub fn (mut token AccessToken) add_video_grant(grant VideoGrant) {
|
||||
// token.grants.video = grant
|
||||
// }
|
||||
|
||||
|
||||
// // Method to generate a JWT token
|
||||
// pub fn (token AccessToken) to_jwt() !string {
|
||||
// // Create JWT payload
|
||||
// payload := json.encode(token.grants)
|
||||
|
||||
// println('payload: ${payload}')
|
||||
|
||||
// // Create JWT header
|
||||
// header := '{"alg":"HS256","typ":"JWT"}'
|
||||
|
||||
// // Encode header and payload in base64
|
||||
// header_encoded := base64.url_encode_str(header)
|
||||
// payload_encoded := base64.url_encode_str(payload)
|
||||
|
||||
// // Create the unsigned token
|
||||
// unsigned_token := '${header_encoded}.${payload_encoded}'
|
||||
|
||||
// // Create the HMAC-SHA256 signature
|
||||
// signature := hmac.new(token.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, sha256.block_size)
|
||||
|
||||
// // Encode the signature in base64
|
||||
// signature_encoded := base64.url_encode(signature)
|
||||
|
||||
// // Create the final JWT
|
||||
// jwt := '${unsigned_token}.${signature_encoded}'
|
||||
// return jwt
|
||||
// }
|
||||
|
||||
// // TokenVerifier class
|
||||
// pub struct TokenVerifier {
|
||||
// api_key string
|
||||
// api_secret string
|
||||
// }
|
||||
|
||||
// // Constructor for TokenVerifier
|
||||
// pub fn new_token_verifier(api_key string, api_secret string) !TokenVerifier {
|
||||
// if api_key == '' || api_secret == '' {
|
||||
// return error('API key and API secret must be set')
|
||||
// }
|
||||
// return TokenVerifier{
|
||||
// api_key: api_key
|
||||
// api_secret: api_secret
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Method to verify the JWT token
|
||||
// pub fn (verifier TokenVerifier) verify(token string) !ClaimGrants {
|
||||
// // Split the token into parts
|
||||
// parts := token.split('.')
|
||||
// if parts.len != 3 {
|
||||
// return error('Invalid token')
|
||||
// }
|
||||
|
||||
// // Decode header, payload, and signature
|
||||
// payload_encoded := parts[1]
|
||||
// signature_encoded := parts[2]
|
||||
|
||||
// // Recompute the HMAC-SHA256 signature
|
||||
// unsigned_token := '${parts[0]}.${parts[1]}'
|
||||
// expected_signature := hmac.new(verifier.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, sha256.block_size)
|
||||
// expected_signature_encoded := base64.url_encode(expected_signature)
|
||||
|
||||
// // Verify the signature
|
||||
// if signature_encoded != expected_signature_encoded {
|
||||
// return error('Invalid token signature')
|
||||
// }
|
||||
|
||||
// // Decode the payload
|
||||
// payload_json := base64.url_decode_str(payload_encoded)
|
||||
|
||||
// // Parse and return the claims as ClaimGrants
|
||||
// return json.decode(ClaimGrants, payload_json)
|
||||
// }
|
||||
lib/clients/livekit/server_client.v — new file, 199 lines

@@ -0,0 +1,199 @@
|
||||
module livekit
|
||||
|
||||
import net.http
|
||||
import json
|
||||
|
||||
// // pub struct Client {
|
||||
// // pub:
|
||||
// // host string
|
||||
// // token string
|
||||
// // }
|
||||
|
||||
// // pub struct Room {
|
||||
// // pub mut:
|
||||
// // sid string
|
||||
// // name string
|
||||
// // empty_timeout string
|
||||
// // max_participants string
|
||||
// // creation_time string
|
||||
// // turn_password string
|
||||
// // metadata string
|
||||
// // num_participants u32
|
||||
// // active_recording bool
|
||||
// // }
|
||||
|
||||
// pub struct ParticipantInfo {
|
||||
// pub mut:
|
||||
// sid string
|
||||
// identity string
|
||||
// name string
|
||||
// state string
|
||||
// tracks []TrackInfo
|
||||
// metadata string
|
||||
// joined_at i64
|
||||
// permission ParticipantPermission
|
||||
// is_publisher bool
|
||||
// }
|
||||
|
||||
// pub struct TrackInfo {
|
||||
// pub mut:
|
||||
// sid string
|
||||
// typ string @[json: 'type']
|
||||
// source string
|
||||
// name string
|
||||
// mime_type string
|
||||
// muted bool
|
||||
// width u32
|
||||
// height u32
|
||||
// simulcast bool
|
||||
// disable_dtx bool
|
||||
// layers []VideoLayer
|
||||
// }
|
||||
|
||||
// pub struct ParticipantPermission {
|
||||
// pub mut:
|
||||
// can_subscribe bool
|
||||
// can_publish bool
|
||||
// can_publish_data bool
|
||||
// }
|
||||
|
||||
// pub struct VideoLayer {
|
||||
// pub mut:
|
||||
// quality string
|
||||
// width u32
|
||||
// height u32
|
||||
// }
|
||||
|
||||
// // Helper method to make POST requests to LiveKit API
|
||||
// fn (client Client) make_post_request(url string, body string) !http.Response {
|
||||
// mut headers := http.new_header()
|
||||
// headers.add_custom('Authorization', 'Bearer ${client.token}')!
|
||||
// headers.add_custom('Content-Type', 'application/json')!
|
||||
|
||||
// req := http.Request{
|
||||
// method: http.Method.post
|
||||
// url: url
|
||||
// data: body
|
||||
// header: headers
|
||||
// }
|
||||
// return req.do()!
|
||||
// }
|
||||
|
||||
// pub struct CreateRoomArgs {
|
||||
// pub:
|
||||
// name string
|
||||
// empty_timeout u32
|
||||
// max_participants u32
|
||||
// metadata string
|
||||
// }
|
||||
|
||||
// // RoomService API methods
|
||||
// pub fn (client Client) create_room(args CreateRoomArgs) !Room {
|
||||
// body := json.encode(args)
|
||||
// url := '${client.host}/twirp/livekit.RoomService/CreateRoom'
|
||||
// response := client.make_post_request(url, body)!
|
||||
|
||||
// return json.decode(Room, response.body)!
|
||||
// }
|
||||
|
||||
// // pub fn (client Client) list_rooms(names []string) ![]Room {
|
||||
// // body := json.encode({
|
||||
// // 'names': names
|
||||
// // })
|
||||
// // url := '${client.host}/twirp/livekit.RoomService/ListRooms'
|
||||
// // response := client.make_post_request(url, body)!
|
||||
|
||||
// // return json.decode([]Room, response.body)!
|
||||
// // }
|
||||
|
||||
// pub fn (client Client) delete_room(room_name string) ! {
|
||||
// body := json.encode({
|
||||
// 'room': room_name
|
||||
// })
|
||||
// url := '${client.host}/twirp/livekit.RoomService/DeleteRoom'
|
||||
// _ := client.make_post_request(url, body)!
|
||||
// }
|
||||
|
||||
// pub fn (client Client) list_participants(room_name string) ![]ParticipantInfo {
|
||||
// body := json.encode({
|
||||
// 'room': room_name
|
||||
// })
|
||||
// url := '${client.host}/twirp/livekit.RoomService/ListParticipants'
|
||||
// response := client.make_post_request(url, body)!
|
||||
|
||||
// return json.decode([]ParticipantInfo, response.body)!
|
||||
// }
|
||||
|
||||
// pub fn (client Client) get_participant(room_name string, identity string) !ParticipantInfo {
|
||||
// body := json.encode({
|
||||
// 'room': room_name
|
||||
// 'identity': identity
|
||||
// })
|
||||
// url := '${client.host}/twirp/livekit.RoomService/GetParticipant'
|
||||
// response := client.make_post_request(url, body)!
|
||||
|
||||
// return json.decode(ParticipantInfo, response.body)!
|
||||
// }
|
||||
|
||||
// pub fn (client Client) remove_participant(room_name string, identity string) ! {
|
||||
// body := json.encode({
|
||||
// 'room': room_name
|
||||
// 'identity': identity
|
||||
// })
|
||||
// url := '${client.host}/twirp/livekit.RoomService/RemoveParticipant'
|
||||
// _ := client.make_post_request(url, body)!
|
||||
// }
|
||||
|
||||
// pub struct MutePublishedTrackArgs {
|
||||
// pub:
|
||||
// room_name string
|
||||
// identity string
|
||||
// track_sid string
|
||||
// muted bool
|
||||
// }
|
||||
|
||||
// pub fn (client Client) mute_published_track(args MutePublishedTrackArgs) ! {
|
||||
// body := json.encode(args)
|
||||
// url := '${client.host}/twirp/livekit.RoomService/MutePublishedTrack'
|
||||
// _ := client.make_post_request(url, body)!
|
||||
// }
|
||||
|
||||
// pub struct UpdateParticipantArgs {
|
||||
// pub:
|
||||
// room_name string @[json: 'room']
|
||||
// identity string
|
||||
// metadata string
|
||||
// permission ParticipantPermission
|
||||
// }
|
||||
|
||||
// pub fn (client Client) update_participant(args UpdateParticipantArgs) ! {
|
||||
// body := json.encode(args)
|
||||
// url := '${client.host}/twirp/livekit.RoomService/UpdateParticipant'
|
||||
// _ := client.make_post_request(url, body)!
|
||||
// }
|
||||
|
||||
// pub struct UpdateRoomMetadataArgs {
|
||||
// pub:
|
||||
// room_name string @[json: 'room']
|
||||
// metadata string
|
||||
// }
|
||||
|
||||
// pub fn (client Client) update_room_metadata(args UpdateRoomMetadataArgs) ! {
|
||||
// body := json.encode(args)
|
||||
// url := '${client.host}/twirp/livekit.RoomService/UpdateRoomMetadata'
|
||||
// _ := client.make_post_request(url, body)!
|
||||
// }
|
||||
|
||||
// pub struct SendDataArgs {
|
||||
// pub:
|
||||
// room_name string @[json: 'room']
|
||||
// data []u8
|
||||
// kind string
|
||||
// destination_identities []string
|
||||
// }
|
||||
|
||||
// pub fn (client Client) send_data(args SendDataArgs) ! {
|
||||
// body := json.encode(args)
|
||||
// url := '${client.host}/twirp/livekit.RoomService/SendData'
|
||||
// _ := client.make_post_request(url, body)!
|
||||
// }
|
||||
@@ -1,39 +1,65 @@
|
||||
module mycelium
|
||||
|
||||
import json
|
||||
import encoding.base64
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
|
||||
// Represents a destination for a message, can be either IP or public key
|
||||
pub struct MessageDestination {
|
||||
pub:
|
||||
pk string
|
||||
ip string @[omitempty] // IP in the subnet of the receiver node
|
||||
pk string @[omitempty] // hex encoded public key of the receiver node
|
||||
}
|
||||
|
||||
// Body of a message to be sent
|
||||
pub struct PushMessageBody {
|
||||
pub:
|
||||
dst MessageDestination
|
||||
payload string
|
||||
topic ?string // optional message topic
|
||||
payload string // base64 encoded message
|
||||
}
|
||||
|
||||
// Response containing message ID after pushing
|
||||
pub struct PushMessageResponseId {
|
||||
pub:
|
||||
id string // hex encoded message ID
|
||||
}
|
||||
|
||||
// A message received by the system
|
||||
pub struct InboundMessage {
|
||||
pub:
|
||||
id string
|
||||
src_ip string @[json: 'srcIP']
|
||||
src_pk string @[json: 'srcPk']
|
||||
dst_ip string @[json: 'dstIp']
|
||||
dst_pk string @[json: 'dstPk']
|
||||
payload string
|
||||
src_ip string @[json: 'srcIp'] // Sender overlay IP address
|
||||
src_pk string @[json: 'srcPk'] // Sender public key, hex encoded
|
||||
dst_ip string @[json: 'dstIp'] // Receiver overlay IP address
|
||||
dst_pk string @[json: 'dstPk'] // Receiver public key, hex encoded
|
||||
topic string // Optional message topic
|
||||
payload string // Message payload, base64 encoded
|
||||
}
|
||||
|
||||
// Information about an outbound message
|
||||
pub struct MessageStatusResponse {
|
||||
pub:
|
||||
id string
|
||||
dst string
|
||||
state string
|
||||
created string
|
||||
deadline string
|
||||
msg_len string @[json: 'msgLen']
|
||||
dst string // IP address of receiving node
|
||||
state string // pending, received, read, aborted or sending object
|
||||
created i64 // Unix timestamp of creation
|
||||
deadline i64 // Unix timestamp of expiry
|
||||
msg_len int @[json: 'msgLen'] // Length in bytes
|
||||
}
|
||||
|
||||
// General information about a node
|
||||
pub struct Info {
|
||||
pub:
|
||||
node_subnet string @[json: 'nodeSubnet'] // subnet owned by node
|
||||
}
|
||||
|
||||
// Response containing public key for a node IP
|
||||
pub struct PublicKeyResponse {
|
||||
pub:
|
||||
node_pub_key string @[json: 'NodePubKey'] // hex encoded public key
|
||||
}
|
||||
|
||||
// Get connection to mycelium server
|
||||
pub fn (mut self Mycelium) connection() !&httpconnection.HTTPConnection {
|
||||
mut c := self.conn or {
|
||||
mut c2 := httpconnection.new(
|
||||
@@ -47,30 +73,63 @@ pub fn (mut self Mycelium) connection() !&httpconnection.HTTPConnection {
|
||||
return c
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) send_msg(pk string, payload string, wait bool) !InboundMessage {
|
||||
@[params]
|
||||
pub struct SendMessageArgs {
|
||||
pub mut:
|
||||
public_key string @[required]
|
||||
payload string @[required]
|
||||
topic ?string
|
||||
wait bool
|
||||
}
|
||||
|
||||
// Send a message to a node identified by public key
|
||||
pub fn (mut self Mycelium) send_msg(args SendMessageArgs) !InboundMessage {
|
||||
mut conn := self.connection()!
|
||||
mut params := {
|
||||
'dst': json.encode(MessageDestination{ pk: pk })
|
||||
'payload': payload
|
||||
mut body := PushMessageBody{
|
||||
dst: MessageDestination{
|
||||
pk: args.public_key
|
||||
ip: ''
|
||||
}
|
||||
payload: base64.encode_str(args.payload)
|
||||
topic: if v := args.topic {
|
||||
base64.encode_str(v)
|
||||
} else {
|
||||
none
|
||||
}
|
||||
}
|
||||
mut prefix := ''
|
||||
if wait {
|
||||
prefix = '?reply_timeout=120'
|
||||
mut prefix := '/api/v1/messages'
|
||||
if args.wait {
|
||||
prefix += '?reply_timeout=120'
|
||||
}
|
||||
return conn.post_json_generic[InboundMessage](
|
||||
method: .post
|
||||
prefix: prefix
|
||||
params: params
|
||||
data: json.encode(body)
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) receive_msg(wait bool) !InboundMessage {
|
||||
@[params]
|
||||
pub struct ReceiveMessageArgs {
|
||||
pub mut:
|
||||
topic ?string
|
||||
wait bool
|
||||
peek bool
|
||||
}
|
||||
|
||||
// Receive a message from the queue
|
||||
pub fn (mut self Mycelium) receive_msg(args ReceiveMessageArgs) !InboundMessage {
|
||||
mut conn := self.connection()!
|
||||
mut prefix := ''
|
||||
if wait {
|
||||
prefix = '?timeout=60'
|
||||
mut prefix := '/api/v1/messages?peek=${args.peek}&'
|
||||
|
||||
if args.wait {
|
||||
prefix += 'timeout=120&'
|
||||
}
|
||||
|
||||
if v := args.topic {
|
||||
prefix += 'topic=${base64.encode_str(v)}'
|
||||
}
|
||||
|
||||
return conn.get_json_generic[InboundMessage](
|
||||
method: .get
|
||||
prefix: prefix
|
||||
@@ -78,17 +137,9 @@ pub fn (mut self Mycelium) receive_msg(wait bool) !InboundMessage {
|
||||
)!
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) receive_msg_opt(wait bool) ?InboundMessage {
|
||||
mut conn := self.connection()!
|
||||
mut prefix := ''
|
||||
if wait {
|
||||
prefix = '?timeout=60'
|
||||
}
|
||||
res := conn.get_json_generic[InboundMessage](
|
||||
method: .get
|
||||
prefix: prefix
|
||||
dataformat: .json
|
||||
) or {
|
||||
// Optional version of receive_msg that returns none on 204
|
||||
pub fn (mut self Mycelium) receive_msg_opt(args ReceiveMessageArgs) ?InboundMessage {
|
||||
res := self.receive_msg(args) or {
|
||||
if err.msg().contains('204') {
|
||||
return none
|
||||
}
|
||||
@@ -97,25 +148,62 @@ pub fn (mut self Mycelium) receive_msg_opt(wait bool) ?InboundMessage {
|
||||
return res
|
||||
}
|
||||
|
||||
// Get status of a message by ID
|
||||
pub fn (mut self Mycelium) get_msg_status(id string) !MessageStatusResponse {
|
||||
mut conn := self.connection()!
|
||||
return conn.get_json_generic[MessageStatusResponse](
|
||||
method: .get
|
||||
prefix: 'status/${id}'
|
||||
prefix: '/api/v1/messages/status/${id}'
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
pub fn (mut self Mycelium) reply_msg(id string, pk string, payload string) ! {
|
||||
@[params]
|
||||
pub struct ReplyMessageArgs {
|
||||
pub mut:
|
||||
id string @[required]
|
||||
public_key string @[required]
|
||||
payload string @[required]
|
||||
topic ?string
|
||||
}
|
||||
|
||||
// Reply to a message
|
||||
pub fn (mut self Mycelium) reply_msg(args ReplyMessageArgs) ! {
|
||||
mut conn := self.connection()!
|
||||
mut params := {
|
||||
'dst': json.encode(MessageDestination{ pk: pk })
|
||||
'payload': payload
|
||||
mut body := PushMessageBody{
|
||||
dst: MessageDestination{
|
||||
pk: args.public_key
|
||||
ip: ''
|
||||
}
|
||||
payload: base64.encode_str(args.payload)
|
||||
topic: if v := args.topic { base64.encode_str(v) } else { none }
|
||||
}
|
||||
conn.post_json_generic[json.Any](
|
||||
_ := conn.post_json_str(
|
||||
method: .post
|
||||
prefix: 'reply/${id}'
|
||||
params: params
|
||||
prefix: '/api/v1/messages/reply/${args.id}'
|
||||
data: json.encode(body)
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
// curl -v -H 'Content-Type: application/json' -d '{"dst": {"pk": "be4bf135d60b7e43a46be1ad68f955cdc1209a3c55dc30d00c4463b1dace4377"}, "payload": "xuV+"}' http://localhost:8989/api/v1/messages\
|
||||
|
||||
// Get node info
|
||||
pub fn (mut self Mycelium) get_info() !Info {
|
||||
mut conn := self.connection()!
|
||||
return conn.get_json_generic[Info](
|
||||
method: .get
|
||||
prefix: '/api/v1/admin'
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
// Get public key for a node IP
|
||||
pub fn (mut self Mycelium) get_pubkey_from_ip(ip string) !PublicKeyResponse {
|
||||
mut conn := self.connection()!
|
||||
return conn.get_json_generic[PublicKeyResponse](
|
||||
method: .get
|
||||
prefix: '/api/v1/pubkey/${ip}'
|
||||
dataformat: .json
|
||||
)!
|
||||
}
|
||||
|
||||
lib/clients/mycelium/mycelium_check.v — new file, 71 lines

@@ -0,0 +1,71 @@
module mycelium

import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.core
import freeflowuniverse.herolib.installers.lang.rust
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.osal.screen
import freeflowuniverse.herolib.ui
import freeflowuniverse.herolib.sysadmin.startupmanager
import os
import time
import json

pub fn check() bool {
    // if core.is_osx()! {
    //     mut scr := screen.new(reset: false) or {return False}
    //     name := 'mycelium'
    //     if !scr.exists(name) {
    //         return false
    //     }
    // }

    // if !(osal.process_exists_byname('mycelium') or {return False}) {
    //     return false
    // }

    // TODO: might be dangerous if that one goes out
    ping_result := osal.ping(address: '40a:152c:b85b:9646:5b71:d03a:eb27:2462', retry: 2) or {
        return false
    }
    if ping_result == .ok {
        console.print_debug('could reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
        return true
    }
    console.print_stderr('could not reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
    return false
}

pub struct MyceliumInspectResult {
pub:
    public_key string @[json: publicKey]
    address    string
}

@[params]
pub struct MyceliumInspectArgs {
pub:
    key_file_path string = '/root/hero/cfg/priv_key.bin'
}

pub fn inspect(args MyceliumInspectArgs) !MyceliumInspectResult {
    command := 'mycelium inspect --key-file ${args.key_file_path} --json'
    result := os.execute(command)

    if result.exit_code != 0 {
        return error('Command failed: ${result.output}')
    }

    inspect_result := json.decode(MyceliumInspectResult, result.output) or {
        return error('Failed to parse JSON: ${err}')
    }

    return inspect_result
}

// if this returns empty, mycelium is probably not installed
pub fn ipaddr() string {
    r := inspect() or { MyceliumInspectResult{} }
    return r.address
}
@@ -2,8 +2,6 @@ module mycelium
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
|
||||
__global (
|
||||
mycelium_global map[string]&Mycelium
|
||||
@@ -12,35 +10,71 @@ __global (
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
// set the model in mem and the config on the filesystem
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&Mycelium {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := Mycelium{}
|
||||
if args.name !in mycelium_global {
|
||||
if !exists(args)! {
|
||||
set(obj)!
|
||||
} else {
|
||||
heroscript := context.hero_config_get('mycelium', args.name)!
|
||||
mut obj_ := heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return mycelium_global[args.name] or {
|
||||
println(mycelium_global)
|
||||
// bug if we get here because should be in globals
|
||||
panic('could not get config for mycelium with name, is bug:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
// register the config for the future
|
||||
pub fn set(o Mycelium) ! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set('mycelium', o.name, heroscript)!
|
||||
}
|
||||
|
||||
// does the config exists?
|
||||
pub fn exists(args_ ArgsGet) !bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists('mycelium', args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('mycelium', args.name)!
|
||||
if args.name in mycelium_global {
|
||||
// del mycelium_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
fn set_in_mem(o Mycelium) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
mycelium_global[o.name] = &o2
|
||||
mycelium_default = o.name
|
||||
}
|
||||
|
||||
// check we find the config on the filesystem
|
||||
pub fn exists(args_ ArgsGet) bool {
|
||||
mut model := args_get(args_)
|
||||
mut context := base.context() or { panic('bug') }
|
||||
return context.hero_config_exists('mycelium', model.name)
|
||||
}
|
||||
|
||||
// load the config error if it doesn't exist
|
||||
pub fn load(args_ ArgsGet) ! {
|
||||
mut model := args_get(args_)
|
||||
mut context := base.context()!
|
||||
mut heroscript := context.hero_config_get('mycelium', model.name)!
|
||||
play(heroscript: heroscript)!
|
||||
}
|
||||
|
||||
// save the config to the filesystem in the context
|
||||
pub fn save(o Mycelium) ! {
|
||||
mut context := base.context()!
|
||||
heroscript := encoderhero.encode[Mycelium](o)!
|
||||
context.hero_config_set('mycelium', model.name, heroscript)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
@@ -50,21 +84,28 @@ pub mut:
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut model := args_
|
||||
mut args := args_
|
||||
|
||||
if model.heroscript == '' {
|
||||
model.heroscript = heroscript_default()!
|
||||
}
|
||||
mut plbook := model.plbook or { playbook.new(text: model.heroscript)! }
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut configure_actions := plbook.find(filter: 'mycelium.configure')!
|
||||
if configure_actions.len > 0 {
|
||||
for config_action in configure_actions {
|
||||
mut p := config_action.params
|
||||
mycfg := cfg_play(p)!
|
||||
console.print_debug('install action mycelium.configure\n${mycfg}')
|
||||
set(mycfg)!
|
||||
save(mycfg)!
|
||||
mut install_actions := plbook.find(filter: 'mycelium.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
heroscript := install_action.heroscript()
|
||||
mut obj2 := heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// switch instance to be used for mycelium
|
||||
pub fn switch(name string) {
|
||||
mycelium_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
|
||||
@@ -1,39 +1,33 @@
|
||||
module mycelium
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import os
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
|
||||
pub const version = '0.0.0'
|
||||
const singleton = true
|
||||
const default = true
|
||||
|
||||
pub fn heroscript_default() !string {
|
||||
heroscript := "
|
||||
!!mycelium.configure
|
||||
name:'mycelium'
|
||||
"
|
||||
return heroscript
|
||||
}
|
||||
|
||||
@[heap]
|
||||
pub struct Mycelium {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
server_url string
|
||||
conn ?&httpconnection.HTTPConnection
|
||||
server_url string = 'http://localhost:8989'
|
||||
conn ?&httpconnection.HTTPConnection @[skip; str: skip]
|
||||
}
|
||||
|
||||
fn cfg_play(p paramsparser.Params) ! {
|
||||
mut mycfg := Mycelium{
|
||||
name: p.get_default('name', 'default')!
|
||||
server_url: p.get_default('server_url', 'http://localhost:8989/api/v1/messages')!
|
||||
}
|
||||
set(mycfg)!
|
||||
// your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ Mycelium) !Mycelium {
|
||||
mut mycfg := mycfg_
|
||||
return mycfg
|
||||
}
|
||||
|
||||
fn obj_init(obj_ Mycelium) !Mycelium {
|
||||
// never call get here, only thing we can do here is work on object itself
|
||||
mut obj := obj_
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj Mycelium) !string {
|
||||
return encoderhero.encode[Mycelium](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !Mycelium {
|
||||
mut obj := encoderhero.decode[Mycelium](heroscript)!
|
||||
return obj
|
||||
}
|
||||
|
||||
lib/clients/mycelium/openapi.yaml — new file, 602 lines

@@ -0,0 +1,602 @@
|
||||
openapi: 3.0.2
|
||||
info:
|
||||
version: '1.0.0'
|
||||
|
||||
title: Mycelium management
|
||||
contact:
|
||||
url: 'https://github.com/threefoldtech/mycelium'
|
||||
license:
|
||||
name: Apache 2.0
|
||||
url: 'https://github.com/threefoldtech/mycelium/blob/master/LICENSE'
|
||||
|
||||
description: |
|
||||
This is the specification of the **mycelium** management API. It is used to perform admin tasks on the system, and
|
||||
to perform administrative duties.
|
||||
|
||||
externalDocs:
|
||||
description: For full documentation, check out the mycelium github repo.
|
||||
url: 'https://github.com/threefoldtech/mycelium'
|
||||
|
||||
tags:
|
||||
- name: Admin
|
||||
description: Administrative operations
|
||||
- name: Peer
|
||||
description: Operations related to peer management
|
||||
- name: Route
|
||||
description: Operations related to network routes
|
||||
- name: Message
|
||||
description: Operations on the embedded message subsystem
|
||||
|
||||
servers:
|
||||
- url: 'http://localhost:8989'
|
||||
|
||||
paths:
|
||||
'/api/v1/admin':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
summary: Get general info about the node
|
||||
description: |
|
||||
Get general info about the node, which is not related to other more specific functionality
|
||||
operationId: getInfo
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Info'
|
||||
|
||||
'/api/v1/admin/peers':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
- Peer
|
||||
summary: List known peers
|
||||
description: |
|
||||
List all peers known in the system, and info about their connection.
|
||||
This includes the endpoint, how we know about the peer, the connection state, and if the connection is alive the amount
|
||||
of bytes we've sent to and received from the peer.
|
||||
operationId: getPeers
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/PeerStats'
|
||||
post:
|
||||
tags:
|
||||
- Admin
|
||||
- Peer
|
||||
summary: Add a new peer
|
||||
description: |
|
||||
Add a new peer identified by the provided endpoint.
|
||||
The peer is added to the list of known peers. It will eventually be connected
|
||||
to by the standard connection loop of the peer manager. This means that a peer
|
||||
which can't be connected to will stay in the system, as it might be reachable
|
||||
later on.
|
||||
operationId: addPeer
|
||||
responses:
|
||||
'204':
|
||||
description: Peer added
|
||||
'400':
|
||||
description: Malformed endpoint
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: Details about why the endpoint is not valid
|
||||
'409':
|
||||
description: Peer already exists
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: message saying we already know this peer
|
||||
|
||||
'/api/v1/admin/peers/{endpoint}':
|
||||
delete:
|
||||
tags:
|
||||
- Admin
|
||||
- Peer
|
||||
summary: Remove an existing peer
|
||||
description: |
|
||||
Remove an existing peer identified by the provided endpoint.
|
||||
The peer is removed from the list of known peers. If a connection to it
|
||||
is currently active, it will be closed.
|
||||
operationId: deletePeer
|
||||
responses:
|
||||
'204':
|
||||
description: Peer removed
|
||||
'400':
|
||||
description: Malformed endpoint
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: Details about why the endpoint is not valid
|
||||
'404':
|
||||
description: Peer doesn't exist
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
description: message saying we don't know this peer
|
||||
|
||||
'/api/v1/admin/routes/selected':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
- Route
|
||||
summary: List all selected routes
|
||||
description: |
|
||||
List all selected routes in the system, and their next hop identifier, metric and sequence number.
|
||||
It is possible for a route to be selected and have an infinite metric. This route will however not forward packets.
|
||||
operationId: getSelectedRoutes
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Route'
|
||||
|
||||
'/api/v1/admin/routes/fallback':
|
||||
get:
|
||||
tags:
|
||||
- Admin
|
||||
- Route
|
||||
summary: List all active fallback routes
|
||||
description: |
|
||||
List all fallback routes in the system, and their next hop identifier, metric and sequence number.
|
||||
These routes are available to be selected in case the selected route for a destination suddenly fails, or gets retracted.
|
||||
operationId: getSelectedRoutes
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Route'
|
||||
|
||||
'/api/v1/messages':
|
||||
get:
|
||||
tags:
|
||||
- Message
|
||||
summary: Get a message from the inbound message queue
|
||||
description: |
|
||||
Get a message from the inbound message queue. By default, the message is removed from the queue and won't be shown again.
|
||||
If the peek query parameter is set to true, the message will be peeked, and the next call to this endpoint will show the same message.
|
||||
This method returns immediately by default: a message is returned if one is ready, and if there isn't nothing is returned. If the timeout
|
||||
query parameter is set, this call won't return for the given amount of seconds, unless a message is received
|
||||
operationId: popMessage
|
||||
parameters:
|
||||
- in: query
|
||||
name: peek
|
||||
required: false
|
||||
schema:
|
||||
type: boolean
|
||||
description: Whether to peek the message or not. If this is true, the message won't be removed from the inbound queue when it is read
|
||||
example: true
|
||||
- in: query
|
||||
name: timeout
|
||||
required: false
|
||||
schema:
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
description: |
|
||||
Amount of seconds to wait for a message to arrive if one is not available. Setting this to 0 is valid and will return
|
||||
a message if present, or return immediately if there isn't
|
||||
example: 60
|
||||
- in: query
|
||||
name: topic
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
format: byte
|
||||
minLength: 0
|
||||
maxLength: 340
|
||||
description: |
|
||||
Optional filter for loading messages. If set, the system checks if the message has the given string at the start. This way
|
||||
a topic can be encoded.
|
||||
example: example.topic
|
||||
responses:
|
||||
'200':
|
||||
description: Message retrieved
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/InboundMessage'
|
||||
'204':
|
||||
description: No message ready
|
||||
post:
|
||||
tags:
|
||||
- Message
|
||||
summary: Submit a new message to the system.
|
||||
description: |
|
||||
Push a new message to the systems outbound message queue. The system will continuously attempt to send the message until
|
||||
it is either fully transmitted, or the send deadline is expired.
|
||||
operationId: pushMessage
|
||||
parameters:
|
||||
- in: query
|
||||
name: reply_timeout
|
||||
required: false
|
||||
schema:
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
description: |
|
||||
Amount of seconds to wait for a reply to this message to come in. If not set, the system won't wait for a reply and return
|
||||
the ID of the message, which can be used later. If set, the system will wait for at most the given amount of seconds for a reply
|
||||
to come in. If a reply arrives, it is returned to the client. If not, the message ID is returned for later use.
|
||||
example: 120
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageBody'
|
||||
responses:
|
||||
'200':
|
||||
description: We received a reply within the specified timeout
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/InboundMessage'
|
||||
|
||||
'201':
|
||||
description: Message pushed successfully, and not waiting for a reply
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageResponseId'
|
||||
'408':
|
||||
description: The system timed out waiting for a reply to the message
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageResponseId'
|
||||
|
||||
'/api/v1/messsages/reply/{id}':
|
||||
post:
|
||||
tags:
|
||||
- Message
|
||||
summary: Reply to a message with the given ID
|
||||
description: |
|
||||
Submits a reply message to the system, where ID is an id of a previously received message. If the sender is waiting
|
||||
for a reply, it will bypass the queue of open messages.
|
||||
operationId: pushMessageReply
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: abcdef0123456789
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushMessageBody'
|
||||
responses:
|
||||
'204':
|
||||
description: successfully submitted the reply
|
||||
|
||||
'/api/v1/messages/status/{id}':
|
||||
get:
|
||||
tags:
|
||||
- Message
|
||||
summary: Get the status of an outbound message
|
||||
description: |
|
||||
Get information about the current state of an outbound message. This can be used to check the transmission
|
||||
state, size and destination of the message.
|
||||
operationId: getMessageInfo
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: abcdef0123456789
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/MessageStatusResponse'
|
||||
'404':
|
||||
description: Message not found
|
||||
|
||||
'/api/v1/pubkey/{mycelium_ip}':
|
||||
get:
|
||||
summary: Get the public key from a node IP
|
||||
description: |
|
||||
Get the node's public key from its IP address.
|
||||
operationId: getPublicKeyFromIp
|
||||
parameters:
|
||||
- in: path
|
||||
name: mycelium_ip
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 5fd:7636:b80:9ad0::1
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PublicKeyResponse'
|
||||
'404':
|
||||
description: Public key not found
|
||||
|
||||
|
||||
components:
|
||||
schemas:
|
||||
Info:
|
||||
description: General information about a node
|
||||
type: object
|
||||
properties:
|
||||
nodeSubnet:
|
||||
description: The subnet owned by the node and advertised to peers
|
||||
type: string
|
||||
example: 54f:b680:ba6e:7ced::/64
|
||||
|
||||
Endpoint:
|
||||
description: Identification to connect to a peer
|
||||
type: object
|
||||
properties:
|
||||
proto:
|
||||
description: Protocol used
|
||||
type: string
|
||||
enum:
|
||||
- 'tcp'
|
||||
- 'quic'
|
||||
example: tcp
|
||||
socketAddr:
|
||||
description: The socket address used
|
||||
type: string
|
||||
example: 192.0.2.6:9651
|
||||
|
||||
PeerStats:
|
||||
description: Info about a peer
|
||||
type: object
|
||||
properties:
|
||||
endpoint:
|
||||
$ref: '#/components/schemas/Endpoint'
|
||||
type:
|
||||
description: How we know about this peer
|
||||
type: string
|
||||
enum:
|
||||
- 'static'
|
||||
- 'inbound'
|
||||
- 'linkLocalDiscovery'
|
||||
example: static
|
||||
connectionState:
|
||||
description: The current state of the connection to the peer
|
||||
type: string
|
||||
enum:
|
||||
- 'alive'
|
||||
- 'connecting'
|
||||
- 'dead'
|
||||
example: alive
|
||||
txBytes:
|
||||
description: The amount of bytes transmitted to this peer
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
example: 464531564
|
||||
rxBytes:
|
||||
description: The amount of bytes received from this peer
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
example: 64645089
|
||||
|
||||
Route:
|
||||
description: Information about a route
|
||||
type: object
|
||||
properties:
|
||||
subnet:
|
||||
description: The overlay subnet for which this is the route
|
||||
type: string
|
||||
example: 469:1348:ab0c:a1d8::/64
|
||||
nextHop:
|
||||
description: A way to identify the next hop of the route, where forwarded packets will be sent
|
||||
type: string
|
||||
example: TCP 203.0.113.2:60128 <-> 198.51.100.27:9651
|
||||
metric:
|
||||
description: The metric of the route, an estimation of how long the packet will take to arrive at its final destination
|
||||
oneOf:
|
||||
- description: A finite metric value
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 0
|
||||
maximum: 65534
|
||||
example: 13
|
||||
- description: An infinite (unreachable) metric. This is always `infinite`
|
||||
type: string
|
||||
example: infinite
|
||||
seqno:
|
||||
description: The sequence number advertised with this route by the source
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 0
|
||||
maximum: 65535
|
||||
example: 1
|
||||
|
||||
InboundMessage:
|
||||
description: A message received by the system
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
description: Id of the message, hex encoded
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: 0123456789abcdef
|
||||
srcIp:
|
||||
description: Sender overlay IP address
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 449:abcd:0123:defa::1
|
||||
srcPk:
|
||||
description: Sender public key, hex encoded
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: fedbca9876543210fedbca9876543210fedbca9876543210fedbca9876543210
|
||||
dstIp:
|
||||
description: Receiver overlay IP address
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 34f:b680:ba6e:7ced:355f:346f:d97b:eecb
|
||||
dstPk:
|
||||
description: Receiver public key, hex encoded. This is the public key of the system
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: 02468ace13579bdf02468ace13579bdf02468ace13579bdf02468ace13579bdf
|
||||
topic:
|
||||
description: An optional message topic
|
||||
type: string
|
||||
format: byte
|
||||
minLength: 0
|
||||
maxLength: 340
|
||||
example: hpV+
|
||||
payload:
|
||||
description: The message payload, encoded in standard alphabet base64
|
||||
type: string
|
||||
format: byte
|
||||
example: xuV+
|
||||
|
||||
PushMessageBody:
|
||||
description: A message to send to a given receiver
|
||||
type: object
|
||||
properties:
|
||||
dst:
|
||||
$ref: '#/components/schemas/MessageDestination'
|
||||
topic:
|
||||
description: An optional message topic
|
||||
type: string
|
||||
format: byte
|
||||
minLength: 0
|
||||
maxLength: 340
|
||||
example: hpV+
|
||||
payload:
|
||||
description: The message to send, base64 encoded
|
||||
type: string
|
||||
format: byte
|
||||
example: xuV+
|
||||
|
||||
MessageDestination:
|
||||
oneOf:
|
||||
- description: An IP in the subnet of the receiver node
|
||||
type: object
|
||||
properties:
|
||||
ip:
|
||||
description: The target IP of the message
|
||||
format: ipv6
|
||||
example: 449:abcd:0123:defa::1
|
||||
- description: The hex encoded public key of the receiver node
|
||||
type: object
|
||||
properties:
|
||||
pk:
|
||||
description: The hex encoded public key of the target node
|
||||
type: string
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32
|
||||
|
||||
PushMessageResponseId:
|
||||
description: The ID generated for a message after pushing it to the system
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
description: Id of the message, hex encoded
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 16
|
||||
maxLength: 16
|
||||
example: 0123456789abcdef
|
||||
|
||||
MessageStatusResponse:
|
||||
description: Information about an outbound message
|
||||
type: object
|
||||
properties:
|
||||
dst:
|
||||
description: IP address of the receiving node
|
||||
type: string
|
||||
format: ipv6
|
||||
example: 449:abcd:0123:defa::1
|
||||
state:
|
||||
$ref: '#/components/schemas/TransmissionState'
|
||||
created:
|
||||
description: Unix timestamp of when this message was created
|
||||
type: integer
|
||||
format: int64
|
||||
example: 1649512789
|
||||
deadline:
|
||||
description: Unix timestamp of when this message will expire. If the message is not received before this, the system will give up
|
||||
type: integer
|
||||
format: int64
|
||||
example: 1649513089
|
||||
msgLen:
|
||||
description: Length of the message in bytes
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 27
|
||||
|
||||
TransmissionState:
|
||||
description: The state of an outbound message in its lifetime
|
||||
oneOf:
|
||||
- type: string
|
||||
enum: ['pending', 'received', 'read', 'aborted']
|
||||
example: 'received'
|
||||
- type: object
|
||||
properties:
|
||||
sending:
|
||||
type: object
|
||||
properties:
|
||||
pending:
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 5
|
||||
sent:
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 17
|
||||
acked:
|
||||
type: integer
|
||||
minimum: 0
|
||||
example: 3
|
||||
example: { sending: { pending: 5, sent: 17, acked: 3 } }
|
||||
|
||||
PublicKeyResponse:
|
||||
description: Public key requested based on a node's IP
|
||||
type: object
|
||||
properties:
|
||||
NodePubKey:
|
||||
type: string
|
||||
format: hex
|
||||
minLength: 64
|
||||
maxLength: 64
|
||||
example: 02468ace13579bdf02468ace13579bdf02468ace13579bdf02468ace13579bdf
|
||||
@@ -1,6 +1,13 @@
|
||||
# Mycelium Client
|
||||
|
||||
A V client library for interacting with the Mycelium messaging system. This client provides functionality for sending, receiving, and managing messages through a Mycelium server.
|
||||
A V client library for interacting with the Mycelium messaging system. This client provides functionality for configuring and inspecting a Mycelium node.
|
||||
|
||||
## Components
|
||||
|
||||
The Mycelium integration consists of two main components:
|
||||
|
||||
1. **Mycelium Client** (this package) - For interacting with a running Mycelium node
|
||||
2. **Mycelium Installer** (in `installers/net/mycelium/`) - For installing and managing Mycelium nodes
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -11,131 +18,101 @@ The client can be configured either through V code or using heroscript.
|
||||
```v
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
// Get default client instance
|
||||
mut client := mycelium.get()!
|
||||
|
||||
// By default connects to http://localhost:8989/api/v1/messages
|
||||
// To use a different server:
|
||||
mut client := mycelium.get(name: "custom", server_url: "http://myserver:8989/api/v1/messages")!
|
||||
// Get named client instance
|
||||
mut client := mycelium.get(name: "custom")!
|
||||
```
|
||||
|
||||
### Heroscript Configuration
|
||||
## Core Functions
|
||||
|
||||
```hero
|
||||
!!mycelium.configure
|
||||
name:'custom' # optional, defaults to 'default'
|
||||
server_url:'http://myserver:8989/api/v1/messages' # optional, defaults to localhost:8989
|
||||
```
|
||||
### Inspect Node
|
||||
|
||||
Note: Configuration is not needed if using a locally running Mycelium server with default settings.
|
||||
|
||||
## Example Script
|
||||
|
||||
Save as `mycelium_example.vsh`:
|
||||
Get information about the local Mycelium node:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
// Get node info including public key and address
|
||||
result := mycelium.inspect()!
|
||||
println('Public Key: ${result.public_key}')
|
||||
println('Address: ${result.address}')
|
||||
|
||||
// Get just the IP address
|
||||
addr := mycelium.ipaddr()
|
||||
println('IP Address: ${addr}')
|
||||
```
|
||||
|
||||
### Check Node Status
|
||||
|
||||
Check if the Mycelium node is running and reachable:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
is_running := mycelium.check()
|
||||
if is_running {
|
||||
println('Mycelium node is running and reachable')
|
||||
} else {
|
||||
println('Mycelium node is not running or unreachable')
|
||||
}
|
||||
```
|
||||
|
||||
### Sending and Receiving Messages
|
||||
|
||||
The client provides several functions for sending and receiving messages between nodes:
|
||||
|
||||
```v
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
|
||||
// Initialize client
|
||||
mut client := mycelium.get()!
|
||||
|
||||
// Send a message and wait for reply
|
||||
// Send a message to a node by public key
|
||||
// Parameters: public_key, payload, topic, wait_for_reply
|
||||
msg := client.send_msg(
|
||||
pk: "recipient_public_key"
|
||||
payload: "Hello!"
|
||||
wait: true // wait for reply (timeout 120s)
|
||||
'abc123...', // destination public key
|
||||
'Hello World', // message payload
|
||||
'greetings', // optional topic
|
||||
true // wait for reply
|
||||
)!
|
||||
println('Sent message ID: ${msg.id}')
|
||||
|
||||
// Receive messages
|
||||
// Parameters: wait_for_message, peek_only, topic_filter
|
||||
received := client.receive_msg(true, false, 'greetings')!
|
||||
println('Received message from: ${received.src_pk}')
|
||||
println('Message payload: ${received.payload}')
|
||||
|
||||
// Reply to a message
|
||||
client.reply_msg(
|
||||
received.id, // original message ID
|
||||
received.src_pk, // sender's public key
|
||||
'Got your message!', // reply payload
|
||||
'greetings' // topic
|
||||
)!
|
||||
println('Message sent with ID: ${msg.id}')
|
||||
|
||||
// Check message status
|
||||
status := client.get_msg_status(msg.id)!
|
||||
println('Message status: ${status.state}')
|
||||
|
||||
// Receive messages with timeout
|
||||
if incoming := client.receive_msg_opt(wait: true) {
|
||||
println('Received message: ${incoming.payload}')
|
||||
println('From: ${incoming.src_pk}')
|
||||
|
||||
// Reply to the message
|
||||
client.reply_msg(
|
||||
id: incoming.id
|
||||
pk: incoming.src_pk
|
||||
payload: "Got your message!"
|
||||
)!
|
||||
}
|
||||
println('Created at: ${status.created}')
|
||||
println('Expires at: ${status.deadline}')
|
||||
```
|
||||
|
||||
## API Reference
|
||||
The messaging API supports:
|
||||
- Sending messages to nodes identified by public key
|
||||
- Optional message topics for filtering
|
||||
- Waiting for replies when sending messages
|
||||
- Peeking at messages without removing them from the queue
|
||||
- Tracking message delivery status
|
||||
- Base64 encoded message payloads for binary data
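As a minimal sketch of the peek and topic features above (assuming the positional `(wait, peek, topic)` signature of `receive_msg` shown earlier), a queued message can be inspected without consuming it:

```v
import freeflowuniverse.herolib.clients.mycelium

mut client := mycelium.get()!

// peek at the next 'greetings' message without removing it from the inbound queue
// (don't wait; if nothing is queued this call is expected to error, so handle that in real code)
msg := client.receive_msg(false, true, 'greetings')!
println('still queued: ${msg.payload} from ${msg.src_pk}')
```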
|
||||
|
||||
### Sending Messages
|
||||
## Installation and Management
|
||||
|
||||
```v
|
||||
// Send a message to a specific public key
|
||||
// wait=true means wait for reply (timeout 120s)
|
||||
msg := client.send_msg(pk: "recipient_public_key", payload: "Hello!", wait: true)!
|
||||
For installing and managing Mycelium nodes, use the Mycelium Installer package located in `installers/net/mycelium/`. The installer provides functionality for:
|
||||
|
||||
// Get status of a sent message
|
||||
status := client.get_msg_status(id: "message_id")!
|
||||
```
|
||||
|
||||
### Receiving Messages
|
||||
|
||||
```v
|
||||
// Receive a message (non-blocking)
|
||||
msg := client.receive_msg(wait: false)!
|
||||
|
||||
// Receive a message with timeout (blocking for 60s)
|
||||
msg := client.receive_msg(wait: true)!
|
||||
|
||||
// Receive a message (returns none if no message available)
|
||||
if msg := client.receive_msg_opt(wait: false) {
|
||||
println('Received: ${msg.payload}')
|
||||
}
|
||||
```
|
||||
|
||||
### Replying to Messages
|
||||
|
||||
```v
|
||||
// Reply to a specific message
|
||||
client.reply_msg(
|
||||
id: "original_message_id",
|
||||
pk: "sender_public_key",
|
||||
payload: "Reply message"
|
||||
)!
|
||||
```
|
||||
|
||||
## Message Types
|
||||
|
||||
### InboundMessage
|
||||
```v
|
||||
struct InboundMessage {
|
||||
id string
|
||||
src_ip string
|
||||
src_pk string
|
||||
dst_ip string
|
||||
dst_pk string
|
||||
payload string
|
||||
}
|
||||
```
|
||||
|
||||
### MessageStatusResponse
|
||||
```v
|
||||
struct MessageStatusResponse {
|
||||
id string
|
||||
dst string
|
||||
state string
|
||||
created string
|
||||
deadline string
|
||||
msg_len string
|
||||
}
|
||||
```
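For illustration only (reusing `get_msg_status` from the example above; note the fields are plain strings as listed), the delivery state can be checked like this:

```v
status := client.get_msg_status(msg.id)!
if status.state == 'read' {
	println('delivered ${status.msg_len} bytes to ${status.dst}')
} else {
	println('state ${status.state}, the system gives up at unix time ${status.deadline}')
}
```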
|
||||
|
||||
## Heroscript Complete Example
|
||||
|
||||
```hero
|
||||
!!mycelium.configure
|
||||
name:'mycelium'
|
||||
server_url:'http://localhost:8989/api/v1/messages'
|
||||
|
||||
# More heroscript commands can be added here as the API expands
|
||||
- Installing Mycelium nodes
|
||||
- Starting/stopping nodes
|
||||
- Managing node configuration
|
||||
- Setting up TUN interfaces
|
||||
- Configuring peer connections
|
||||
|
||||
@@ -152,11 +152,16 @@ fn build_() ! {
|
||||
// if core.platform()!= .ubuntu {
|
||||
// return error('only support ubuntu for now')
|
||||
// }
|
||||
// golang.install()!
|
||||
|
||||
//mut g:=golang.get()!
|
||||
//g.install()!
|
||||
|
||||
//console.print_header('build coredns')
|
||||
|
||||
//mut gs := gittools.new(coderoot: '~/code')!
|
||||
// console.print_header('build ${model.name}')
|
||||
|
||||
// gitpath := gittools.get_repo(coderoot: '/tmp/builder', url: url, reset: true, pull: true)!
|
||||
// gitpath := gittools.get_repo(url: url, reset: true, pull: true)!
|
||||
|
||||
// cmd := '
|
||||
// cd ??{gitpath}
|
||||
|
||||
@@ -42,8 +42,8 @@ pub fn get(args_ ArgsGet) !&${args.classname} {
|
||||
set(obj)!
|
||||
}else{
|
||||
heroscript := context.hero_config_get("${args.name}",args.name)!
|
||||
mut obj:=heroscript_loads(heroscript)!
|
||||
set_in_mem(obj)!
|
||||
mut obj_:=heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return ${args.name}_global[args.name] or {
|
||||
@@ -58,14 +58,14 @@ pub fn set(o ${args.classname})! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set("gitea", o.name, heroscript)!
|
||||
context.hero_config_set("${args.name}", o.name, heroscript)!
|
||||
}
|
||||
|
||||
//does the config exists?
|
||||
pub fn exists(args_ ArgsGet)! {
|
||||
pub fn exists(args_ ArgsGet)! bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists("gitea", args.name)
|
||||
return context.hero_config_exists("${args.name}", args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet)! {
|
||||
|
||||
@@ -29,19 +29,37 @@ pub fn cmd_docusaurus(mut cmdroot Command) {
|
||||
description: 'Url where docusaurus source is.'
|
||||
})
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .string
|
||||
required: false
|
||||
name: 'deploykey'
|
||||
abbrev: 'dk'
|
||||
// default: ''
|
||||
description: 'Path of SSH Key used to deploy.'
|
||||
})
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .string
|
||||
required: false
|
||||
name: 'publish'
|
||||
// default: ''
|
||||
description: 'Path where to publish.'
|
||||
})
|
||||
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'build'
|
||||
abbrev: 'b'
|
||||
name: 'buildpublish'
|
||||
abbrev: 'bp'
|
||||
description: 'build and publish.'
|
||||
})
|
||||
|
||||
cmd_run.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'builddev'
|
||||
abbrev: 'bd'
|
||||
name: 'builddevpublish'
|
||||
abbrev: 'bpd'
|
||||
description: 'build dev version and publish.'
|
||||
})
|
||||
|
||||
@@ -49,7 +67,6 @@ pub fn cmd_docusaurus(mut cmdroot Command) {
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'update'
|
||||
abbrev: 'p'
|
||||
description: 'update your environment the template and the repo you are working on (git pull).'
|
||||
})
|
||||
|
||||
@@ -67,6 +84,8 @@ pub fn cmd_docusaurus(mut cmdroot Command) {
|
||||
fn cmd_docusaurus_execute(cmd Command) ! {
|
||||
mut update := cmd.flags.get_bool('update') or { false }
|
||||
mut url := cmd.flags.get_string('url') or { '' }
|
||||
mut publish_path := cmd.flags.get_string('publish') or { '' }
|
||||
mut deploykey := cmd.flags.get_string('deploykey') or { '' }
|
||||
|
||||
// mut path := cmd.flags.get_string('path') or { '' }
|
||||
// if path == '' {
|
||||
@@ -74,30 +93,42 @@ fn cmd_docusaurus_execute(cmd Command) ! {
|
||||
// }
|
||||
// path = path.replace('~', os.home_dir())
|
||||
|
||||
mut build := cmd.flags.get_bool('build') or { false }
|
||||
mut builddev := cmd.flags.get_bool('builddev') or { false }
|
||||
mut buildpublish := cmd.flags.get_bool('buildpublish') or { false }
|
||||
mut builddevpublish := cmd.flags.get_bool('builddevpublish') or { false }
|
||||
mut dev := cmd.flags.get_bool('dev') or { false }
|
||||
|
||||
// if build== false && build== false && build== false {
|
||||
// eprintln("specify build, builddev or dev")
|
||||
// exit(1)
|
||||
// }
|
||||
|
||||
|
||||
mut docs := docusaurus.new(update: update)!
|
||||
|
||||
if build {
|
||||
// Create a new docusaurus site
|
||||
if publish_path.len>0 {
|
||||
_ := docs.build(
|
||||
url: url
|
||||
update: update
|
||||
publish_path: publish_path
|
||||
deploykey:deploykey
|
||||
)!
|
||||
}
|
||||
|
||||
if builddev {
|
||||
|
||||
if buildpublish {
|
||||
// Create a new docusaurus site
|
||||
_ := docs.build_dev(
|
||||
_ := docs.build_publish(
|
||||
url: url
|
||||
update: update
|
||||
deploykey:deploykey
|
||||
)!
|
||||
}
|
||||
|
||||
if builddevpublish {
|
||||
// Create a new docusaurus site
|
||||
_ := docs.build_dev_publish(
|
||||
url: url
|
||||
update: update
|
||||
deploykey:deploykey
|
||||
)!
|
||||
}
|
||||
|
||||
@@ -106,6 +137,7 @@ fn cmd_docusaurus_execute(cmd Command) ! {
|
||||
_ := docs.dev(
|
||||
url: url
|
||||
update: update
|
||||
deploykey:deploykey
|
||||
)!
|
||||
}
|
||||
}
|
||||
|
||||
@@ -189,6 +189,7 @@ pub fn (mut h HTTPConnection) get(req_ Request) !string {
|
||||
req.debug = true
|
||||
req.method = .get
|
||||
result := h.send(req)!
|
||||
println(result)
|
||||
return result.data
|
||||
}
|
||||
|
||||
|
||||
@@ -38,23 +38,33 @@ fn decode_struct[T](_ T, data string) !T {
|
||||
|
||||
// return t_
|
||||
$for field in T.fields {
|
||||
// $if fiel
|
||||
$if field.is_struct {
|
||||
$if field.typ !is time.Time {
|
||||
if !field.name[0].is_capital() {
|
||||
// skip embedded ones
|
||||
// Check if field has skip attribute
|
||||
mut should_skip := false
|
||||
|
||||
for attr in field.attrs {
|
||||
if attr.contains('skip') {
|
||||
should_skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !should_skip {
|
||||
$if field.is_struct {
|
||||
$if field.typ !is time.Time {
|
||||
if !field.name[0].is_capital() {
|
||||
// skip embedded ones
|
||||
mut data_fmt := data.replace(action_str, '')
|
||||
data_fmt = data.replace('define.${obj_name}', 'define')
|
||||
typ.$(field.name) = decode_struct(typ.$(field.name), data_fmt)!
|
||||
}
|
||||
}
|
||||
} $else $if field.is_array {
|
||||
if is_struct_array(typ.$(field.name))! {
|
||||
mut data_fmt := data.replace(action_str, '')
|
||||
data_fmt = data.replace('define.${obj_name}', 'define')
|
||||
typ.$(field.name) = decode_struct(typ.$(field.name), data_fmt)!
|
||||
arr := decode_array(typ.$(field.name), data_fmt)!
|
||||
typ.$(field.name) = arr
|
||||
}
|
||||
}
|
||||
} $else $if field.is_array {
|
||||
if is_struct_array(typ.$(field.name))! {
|
||||
mut data_fmt := data.replace(action_str, '')
|
||||
data_fmt = data.replace('define.${obj_name}', 'define')
|
||||
arr := decode_array(typ.$(field.name), data_fmt)!
|
||||
typ.$(field.name) = arr
|
||||
}
|
||||
}
|
||||
}
|
||||
} $else {
|
||||
|
||||
@@ -28,6 +28,7 @@ pub fn encode[T](val T) !string {
|
||||
$if T is $struct {
|
||||
e.encode_struct[T](val)!
|
||||
} $else $if T is $array {
|
||||
// TODO: need to make comma separated list only works if int,u8,u16,i8... or string if string put all elements in \''...\'',...
|
||||
e.add_child_list[T](val, 'TODO')
|
||||
} $else {
|
||||
return error('can only add elements for struct or array of structs. \n${val}')
|
||||
@@ -128,18 +129,30 @@ pub fn (mut e Encoder) encode_struct[T](t T) ! {
|
||||
|
||||
// encode children structs and array of structs
|
||||
$for field in T.fields {
|
||||
val := t.$(field.name)
|
||||
// time is encoded in the above params encoding step so skip and dont treat as recursive struct
|
||||
$if val is time.Time || val is ourtime.OurTime {
|
||||
} $else $if val is $struct {
|
||||
if field.name[0].is_capital() {
|
||||
embedded_params := paramsparser.encode(val, recursive: false)!
|
||||
e.params.params << embedded_params.params
|
||||
} else {
|
||||
e.add(val)!
|
||||
// Check if field has skip attribute
|
||||
mut should_skip := false
|
||||
|
||||
for attr in field.attrs {
|
||||
if attr.contains('skip') {
|
||||
should_skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !should_skip {
|
||||
val := t.$(field.name)
|
||||
// time is encoded in the above params encoding step so skip and dont treat as recursive struct
|
||||
$if val is time.Time || val is ourtime.OurTime {
|
||||
} $else $if val is $struct {
|
||||
if field.name[0].is_capital() {
|
||||
embedded_params := paramsparser.encode(val, recursive: false)!
|
||||
e.params.params << embedded_params.params
|
||||
} else {
|
||||
e.add(val)!
|
||||
}
|
||||
} $else $if val is $array {
|
||||
e.encode_array(val)!
|
||||
}
|
||||
} $else $if val is $array {
|
||||
e.encode_array(val)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
42
lib/data/encoderhero/encoder_ignorepropery_test.v
Normal file
@@ -0,0 +1,42 @@
|
||||
module encoderhero
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import time
|
||||
import v.reflection
|
||||
|
||||
struct MyStruct {
|
||||
id int
|
||||
name string
|
||||
// skip attributes would be the best way to do the encoding, but we can't get that to work yet
|
||||
other ?&Remark @[skip; str: skip]
|
||||
}
|
||||
|
||||
// is the one we should skip
|
||||
struct Remark {
|
||||
id int
|
||||
}
|
||||
|
||||
fn test_encode() ! {
|
||||
mut o := MyStruct{
|
||||
id: 1
|
||||
name: 'test'
|
||||
other: &Remark{
|
||||
id: 123
|
||||
}
|
||||
}
|
||||
|
||||
script := encode[MyStruct](o)!
|
||||
|
||||
assert script.trim_space() == '!!define.my_struct id:1 name:test'
|
||||
|
||||
println(script)
|
||||
|
||||
o2 := decode[MyStruct](script)!
|
||||
|
||||
assert o2 == MyStruct{
|
||||
id: 1
|
||||
name: 'test'
|
||||
}
|
||||
|
||||
println(o2)
|
||||
}
|
||||
@@ -17,11 +17,16 @@ pub fn (params Params) decode_struct[T](_ T) !T {
|
||||
$if field.is_enum {
|
||||
t.$(field.name) = params.get_int(field.name) or { 0 }
|
||||
} $else {
|
||||
if field.name[0].is_capital() {
|
||||
// embed := params.decode_struct(t.$(field.name))!
|
||||
t.$(field.name) = params.decode_struct(t.$(field.name))!
|
||||
} else {
|
||||
t.$(field.name) = params.decode_value(t.$(field.name), field.name)!
|
||||
// super annoying: didn't find another way than to ignore options
|
||||
$if field.is_option {
|
||||
} $else {
|
||||
if field.name[0].is_capital() {
|
||||
// embed := params.decode_struct(t.$(field.name))!
|
||||
t.$(field.name) = params.decode_struct(t.$(field.name))!
|
||||
// panic("to implement")
|
||||
} else {
|
||||
t.$(field.name) = params.decode_value(t.$(field.name), field.name)!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -30,11 +35,7 @@ pub fn (params Params) decode_struct[T](_ T) !T {
|
||||
|
||||
pub fn (params Params) decode_value[T](_ T, key string) !T {
|
||||
// $if T is $option {
|
||||
// // unwrap and encode optionals
|
||||
// workaround := t
|
||||
// if workaround != none {
|
||||
// encode(t, args)!
|
||||
// }
|
||||
// return error("is option")
|
||||
// }
|
||||
// value := params.get(field.name)!
|
||||
|
||||
|
||||
@@ -33,7 +33,6 @@ const test_child = TestChild{
|
||||
|
||||
const test_struct = TestStruct{
|
||||
name: 'test'
|
||||
nick: 'test_nick'
|
||||
birthday: time.new(
|
||||
day: 12
|
||||
month: 12
|
||||
@@ -104,6 +103,8 @@ fn test_decode() {
|
||||
decoded_child := test_child_params.decode[TestChild]()!
|
||||
assert decoded_child == test_child
|
||||
|
||||
// IMPORTANT: OPTIONALS ARE NOT SUPPORTED AND WILL NOT BE ENCODED FOR NOW (unless we find a way to deal with attributes so skipped elements are not encoded)
|
||||
|
||||
// test recursive decode struct with child
|
||||
decoded := test_params.decode[TestStruct]()!
|
||||
assert decoded == test_struct
|
||||
|
||||
@@ -1,20 +1,21 @@
|
||||
# Git Tools Module
|
||||
|
||||
A comprehensive Git management module for V that provides high-level abstractions for Git operations, repository management, and automation of common Git workflows.
|
||||
### Get a specific path starting from a URL
|
||||
|
||||
## Features
|
||||
Below is a powerful command: it will get the repo, put it in the right location, and you can force a pull or even reset everything.
|
||||
|
||||
- Repository management (clone, load, delete)
|
||||
- Branch operations (create, switch, checkout)
|
||||
- Tag management (create, switch, verify)
|
||||
- Change tracking and commits
|
||||
- Remote operations (push, pull)
|
||||
- SSH key integration
|
||||
- Submodule support
|
||||
- Repository status tracking
|
||||
- Light cloning option for large repositories
|
||||
```v
|
||||
import freeflowuniverse.herolib.develop.gittools
|
||||
mut gs := gittools.new()!
|
||||
mydocs_path:=gs.get_path(
|
||||
pull:true,
|
||||
reset:false,
|
||||
url:'https://git.ourworld.tf/tfgrid/info_docs_depin/src/branch/main/docs'
|
||||
)!
|
||||
|
||||
## Basic Usage
|
||||
println(mydocs_path)
|
||||
|
||||
```
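As a hedged sketch building on the same module (the `coderoot` and `pull` arguments appear elsewhere in this changeset; exact argument structs may differ), a repository object can also be loaded directly:

```v
import freeflowuniverse.herolib.develop.gittools

// use a dedicated code root and make sure the repo is present and pulled
mut gs := gittools.new(coderoot: '~/code')!
mut repo := gs.get_repo(url: 'https://github.com/coredns/coredns', pull: true)!
println(repo)
```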
|
||||
|
||||
### Repository Management
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ pub mut:
|
||||
log bool = true // If true, logs git commands/statements
|
||||
debug bool = true
|
||||
ssh_key_name string // name of ssh key to be used when loading the gitstructure
|
||||
ssh_key_path string
|
||||
reload bool
|
||||
}
|
||||
|
||||
@@ -35,6 +36,8 @@ pub fn new(args_ GitStructureArgsNew) !&GitStructure {
|
||||
log: args.log
|
||||
debug: args.debug
|
||||
ssh_key_name: args.ssh_key_name
|
||||
ssh_key_path: args.ssh_key_path
|
||||
|
||||
}
|
||||
|
||||
return get(coderoot: args.coderoot, reload: args.reload, cfg: cfg)
|
||||
@@ -77,7 +80,19 @@ pub fn get(args_ GitStructureArgGet) !&GitStructure {
|
||||
coderoot: pathlib.get_dir(path: args.coderoot, create: true)!
|
||||
}
|
||||
|
||||
mut cfg := args.cfg or {
|
||||
mut cfg_:=GitStructureConfig{coderoot:"SKIP"}
|
||||
cfg_
|
||||
}
|
||||
|
||||
if cfg.coderoot != "SKIP"{
|
||||
gs.config_ = cfg
|
||||
gs.config_save()!
|
||||
//println(gs.config()!)
|
||||
}
|
||||
|
||||
gs.config()! // will load the config, don't remove
|
||||
|
||||
gs.load(false)!
|
||||
|
||||
if gs.repos.keys().len == 0 || args.reload {
|
||||
|
||||
@@ -14,8 +14,10 @@ pub mut:
|
||||
log bool = true // If true, logs git commands/statements
|
||||
debug bool = true
|
||||
ssh_key_name string
|
||||
ssh_key_path string
|
||||
}
|
||||
|
||||
|
||||
// GitStructure holds information about repositories within a specific code root.
|
||||
// This structure keeps track of loaded repositories, their configurations, and their status.
|
||||
@[heap]
|
||||
@@ -233,6 +235,6 @@ pub fn (mut self GitStructure) config_reset() ! {
|
||||
pub fn (mut self GitStructure) config_save() ! {
|
||||
// Retrieve the configuration from Redis.
|
||||
mut redis := redis_get()
|
||||
datajson := json.encode(self.config)
|
||||
datajson := json.encode(self.config()!)
|
||||
redis.set('${self.cache_key()}:config', datajson)!
|
||||
}
|
||||
|
||||
@@ -88,7 +88,6 @@ pub fn (mut gitstructure GitStructure) get_repos(args_ ReposGetArgs) ![]&GitRepo
|
||||
// provider string // Git provider (e.g., GitHub).
|
||||
// pull bool // Pull the last changes.
|
||||
// reset bool // Reset the changes.
|
||||
// reload bool // Reload the repo into redis cache
|
||||
// url string // Repository URL, used if cloning is needed.
|
||||
//```
|
||||
//
|
||||
@@ -151,7 +150,6 @@ fn repo_match_check(repo GitRepo, args ReposGetArgs) !bool {
|
||||
// provider string // Git provider (e.g., GitHub).
|
||||
// pull bool // Pull the last changes.
|
||||
// reset bool // Reset the changes.
|
||||
// reload bool // Reload the repo into redis cache
|
||||
// url string // Repository URL, used if cloning is needed.
|
||||
//```
|
||||
//
|
||||
@@ -161,6 +159,10 @@ fn repo_match_check(repo GitRepo, args ReposGetArgs) !bool {
|
||||
// Raises:
|
||||
// - Error: If multiple repositories are found with similar names or if cloning fails.
|
||||
pub fn (mut gitstructure GitStructure) get_path(args_ ReposGetArgs) !string {
|
||||
mut args := args_
|
||||
if args.pull {
|
||||
args.status_clean = true
|
||||
}
|
||||
mut r := gitstructure.get_repo(args_)!
|
||||
mut mypath := r.get_path_of_url(args_.url)!
|
||||
return mypath
|
||||
|
||||
@@ -34,7 +34,17 @@ pub fn (mut gitstructure GitStructure) clone(args GitCloneArgs) !&GitRepo {
|
||||
extra = '--depth 1 --no-single-branch '
|
||||
}
|
||||
|
||||
cmd := 'cd ${parent_dir} && git clone ${extra} ${repo.get_http_url()!} ${repo.name}'
|
||||
cfg:=gitstructure.config()!
|
||||
|
||||
mut cmd := 'cd ${parent_dir} && git clone ${extra} ${repo.get_http_url()!} ${repo.name}'
|
||||
|
||||
mut sshkey_include := ""
|
||||
if cfg.ssh_key_path.len>0{
|
||||
sshkey_include="GIT_SSH_COMMAND=\"ssh -i ${cfg.ssh_key_path}\" "
|
||||
cmd = 'cd ${parent_dir} && ${sshkey_include}git clone ${extra} ${repo.get_ssh_url()!} ${repo.name}'
|
||||
}
|
||||
|
||||
console.print_debug(cmd)
|
||||
result := os.execute(cmd)
|
||||
if result.exit_code != 0 {
|
||||
return error('Cannot clone the repository due to: \n${result.output}')
|
||||
|
||||
13
lib/installers/infra/coredns/.heroscript
Normal file
@@ -0,0 +1,13 @@
|
||||
|
||||
!!hero_code.generate_installer
|
||||
name:'coredns'
|
||||
classname:'CoreDNS'
|
||||
singleton:1
|
||||
templates:1
|
||||
default:1
|
||||
title:'coredns'
|
||||
supported_platforms:''
|
||||
reset:0
|
||||
startupmanager:1
|
||||
hasconfig:1
|
||||
build:1
|
||||
@@ -1,138 +0,0 @@
|
||||
module coredns
|
||||
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.osal.screen
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import os
|
||||
|
||||
@[params]
|
||||
pub struct InstallArgs {
|
||||
pub mut:
|
||||
reset bool // this means we re-install and forgot what we did before
|
||||
start bool = true
|
||||
stop bool
|
||||
restart bool // this means we stop if started, otherwise just start
|
||||
homedir string // not sure what this is?
|
||||
config_path string // path to Corefile, if empty will install default one
|
||||
config_url string // path to Corefile through e.g. git url, will pull it if it is not local yet
|
||||
dnszones_path string // path to where all the dns zones are
|
||||
dnszones_url string // path on git url pull if needed
|
||||
plugins []string // list of plugins to build CoreDNS with
|
||||
example bool // if true we will install examples
|
||||
}
|
||||
|
||||
pub fn install_(args_ InstallArgs) ! {
|
||||
mut args := args_
|
||||
version := '1.11.1'
|
||||
|
||||
res := os.execute('${osal.profile_path_source_and()!} coredns version')
|
||||
if res.exit_code == 0 {
|
||||
r := res.output.split_into_lines().filter(it.trim_space().starts_with('CoreDNS-'))
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse coredns version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) > texttools.version(r[0].all_after_first('CoreDNS-')) {
|
||||
args.reset = true
|
||||
}
|
||||
} else {
|
||||
args.reset = true
|
||||
}
|
||||
|
||||
if args.reset {
|
||||
console.print_header('install coredns')
|
||||
|
||||
mut url := ''
|
||||
if core.is_linux_arm()! {
|
||||
url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_linux_arm64.tgz'
|
||||
} else if core.is_linux_intel()! {
|
||||
url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_linux_amd64.tgz'
|
||||
} else if core.is_osx_arm()! {
|
||||
url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_darwin_arm64.tgz'
|
||||
} else if core.is_osx_intel()! {
|
||||
url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_darwin_amd64.tgz'
|
||||
} else {
|
||||
return error('unsported platform')
|
||||
}
|
||||
|
||||
mut dest := osal.download(
|
||||
url: url
|
||||
minsize_kb: 13000
|
||||
expand_dir: '/tmp/coredns'
|
||||
)!
|
||||
|
||||
mut binpath := dest.file_get('coredns')!
|
||||
osal.cmd_add(
|
||||
cmdname: 'coredns'
|
||||
source: binpath.path
|
||||
)!
|
||||
}
|
||||
|
||||
configure(args)!
|
||||
|
||||
if args.example {
|
||||
example_configure(args)!
|
||||
}
|
||||
|
||||
if args.restart {
|
||||
restart(args)!
|
||||
return
|
||||
}
|
||||
|
||||
if args.start {
|
||||
start(args)!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn restart(args_ InstallArgs) ! {
|
||||
stop(args_)!
|
||||
start(args_)!
|
||||
}
|
||||
|
||||
pub fn stop(args_ InstallArgs) ! {
|
||||
console.print_header('coredns stop')
|
||||
|
||||
name := 'coredns'
|
||||
|
||||
// use startup manager, see caddy
|
||||
mut scr := screen.new()!
|
||||
scr.kill(name)!
|
||||
}
|
||||
|
||||
pub fn start(args_ InstallArgs) ! {
|
||||
mut args := args_
|
||||
configure(args)!
|
||||
|
||||
if check()! {
|
||||
return
|
||||
}
|
||||
|
||||
console.print_header('coredns start')
|
||||
|
||||
name := 'coredns'
|
||||
|
||||
mut scr := screen.new()!
|
||||
|
||||
mut s := scr.add(name: name, reset: true)!
|
||||
|
||||
cmd2 := "coredns -conf '${args.config_path}'"
|
||||
|
||||
s.cmd_send(cmd2)!
|
||||
|
||||
if !check()! {
|
||||
return error("coredns did not install propertly, do: curl 'http://localhost:3334/health'")
|
||||
}
|
||||
|
||||
console.print_header('coredns running')
|
||||
}
|
||||
|
||||
pub fn check() !bool {
|
||||
// this checks health of coredns
|
||||
mut conn := httpconnection.new(name: 'coredns', url: 'http://localhost:3334')!
|
||||
r := conn.get(prefix: 'health')!
|
||||
if r.trim_space() == 'OK' {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
module coredns
|
||||
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.installers.base
|
||||
import os
|
||||
|
||||
pub fn play(mut plbook playbook.PlayBook) ! {
|
||||
base.play(playbook)!
|
||||
|
||||
coredns_actions := plbook.find(filter: 'coredns.')!
|
||||
if coredns_actions.len == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
mut install_actions := plbook.find(filter: 'coredns.install')!
|
||||
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
mut p := install_action.params
|
||||
|
||||
// CoreDNS parameters
|
||||
reset := p.get_default_false('reset')
|
||||
start := p.get_default_true('start')
|
||||
stop := p.get_default_false('stop')
|
||||
restart := p.get_default_false('restart')
|
||||
homedir := p.get_default('homedir', '${os.home_dir()}/hero/var/coredns')!
|
||||
config_path := p.get_default('config_path', '${os.home_dir()}/hero/cfg/Corefile')!
|
||||
config_url := p.get_default('config_url', '')!
|
||||
dnszones_path := p.get_default('dnszones_path', '${os.home_dir()}/hero/var/coredns/zones')!
|
||||
dnszones_url := p.get_default('dnszones_url', '')!
|
||||
plugins := p.get_list_default('plugins', [])!
|
||||
example := p.get_default_false('example')
|
||||
|
||||
install(
|
||||
reset: reset
|
||||
start: start
|
||||
stop: stop
|
||||
restart: restart
|
||||
homedir: homedir
|
||||
config_path: config_path
|
||||
config_url: config_url
|
||||
dnszones_path: dnszones_path
|
||||
dnszones_url: dnszones_url
|
||||
plugins: plugins
|
||||
example: example
|
||||
)!
|
||||
}
|
||||
}
|
||||
}
|
||||
180
lib/installers/infra/coredns/coredns_actions.v
Normal file
@@ -0,0 +1,180 @@
|
||||
module coredns
|
||||
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.core
|
||||
import freeflowuniverse.herolib.develop.gittools
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import freeflowuniverse.herolib.installers.lang.golang
|
||||
import os
|
||||
|
||||
fn startupcmd() ![]zinit.ZProcessNewArgs {
|
||||
mut args := get()!
|
||||
mut res := []zinit.ZProcessNewArgs{}
|
||||
cmd := "coredns -conf '${args.config_path}'"
|
||||
res << zinit.ZProcessNewArgs{
|
||||
name: 'coredns'
|
||||
cmd: cmd
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
fn running() !bool {
|
||||
mut installer := get()!
|
||||
mut conn := httpconnection.new(name: 'coredns', url: 'http://localhost:3334')!
|
||||
r := conn.get(prefix: 'health')!
|
||||
if r.trim_space() == 'OK' {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
fn start_pre() ! {
|
||||
fix()!
|
||||
}
|
||||
|
||||
fn start_post() ! {
|
||||
set_local_dns()
|
||||
}
|
||||
|
||||
fn stop_pre() ! {
|
||||
}
|
||||
|
||||
fn stop_post() ! {
|
||||
}
|
||||
|
||||
//////////////////// following actions are not specific to instance of the object
|
||||
|
||||
// checks if a certain version or above is installed
|
||||
fn installed() !bool {
|
||||
res := os.execute('${osal.profile_path_source_and()!} coredns version')
|
||||
if res.exit_code != 0 {
|
||||
return false
|
||||
}
|
||||
r := res.output.split_into_lines().filter(it.trim_space().starts_with('CoreDNS-'))
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse coredns version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) == texttools.version(r[0].all_after_first('CoreDNS-')) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// get the Upload List of the files
|
||||
fn ulist_get() !ulist.UList {
|
||||
// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
|
||||
return ulist.UList{}
|
||||
}
|
||||
|
||||
// uploads to S3 server if configured
|
||||
fn upload() ! {
|
||||
// installers.upload(
|
||||
// cmdname: 'coredns'
|
||||
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/coredns'
|
||||
// )!
|
||||
}
|
||||
|
||||
fn install() ! {
|
||||
console.print_header('install coredns')
|
||||
build()! // because we need the plugins
|
||||
// mut url := ''
|
||||
// if core.is_linux_arm()! {
|
||||
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_linux_arm64.tgz'
|
||||
// } else if core.is_linux_intel()! {
|
||||
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_linux_amd64.tgz'
|
||||
// } else if core.is_osx_arm()! {
|
||||
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_darwin_arm64.tgz'
|
||||
// } else if core.is_osx_intel()! {
|
||||
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_darwin_amd64.tgz'
|
||||
// } else {
|
||||
// return error('unsported platform')
|
||||
// }
|
||||
|
||||
// mut dest := osal.download(
|
||||
// url: url
|
||||
// minsize_kb: 13000
|
||||
// expand_dir: '/tmp/coredns'
|
||||
// )!
|
||||
|
||||
// mut binpath := dest.file_get('coredns')!
|
||||
// osal.cmd_add(
|
||||
// cmdname: 'coredns'
|
||||
// source: binpath.path
|
||||
// )!
|
||||
}
|
||||
|
||||
fn build() ! {
|
||||
url := 'https://github.com/coredns/coredns'
|
||||
|
||||
if core.platform()! != .ubuntu {
|
||||
return error('only support ubuntu for now')
|
||||
}
|
||||
mut g := golang.get()!
|
||||
g.install()!
|
||||
|
||||
console.print_header('build coredns')
|
||||
|
||||
mut gs := gittools.new()!
|
||||
|
||||
gitpath := gs.get_path(
|
||||
pull: true
|
||||
reset: true
|
||||
url: url
|
||||
)!
|
||||
|
||||
// set the plugins file on right location
|
||||
pluginsfile := $tmpl('templates/plugin.cfg')
|
||||
mut path := pathlib.get_file(path: '${gitpath}/plugin.cfg', create: true)!
|
||||
path.write(pluginsfile)!
|
||||
|
||||
cmd := '
|
||||
cd ${gitpath}
|
||||
make
|
||||
'
|
||||
osal.execute_stdout(cmd)!
|
||||
|
||||
// now copy to the default bin path
|
||||
mut codedir := pathlib.get_dir(path: '${gitpath}', create: false)!
|
||||
mut binpath := codedir.file_get('coredns')!
|
||||
osal.cmd_add(
|
||||
cmdname: 'coredns'
|
||||
source: binpath.path
|
||||
)!
|
||||
}
|
||||
|
||||
fn destroy() ! {
|
||||
// mut systemdfactory := systemd.new()!
|
||||
// systemdfactory.destroy("zinit")!
|
||||
|
||||
// osal.process_kill_recursive(name:'zinit')!
|
||||
// osal.cmd_delete('zinit')!
|
||||
|
||||
// osal.package_remove('
|
||||
// podman
|
||||
// conmon
|
||||
// buildah
|
||||
// skopeo
|
||||
// runc
|
||||
// ')!
|
||||
|
||||
// //will remove all paths where go/bin is found
|
||||
// osal.profile_path_add_remove(paths2delete:"go/bin")!
|
||||
|
||||
// osal.rm("
|
||||
// podman
|
||||
// conmon
|
||||
// buildah
|
||||
// skopeo
|
||||
// runc
|
||||
// /var/lib/containers
|
||||
// /var/lib/podman
|
||||
// /var/lib/buildah
|
||||
// /tmp/podman
|
||||
// /tmp/conmon
|
||||
// ")!
|
||||
}
|
||||
@@ -1,14 +1,17 @@
|
||||
module coredns
|
||||
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.develop.gittools
|
||||
import os
|
||||
|
||||
pub fn configure(args_ InstallArgs) ! {
|
||||
mut args := args_
|
||||
pub fn configure() ! {
|
||||
mut args := get()!
|
||||
mut gs := gittools.get()!
|
||||
mut repo_path := ''
|
||||
|
||||
set_global_dns()
|
||||
|
||||
if args.config_url.len > 0 {
|
||||
mut repo := gs.get_repo(
|
||||
url: args.config_url
|
||||
@@ -37,15 +40,21 @@ pub fn configure(args_ InstallArgs) ! {
|
||||
mycorefile := $tmpl('templates/Corefile')
|
||||
mut path := pathlib.get_file(path: args.config_path, create: true)!
|
||||
path.write(mycorefile)!
|
||||
|
||||
if args.example {
|
||||
example_configure()!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn example_configure(args_ InstallArgs) ! {
|
||||
mut args := args_
|
||||
pub fn example_configure() ! {
|
||||
mut args := get()!
|
||||
|
||||
exampledbfile := $tmpl('templates/db.example.org')
|
||||
myipaddr := osal.ipaddr_pub_get_check()!
|
||||
|
||||
exampledbfile := $tmpl('templates/ourexample.org')
|
||||
|
||||
mut path_testzone := pathlib.get_file(
|
||||
path: '${args_.dnszones_path}/db.example.org'
|
||||
path: '${args.dnszones_path}/ourexample.org'
|
||||
create: true
|
||||
)!
|
||||
path_testzone.template_write(exampledbfile, true)!
|
||||
280
lib/installers/infra/coredns/coredns_factory_.v
Normal file
@@ -0,0 +1,280 @@
|
||||
module coredns
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import time
|
||||
|
||||
__global (
|
||||
coredns_global map[string]&CoreDNS
|
||||
coredns_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&CoreDNS {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := CoreDNS{}
|
||||
if args.name !in coredns_global {
|
||||
if !exists(args)! {
|
||||
set(obj)!
|
||||
} else {
|
||||
heroscript := context.hero_config_get('coredns', args.name)!
|
||||
mut obj_ := heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return coredns_global[args.name] or {
|
||||
println(coredns_global)
|
||||
// bug if we get here because should be in globals
|
||||
panic('could not get config for coredns with name, is bug:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
// register the config for the future
|
||||
pub fn set(o CoreDNS) ! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set('coredns', o.name, heroscript)!
|
||||
}
|
||||
|
||||
// does the config exists?
|
||||
pub fn exists(args_ ArgsGet) !bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists('coredns', args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('coredns', args.name)!
|
||||
if args.name in coredns_global {
|
||||
// del coredns_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
fn set_in_mem(o CoreDNS) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
coredns_global[o.name] = &o2
|
||||
coredns_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'coredns.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
heroscript := install_action.heroscript()
|
||||
mut obj2 := heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
|
||||
mut other_actions := plbook.find(filter: 'coredns.')!
|
||||
for other_action in other_actions {
|
||||
if other_action.name in ['destroy', 'install', 'build'] {
|
||||
mut p := other_action.params
|
||||
reset := p.get_default_false('reset')
|
||||
if other_action.name == 'destroy' || reset {
|
||||
console.print_debug('install action coredns.destroy')
|
||||
destroy()!
|
||||
}
|
||||
if other_action.name == 'install' {
|
||||
console.print_debug('install action coredns.install')
|
||||
install()!
|
||||
}
|
||||
}
|
||||
if other_action.name in ['start', 'stop', 'restart'] {
|
||||
mut p := other_action.params
|
||||
name := p.get('name')!
|
||||
mut coredns_obj := get(name: name)!
|
||||
console.print_debug('action object:\n${coredns_obj}')
|
||||
if other_action.name == 'start' {
|
||||
console.print_debug('install action coredns.${other_action.name}')
|
||||
coredns_obj.start()!
|
||||
}
|
||||
|
||||
if other_action.name == 'stop' {
|
||||
console.print_debug('install action coredns.${other_action.name}')
|
||||
coredns_obj.stop()!
|
||||
}
|
||||
if other_action.name == 'restart' {
|
||||
console.print_debug('install action coredns.${other_action.name}')
|
||||
coredns_obj.restart()!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
|
||||
// unknown
|
||||
// screen
|
||||
// zinit
|
||||
// tmux
|
||||
// systemd
|
||||
match cat {
|
||||
.zinit {
|
||||
console.print_debug('startupmanager: zinit')
|
||||
return startupmanager.get(cat: .zinit)!
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('startupmanager: systemd')
|
||||
return startupmanager.get(cat: .systemd)!
|
||||
}
|
||||
else {
|
||||
console.print_debug('startupmanager: auto')
|
||||
return startupmanager.get()!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// load from disk and make sure it is properly initialized
|
||||
pub fn (mut self CoreDNS) reload() ! {
|
||||
switch(self.name)
|
||||
self = obj_init(self)!
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) start() ! {
|
||||
switch(self.name)
|
||||
if self.running()! {
|
||||
return
|
||||
}
|
||||
|
||||
console.print_header('coredns start')
|
||||
|
||||
if !installed()! {
|
||||
install()!
|
||||
}
|
||||
|
||||
configure()!
|
||||
|
||||
start_pre()!
|
||||
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
|
||||
console.print_debug('starting coredns with ${zprocess.startuptype}...')
|
||||
|
||||
sm.new(zprocess)!
|
||||
|
||||
sm.start(zprocess.name)!
|
||||
}
|
||||
|
||||
start_post()!
|
||||
|
||||
for _ in 0 .. 50 {
|
||||
if self.running()! {
|
||||
return
|
||||
}
|
||||
time.sleep(100 * time.millisecond)
|
||||
}
|
||||
return error('coredns did not install properly.')
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) install_start(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
self.install(args)!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) stop() ! {
|
||||
switch(self.name)
|
||||
stop_pre()!
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
sm.stop(zprocess.name)!
|
||||
}
|
||||
stop_post()!
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) restart() ! {
|
||||
switch(self.name)
|
||||
self.stop()!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) running() !bool {
|
||||
switch(self.name)
|
||||
|
||||
// walk over the generic processes, if not running return
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
r := sm.running(zprocess.name)!
|
||||
if r == false {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return running()!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct InstallArgs {
|
||||
pub mut:
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) install(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
if args.reset || (!installed()!) {
|
||||
install()!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) build() ! {
|
||||
switch(self.name)
|
||||
build()!
|
||||
}
|
||||
|
||||
pub fn (mut self CoreDNS) destroy() ! {
|
||||
switch(self.name)
|
||||
self.stop() or {}
|
||||
destroy()!
|
||||
}
|
||||
|
||||
// switch instance to be used for coredns
|
||||
pub fn switch(name string) {
|
||||
coredns_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
62
lib/installers/infra/coredns/coredns_fix.v
Normal file
@@ -0,0 +1,62 @@
|
||||
module coredns
|
||||
|
||||
import os
|
||||
import net
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
|
||||
fn is_systemd_resolved_active() bool {
|
||||
result := os.execute('systemctl is-active systemd-resolved')
|
||||
return result.exit_code == 0 && result.output.trim_space() == 'active'
|
||||
}
|
||||
|
||||
fn disable_systemd_resolved() {
|
||||
console.print_debug('Stopping and disabling systemd-resolved...')
|
||||
os.execute('sudo systemctl stop systemd-resolved')
|
||||
os.execute('sudo systemctl disable systemd-resolved')
|
||||
os.execute('sudo systemctl mask systemd-resolved')
|
||||
}
|
||||
|
||||
fn is_dns_port_free() bool {
|
||||
result := os.execute("sudo ss -tunlp | grep ':53 '")
|
||||
return result.exit_code != 0
|
||||
}
|
||||
|
||||
fn set_local_dns() {
|
||||
console.print_debug('Updating /etc/resolv.conf to use local DNS...')
|
||||
os.execute('sudo rm -f /etc/resolv.conf')
|
||||
os.write_file('/etc/resolv.conf', 'nameserver 127.0.0.1\n') or {
|
||||
console.print_debug('Failed to update /etc/resolv.conf')
|
||||
return
|
||||
}
|
||||
console.print_debug('/etc/resolv.conf updated successfully.')
|
||||
}
|
||||
|
||||
fn set_global_dns() {
|
||||
console.print_debug('Updating /etc/resolv.conf to use local DNS...')
|
||||
os.execute('sudo rm -f /etc/resolv.conf')
|
||||
os.write_file('/etc/resolv.conf', 'nameserver 8.8.8.8\n') or {
|
||||
console.print_debug('Failed to update /etc/resolv.conf')
|
||||
return
|
||||
}
|
||||
console.print_debug('/etc/resolv.conf updated successfully for global.')
|
||||
}
|
||||
|
||||
pub fn fix() ! {
|
||||
console.print_debug('Checking if systemd-resolved is active...')
|
||||
if is_systemd_resolved_active() {
|
||||
disable_systemd_resolved()
|
||||
} else {
|
||||
println('systemd-resolved is already disabled.')
|
||||
}
|
||||
|
||||
console.print_debug('Checking if DNS UDP port 53 is free...')
|
||||
if is_dns_port_free() {
|
||||
console.print_debug('UDP port 53 is free.')
|
||||
} else {
|
||||
console.print_debug('UDP port 53 is still in use. Ensure CoreDNS or another service is properly set up.')
|
||||
return
|
||||
}
|
||||
|
||||
set_global_dns()
|
||||
console.print_debug('Setup complete. Ensure CoreDNS is running.')
|
||||
}
|
||||
39
lib/installers/infra/coredns/coredns_model.v
Normal file
@@ -0,0 +1,39 @@
|
||||
module coredns
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
import os
|
||||
|
||||
pub const version = '1.12.0'
|
||||
const singleton = true
|
||||
const default = true
|
||||
|
||||
// THIS IS THE SOURCE OF THE INFORMATION IN THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
|
||||
@[heap]
|
||||
pub struct CoreDNS {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
config_path string
|
||||
config_url string // path to the Corefile, e.g. via a git URL; will be pulled if not yet local
|
||||
dnszones_path string // path to where all the dns zones are
|
||||
dnszones_url string // git URL(s) to pull the dns zones from if needed (comma or \n separated list)
|
||||
plugins string // list of plugins to build CoreDNS with (comma or \n separated list)
|
||||
example bool = true // if true we will install examples
|
||||
}
|
||||
|
||||
// your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ CoreDNS) !CoreDNS {
|
||||
mut mycfg := mycfg_
|
||||
return mycfg
|
||||
}
|
||||
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj CoreDNS) !string {
|
||||
return encoderhero.encode[CoreDNS](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !CoreDNS {
|
||||
mut obj := encoderhero.decode[CoreDNS](heroscript)!
|
||||
return obj
|
||||
}
|
||||
lib/installers/infra/coredns/readme.md (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
# coredns
|
||||
|
||||
Installer for CoreDNS, a flexible DNS server used here with custom plugins and zone files.
|
||||
|
||||
To get started
|
||||
|
||||
```vlang
|
||||
|
||||
|
||||
import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer
|
||||
|
||||
heroscript := "
|
||||
!!coredns.configure name:'test'
|
||||
config_path: '/etc/coredns/Corefile'
|
||||
dnszones_path: '/etc/coredns/zones'
|
||||
plugins: 'forward,cache'
|
||||
example: true
|
||||
|
||||
!!coredns.start name:'test' reset:1
|
||||
"
|
||||
|
||||
coredns_installer.play(heroscript: heroscript)!
|
||||
|
||||
//or we can call the default and do a start with reset
|
||||
//mut installer:= coredns_installer.get()!
|
||||
//installer.start(reset:true)!
|
||||
|
||||
|
||||
|
||||
```
|
||||
|
||||
## example heroscript
|
||||
|
||||
```hero
|
||||
!!coredns.configure
|
||||
name: 'custom'
|
||||
config_path: '/etc/coredns/Corefile'
|
||||
config_url: 'https://github.com/example/coredns-config'
|
||||
dnszones_path: '/etc/coredns/zones'
|
||||
dnszones_url: 'https://github.com/example/dns-zones'
|
||||
plugins: 'forward,cache'
|
||||
example: false
|
||||
```
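A hedged sketch of driving the same instance from V after it has been configured; `get()` taking a `name` argument is an assumption based on the factory pattern of the other installers here, while `install(reset: ...)` and `build()` match the methods shown above:

```vlang
import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer

// load the 'custom' instance configured by the heroscript above
mut installer := coredns_installer.get(name: 'custom')!
// wipe and reinstall, then build CoreDNS with the configured plugins
installer.install(reset: true)!
installer.build()!
```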
|
||||
@@ -3,5 +3,11 @@
|
||||
log
|
||||
errors
|
||||
health :3334
|
||||
import '${args.dnszones_path}/*'
|
||||
redis {
|
||||
address localhost:6379
|
||||
connect_timeout 100
|
||||
read_timeout 100
|
||||
ttl 360
|
||||
prefix dns:
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
??ORIGIN example.org.
|
||||
^^ 3600 IN SOA sns.dns.icann.org. noc.dns.icann.org. (
|
||||
2017042745 ; serial
|
||||
7200 ; refresh (2 hours)
|
||||
3600 ; retry (1 hour)
|
||||
1209600 ; expire (2 weeks)
|
||||
3600 ; minimum (1 hour)
|
||||
)
|
||||
|
||||
3600 IN NS a.iana-servers.net.
|
||||
3600 IN NS b.iana-servers.net.
|
||||
|
||||
www IN A 127.0.0.1
|
||||
IN AAAA ::1
|
||||
lib/installers/infra/coredns/templates/ourexample.org (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
??ORIGIN ourexample.org.
|
||||
^^ 3600 IN SOA ns1.ourexample.org. hostmaster.ourexample.org. (
|
||||
2017042745 ; serial
|
||||
7200 ; refresh (2 hours)
|
||||
3600 ; retry (1 hour)
|
||||
1209600 ; expire (2 weeks)
|
||||
3600 ; minimum (1 hour)
|
||||
)
|
||||
|
||||
3600 IN NS ns1.ourexample.org.
|
||||
3600 IN NS ns2.ourexample.org.
|
||||
|
||||
ns1 IN A ${myipaddr}
|
||||
ns2 IN A ${myipaddr}
|
||||
www IN A ${myipaddr}
|
||||
test IN A ${myipaddr}
|
||||
IN AAAA ::1
|
||||
lib/installers/infra/coredns/templates/plugin.cfg (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
# Directives are registered in the order they should be executed.
|
||||
#
|
||||
# Ordering is VERY important. Every plugin will feel the effects of all other
# plugins below (after) it during a request, but it must not care what the plugins
# above it are doing.
|
||||
|
||||
# How to rebuild with updated plugin configurations: Modify the list below and
|
||||
# run `go generate && go build`
|
||||
|
||||
# The parser takes the input format of:
|
||||
#
|
||||
# <plugin-name>:<package-name>
|
||||
# Or
|
||||
# <plugin-name>:<fully-qualified-package-name>
|
||||
#
|
||||
# External plugin example:
|
||||
#
|
||||
# log:github.com/coredns/coredns/plugin/log
|
||||
# Local plugin example:
|
||||
# log:log
|
||||
#etcd:etcd
|
||||
|
||||
root:root
|
||||
metadata:metadata
|
||||
geoip:geoip
|
||||
cancel:cancel
|
||||
tls:tls
|
||||
timeouts:timeouts
|
||||
multisocket:multisocket
|
||||
reload:reload
|
||||
nsid:nsid
|
||||
bufsize:bufsize
|
||||
bind:bind
|
||||
debug:debug
|
||||
trace:trace
|
||||
ready:ready
|
||||
health:health
|
||||
pprof:pprof
|
||||
prometheus:metrics
|
||||
errors:errors
|
||||
log:log
|
||||
dnstap:dnstap
|
||||
local:local
|
||||
dns64:dns64
|
||||
acl:acl
|
||||
any:any
|
||||
chaos:chaos
|
||||
loadbalance:loadbalance
|
||||
tsig:tsig
|
||||
cache:cache
|
||||
rewrite:rewrite
|
||||
header:header
|
||||
dnssec:dnssec
|
||||
autopath:autopath
|
||||
minimal:minimal
|
||||
template:template
|
||||
transfer:transfer
|
||||
hosts:hosts
|
||||
file:file
|
||||
secondary:secondary
|
||||
loop:loop
|
||||
forward:forward
|
||||
erratic:erratic
|
||||
whoami:whoami
|
||||
on:github.com/coredns/caddy/onevent
|
||||
sign:sign
|
||||
view:view
|
||||
redis:github.com/codysnider/coredns-redis
|
||||
@@ -1,11 +1,12 @@
|
||||
!!hero_code.generate_installer
|
||||
name: "mycelium"
|
||||
classname: "MyceliumInstaller"
|
||||
hasconfig: false
|
||||
singleton: true
|
||||
hasconfig: true
|
||||
singleton: false
|
||||
default: true
|
||||
title: ""
|
||||
templates: false
|
||||
build: true
|
||||
startupmanager: true
|
||||
supported_platforms: ""
|
||||
|
||||
|
||||
@@ -1,227 +0,0 @@
|
||||
module mycelium
|
||||
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.core
|
||||
import freeflowuniverse.herolib.installers.lang.rust
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.osal.screen
|
||||
import freeflowuniverse.herolib.ui
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
import os
|
||||
import time
|
||||
import json
|
||||
|
||||
// install mycelium will return true if it was already installed
|
||||
pub fn installss(args_ InstallArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
console.print_header('install mycelium.')
|
||||
|
||||
version := '0.5.6'
|
||||
|
||||
res := os.execute('${osal.profile_path_source_and()!} mycelium -V')
|
||||
if res.exit_code == 0 {
|
||||
r := res.output.split_into_lines().filter(it.trim_space().starts_with('mycelium'))
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse mycelium version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) > texttools.version(r[0].all_after_first('mycelium')) {
|
||||
args.reset = true
|
||||
}
|
||||
} else {
|
||||
args.reset = true
|
||||
}
|
||||
|
||||
if args.reset {
|
||||
console.print_header('install mycelium')
|
||||
|
||||
mut url := ''
|
||||
if core.is_linux_arm()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-aarch64-unknown-linux-musl.tar.gz'
|
||||
} else if core.is_linux_intel()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-x86_64-unknown-linux-musl.tar.gz'
|
||||
} else if core.is_osx_arm()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-aarch64-apple-darwin.tar.gz'
|
||||
} else if core.is_osx_intel()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-x86_64-apple-darwin.tar.gz'
|
||||
} else {
|
||||
return error('unsupported platform')
|
||||
}
|
||||
// console.print_debug(url)
|
||||
mut dest := osal.download(
|
||||
url: url
|
||||
minsize_kb: 1000
|
||||
reset: true
|
||||
expand_dir: '/tmp/myceliumnet'
|
||||
)!
|
||||
|
||||
mut myceliumfile := dest.file_get('mycelium')! // file in the dest
|
||||
|
||||
// console.print_debug(myceliumfile.str())
|
||||
|
||||
osal.cmd_add(
|
||||
source: myceliumfile.path
|
||||
)!
|
||||
}
|
||||
|
||||
// if args.restart {
|
||||
// stop()!
|
||||
// }
|
||||
start()!
|
||||
|
||||
console.print_debug('install mycelium ok')
|
||||
}
|
||||
|
||||
pub fn restart() ! {
|
||||
stop()!
|
||||
start()!
|
||||
}
|
||||
|
||||
pub fn stop() ! {
|
||||
name := 'mycelium'
|
||||
console.print_debug('stop ${name}')
|
||||
if core.is_osx()! {
|
||||
mut scr := screen.new(reset: false)!
|
||||
scr.kill(name)!
|
||||
} else {
|
||||
mut sm := startupmanager.get()!
|
||||
sm.stop(name)!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn start(args InstallArgs) ! {
|
||||
if check() {
|
||||
console.print_header('mycelium was already running')
|
||||
return
|
||||
}
|
||||
myinitname := core.initname()!
|
||||
name := 'mycelium'
|
||||
console.print_debug('start ${name} (startupmanager:${myinitname})')
|
||||
|
||||
mut cmd := ''
|
||||
|
||||
if core.is_osx()! {
|
||||
cmd = 'sudo -s '
|
||||
}
|
||||
|
||||
cmd += 'mycelium --key-file ${osal.hero_path()!}/cfg/priv_key.bin --peers tcp://188.40.132.242:9651 quic://185.69.166.7:9651 tcp://65.21.231.58:9651 --tun-name utun9'
|
||||
console.print_debug(cmd)
|
||||
if core.is_osx()! {
|
||||
// do not change, because we need this on osx at least
|
||||
|
||||
mut scr := screen.new(reset: false)!
|
||||
|
||||
if scr.exists(name) {
|
||||
console.print_header('mycelium was already running')
|
||||
return
|
||||
}
|
||||
|
||||
mut s := scr.add(name: name, start: true, reset: args.reset)!
|
||||
s.cmd_send(cmd)!
|
||||
|
||||
mut myui := ui.new()!
|
||||
console.clear()
|
||||
|
||||
console.print_stderr("
|
||||
On the next screen you will be able to fill in your password.
|
||||
Once done and the server is started: do 'control a + d'
|
||||
|
||||
")
|
||||
|
||||
_ = myui.ask_yesno(question: 'Please confirm you understand?')!
|
||||
|
||||
s.attach()! // to allow filling in passwd
|
||||
} else {
|
||||
mut sm := startupmanager.get()!
|
||||
sm.new(
|
||||
name: name
|
||||
cmd: cmd
|
||||
start: true
|
||||
)!
|
||||
}
|
||||
|
||||
console.print_debug('startup manager started')
|
||||
|
||||
time.sleep(100 * time.millisecond)
|
||||
|
||||
if !check() {
|
||||
return error('could not start mycelium')
|
||||
}
|
||||
|
||||
console.print_header('mycelium is running')
|
||||
}
|
||||
|
||||
pub fn check() bool {
|
||||
// if core.is_osx()! {
|
||||
// mut scr := screen.new(reset: false) or {return False}
|
||||
// name := 'mycelium'
|
||||
// if !scr.exists(name) {
|
||||
// return false
|
||||
// }
|
||||
// }
|
||||
|
||||
// if !(osal.process_exists_byname('mycelium') or {return False}) {
|
||||
// return false
|
||||
// }
|
||||
|
||||
// TODO: might be dangerous if that one goes out
|
||||
ping_result := osal.ping(address: '40a:152c:b85b:9646:5b71:d03a:eb27:2462', retry: 2) or {
|
||||
return false
|
||||
}
|
||||
if ping_result == .ok {
|
||||
console.print_debug('could reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
|
||||
return true
|
||||
}
|
||||
console.print_stderr('could not reach 40a:152c:b85b:9646:5b71:d03a:eb27:2462')
|
||||
return false
|
||||
}
|
||||
|
||||
// install mycelium will return true if it was already installed
|
||||
pub fn build_() ! {
|
||||
rust.install()!
|
||||
console.print_header('build mycelium')
|
||||
if !osal.done_exists('build_mycelium') && !osal.cmd_exists('mycelium') {
|
||||
panic('implement')
|
||||
// USE OUR PRIMITIVES (TODO, needs to change, was from zola)
|
||||
cmd := '
|
||||
source ~/.cargo/env
|
||||
cd /tmp
|
||||
rm -rf mycelium
|
||||
git clone https://github.com/getmycelium/mycelium.git
|
||||
cd mycelium
|
||||
cargo install --path . --locked
|
||||
mycelium --version
|
||||
cargo build --release --locked --no-default-features --features=native-tls
|
||||
cp target/release/mycelium ~/.cargo/bin/mycelium
|
||||
'
|
||||
osal.execute_stdout(cmd)!
|
||||
osal.done_set('build_mycelium', 'OK')!
|
||||
console.print_header('mycelium installed')
|
||||
} else {
|
||||
console.print_header('mycelium already installed')
|
||||
}
|
||||
}
|
||||
|
||||
struct MyceliumInspectResult {
|
||||
public_key string @[json: publicKey]
|
||||
address string
|
||||
}
|
||||
|
||||
pub fn inspect() !MyceliumInspectResult {
|
||||
command := 'mycelium inspect --key-file /root/hero/cfg/priv_key.bin --json'
|
||||
result := os.execute(command)
|
||||
if result.exit_code != 0 {
|
||||
return error('Command failed: ${result.output}')
|
||||
}
|
||||
inspect_result := json.decode(MyceliumInspectResult, result.output) or {
|
||||
return error('Failed to parse JSON: ${err}')
|
||||
}
|
||||
return inspect_result
|
||||
}
|
||||
|
||||
// if returns empty then probably mycelium is not installed
|
||||
pub fn ipaddr() string {
|
||||
r := inspect() or { MyceliumInspectResult{} }
|
||||
return r.address
|
||||
}
|
||||
@@ -3,50 +3,37 @@ module mycelium
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.core
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.osal.systemd
|
||||
import freeflowuniverse.herolib.installers.sysadmintools.zinit as zinit_installer
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
import freeflowuniverse.herolib.develop.gittools
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
import freeflowuniverse.herolib.installers.lang.golang
|
||||
import freeflowuniverse.herolib.installers.lang.rust
|
||||
import freeflowuniverse.herolib.installers.lang.python
|
||||
import os
|
||||
|
||||
fn startupcmd() ![]zinit.ZProcessNewArgs {
|
||||
mut installer := get()!
|
||||
mut res := []zinit.ZProcessNewArgs{}
|
||||
// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
|
||||
// res << zinit.ZProcessNewArgs{
|
||||
// name: 'mycelium'
|
||||
// cmd: 'mycelium server'
|
||||
// env: {
|
||||
// 'HOME': '/root'
|
||||
// }
|
||||
// }
|
||||
|
||||
mut peers_str := installer.peers.join(' ')
|
||||
mut tun_name := 'tun${installer.tun_nr}'
|
||||
|
||||
res << zinit.ZProcessNewArgs{
|
||||
name: 'mycelium'
|
||||
cmd: 'mycelium --key-file ${osal.hero_path()!}/cfg/priv_key.bin --peers ${peers_str} --tun-name ${tun_name}'
|
||||
env: {
|
||||
'HOME': '/root'
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
fn running_() !bool {
|
||||
mut installer := get()!
|
||||
// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
|
||||
// this checks health of mycelium
|
||||
// curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works
|
||||
// url:='http://127.0.0.1:${cfg.port}/api/v1'
|
||||
// mut conn := httpconnection.new(name: 'mycelium', url: url)!
|
||||
|
||||
// if cfg.secret.len > 0 {
|
||||
// conn.default_header.add(.authorization, 'Bearer ${cfg.secret}')
|
||||
// }
|
||||
// conn.default_header.add(.content_type, 'application/json')
|
||||
// console.print_debug("curl -X 'GET' '${url}'/tags --oauth2-bearer ${cfg.secret}")
|
||||
// r := conn.get_json_dict(prefix: 'tags', debug: false) or {return false}
|
||||
// println(r)
|
||||
// if true{panic("ssss")}
|
||||
// tags := r['Tags'] or { return false }
|
||||
// console.print_debug(tags)
|
||||
// console.print_debug('mycelium is answering.')
|
||||
return false
|
||||
fn running() !bool {
|
||||
mycelium.inspect() or { return false }
|
||||
return true
|
||||
}
|
||||
|
||||
fn start_pre() ! {
|
||||
@@ -64,19 +51,21 @@ fn stop_post() ! {
|
||||
//////////////////// following actions are not specific to instance of the object
|
||||
|
||||
// checks if a certain version or above is installed
|
||||
fn installed_() !bool {
|
||||
// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
|
||||
// res := os.execute('${osal.profile_path_source_and()!} mycelium version')
|
||||
// if res.exit_code != 0 {
|
||||
// return false
|
||||
// }
|
||||
// r := res.output.split_into_lines().filter(it.trim_space().len > 0)
|
||||
// if r.len != 1 {
|
||||
// return error("couldn't parse mycelium version.\n${res.output}")
|
||||
// }
|
||||
// if texttools.version(version) == texttools.version(r[0]) {
|
||||
// return true
|
||||
// }
|
||||
fn installed() !bool {
|
||||
cmd := '${osal.profile_path_source_and()!} mycelium -V'
|
||||
// println(cmd)
|
||||
res := os.execute(cmd)
|
||||
if res.exit_code != 0 {
|
||||
println(res)
|
||||
return false
|
||||
}
|
||||
r := res.output.split_into_lines().filter(it.trim_space().len > 0)
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse mycelium version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) == texttools.version(r[0].all_after_last('mycelium')) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -87,64 +76,78 @@ fn ulist_get() !ulist.UList {
|
||||
}
|
||||
|
||||
// uploads to S3 server if configured
|
||||
fn upload_() ! {
|
||||
fn upload() ! {
|
||||
// installers.upload(
|
||||
// cmdname: 'mycelium'
|
||||
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/mycelium'
|
||||
// )!
|
||||
}
|
||||
|
||||
fn install_() ! {
|
||||
fn install() ! {
|
||||
console.print_header('install mycelium')
|
||||
// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
|
||||
// mut url := ''
|
||||
// if core.is_linux_arm()! {
|
||||
// url = 'https://github.com/mycelium-dev/mycelium/releases/download/v${version}/mycelium_${version}_linux_arm64.tar.gz'
|
||||
// } else if core.is_linux_intel()! {
|
||||
// url = 'https://github.com/mycelium-dev/mycelium/releases/download/v${version}/mycelium_${version}_linux_amd64.tar.gz'
|
||||
// } else if core.is_osx_arm()! {
|
||||
// url = 'https://github.com/mycelium-dev/mycelium/releases/download/v${version}/mycelium_${version}_darwin_arm64.tar.gz'
|
||||
// } else if core.is_osx_intel()! {
|
||||
// url = 'https://github.com/mycelium-dev/mycelium/releases/download/v${version}/mycelium_${version}_darwin_amd64.tar.gz'
|
||||
// } else {
|
||||
// return error('unsupported platform')
|
||||
// }
|
||||
|
||||
// mut dest := osal.download(
|
||||
// url: url
|
||||
// minsize_kb: 9000
|
||||
// expand_dir: '/tmp/mycelium'
|
||||
// )!
|
||||
mut z_installer := zinit_installer.get()!
|
||||
z_installer.start()!
|
||||
|
||||
// //dest.moveup_single_subdir()!
|
||||
mut url := ''
|
||||
if core.is_linux_arm()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-aarch64-unknown-linux-musl.tar.gz'
|
||||
} else if core.is_linux_intel()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-x86_64-unknown-linux-musl.tar.gz'
|
||||
} else if core.is_osx_arm()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-aarch64-apple-darwin.tar.gz'
|
||||
} else if core.is_osx_intel()! {
|
||||
url = 'https://github.com/threefoldtech/mycelium/releases/download/v${version}/mycelium-x86_64-apple-darwin.tar.gz'
|
||||
} else {
|
||||
return error('unsupported platform')
|
||||
}
|
||||
|
||||
// mut binpath := dest.file_get('mycelium')!
|
||||
// osal.cmd_add(
|
||||
// cmdname: 'mycelium'
|
||||
// source: binpath.path
|
||||
// )!
|
||||
pathlib.get_dir(
|
||||
path: '${osal.hero_path()!}/cfg'
|
||||
create: true
|
||||
)!
|
||||
|
||||
mut dest := osal.download(
|
||||
url: url
|
||||
minsize_kb: 5000
|
||||
expand_dir: '/tmp/mycelium'
|
||||
)!
|
||||
mut binpath := dest.file_get('mycelium')!
|
||||
osal.cmd_add(
|
||||
cmdname: 'mycelium'
|
||||
source: binpath.path
|
||||
)!
|
||||
}
|
||||
|
||||
fn build_() ! {
|
||||
// url := 'https://github.com/threefoldtech/mycelium'
|
||||
fn build() ! {
|
||||
url := 'https://github.com/threefoldtech/mycelium'
|
||||
myplatform := core.platform()!
|
||||
if myplatform != .ubuntu {
|
||||
return error('only support ubuntu for now')
|
||||
}
|
||||
rust.install()!
|
||||
|
||||
// make sure we install base on the node
|
||||
// if core.platform()!= .ubuntu {
|
||||
// return error('only support ubuntu for now')
|
||||
// }
|
||||
// golang.install()!
|
||||
console.print_header('build mycelium')
|
||||
|
||||
// console.print_header('build mycelium')
|
||||
mut gs := gittools.new()!
|
||||
gitpath := gs.get_path(
|
||||
pull: true
|
||||
reset: false
|
||||
url: url
|
||||
)!
|
||||
|
||||
// gitpath := gittools.get_repo(coderoot: '/tmp/builder', url: url, reset: true, pull: true)!
|
||||
panic('implement')
|
||||
|
||||
cmd := '
|
||||
cd ${gitpath}
|
||||
source ~/.cargo/env
|
||||
cargo install --path . --locked
|
||||
cargo build --release --locked --no-default-features --features=native-tls
|
||||
cp target/release/mycelium ~/.cargo/bin/mycelium
|
||||
mycelium --version
|
||||
'
|
||||
osal.execute_stdout(cmd)!
|
||||
|
||||
// cmd := '
|
||||
// cd ${gitpath}
|
||||
// source ~/.cargo/env
|
||||
// exit 1 #todo
|
||||
// '
|
||||
// osal.execute_stdout(cmd)!
|
||||
//
|
||||
// //now copy to the default bin path
|
||||
// mut binpath := dest.file_get('...')!
|
||||
// adds it to path
|
||||
@@ -154,34 +157,11 @@ fn build_() ! {
|
||||
// )!
|
||||
}
|
||||
|
||||
fn destroy_() ! {
|
||||
// mut systemdfactory := systemd.new()!
|
||||
// systemdfactory.destroy("zinit")!
|
||||
fn destroy() ! {
|
||||
osal.process_kill_recursive(name: 'mycelium')!
|
||||
osal.cmd_delete('mycelium')!
|
||||
|
||||
// osal.process_kill_recursive(name:'zinit')!
|
||||
// osal.cmd_delete('zinit')!
|
||||
|
||||
// osal.package_remove('
|
||||
// podman
|
||||
// conmon
|
||||
// buildah
|
||||
// skopeo
|
||||
// runc
|
||||
// ')!
|
||||
|
||||
// //will remove all paths where go/bin is found
|
||||
// osal.profile_path_add_remove(paths2delete:"go/bin")!
|
||||
|
||||
// osal.rm("
|
||||
// podman
|
||||
// conmon
|
||||
// buildah
|
||||
// skopeo
|
||||
// runc
|
||||
// /var/lib/containers
|
||||
// /var/lib/podman
|
||||
// /var/lib/buildah
|
||||
// /tmp/podman
|
||||
// /tmp/conmon
|
||||
// ")!
|
||||
osal.rm('
|
||||
mycelium
|
||||
')!
|
||||
}
|
||||
|
||||
@@ -3,17 +3,141 @@ module mycelium
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import time
|
||||
|
||||
__global (
|
||||
mycelium_global map[string]&MyceliumInstaller
|
||||
mycelium_default string
|
||||
mycelium_installer_global map[string]&MyceliumInstaller
|
||||
mycelium_installer_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&MyceliumInstaller {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := MyceliumInstaller{}
|
||||
if args.name !in mycelium_installer_global {
|
||||
if !exists(args)! {
|
||||
set(obj)!
|
||||
} else {
|
||||
heroscript := context.hero_config_get('mycelium', args.name)!
|
||||
mut obj_ := heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return mycelium_installer_global[args.name] or {
|
||||
println(mycelium_installer_global)
|
||||
// bug if we get here because should be in globals
|
||||
panic('could not get config for mycelium with name, is bug:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
// register the config for the future
|
||||
pub fn set(o MyceliumInstaller) ! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set('mycelium', o.name, heroscript)!
|
||||
}
|
||||
|
||||
// does the config exists?
|
||||
pub fn exists(args_ ArgsGet) !bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists('mycelium', args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('mycelium', args.name)!
|
||||
if args.name in mycelium_installer_global {
|
||||
// del mycelium_installer_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
fn set_in_mem(o MyceliumInstaller) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
mycelium_installer_global[o.name] = &o2
|
||||
mycelium_installer_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'mycelium.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
heroscript := install_action.heroscript()
|
||||
mut obj2 := heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
|
||||
mut other_actions := plbook.find(filter: 'mycelium.')!
|
||||
for other_action in other_actions {
|
||||
if other_action.name in ['destroy', 'install', 'build'] {
|
||||
mut p := other_action.params
|
||||
reset := p.get_default_false('reset')
|
||||
if other_action.name == 'destroy' || reset {
|
||||
console.print_debug('install action mycelium.destroy')
|
||||
destroy()!
|
||||
}
|
||||
if other_action.name == 'install' {
|
||||
console.print_debug('install action mycelium.install')
|
||||
install()!
|
||||
}
|
||||
}
|
||||
if other_action.name in ['start', 'stop', 'restart'] {
|
||||
mut p := other_action.params
|
||||
name := p.get('name')!
|
||||
mut mycelium_obj := get(name: name)!
|
||||
console.print_debug('action object:\n${mycelium_obj}')
|
||||
if other_action.name == 'start' {
|
||||
console.print_debug('install action mycelium.${other_action.name}')
|
||||
mycelium_obj.start()!
|
||||
}
|
||||
|
||||
if other_action.name == 'stop' {
|
||||
console.print_debug('install action mycelium.${other_action.name}')
|
||||
mycelium_obj.stop()!
|
||||
}
|
||||
if other_action.name == 'restart' {
|
||||
console.print_debug('install action mycelium.${other_action.name}')
|
||||
mycelium_obj.restart()!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
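For reference, a small heroscript that exercises the actions handled by `play()` above; the parameter names mirror the `MyceliumInstaller` fields and the `name` argument required by the start/stop/restart branches (values are illustrative):

```hero
!!mycelium.configure name:'default' tun_nr:9

!!mycelium.install

!!mycelium.start name:'default'
```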
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
@@ -40,6 +164,12 @@ fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManag
|
||||
}
|
||||
}
|
||||
|
||||
// load from disk and make sure it is properly initialized
|
||||
pub fn (mut self MyceliumInstaller) reload() ! {
|
||||
switch(self.name)
|
||||
self = obj_init(self)!
|
||||
}
|
||||
|
||||
pub fn (mut self MyceliumInstaller) start() ! {
|
||||
switch(self.name)
|
||||
if self.running()! {
|
||||
@@ -48,8 +178,8 @@ pub fn (mut self MyceliumInstaller) start() ! {
|
||||
|
||||
console.print_header('mycelium start')
|
||||
|
||||
if !installed_()! {
|
||||
install_()!
|
||||
if !installed()! {
|
||||
install()!
|
||||
}
|
||||
|
||||
configure()!
|
||||
@@ -77,9 +207,9 @@ pub fn (mut self MyceliumInstaller) start() ! {
|
||||
return error('mycelium did not install properly.')
|
||||
}
|
||||
|
||||
pub fn (mut self MyceliumInstaller) install_start(model InstallArgs) ! {
|
||||
pub fn (mut self MyceliumInstaller) install_start(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
self.install(model)!
|
||||
self.install(args)!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
@@ -119,19 +249,32 @@ pub mut:
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn install(args InstallArgs) ! {
|
||||
if args.reset {
|
||||
destroy()!
|
||||
}
|
||||
if !(installed_()!) {
|
||||
install_()!
|
||||
pub fn (mut self MyceliumInstaller) install(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
if args.reset || (!installed()!) {
|
||||
install()!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn destroy() ! {
|
||||
destroy_()!
|
||||
pub fn (mut self MyceliumInstaller) build() ! {
|
||||
switch(self.name)
|
||||
build()!
|
||||
}
|
||||
|
||||
pub fn build() ! {
|
||||
build_()!
|
||||
pub fn (mut self MyceliumInstaller) destroy() ! {
|
||||
switch(self.name)
|
||||
self.stop() or {}
|
||||
destroy()!
|
||||
}
|
||||
|
||||
// switch instance to be used for mycelium
|
||||
pub fn switch(name string) {
|
||||
mycelium_installer_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
module mycelium
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import os
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
import freeflowuniverse.herolib.osal.tun
|
||||
|
||||
pub const version = '0.0.0'
|
||||
pub const version = '0.5.7'
|
||||
const singleton = true
|
||||
const default = true
|
||||
|
||||
@@ -11,17 +11,59 @@ const default = true
|
||||
@[heap]
|
||||
pub struct MyceliumInstaller {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
name string = 'default'
|
||||
peers []string = [
|
||||
'tcp://188.40.132.242:9651',
|
||||
'quic://[2a01:4f8:212:fa6::2]:9651',
|
||||
'tcp://185.69.166.7:9651',
|
||||
'quic://[2a02:1802:5e:0:ec4:7aff:fe51:e36b]:9651',
|
||||
'tcp://65.21.231.58:9651',
|
||||
'quic://[2a01:4f9:5a:1042::2]:9651',
|
||||
'tcp://[2604:a00:50:17b:9e6b:ff:fe1f:e054]:9651',
|
||||
'quic://5.78.122.16:9651',
|
||||
'tcp://[2a01:4ff:2f0:3621::1]:9651',
|
||||
'quic://142.93.217.194:9651',
|
||||
]
|
||||
tun_nr int
|
||||
}
|
||||
|
||||
fn obj_init(obj_ MyceliumInstaller) !MyceliumInstaller {
|
||||
// never call get here, only thing we can do here is work on object itself
|
||||
mut obj := obj_
|
||||
panic('implement')
|
||||
return obj
|
||||
// your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ MyceliumInstaller) !MyceliumInstaller {
|
||||
mut mycfg := mycfg_
|
||||
return mycfg
|
||||
}
|
||||
|
||||
// called before start if done
|
||||
fn configure() ! {
|
||||
// mut installer := get()!
|
||||
mut installer := get()!
|
||||
if installer.tun_nr == 0 {
|
||||
// Check if TUN is available first
|
||||
if available := tun.available() {
|
||||
if !available {
|
||||
return error('TUN is not available on this system')
|
||||
}
|
||||
// Get free TUN interface name
|
||||
if interface_name := tun.free() {
|
||||
// Parse the interface number from the name (e.g. "tun0" -> 0)
|
||||
nr := interface_name.trim_string_left('tun').int()
|
||||
installer.tun_nr = nr
|
||||
} else {
|
||||
return error('Failed to get free TUN interface: ${err}')
|
||||
}
|
||||
} else {
|
||||
return error('Failed to check TUN availability: ${err}')
|
||||
}
|
||||
set(installer)!
|
||||
}
|
||||
}
|
||||
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj MyceliumInstaller) !string {
|
||||
return encoderhero.encode[MyceliumInstaller](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !MyceliumInstaller {
|
||||
mut obj := encoderhero.decode[MyceliumInstaller](heroscript)!
|
||||
return obj
|
||||
}
|
||||
|
||||
lib/installers/net/mycelium/tun.v (new file, 1 line)
@@ -0,0 +1 @@
|
||||
module mycelium
|
||||
@@ -1,34 +1,17 @@
|
||||
# zinit
|
||||
|
||||
Zinit is ThreeFold's startup manager; on Linux it will be launched inside systemd.
|
||||
|
||||
```v
|
||||
|
||||
|
||||
To get started
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
```vlang
|
||||
|
||||
|
||||
import freeflowuniverse.herolib.installers.something. zinit
|
||||
|
||||
mut installer:= zinit.get()!
|
||||
import freeflowuniverse.herolib.installers.sysadmintools.zinit as zinit_installer
|
||||
|
||||
mut installer:=zinit_installer.get()!
|
||||
installer.start()!
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
|
||||
## example heroscript
|
||||
|
||||
```hero
|
||||
!!zinit.install
|
||||
homedir: '/home/user/zinit'
|
||||
username: 'admin'
|
||||
password: 'secretpassword'
|
||||
title: 'Some Title'
|
||||
host: 'localhost'
|
||||
port: 8888
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ module zinit
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.core
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
import freeflowuniverse.herolib.installers.lang.rust
|
||||
@@ -11,7 +12,7 @@ import freeflowuniverse.herolib.osal.systemd
|
||||
import os
|
||||
|
||||
// checks if a certain version or above is installed
|
||||
fn installed_() !bool {
|
||||
fn installed() !bool {
|
||||
cmd := 'zinit --version'
|
||||
// console.print_debug(cmd)
|
||||
res := os.execute(cmd)
|
||||
@@ -28,7 +29,7 @@ fn installed_() !bool {
|
||||
return false
|
||||
}
|
||||
|
||||
fn install_() ! {
|
||||
fn install() ! {
|
||||
console.print_header('install zinit')
|
||||
if !core.is_linux()! {
|
||||
return error('only support linux for now')
|
||||
@@ -52,7 +53,7 @@ fn install_() ! {
|
||||
console.print_header('install zinit done')
|
||||
}
|
||||
|
||||
fn build_() ! {
|
||||
fn build() ! {
|
||||
if !core.is_linux()! {
|
||||
return error('only support linux for now')
|
||||
}
|
||||
@@ -91,12 +92,12 @@ fn ulist_get() !ulist.UList {
|
||||
}
|
||||
|
||||
// uploads to S3 server if configured
|
||||
fn upload_() ! {
|
||||
fn upload() ! {
|
||||
}
|
||||
|
||||
fn startupcmd() ![]ZProcessNewArgs {
|
||||
fn startupcmd() ![]zinit.ZProcessNewArgs {
|
||||
mut res := []zinit.ZProcessNewArgs{}
|
||||
res << ZProcessNewArgs{
|
||||
res << zinit.ZProcessNewArgs{
|
||||
name: 'zinit'
|
||||
cmd: '/usr/local/bin/zinit init'
|
||||
startuptype: .systemd
|
||||
@@ -106,7 +107,7 @@ fn startupcmd() ![]ZProcessNewArgs {
|
||||
return res
|
||||
}
|
||||
|
||||
fn running_() !bool {
|
||||
fn running() !bool {
|
||||
cmd := 'zinit list'
|
||||
return osal.execute_ok(cmd)
|
||||
}
|
||||
@@ -123,7 +124,7 @@ fn stop_pre() ! {
|
||||
fn stop_post() ! {
|
||||
}
|
||||
|
||||
fn destroy_() ! {
|
||||
fn destroy() ! {
|
||||
mut systemdfactory := systemd.new()!
|
||||
systemdfactory.destroy('zinit')!
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ module zinit
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import time
|
||||
@@ -14,6 +15,65 @@ __global (
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&Zinit {
|
||||
return &Zinit{}
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut other_actions := plbook.find(filter: 'zinit.')!
|
||||
for other_action in other_actions {
|
||||
if other_action.name in ['destroy', 'install', 'build'] {
|
||||
mut p := other_action.params
|
||||
reset := p.get_default_false('reset')
|
||||
if other_action.name == 'destroy' || reset {
|
||||
console.print_debug('install action zinit.destroy')
|
||||
destroy()!
|
||||
}
|
||||
if other_action.name == 'install' {
|
||||
console.print_debug('install action zinit.install')
|
||||
install()!
|
||||
}
|
||||
}
|
||||
if other_action.name in ['start', 'stop', 'restart'] {
|
||||
mut p := other_action.params
|
||||
name := p.get('name')!
|
||||
mut zinit_obj := get(name: name)!
|
||||
console.print_debug('action object:\n${zinit_obj}')
|
||||
if other_action.name == 'start' {
|
||||
console.print_debug('install action zinit.${other_action.name}')
|
||||
zinit_obj.start()!
|
||||
}
|
||||
|
||||
if other_action.name == 'stop' {
|
||||
console.print_debug('install action zinit.${other_action.name}')
|
||||
zinit_obj.stop()!
|
||||
}
|
||||
if other_action.name == 'restart' {
|
||||
console.print_debug('install action zinit.${other_action.name}')
|
||||
zinit_obj.restart()!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
@@ -48,8 +108,8 @@ pub fn (mut self Zinit) start() ! {
|
||||
|
||||
console.print_header('zinit start')
|
||||
|
||||
if !installed_()! {
|
||||
install_()!
|
||||
if !installed()! {
|
||||
install()!
|
||||
}
|
||||
|
||||
configure()!
|
||||
@@ -77,9 +137,9 @@ pub fn (mut self Zinit) start() ! {
|
||||
return error('zinit did not install properly.')
|
||||
}
|
||||
|
||||
pub fn (mut self Zinit) install_start(model InstallArgs) ! {
|
||||
pub fn (mut self Zinit) install_start(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
self.install(model)!
|
||||
self.install(args)!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
@@ -119,19 +179,32 @@ pub mut:
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn install(args InstallArgs) ! {
|
||||
if args.reset {
|
||||
destroy()!
|
||||
}
|
||||
if !(installed_()!) {
|
||||
install_()!
|
||||
pub fn (mut self Zinit) install(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
if args.reset || (!installed()!) {
|
||||
install()!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn destroy() ! {
|
||||
destroy_()!
|
||||
pub fn (mut self Zinit) build() ! {
|
||||
switch(self.name)
|
||||
build()!
|
||||
}
|
||||
|
||||
pub fn build() ! {
|
||||
build_()!
|
||||
pub fn (mut self Zinit) destroy() ! {
|
||||
switch(self.name)
|
||||
self.stop() or {}
|
||||
destroy()!
|
||||
}
|
||||
|
||||
// switch instance to be used for zinit
|
||||
pub fn switch(name string) {
|
||||
zinit_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
|
||||
@@ -5,74 +5,69 @@ import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.core
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
|
||||
|
||||
import os
|
||||
|
||||
|
||||
//////////////////// following actions are not specific to instance of the object
|
||||
|
||||
// checks if a certain version or above is installed
|
||||
fn installed() !bool {
|
||||
res := os.execute('${osal.profile_path_source_and()!} podman -v')
|
||||
if res.exit_code != 0 {
|
||||
println(res)
|
||||
return false
|
||||
}
|
||||
r := res.output.split_into_lines().filter(it.trim_space().len > 0)
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse podman version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) <= texttools.version(r[0].all_after("version")) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
res := os.execute('${osal.profile_path_source_and()!} podman -v')
|
||||
if res.exit_code != 0 {
|
||||
println(res)
|
||||
return false
|
||||
}
|
||||
r := res.output.split_into_lines().filter(it.trim_space().len > 0)
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse podman version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) <= texttools.version(r[0].all_after('version')) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
//get the Upload List of the files
|
||||
// get the Upload List of the files
|
||||
fn ulist_get() !ulist.UList {
|
||||
//optionally build a UList which is all paths which are result of building, is then used e.g. in upload
|
||||
return ulist.UList{}
|
||||
// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
|
||||
return ulist.UList{}
|
||||
}
|
||||
|
||||
fn upload() ! {
|
||||
}
|
||||
|
||||
fn install() ! {
|
||||
console.print_header('install podman')
|
||||
mut url := ''
|
||||
if core.is_linux_arm()! || core.is_linux_intel()!{
|
||||
osal.package_install("podman,buildah,crun,mmdebstrap")!
|
||||
return
|
||||
} else if core.is_linux_intel()! {
|
||||
url = 'https://github.com/containers/podman/releases/download/v${version}/podman-installer-macos-arm64.pkg'
|
||||
} else if core.is_osx_intel()! {
|
||||
url = 'https://github.com/containers/podman/releases/download/v${version}/podman-installer-macos-amd64.pkg'
|
||||
} else {
|
||||
return error('unsupported platform')
|
||||
}
|
||||
console.print_header('install podman')
|
||||
mut url := ''
|
||||
if core.is_linux_arm()! || core.is_linux_intel()! {
|
||||
osal.package_install('podman,buildah,crun,mmdebstrap')!
|
||||
return
|
||||
} else if core.is_osx_arm()! {
|
||||
url = 'https://github.com/containers/podman/releases/download/v${version}/podman-installer-macos-arm64.pkg'
|
||||
} else if core.is_osx_intel()! {
|
||||
url = 'https://github.com/containers/podman/releases/download/v${version}/podman-installer-macos-amd64.pkg'
|
||||
} else {
|
||||
return error('unsupported platform')
|
||||
}
|
||||
|
||||
mut dest := osal.download(
|
||||
url: url
|
||||
minsize_kb: 9000
|
||||
expand_dir: '/tmp/podman'
|
||||
)!
|
||||
mut dest := osal.download(
|
||||
url: url
|
||||
minsize_kb: 9000
|
||||
expand_dir: '/tmp/podman'
|
||||
)!
|
||||
|
||||
//dest.moveup_single_subdir()!
|
||||
// dest.moveup_single_subdir()!
|
||||
|
||||
panic("implement")
|
||||
panic('implement')
|
||||
}
|
||||
|
||||
|
||||
fn destroy() ! {
|
||||
// mut systemdfactory := systemd.new()!
|
||||
// systemdfactory.destroy("zinit")!
|
||||
|
||||
// mut systemdfactory := systemd.new()!
|
||||
// systemdfactory.destroy("zinit")!
|
||||
// osal.process_kill_recursive(name:'zinit')!
|
||||
// osal.cmd_delete('zinit')!
|
||||
|
||||
// osal.process_kill_recursive(name:'zinit')!
|
||||
// osal.cmd_delete('zinit')!
|
||||
|
||||
osal.package_remove('
|
||||
osal.package_remove('
|
||||
podman
|
||||
conmon
|
||||
buildah
|
||||
@@ -80,10 +75,10 @@ fn destroy() ! {
|
||||
runc
|
||||
')!
|
||||
|
||||
// //will remove all paths where go/bin is found
|
||||
// osal.profile_path_add_remove(paths2delete:"go/bin")!
|
||||
// //will remove all paths where go/bin is found
|
||||
// osal.profile_path_add_remove(paths2delete:"go/bin")!
|
||||
|
||||
osal.rm("
|
||||
osal.rm('
|
||||
podman
|
||||
conmon
|
||||
buildah
|
||||
@@ -94,8 +89,5 @@ fn destroy() ! {
|
||||
/var/lib/buildah
|
||||
/tmp/podman
|
||||
/tmp/conmon
|
||||
")!
|
||||
|
||||
|
||||
')!
|
||||
}
|
||||
|
||||
|
||||
@@ -4,121 +4,109 @@ import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import time
|
||||
|
||||
__global (
|
||||
podman_global map[string]&PodmanInstaller
|
||||
podman_default string
|
||||
podman_global map[string]&PodmanInstaller
|
||||
podman_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet{
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
name string
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&PodmanInstaller {
|
||||
return &PodmanInstaller{}
|
||||
pub fn get(args_ ArgsGet) !&PodmanInstaller {
|
||||
return &PodmanInstaller{}
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string //if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
|
||||
mut args:=args_
|
||||
mut args := args_
|
||||
|
||||
mut plbook := args.plbook or {
|
||||
playbook.new(text: args.heroscript)!
|
||||
}
|
||||
|
||||
|
||||
mut other_actions := plbook.find(filter: 'podman.')!
|
||||
for other_action in other_actions {
|
||||
if other_action.name in ["destroy","install","build"]{
|
||||
mut p := other_action.params
|
||||
reset:=p.get_default_false("reset")
|
||||
if other_action.name == "destroy" || reset{
|
||||
console.print_debug("install action podman.destroy")
|
||||
destroy()!
|
||||
}
|
||||
if other_action.name == "install"{
|
||||
console.print_debug("install action podman.install")
|
||||
install()!
|
||||
}
|
||||
}
|
||||
}
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut other_actions := plbook.find(filter: 'podman.')!
|
||||
for other_action in other_actions {
|
||||
if other_action.name in ['destroy', 'install', 'build'] {
|
||||
mut p := other_action.params
|
||||
reset := p.get_default_false('reset')
|
||||
if other_action.name == 'destroy' || reset {
|
||||
console.print_debug('install action podman.destroy')
|
||||
destroy()!
|
||||
}
|
||||
if other_action.name == 'install' {
|
||||
console.print_debug('install action podman.install')
|
||||
install()!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
|
||||
// unknown
|
||||
// screen
|
||||
// zinit
|
||||
// tmux
|
||||
// systemd
|
||||
match cat{
|
||||
.zinit{
|
||||
console.print_debug("startupmanager: zinit")
|
||||
return startupmanager.get(cat:.zinit)!
|
||||
}
|
||||
.systemd{
|
||||
console.print_debug("startupmanager: systemd")
|
||||
return startupmanager.get(cat:.systemd)!
|
||||
}else{
|
||||
console.print_debug("startupmanager: auto")
|
||||
return startupmanager.get()!
|
||||
}
|
||||
}
|
||||
// unknown
|
||||
// screen
|
||||
// zinit
|
||||
// tmux
|
||||
// systemd
|
||||
match cat {
|
||||
.zinit {
|
||||
console.print_debug('startupmanager: zinit')
|
||||
return startupmanager.get(cat: .zinit)!
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('startupmanager: systemd')
|
||||
return startupmanager.get(cat: .systemd)!
|
||||
}
|
||||
else {
|
||||
console.print_debug('startupmanager: auto')
|
||||
return startupmanager.get()!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@[params]
|
||||
pub struct InstallArgs{
|
||||
pub struct InstallArgs {
|
||||
pub mut:
|
||||
reset bool
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn (mut self PodmanInstaller) install(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
if args.reset || (!installed()!) {
|
||||
install()!
|
||||
}
|
||||
switch(self.name)
|
||||
if args.reset || (!installed()!) {
|
||||
install()!
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub fn (mut self PodmanInstaller) destroy() ! {
|
||||
switch(self.name)
|
||||
destroy()!
|
||||
switch(self.name)
|
||||
destroy()!
|
||||
}
|
||||
|
||||
|
||||
|
||||
//switch instance to be used for podman
|
||||
// switch instance to be used for podman
|
||||
pub fn switch(name string) {
|
||||
podman_default = name
|
||||
podman_default = name
|
||||
}
|
||||
|
||||
|
||||
//helpers
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs{
|
||||
instance string = 'default'
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
module podman
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
import os
|
||||
@@ -7,34 +8,31 @@ pub const version = '4.9.3'
|
||||
const singleton = true
|
||||
const default = true
|
||||
|
||||
//THIS IS THE SOURCE OF THE INFORMATION IN THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
|
||||
// THIS IS THE SOURCE OF THE INFORMATION IN THIS FILE; HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
|
||||
@[heap]
|
||||
pub struct PodmanInstaller {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
name string = 'default'
|
||||
}
|
||||
|
||||
|
||||
|
||||
//your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ PodmanInstaller)!PodmanInstaller{
|
||||
mut mycfg:=mycfg_
|
||||
return mycfg
|
||||
// your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ PodmanInstaller) !PodmanInstaller {
|
||||
mut mycfg := mycfg_
|
||||
return mycfg
|
||||
}
|
||||
|
||||
//called before start if done
|
||||
// called before start if done
|
||||
fn configure() ! {
|
||||
//mut installer := get()!
|
||||
// mut installer := get()!
|
||||
}
|
||||
|
||||
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj PodmanInstaller) !string {
|
||||
return encoderhero.encode[PodmanInstaller ](obj)!
|
||||
return encoderhero.encode[PodmanInstaller](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !PodmanInstaller {
|
||||
mut obj := encoderhero.decode[PodmanInstaller](heroscript)!
|
||||
return obj
|
||||
mut obj := encoderhero.decode[PodmanInstaller](heroscript)!
|
||||
return obj
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import os
|
||||
|
||||
// checks if a certain version or above is installed
|
||||
fn installed() !bool {
|
||||
checkcmd := '${osal.profile_path_source_and()!} bun -version'
|
||||
checkcmd := '${os.home_dir()}/.bun/bin/bun -version'
|
||||
res := os.execute(checkcmd)
|
||||
if res.exit_code != 0 {
|
||||
println(res)
|
||||
@@ -22,7 +22,7 @@ fn installed() !bool {
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse bun version.\n${res.output}")
|
||||
}
|
||||
println(' ${texttools.version(version)} <= ${texttools.version(r[0])}')
|
||||
// println(' ${texttools.version(version)} <= ${texttools.version(r[0])}')
|
||||
if texttools.version(version) <= texttools.version(r[0]) {
|
||||
return true
|
||||
}
|
||||
|
||||
lib/installers/web/traefik/.heroscript (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
|
||||
!!hero_code.generate_installer
|
||||
name:'traefik'
|
||||
classname:'TraefikServer'
|
||||
singleton:1 //there can only be 1 object in the globals; it is called 'default'
|
||||
templates:1 //are there templates for the installer
|
||||
default:1 //can we create a default when the factory is used
|
||||
title:''
|
||||
supported_platforms:'' //osx, ... (empty means all)
|
||||
reset:0 // regenerate all, dangerous !!!
|
||||
startupmanager:1 //managed by a startup manager, default true
|
||||
lib/installers/web/traefik/htpasswd.v (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
module traefik
|
||||
|
||||
import crypto.bcrypt
|
||||
|
||||
// generate_htpasswd creates an Apache-style htpasswd entry for the given user and password
|
||||
// using bcrypt hashing with configurable cost (default 12)
|
||||
fn generate_htpasswd(user string, password string) !string {
|
||||
// Generate bcrypt hash
|
||||
hashed_password := bcrypt.generate_from_password(password.bytes(), 12) or {
|
||||
return error('Failed to hash password: ${err}')
|
||||
}
|
||||
|
||||
println(hashed_password)
|
||||
|
||||
// Return final formatted string
|
||||
return '${user}:${hashed_password}'
|
||||
}
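A minimal usage sketch with placeholder credentials; since `generate_htpasswd` is module-private, a call like this would sit inside the traefik installer itself, and the resulting entry is what the `traefik.toml` template expects for its basicAuth `users` list:

```vlang
module traefik

// hypothetical helper showing how generate_htpasswd would be used;
// the credentials are placeholders
fn example_htpasswd() ! {
	entry := generate_htpasswd('admin', 'secretpassword')!
	// entry looks like 'admin:$2a$12$...' and can be rendered into
	// the basicAuth users list of the traefik.toml template
	println(entry)
}
```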
|
||||
lib/installers/web/traefik/readme.md (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
# traefik
|
||||
|
||||
|
||||
|
||||
To get started
|
||||
|
||||
```vlang
|
||||
|
||||
|
||||
|
||||
import freeflowuniverse.herolib.installers.web.traefik as traefik_installer
|
||||
|
||||
mut installer:= traefik_installer.get()!
|
||||
|
||||
installer.start()!
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
|
||||
lib/installers/web/traefik/templates/traefik.toml (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
[api]
|
||||
dashboard = true
|
||||
debug = false
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
|
||||
[entryPoints.https]
|
||||
address = ":443"
|
||||
|
||||
# [providers]
|
||||
# [providers.file]
|
||||
# filename = "/etc/traefik/dynamic_conf.toml"
|
||||
# watch = true
|
||||
|
||||
[log]
|
||||
level = "WARN"
|
||||
|
||||
[accessLog]
|
||||
|
||||
[middlewares]
|
||||
[middlewares.basicAuth]
|
||||
[middlewares.basicAuth.basicAuth]
|
||||
users = [
|
||||
"${htaccesscode}"
|
||||
]
|
||||
realm = "Traefik Dashboard"
|
||||
removeHeader = true
|
||||
|
||||
[http.routers]
|
||||
[http.routers.api]
|
||||
rule = "Host(`traefik.local`)"
|
||||
service = "api^^internal"
|
||||
entryPoints = ["https"]
|
||||
middlewares = ["basicAuth"]
|
||||
lib/installers/web/traefik/traefik_actions.v (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
module traefik
|
||||
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.core
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
import os
|
||||
|
||||
fn startupcmd() ![]zinit.ZProcessNewArgs {
|
||||
mut installer := get()!
|
||||
mut res := []zinit.ZProcessNewArgs{}
|
||||
res << zinit.ZProcessNewArgs{
|
||||
name: 'traefik'
|
||||
cmd: 'traefik'
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
fn running() !bool {
|
||||
cmd := 'traefik healthcheck'
|
||||
res := os.execute(cmd)
|
||||
if res.exit_code != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
fn start_pre() ! {
|
||||
}
|
||||
|
||||
fn start_post() ! {
|
||||
}
|
||||
|
||||
fn stop_pre() ! {
|
||||
}
|
||||
|
||||
fn stop_post() ! {
|
||||
}
|
||||
|
||||
//////////////////// following actions are not specific to instance of the object
|
||||
|
||||
// checks if a certain version or above is installed
|
||||
fn installed() !bool {
|
||||
res := os.execute('${osal.profile_path_source_and()!} traefik version')
|
||||
if res.exit_code != 0 {
|
||||
return false
|
||||
}
|
||||
r := res.output.split_into_lines().filter(it.contains('Version'))
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse traefik version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) == texttools.version(r[0].all_after('Version:')) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// get the Upload List of the files
|
||||
fn ulist_get() !ulist.UList {
|
||||
return ulist.UList{}
|
||||
}
|
||||
|
||||
fn upload() ! {
|
||||
// installers.upload(
|
||||
// cmdname: 'traefik'
|
||||
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/traefik'
|
||||
// )!
|
||||
}
|
||||
|
||||
fn install() ! {
|
||||
console.print_header('install traefik')
|
||||
mut url := ''
|
||||
if core.is_linux_arm()! {
|
||||
url = 'https://github.com/traefik/traefik/releases/download/v${version}/traefik_v${version}_linux_arm64.tar.gz'
|
||||
} else if core.is_linux_intel()! {
|
||||
url = 'https://github.com/traefik/traefik/releases/download/v${version}/traefik_v${version}_linux_amd64.tar.gz'
|
||||
} else if core.is_osx_arm()! {
|
||||
url = 'https://github.com/traefik/traefik/releases/download/v${version}/traefik_v${version}_darwin_arm64.tar.gz'
|
||||
} else if core.is_osx_intel()! {
|
||||
url = 'https://github.com/traefik/traefik/releases/download/v${version}/traefik_v${version}_darwin_arm64.tar.gz'
|
||||
} else {
|
||||
return error('unsported platform')
|
||||
}
|
||||
|
||||
mut dest := osal.download(
|
||||
url: url
|
||||
minsize_kb: 20000
|
||||
expand_dir: '/tmp/traefik'
|
||||
)!
|
||||
|
||||
mut binpath := dest.file_get('traefik')!
|
||||
osal.cmd_add(
|
||||
cmdname: 'traefik'
|
||||
source: binpath.path
|
||||
)!
|
||||
}
|
||||
|
||||
fn destroy() ! {
|
||||
osal.process_kill_recursive(name: 'traefik')!
|
||||
osal.cmd_delete('traefik')!
|
||||
|
||||
osal.package_remove('
|
||||
traefik
|
||||
')!
|
||||
|
||||
osal.rm('
|
||||
traefik
|
||||
')!
|
||||
}
|
||||
275
lib/installers/web/traefik/traefik_factory_.v
Normal file
@@ -0,0 +1,275 @@
|
||||
module traefik
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import time
|
||||
|
||||
__global (
|
||||
traefik_global map[string]&TraefikServer
|
||||
traefik_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&TraefikServer {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := TraefikServer{}
|
||||
if args.name !in traefik_global {
|
||||
if !exists(args)! {
|
||||
set(obj)!
|
||||
} else {
|
||||
heroscript := context.hero_config_get('traefik', args.name)!
|
||||
mut obj_ := heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return traefik_global[args.name] or {
|
||||
println(traefik_global)
|
||||
// bug if we get here because should be in globals
|
||||
panic('bug: could not get config for traefik with name: ${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
// register the config for the future
|
||||
pub fn set(o TraefikServer) ! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set('traefik', o.name, heroscript)!
|
||||
}
|
||||
|
||||
// does the config exists?
|
||||
pub fn exists(args_ ArgsGet) !bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists('traefik', args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('traefik', args.name)!
|
||||
if args.name in traefik_global {
|
||||
// del traefik_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
fn set_in_mem(o TraefikServer) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
traefik_global[o.name] = &o2
|
||||
traefik_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'traefik.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
heroscript := install_action.heroscript()
|
||||
mut obj2 := heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
|
||||
mut other_actions := plbook.find(filter: 'traefik.')!
|
||||
for other_action in other_actions {
|
||||
if other_action.name in ['destroy', 'install', 'build'] {
|
||||
mut p := other_action.params
|
||||
reset := p.get_default_false('reset')
|
||||
if other_action.name == 'destroy' || reset {
|
||||
console.print_debug('install action traefik.destroy')
|
||||
destroy()!
|
||||
}
|
||||
if other_action.name == 'install' {
|
||||
console.print_debug('install action traefik.install')
|
||||
install()!
|
||||
}
|
||||
}
|
||||
if other_action.name in ['start', 'stop', 'restart'] {
|
||||
mut p := other_action.params
|
||||
name := p.get('name')!
|
||||
mut traefik_obj := get(name: name)!
|
||||
console.print_debug('action object:\n${traefik_obj}')
|
||||
if other_action.name == 'start' {
|
||||
console.print_debug('install action traefik.${other_action.name}')
|
||||
traefik_obj.start()!
|
||||
}
|
||||
|
||||
if other_action.name == 'stop' {
|
||||
console.print_debug('install action traefik.${other_action.name}')
|
||||
traefik_obj.stop()!
|
||||
}
|
||||
if other_action.name == 'restart' {
|
||||
console.print_debug('install action traefik.${other_action.name}')
|
||||
traefik_obj.restart()!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
|
||||
// unknown
|
||||
// screen
|
||||
// zinit
|
||||
// tmux
|
||||
// systemd
|
||||
match cat {
|
||||
.zinit {
|
||||
console.print_debug('startupmanager: zinit')
|
||||
return startupmanager.get(cat: .zinit)!
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('startupmanager: systemd')
|
||||
return startupmanager.get(cat: .systemd)!
|
||||
}
|
||||
else {
|
||||
console.print_debug('startupmanager: auto')
|
||||
return startupmanager.get()!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// load from disk and make sure it is properly initialized
|
||||
pub fn (mut self TraefikServer) reload() ! {
|
||||
switch(self.name)
|
||||
self = obj_init(self)!
|
||||
}
|
||||
|
||||
pub fn (mut self TraefikServer) start() ! {
|
||||
switch(self.name)
|
||||
if self.running()! {
|
||||
return
|
||||
}
|
||||
|
||||
console.print_header('traefik start')
|
||||
|
||||
if !installed()! {
|
||||
install()!
|
||||
}
|
||||
|
||||
configure()!
|
||||
|
||||
start_pre()!
|
||||
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
|
||||
console.print_debug('starting traefik with ${zprocess.startuptype}...')
|
||||
|
||||
sm.new(zprocess)!
|
||||
|
||||
sm.start(zprocess.name)!
|
||||
}
|
||||
|
||||
start_post()!
|
||||
|
||||
for _ in 0 .. 50 {
|
||||
if self.running()! {
|
||||
return
|
||||
}
|
||||
time.sleep(100 * time.millisecond)
|
||||
}
|
||||
return error('traefik did not start properly.')
|
||||
}
|
||||
|
||||
pub fn (mut self TraefikServer) install_start(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
self.install(args)!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
pub fn (mut self TraefikServer) stop() ! {
|
||||
switch(self.name)
|
||||
stop_pre()!
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
sm.stop(zprocess.name)!
|
||||
}
|
||||
stop_post()!
|
||||
}
|
||||
|
||||
pub fn (mut self TraefikServer) restart() ! {
|
||||
switch(self.name)
|
||||
self.stop()!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
pub fn (mut self TraefikServer) running() !bool {
|
||||
switch(self.name)
|
||||
|
||||
// walk over the generic processes, if not running return
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
r := sm.running(zprocess.name)!
|
||||
if r == false {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return running()!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct InstallArgs {
|
||||
pub mut:
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn (mut self TraefikServer) install(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
if args.reset || (!installed()!) {
|
||||
install()!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (mut self TraefikServer) destroy() ! {
|
||||
switch(self.name)
|
||||
self.stop() or {}
|
||||
destroy()!
|
||||
}
|
||||
|
||||
// switch instance to be used for traefik
|
||||
pub fn switch(name string) {
|
||||
traefik_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
52
lib/installers/web/traefik/traefik_model.v
Normal file
@@ -0,0 +1,52 @@
module traefik

import freeflowuniverse.herolib.data.paramsparser
import freeflowuniverse.herolib.data.encoderhero
import os
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.core.pathlib

pub const version = '3.3.3'
const singleton = true
const default = true

// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct TraefikServer {
pub mut:
	name string = 'default'
	// homedir    string
	// configpath string
	// username   string
	password string @[secret]
	// title      string
	// host       string
	// port       int
}

// your checking & initialization code if needed
fn obj_init(mycfg_ TraefikServer) !TraefikServer {
	mut mycfg := mycfg_
	return mycfg
}

// called before start if needed
fn configure() ! {
	mut installer := get()!
	htaccesscode := generate_htpasswd('admin', installer.password)!
	mut mycode := $tmpl('templates/traefik.toml')
	mut path := pathlib.get_file(path: '/etc/traefik/traefik.toml', create: true)!
	path.write(mycode)!
	console.print_debug(mycode)
}

/////////////NORMALLY NO NEED TO TOUCH

pub fn heroscript_dumps(obj TraefikServer) !string {
	return encoderhero.encode[TraefikServer](obj)!
}

pub fn heroscript_loads(heroscript string) !TraefikServer {
	mut obj := encoderhero.decode[TraefikServer](heroscript)!
	return obj
}
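For completeness, a heroscript sketch of what the `play` function in `traefik_factory_.v` picks up as a `traefik.configure` action. The field names follow the `TraefikServer` struct above, but the exact encoding is whatever `encoderhero` produces, so treat this as illustrative only:

```
!!traefik.configure
    name: 'default'
    password: 'mysecretpassword'
```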
@@ -109,12 +109,12 @@ pub fn profile_path_source() !string {
 	}
 	pp := profile_path()!
 	if os.exists(pp) {
-		res := os.execute('source ${pp}')
+		res := os.execute('/bin/sh ${pp}')
 		if res.exit_code != 0 {
-			console.print_stderr('WARNING: your profile is corrupt: ${pp}')
+			console.print_stderr('WARNING: your profile is corrupt, did:\nsource ${pp}\n${res}')
 			return error('profile corrupt')
 		} else {
-			return 'source ${pp}'
+			return '. ${pp}'
 		}
 	}
 	return ''
@@ -124,8 +124,8 @@ pub fn profile_path_source() !string {
 // or empty if it doesn't exist
 pub fn profile_path_source_and() !string {
 	p := profile_path_source() or { return '' }
-	if p.len==0{
-		return ""
+	if p.len == 0 {
+		return ''
 	}
 	return '${p} && '
 }
@@ -300,7 +300,7 @@ pub fn profile_paths_preferred() ![]string {

 	for file in toadd {
 		if os.exists(file) {
-			println('${file} exists')
+			// println('${file} exists')
 			profile_files2 << file
 		}
 	}
@@ -308,9 +308,9 @@ pub fn profile_paths_preferred() ![]string {
 }

 pub fn profile_path() !string {
-	if core.is_osx()! {
-		return '${os.home_dir()}/.zprofile'
-	} else {
-		return '${os.home_dir()}/.bash_profile'
+	mut preferred := profile_paths_preferred()!
+	if preferred.len == 0 {
+		return error("can't find profile_path, found none")
 	}
+	return preferred[0]
 }
95
lib/osal/coredns/README.md
Normal file
@@ -0,0 +1,95 @@
# CoreDNS Redis Record Management

This module provides functionality for managing DNS records in Redis for use with CoreDNS. It supports the most common DNS record types and provides a simple interface for adding and managing DNS records.

```v
import freeflowuniverse.herolib.osal.coredns

// Create a new DNS record set
mut rs := coredns.new_dns_record_set()

// Create and populate DNS records
rs.set_soa(mbox: 'hostmaster.example.net.', ns: 'ns1.example.net.')
rs.add_srv(target: 'tcp.example.com.', port: 123)
rs.add_txt(text: 'this is a wildcard')
rs.add_mx(host: 'host1.example.net.')
rs.add_a(name: 'host1', ip: '5.5.5.5')
rs.add_aaaa(name: 'host1', ip: '2001:db8::1')
rs.add_ns(host: 'ns1.example.net.')
rs.add_ns(host: 'ns2.example.net.')

// Store records in Redis
rs.set('example.com')!
```

## Record Types

The following DNS record types are supported:

### SRV Record
```v
SRVRecord {
    target   string // Required: Target hostname
    port     int    // Required: Port number
    priority int    // Default: 10
    weight   int    // Default: 100
    ttl      int    // Default: 300
}
```

### TXT Record
```v
TXTRecord {
    text string // Required: Text content
    ttl  int    // Default: 300
}
```

### MX Record
```v
MXRecord {
    host       string // Required: Mail server hostname
    preference int    // Default: 10
    ttl        int    // Default: 300
}
```

### A Record
```v
ARecord {
    name string // Required: Hostname
    ip   string // Required: IPv4 address
    ttl  int    // Default: 300
}
```

### AAAA Record
```v
AAAARecord {
    name string // Required: Hostname
    ip   string // Required: IPv6 address
    ttl  int    // Default: 300
}
```

### NS Record
```v
NSRecord {
    host string // Required: Nameserver hostname
    ttl  int    // Default: 300
}
```

### SOA Record
```v
SOARecord {
    mbox    string // Required: Email address of the admin
    ns      string // Required: Primary nameserver
    refresh int    // Default: 44
    retry   int    // Default: 55
    expire  int    // Default: 66
    minttl  int    // Default: 100
    ttl     int    // Default: 300
}
```
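Records can also be declared in heroscript and loaded through `play_dns` (defined in this module's `play.v`). A minimal sketch, assuming the module is importable as `freeflowuniverse.herolib.osal.coredns`:

```v
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.osal.coredns

fn main() ! {
	// declare a record in heroscript (see play.v for the full list of supported actions)
	script := "
!!dns.a_record
    name: 'host1'
    ip: '1.2.3.4'
    ttl: 300
"
	mut plbook := playbook.new(text: script)!
	rs := coredns.play_dns(mut plbook)!

	// push the resulting record set into Redis for CoreDNS to serve
	rs.set('example.com')!
}
```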
73
lib/osal/coredns/model.v
Normal file
@@ -0,0 +1,73 @@
module coredns

import freeflowuniverse.herolib.core.redisclient

// Input parameter structs for each record type
@[params]
struct SRVRecord {
pub mut:
	target   string @[required]
	port     int    @[required]
	priority int = 10
	weight   int = 100
	ttl      int = 300
}

@[params]
struct TXTRecord {
pub mut:
	text string @[required]
	ttl  int = 300
}

@[params]
struct MXRecord {
pub mut:
	host       string @[required]
	preference int = 10
	ttl        int = 300
}

@[params]
struct ARecord {
pub mut:
	name string @[required]
	ip   string @[required]
	ttl  int = 300
}

@[params]
struct AAAARecord {
pub mut:
	name string @[required]
	ip   string @[required]
	ttl  int = 300
}

@[params]
struct NSRecord {
pub mut:
	host string @[required]
	ttl  int = 300
}

@[params]
struct SOARecord {
pub mut:
	mbox    string @[required]
	ns      string @[required]
	refresh int = 44
	retry   int = 55
	expire  int = 66
	minttl  int = 100
	ttl     int = 300
}

// DNSRecordSet represents a set of DNS records
struct DNSRecordSet {
pub mut:
	srv   []SRVRecord
	txt   []TXTRecord
	mx    []MXRecord
	a     []ARecord
	aaaa  []AAAARecord
	ns    []NSRecord
	soa   ?SOARecord
	redis ?&redisclient.Redis
}
119
lib/osal/coredns/play.v
Normal file
@@ -0,0 +1,119 @@
|
||||
module coredns
|
||||
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
|
||||
// play_dns processes DNS-related actions from heroscript
|
||||
pub fn play_dns(mut plbook playbook.PlayBook) !DNSRecordSet {
|
||||
mut recordset := new_dns_record_set()
|
||||
|
||||
// Find all actions starting with dns.
|
||||
dns_actions := plbook.find(filter: 'dns.')!
|
||||
|
||||
for action in dns_actions {
|
||||
mut p := action.params
|
||||
|
||||
match action.name {
|
||||
'a_record' {
|
||||
recordset.add_a(
|
||||
name: p.get('name')!
|
||||
ip: p.get('ip')!
|
||||
ttl: p.get_int_default('ttl', 300)!
|
||||
)
|
||||
}
|
||||
'aaaa_record' {
|
||||
recordset.add_aaaa(
|
||||
name: p.get('name')!
|
||||
ip: p.get('ip')!
|
||||
ttl: p.get_int_default('ttl', 300)!
|
||||
)
|
||||
}
|
||||
'mx_record' {
|
||||
recordset.add_mx(
|
||||
host: p.get('host')!
|
||||
preference: p.get_int_default('preference', 10)!
|
||||
ttl: p.get_int_default('ttl', 300)!
|
||||
)
|
||||
}
|
||||
'txt_record' {
|
||||
recordset.add_txt(
|
||||
text: p.get('text')!
|
||||
ttl: p.get_int_default('ttl', 300)!
|
||||
)
|
||||
}
|
||||
'srv_record' {
|
||||
recordset.add_srv(
|
||||
target: p.get('target')!
|
||||
port: p.get_int('port')!
|
||||
priority: p.get_int_default('priority', 10)!
|
||||
weight: p.get_int_default('weight', 100)!
|
||||
ttl: p.get_int_default('ttl', 300)!
|
||||
)
|
||||
}
|
||||
'ns_record' {
|
||||
recordset.add_ns(
|
||||
host: p.get('host')!
|
||||
ttl: p.get_int_default('ttl', 300)!
|
||||
)
|
||||
}
|
||||
'soa_record' {
|
||||
recordset.set_soa(
|
||||
mbox: p.get('mbox')!
|
||||
ns: p.get('ns')!
|
||||
refresh: p.get_int_default('refresh', 44)!
|
||||
retry: p.get_int_default('retry', 55)!
|
||||
expire: p.get_int_default('expire', 66)!
|
||||
minttl: p.get_int_default('minttl', 100)!
|
||||
ttl: p.get_int_default('ttl', 300)!
|
||||
)
|
||||
}
|
||||
else {
|
||||
// Unknown action, skip
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return recordset
|
||||
}
|
||||
|
||||
// Example usage:
|
||||
/*
|
||||
!!dns.a_record
|
||||
name: 'host1'
|
||||
ip: '1.2.3.4'
|
||||
ttl: 300
|
||||
|
||||
!!dns.aaaa_record
|
||||
name: 'host1'
|
||||
ip: '2001:db8::1'
|
||||
ttl: 300
|
||||
|
||||
!!dns.mx_record
|
||||
host: 'mail.example.com'
|
||||
preference: 10
|
||||
ttl: 300
|
||||
|
||||
!!dns.txt_record
|
||||
text: 'v=spf1 mx ~all'
|
||||
ttl: 300
|
||||
|
||||
!!dns.srv_record
|
||||
target: 'sip.example.com'
|
||||
port: 5060
|
||||
priority: 10
|
||||
weight: 100
|
||||
ttl: 300
|
||||
|
||||
!!dns.ns_record
|
||||
host: 'ns1.example.com'
|
||||
ttl: 300
|
||||
|
||||
!!dns.soa_record
|
||||
mbox: 'hostmaster.example.com'
|
||||
ns: 'ns1.example.com'
|
||||
refresh: 44
|
||||
retry: 55
|
||||
expire: 66
|
||||
minttl: 100
|
||||
ttl: 300
|
||||
*/
|
||||
205
lib/osal/coredns/populator.v
Normal file
@@ -0,0 +1,205 @@
|
||||
module coredns
|
||||
|
||||
import json
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
// new_dns_record_set creates a new DNSRecordSet
|
||||
pub fn new_dns_record_set() DNSRecordSet {
|
||||
return DNSRecordSet{
|
||||
srv: []SRVRecord{}
|
||||
txt: []TXTRecord{}
|
||||
mx: []MXRecord{}
|
||||
a: []ARecord{}
|
||||
aaaa: []AAAARecord{}
|
||||
ns: []NSRecord{}
|
||||
}
|
||||
}
|
||||
|
||||
// add_srv adds an SRV record to the set
|
||||
pub fn (mut rs DNSRecordSet) add_srv(args SRVRecord) {
|
||||
rs.srv << SRVRecord{
|
||||
target: args.target
|
||||
port: args.port
|
||||
priority: args.priority
|
||||
weight: args.weight
|
||||
ttl: args.ttl
|
||||
}
|
||||
}
|
||||
|
||||
// add_txt adds a TXT record to the set
|
||||
pub fn (mut rs DNSRecordSet) add_txt(args TXTRecord) {
|
||||
rs.txt << TXTRecord{
|
||||
text: args.text
|
||||
ttl: args.ttl
|
||||
}
|
||||
}
|
||||
|
||||
// add_mx adds an MX record to the set
|
||||
pub fn (mut rs DNSRecordSet) add_mx(args MXRecord) {
|
||||
rs.mx << MXRecord{
|
||||
host: args.host
|
||||
preference: args.preference
|
||||
ttl: args.ttl
|
||||
}
|
||||
}
|
||||
|
||||
// add_a adds an A record to the set
|
||||
pub fn (mut rs DNSRecordSet) add_a(args ARecord) {
|
||||
rs.a << ARecord{
|
||||
name: args.name
|
||||
ip: args.ip
|
||||
ttl: args.ttl
|
||||
}
|
||||
}
|
||||
|
||||
// add_aaaa adds an AAAA record to the set
|
||||
pub fn (mut rs DNSRecordSet) add_aaaa(args AAAARecord) {
|
||||
rs.aaaa << AAAARecord{
|
||||
name: args.name
|
||||
ip: args.ip
|
||||
ttl: args.ttl
|
||||
}
|
||||
}
|
||||
|
||||
// add_ns adds an NS record to the set
|
||||
pub fn (mut rs DNSRecordSet) add_ns(args NSRecord) {
|
||||
rs.ns << NSRecord{
|
||||
host: args.host
|
||||
ttl: args.ttl
|
||||
}
|
||||
}
|
||||
|
||||
// set_soa sets the SOA record for the set
|
||||
pub fn (mut rs DNSRecordSet) set_soa(args SOARecord) {
|
||||
rs.soa = SOARecord{
|
||||
mbox: args.mbox
|
||||
ns: args.ns
|
||||
refresh: args.refresh
|
||||
retry: args.retry
|
||||
expire: args.expire
|
||||
minttl: args.minttl
|
||||
ttl: args.ttl
|
||||
}
|
||||
}
|
||||
|
||||
// populate_redis populates Redis with the DNS records
|
||||
//domain e.g. example.com. (not sure the . is at end)
|
||||
pub fn (rs DNSRecordSet) set(domain string) ! {
|
||||
mut redis := rs.redis or {redisclient.core_get()!}
|
||||
|
||||
// Store SRV records
|
||||
for srv in rs.srv {
|
||||
key := '_ssh._tcp.host1'
|
||||
value := json.encode({
|
||||
'srv': {
|
||||
'ttl': srv.ttl
|
||||
'target': srv.target
|
||||
'port': srv.port
|
||||
'priority': srv.priority
|
||||
'weight': srv.weight
|
||||
}
|
||||
})
|
||||
redis.hset(domain, key, value)!
|
||||
}
|
||||
|
||||
// Store TXT and MX records for wildcard
|
||||
if rs.txt.len > 0 || rs.mx.len > 0 {
|
||||
mut records := map[string]map[string]json.Any{}
|
||||
if rs.txt.len > 0 {
|
||||
records['txt'] = {
|
||||
'text': rs.txt[0].text
|
||||
'ttl': "${rs.txt[0].ttl}"
|
||||
}
|
||||
}
|
||||
if rs.mx.len > 0 {
|
||||
records['mx'] = {
|
||||
'host': rs.mx[0].host
|
||||
'priority': rs.mx[0].preference
|
||||
'ttl': rs.mx[0].ttl
|
||||
}
|
||||
}
|
||||
redis.hset(domain, '*', json.encode(records))!
|
||||
}
|
||||
|
||||
// Store A records
|
||||
for a in rs.a {
|
||||
value := json.encode({
|
||||
'a': {
|
||||
'ip4': a.ip
|
||||
'ttl': "${a.ttl}"
|
||||
}
|
||||
})
|
||||
redis.hset(domain, a.name, value)!
|
||||
}
|
||||
|
||||
// Store AAAA records
|
||||
for aaaa in rs.aaaa {
|
||||
value := json.encode({
|
||||
'aaaa': {
|
||||
'ip6': aaaa.ip
|
||||
'ttl': aaaa.ttl
|
||||
}
|
||||
})
|
||||
redis.hset(domain, aaaa.name, value)!
|
||||
}
|
||||
|
||||
// Store NS records
|
||||
if rs.ns.len > 0 {
|
||||
mut ns_records := []map[string]json.Any{}
|
||||
for ns in rs.ns {
|
||||
ns_records << {
|
||||
'host': ns.host
|
||||
'ttl': ns.ttl
|
||||
}
|
||||
}
|
||||
value := json.encode({
|
||||
'ns': ns_records
|
||||
})
|
||||
redis.hset(domain, 'subdel', value)!
|
||||
}
|
||||
|
||||
// Store SOA and root NS records at @
|
||||
if soa := rs.soa {
|
||||
mut root_records := map[string]json.Any{}
|
||||
root_records['soa'] = {
|
||||
'ttl': soa.ttl
|
||||
'minttl': soa.minttl
|
||||
'mbox': soa.mbox
|
||||
'ns': soa.ns
|
||||
'refresh': soa.refresh
|
||||
'retry': soa.retry
|
||||
'expire': soa.expire
|
||||
}
|
||||
|
||||
if rs.ns.len > 0 {
|
||||
mut ns_records := []map[string]json.Any{}
|
||||
for ns in rs.ns {
|
||||
ns_records << {
|
||||
'host': ns.host
|
||||
'ttl': ns.ttl
|
||||
}
|
||||
}
|
||||
root_records['ns'] = ns_records
|
||||
}
|
||||
|
||||
redis.hset(domain, '@', json.encode(root_records))!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (mut rs DNSRecordSet) example() ! {
|
||||
// Create and populate DNS records
|
||||
rs.set_soa(mbox: 'hostmaster.example.net.', ns: 'ns1.example.net.')
|
||||
rs.add_srv(target: 'tcp.example.com.', port: 123)
|
||||
rs.add_txt(text: 'this is a wildcard')
|
||||
rs.add_mx(host: 'host1.example.net.')
|
||||
rs.add_a(name: 'host1', ip: '5.5.5.5')
|
||||
rs.add_aaaa(name: 'host1', ip: '2001:db8::1')
|
||||
rs.add_txt(text: 'this is not a wildcard')
|
||||
rs.add_ns(host: 'ns1.subdel.example.net.')
|
||||
rs.add_ns(host: 'ns2.subdel.example.net.')
|
||||
rs.add_ns(host: 'ns1.example.net.')
|
||||
rs.add_ns(host: 'ns2.example.net.')
|
||||
|
||||
// Store records in Redis
|
||||
rs.set("example.com")!
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import net
 import time
 import freeflowuniverse.herolib.ui.console
 import freeflowuniverse.herolib.core
+import os

 pub enum PingResult {
 	ok
@@ -100,10 +101,43 @@ pub fn tcp_port_test(args TcpPortTestArgs) bool {
 	return false
 }

-// Returns the ipaddress as known on the public side
-// is using resolver4.opendns.com
+// Returns the public IP address as known on the public side
+// Uses resolver4.opendns.com to fetch the IP address
 pub fn ipaddr_pub_get() !string {
 	cmd := 'dig @resolver4.opendns.com myip.opendns.com +short'
 	ipaddr := exec(cmd: cmd)!
-	return ipaddr.output.trim('\n').trim(' \n')
+	public_ip := ipaddr.output.trim('\n').trim(' \n')
+	return public_ip
 }
+
+// also check that the address is bound to a local interface
+pub fn ipaddr_pub_get_check() !string {
+	// Check if the public IP matches any local interface
+	public_ip := ipaddr_pub_get()!
+	if !is_ip_on_local_interface(public_ip)! {
+		return error('Public IP ${public_ip} is NOT bound to any local interface (possibly behind a NAT firewall).')
+	}
+	return public_ip
+}
+
+// Check if the public IP matches any of the local network interfaces
+pub fn is_ip_on_local_interface(public_ip string) !bool {
+	interfaces := exec(cmd: 'ip addr show', stdout: false) or {
+		return error('Failed to enumerate network interfaces.')
+	}
+	lines := interfaces.output.split('\n')
+
+	// Parse through the `ip addr show` output to find local IPs
+	for line in lines {
+		if line.contains('inet ') {
+			parts := line.trim_space().split(' ')
+			if parts.len > 1 {
+				local_ip := parts[1].split('/')[0] // Extract the IP address
+				if public_ip == local_ip {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
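A short usage sketch for the functions added above (assuming they live in the `osal` module, like the surrounding `tcp_port_test` code):

```v
import freeflowuniverse.herolib.osal

fn main() ! {
	// the IP address the outside world sees for this machine (via OpenDNS)
	pub_ip := osal.ipaddr_pub_get()!
	println('public ip: ${pub_ip}')

	// same, but errors out when the address is not bound to a local interface (e.g. behind NAT)
	checked := osal.ipaddr_pub_get_check() or {
		println('not directly reachable: ${err}')
		return
	}
	println('public ip bound locally: ${checked}')
}
```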
@@ -53,9 +53,9 @@ pub fn package_install(name_ string) ! {
 	platform_ := core.platform()!
 	cpu := core.cputype()!

-	mut sudo_pre:=""
+	mut sudo_pre := ''
 	if core.sudo_required()! {
-		sudo_pre="sudo "
+		sudo_pre = 'sudo '
 	}
 	if platform_ == .osx {
 		if cpu == .arm {
@@ -68,10 +68,11 @@ pub fn package_install(name_ string) ! {
 		}
 	}
 	} else if platform_ == .ubuntu {
-		exec(cmd: 'export DEBIAN_FRONTEND=noninteractive && ${sudo_pre}apt install -y ${name} -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --allow-downgrades --allow-remove-essential --allow-change-held-packages')
-		or { return error('could not install package on Ubuntu: ${name}\nerror:\n${err}')}
+		exec(
+			cmd: 'export DEBIAN_FRONTEND=noninteractive && ${sudo_pre}apt install -y ${name} -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --allow-downgrades --allow-remove-essential --allow-change-held-packages'
+		) or { return error('could not install package on Ubuntu: ${name}\nerror:\n${err}') }
 	} else if platform_ == .alpine {
-		exec(cmd: "${sudo_pre}apk add ${name}") or {
+		exec(cmd: '${sudo_pre}apk add ${name}') or {
 			return error('could not install package on Alpine: ${name}\nerror:\n${err}')
 		}
 	} else if platform_ == .arch {
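For reference, a minimal sketch of calling this function (assuming it is exposed from the `osal` module, as other code in this changeset suggests with `osal.package_remove`):

```v
import freeflowuniverse.herolib.osal

fn main() ! {
	// picks brew, apt, apk or pacman depending on the detected platform
	osal.package_install('wget')!
}
```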
@@ -109,7 +109,7 @@ pub fn (mut sm StartupManager) new(args zinit.ZProcessNewArgs) ! {
|
||||
zinitfactory.new(args)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now')
|
||||
panic('to implement, startup manager only support screen & systemd for now: ${mycat}')
|
||||
}
|
||||
}
|
||||
// if args.start {
|
||||
@@ -222,7 +222,7 @@ pub fn (mut sm StartupManager) delete(name string) ! {
|
||||
}
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now')
|
||||
panic('to implement, startup manager only support screen & systemd for now ${mycat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -280,7 +280,7 @@ pub fn (mut sm StartupManager) status(name string) !ProcessStatus {
|
||||
}
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now')
|
||||
panic('to implement, startup manager only support screen & systemd for now ${mycat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -303,7 +303,7 @@ pub fn (mut sm StartupManager) output(name string) !string {
|
||||
return systemd.journalctl(service: name)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now')
|
||||
panic('to implement, startup manager only support screen & systemd for now ${mycat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -326,7 +326,7 @@ pub fn (mut sm StartupManager) exists(name string) !bool {
|
||||
return zinitfactory.exists(name)
|
||||
}
|
||||
else {
|
||||
panic('to implement. startup manager only support screen & systemd for now')
|
||||
panic('to implement. startup manager only support screen & systemd for now ${mycat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -347,7 +347,7 @@ pub fn (mut sm StartupManager) list() ![]string {
|
||||
return zinitfactory.names()
|
||||
}
|
||||
else {
|
||||
panic('to implement. startup manager only support screen & systemd for now')
|
||||
panic('to implement. startup manager only support screen & systemd for now: ${mycat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
124
lib/osal/traefik/README.md
Normal file
@@ -0,0 +1,124 @@
# Traefik Module

This module provides functionality to manage Traefik configurations using Redis as a key-value store provider.

## Overview

The module allows you to:
- Define HTTP/HTTPS routes
- Configure backend services
- Set up middlewares
- Manage TLS certificates
- Store configurations in Redis using Traefik's KV store format

## Usage Example

```v
import freeflowuniverse.herolib.osal.traefik

fn main() ! {
    // Create a new Traefik configuration
    mut config := traefik.new_traefik_config()

    // Add a router with a service
    config.add_route(
        name: 'my-router'
        rule: 'Host(`example.com`)'
        service: 'my-service'
        middlewares: ['auth']
        tls: true
    )

    // Add the corresponding service
    config.add_service(
        name: 'my-service'
        load_balancer: traefik.LoadBalancerConfig{
            servers: [
                traefik.ServerConfig{url: 'http://localhost:8080'},
                traefik.ServerConfig{url: 'http://localhost:8081'},
            ]
        }
    )

    // Add a basic auth middleware
    config.add_middleware(
        name: 'auth'
        typ: 'basicAuth'
        settings: {
            'users': '["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"]'
        }
    )

    // Add TLS configuration
    config.add_tls(
        domain: 'example.com'
        cert_file: '/path/to/cert.pem'
        key_file: '/path/to/key.pem'
    )

    // Store configuration in Redis
    config.set()!
}
```

## Redis Key Structure

The module uses the following Redis key structure as per Traefik's KV store specification:

- `traefik/http/routers/<name>/*` - Router configurations
- `traefik/http/services/<name>/*` - Service configurations
- `traefik/http/middlewares/<name>/*` - Middleware configurations
- `traefik/tls/certificates` - TLS certificate configurations
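For the usage example above, the keys written by `config.set()` (see `populator.v` in this module) look roughly like this; values are shown abbreviated and depend on your configuration:

```
traefik/http/routers/my-router/rule        = Host(`example.com`)
traefik/http/routers/my-router/service     = my-service
traefik/http/routers/my-router/middlewares = ["auth"]
traefik/http/routers/my-router/tls         = true
traefik/http/services/my-service/loadbalancer/servers = [{"url":"http://localhost:8080"},{"url":"http://localhost:8081"}]
traefik/http/middlewares/auth/basicAuth    = {"users":"..."}
traefik/tls/certificates (hash field example.com) = {"certFile":"/path/to/cert.pem","keyFile":"/path/to/key.pem"}
```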
## Configuration Types

### Router Configuration
```v
RouteConfig {
    name:        string   // Router name
    rule:        string   // Routing rule (e.g., "Host(`example.com`)")
    service:     string   // Service to forward to
    middlewares: []string // Middleware chain
    priority:    int      // Route priority
    tls:         bool     // Enable TLS
}
```

### Service Configuration
```v
ServiceConfig {
    name:          string
    load_balancer: LoadBalancerConfig
}

LoadBalancerConfig {
    servers: []ServerConfig
}

ServerConfig {
    url: string
}
```

### Middleware Configuration
```v
MiddlewareConfig {
    name:     string            // Middleware name
    typ:      string            // Middleware type
    settings: map[string]string // Configuration settings
}
```

### TLS Configuration
```v
TLSConfig {
    domain:    string // Domain name
    cert_file: string // Certificate file path
    key_file:  string // Private key file path
}
```

## References

- [Traefik Redis Provider Documentation](https://doc.traefik.io/traefik/reference/install-configuration/providers/kv/redis/)
- [Traefik KV Dynamic Configuration](https://doc.traefik.io/traefik/reference/dynamic-configuration/kv/)
59
lib/osal/traefik/model.v
Normal file
@@ -0,0 +1,59 @@
module traefik

import freeflowuniverse.herolib.core.redisclient

// Base configuration structs for Traefik components

@[params]
struct RouteConfig {
pub mut:
	name        string @[required] // Name of the router
	rule        string @[required] // Routing rule (e.g., "Host(`example.com`)")
	service     string @[required] // Name of the service to forward to
	middlewares []string // List of middleware names to apply
	priority    int // Route priority
	tls         bool // Enable TLS for this router
}

@[params]
struct ServiceConfig {
pub mut:
	name          string             @[required] // Name of the service
	load_balancer LoadBalancerConfig @[required] // Load balancer configuration
}

@[params]
struct LoadBalancerConfig {
pub mut:
	servers []ServerConfig @[required] // List of backend servers
}

@[params]
struct ServerConfig {
pub mut:
	url string @[required] // URL of the backend server
}

@[params]
struct MiddlewareConfig {
pub mut:
	name     string @[required] // Name of the middleware
	typ      string @[required] // Type of middleware (e.g., "basicAuth", "stripPrefix")
	settings map[string]string // Middleware-specific settings
}

@[params]
struct TLSConfig {
pub mut:
	domain    string @[required] // Domain for the certificate
	cert_file string @[required] // Path to certificate file
	key_file  string @[required] // Path to private key file
}

// TraefikConfig represents a complete Traefik configuration
struct TraefikConfig {
pub mut:
	routers     []RouteConfig
	services    []ServiceConfig
	middlewares []MiddlewareConfig
	tls         []TLSConfig
	redis       ?&redisclient.Redis
}
155
lib/osal/traefik/populator.v
Normal file
@@ -0,0 +1,155 @@
|
||||
module traefik
|
||||
|
||||
import json
|
||||
import freeflowuniverse.herolib.core.redisclient
|
||||
|
||||
// new_traefik_config creates a new TraefikConfig
|
||||
pub fn new_traefik_config() TraefikConfig {
|
||||
return TraefikConfig{
|
||||
routers: []RouteConfig{}
|
||||
services: []ServiceConfig{}
|
||||
middlewares: []MiddlewareConfig{}
|
||||
tls: []TLSConfig{}
|
||||
}
|
||||
}
|
||||
|
||||
// add_route adds a route configuration
|
||||
pub fn (mut tc TraefikConfig) add_route(args RouteConfig) {
|
||||
tc.routers << RouteConfig{
|
||||
name: args.name
|
||||
rule: args.rule
|
||||
service: args.service
|
||||
middlewares: args.middlewares
|
||||
priority: args.priority
|
||||
tls: args.tls
|
||||
}
|
||||
}
|
||||
|
||||
// add_service adds a service configuration
|
||||
pub fn (mut tc TraefikConfig) add_service(args ServiceConfig) {
|
||||
tc.services << ServiceConfig{
|
||||
name: args.name
|
||||
load_balancer: args.load_balancer
|
||||
}
|
||||
}
|
||||
|
||||
// add_middleware adds a middleware configuration
|
||||
pub fn (mut tc TraefikConfig) add_middleware(args MiddlewareConfig) {
|
||||
tc.middlewares << MiddlewareConfig{
|
||||
name: args.name
|
||||
typ: args.typ
|
||||
settings: args.settings
|
||||
}
|
||||
}
|
||||
|
||||
// add_tls adds a TLS configuration
|
||||
pub fn (mut tc TraefikConfig) add_tls(args TLSConfig) {
|
||||
tc.tls << TLSConfig{
|
||||
domain: args.domain
|
||||
cert_file: args.cert_file
|
||||
key_file: args.key_file
|
||||
}
|
||||
}
|
||||
|
||||
// set populates Redis with the Traefik configuration
|
||||
pub fn (tc TraefikConfig) set() ! {
|
||||
mut redis := tc.redis or { redisclient.core_get()! }
|
||||
|
||||
// Store router configurations
|
||||
for router in tc.routers {
|
||||
base_key := 'traefik/http/routers/${router.name}'
|
||||
|
||||
// Set router rule
|
||||
redis.set('${base_key}/rule', router.rule)!
|
||||
|
||||
// Set service
|
||||
redis.set('${base_key}/service', router.service)!
|
||||
|
||||
// Set middlewares if any
|
||||
if router.middlewares.len > 0 {
|
||||
redis.set('${base_key}/middlewares', json.encode(router.middlewares))!
|
||||
}
|
||||
|
||||
// Set priority if non-zero
|
||||
if router.priority != 0 {
|
||||
redis.set('${base_key}/priority', router.priority.str())!
|
||||
}
|
||||
|
||||
// Set TLS if enabled
|
||||
if router.tls {
|
||||
redis.set('${base_key}/tls', 'true')!
|
||||
}
|
||||
}
|
||||
|
||||
// Store service configurations
|
||||
for service in tc.services {
|
||||
base_key := 'traefik/http/services/${service.name}'
|
||||
|
||||
// Set load balancer servers
|
||||
mut servers := []map[string]string{}
|
||||
for server in service.load_balancer.servers {
|
||||
servers << {'url': server.url}
|
||||
}
|
||||
redis.set('${base_key}/loadbalancer/servers', json.encode(servers))!
|
||||
}
|
||||
|
||||
// Store middleware configurations
|
||||
for middleware in tc.middlewares {
|
||||
base_key := 'traefik/http/middlewares/${middleware.name}'
|
||||
|
||||
// Set middleware type
|
||||
redis.set('${base_key}/${middleware.typ}', json.encode(middleware.settings))!
|
||||
}
|
||||
|
||||
// Store TLS configurations
|
||||
for tls in tc.tls {
|
||||
base_key := 'traefik/tls/certificates'
|
||||
cert_config := {
|
||||
'certFile': tls.cert_file
|
||||
'keyFile': tls.key_file
|
||||
}
|
||||
redis.hset(base_key, tls.domain, json.encode(cert_config))!
|
||||
}
|
||||
}
|
||||
|
||||
// example shows how to use the Traefik configuration
|
||||
pub fn (mut tc TraefikConfig) example() ! {
|
||||
// Add a basic router with service
|
||||
tc.add_route(
|
||||
name: 'my-router'
|
||||
rule: 'Host(`example.com`)'
|
||||
service: 'my-service'
|
||||
middlewares: ['auth']
|
||||
tls: true
|
||||
)
|
||||
|
||||
// Add the corresponding service
|
||||
tc.add_service(
|
||||
name: 'my-service'
|
||||
load_balancer: LoadBalancerConfig{
|
||||
servers: [
|
||||
ServerConfig{url: 'http://localhost:8080'},
|
||||
ServerConfig{url: 'http://localhost:8081'}
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
// Add a basic auth middleware
|
||||
tc.add_middleware(
|
||||
name: 'auth'
|
||||
typ: 'basicAuth'
|
||||
settings: {
|
||||
'users': '["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"]'
|
||||
}
|
||||
)
|
||||
|
||||
// Add TLS configuration
|
||||
tc.add_tls(
|
||||
domain: 'example.com'
|
||||
cert_file: '/path/to/cert.pem'
|
||||
key_file: '/path/to/key.pem'
|
||||
)
|
||||
|
||||
// Store configuration in Redis
|
||||
tc.set()!
|
||||
}
|
||||
62
lib/osal/tun/readme.md
Normal file
@@ -0,0 +1,62 @@
# TUN Interface Management

This module provides functionality to manage TUN (network tunnel) interfaces on Linux and macOS systems.

## Functions

### available() !bool
Checks if TUN/TAP functionality is available on the system:
- Linux: Verifies `/dev/net/tun` exists and is a character device
- macOS: Checks for `utun` interfaces using `ifconfig` and `sysctl`

### free() !string
Returns the name of an available TUN interface:
- Linux: Returns the first available interface from tun1-tun10
- macOS: Returns the next available utun interface number

## Example Usage

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.osal.tun

// Check if TUN is available
if available := tun.available() {
    if available {
        println('TUN is available on this system')

        // Get a free TUN interface name
        if interface_name := tun.free() {
            println('Found free TUN interface: ${interface_name}')

            // Example: Now you could use this interface name
            // to set up your tunnel
        } else {
            println('Error finding free interface: ${err}')
        }
    } else {
        println('TUN is not available on this system')
    }
} else {
    println('Error checking TUN availability: ${err}')
}
```

## Platform Support

The module automatically detects the platform (Linux/macOS) and uses the appropriate methods:

- On Linux: Uses `/dev/net/tun` and `ip link` commands
- On macOS: Uses `utun` interfaces via `ifconfig`

## Error Handling

Both functions return a Result type, so errors should be handled appropriately:
- Unsupported platform errors
- Interface availability errors
- System command execution errors
64
lib/osal/tun/tun.v
Normal file
@@ -0,0 +1,64 @@
module tun

import os
import freeflowuniverse.herolib.core

// available checks if TUN/TAP is available on the system
pub fn available() !bool {
	if core.is_linux()! {
		// Check if /dev/net/tun exists and is a character device
		if !os.exists('/dev/net/tun') {
			return false
		}
		// Try to get file info to verify it's a character device
		res := os.execute('test -c /dev/net/tun')
		return res.exit_code == 0
	} else if core.is_osx()! {
		// On macOS, check for utun interfaces
		res := os.execute('ifconfig | grep utun')
		if res.exit_code == 0 && res.output.len > 0 {
			return true
		}
		// Also try sysctl as an alternative check
		res2 := os.execute('sysctl -a | grep net.inet.ip.tun')
		return res2.exit_code == 0 && res2.output.len > 0
	}
	return error('Unsupported platform')
}

// free returns the name of an available TUN interface, e.g. 'utun1'
pub fn free() !string {
	if core.is_linux()! {
		// Try tun1 through tun10
		for i in 1 .. 11 {
			name := 'tun${i}'
			res := os.execute('ip link show ${name}')
			if res.exit_code != 0 {
				// Interface doesn't exist, so it's free
				return name
			}
		}
		return error('No free tun interface found')
	} else if core.is_osx()! {
		// On macOS, list existing utun interfaces to find the highest number
		res := os.execute('ifconfig | grep utun')
		if res.exit_code != 0 {
			// No utun interfaces exist, so utun0 would be next
			return 'utun0'
		}
		// Find the highest utun number
		mut max_num := -1
		lines := res.output.split('\n')
		for line in lines {
			if line.starts_with('utun') {
				mynum := line[4..].all_before(':').int()
				if mynum > max_num {
					max_num = mynum
				}
			}
		}
		// Next available number
		return 'utun${max_num + 1}'
	}
	return error('Unsupported platform')
}
38
lib/security/authentication/backend_memory.v
Normal file
@@ -0,0 +1,38 @@
module authentication

import log

// // Creates and updates, authenticates email authentication sessions
// @[noinit]
// struct MemoryBackend {
// mut:
// 	sessions map[string]AuthSession
// 	logger   &log.Logger = &log.Logger(&log.Log{
// 		level: .info
// 	})
// }

// // factory for
// pub fn new_memory_backend() !MemoryBackend {
// 	return MemoryBackend{}
// }

// fn (mut backend MemoryBackend) create_auth_session(session AuthSession) ! {
// 	backend.sessions[session.email] = session
// }

// fn (backend MemoryBackend) read_auth_session(email string) ?AuthSession {
// 	return backend.sessions[email] or { return none }
// }

// fn (mut backend MemoryBackend) update_auth_session(session AuthSession) ! {
// 	backend.sessions[session.email] = session
// }

// fn (mut backend MemoryBackend) set_session_authenticated(email string) ! {
// 	backend.sessions[email].authenticated = true
// }

// fn (mut backend MemoryBackend) delete_auth_session(email string) ! {
// 	backend.sessions.delete(email)
// }
@@ -109,7 +109,7 @@ pub fn (mut sm StartupManager) new(args zinit.ZProcessNewArgs) ! {
|
||||
zinitfactory.new(args)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now')
|
||||
panic('to implement, startup manager only support screen & systemd for now: ${mycat}')
|
||||
}
|
||||
}
|
||||
// if args.start {
|
||||
@@ -141,7 +141,7 @@ pub fn (mut sm StartupManager) start(name string) ! {
|
||||
zinitfactory.start(name)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen for now')
|
||||
panic('to implement, startup manager only support screen for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -171,7 +171,7 @@ pub fn (mut sm StartupManager) stop(name string) ! {
|
||||
}
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen for now')
|
||||
panic('to implement, startup manager only support screen for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -195,7 +195,7 @@ pub fn (mut sm StartupManager) restart(name string) ! {
|
||||
zinitfactory.start(name)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen for now')
|
||||
panic('to implement, startup manager only support screen for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -222,7 +222,7 @@ pub fn (mut sm StartupManager) delete(name string) ! {
|
||||
}
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now')
|
||||
panic('to implement, startup manager only support screen & systemd for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -280,7 +280,7 @@ pub fn (mut sm StartupManager) status(name string) !ProcessStatus {
|
||||
}
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now')
|
||||
panic('to implement, startup manager only support screen & systemd for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -326,7 +326,7 @@ pub fn (mut sm StartupManager) exists(name string) !bool {
|
||||
return zinitfactory.exists(name)
|
||||
}
|
||||
else {
|
||||
panic('to implement. startup manager only support screen & systemd for now')
|
||||
panic('to implement. startup manager only support screen & systemd for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -347,7 +347,7 @@ pub fn (mut sm StartupManager) list() ![]string {
|
||||
return zinitfactory.names()
|
||||
}
|
||||
else {
|
||||
panic('to implement. startup manager only support screen & systemd for now')
|
||||
panic('to implement. startup manager only support screen & systemd for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,7 +214,7 @@ pub fn (mut self Builder) inspect() !BuilderInfo {
 	return r
 }

-// mount the build container to a path and return
+// mount the build container to a path and return the path where it is mounted
 pub fn (mut self Builder) mount_to_path() !string {
 	cmd := 'buildah mount ${self.containername}'
 	out := osal.execute_silent(cmd)!
@@ -227,7 +227,7 @@ pub fn (mut self Builder) commit(image_name string) ! {
 }

 pub fn (self Builder) set_entrypoint(entrypoint string) ! {
-	cmd := 'buildah config --entrypoint ${entrypoint} ${self.containername}'
+	cmd := 'buildah config --entrypoint \'${entrypoint}\' ${self.containername}'
 	osal.exec(cmd: cmd)!
 }
22
lib/virt/herocontainers/builder_solutions.v
Normal file
@@ -0,0 +1,22 @@
module herocontainers

import freeflowuniverse.herolib.osal
// import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.installers.lang.herolib
import freeflowuniverse.herolib.core.pathlib
import os
import json

// install zinit into the build container and set it as the entrypoint
pub fn (mut self Builder) install_zinit() ! {
	self.run(
		cmd: '
			wget https://github.com/threefoldtech/zinit/releases/download/v0.2.5/zinit -O /sbin/zinit
			chmod +x /sbin/zinit
			touch /etc/environment
			mkdir -p /etc/zinit/
		'
	)!

	self.set_entrypoint('/sbin/zinit init --container')!
}
@@ -6,6 +6,7 @@ import os
|
||||
|
||||
@[params]
|
||||
pub struct GetArgs {
|
||||
pub mut:
|
||||
reset bool
|
||||
}
|
||||
|
||||
@@ -13,16 +14,14 @@ pub struct GetArgs {
|
||||
pub fn (mut e CEngine) builder_base(args GetArgs) !Builder {
|
||||
name := 'base'
|
||||
if !args.reset && e.builder_exists(name)! {
|
||||
|
||||
return e.builder_get(name)!
|
||||
}
|
||||
console.print_header('buildah base build')
|
||||
|
||||
|
||||
mut builder := e.builder_new(name: name, from: 'scratch', delete: true)!
|
||||
mount_path := builder.mount_to_path()!
|
||||
if mount_path.len<4{
|
||||
return error("mount_path needs to be +4 chars")
|
||||
if mount_path.len < 4 {
|
||||
return error('mount_path needs to be +4 chars')
|
||||
}
|
||||
osal.exec(
|
||||
cmd: '
|
||||
@@ -51,7 +50,7 @@ pub fn (mut e CEngine) builder_base(args GetArgs) !Builder {
|
||||
|
||||
# Install required packages
|
||||
echo "Installing essential packages..."
|
||||
chroot \${MOUNT_PATH} apt-get install -yq screen bash coreutils curl mc unzip sudo which openssh-client openssh-server redis
|
||||
chroot \${MOUNT_PATH} apt-get install -yq screen bash coreutils curl mc unzip sudo which openssh-client openssh-server redis wget
|
||||
|
||||
echo "Cleaning up..."
|
||||
umount "\${MOUNT_PATH}/dev" || true
|
||||
@@ -60,6 +59,7 @@ pub fn (mut e CEngine) builder_base(args GetArgs) !Builder {
|
||||
|
||||
'
|
||||
)!
|
||||
builder.install_zinit()!
|
||||
// builder.set_entrypoint('redis-server')!
|
||||
builder.commit('localhost/${name}')!
|
||||
return builder
|
||||
|
||||
@@ -17,10 +17,9 @@ fn (mut e CEngine) builders_load() ! {
|
||||
@[params]
|
||||
pub struct BuilderNewArgs {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
from string = 'docker.io/archlinux:latest'
|
||||
// arch_scratch bool // means start from scratch with arch linux
|
||||
delete bool = true
|
||||
name string = 'default'
|
||||
from string = 'docker.io/ubuntu:latest'
|
||||
delete bool = true
|
||||
}
|
||||
|
||||
pub fn (mut e CEngine) builder_new(args_ BuilderNewArgs) !Builder {
|
||||
@@ -66,14 +65,14 @@ pub fn (mut e CEngine) builder_get(name string) !Builder {
|
||||
}
|
||||
|
||||
pub fn (mut e CEngine) builders_delete_all() ! {
|
||||
console.print_debug("remove all")
|
||||
console.print_debug('remove all')
|
||||
osal.execute_stdout('buildah rm -a')!
|
||||
e.builders_load()!
|
||||
}
|
||||
|
||||
pub fn (mut e CEngine) builder_delete(name string) ! {
|
||||
if e.builder_exists(name)! {
|
||||
console.print_debug("remove ${name}")
|
||||
console.print_debug('remove ${name}')
|
||||
osal.execute_stdout('buildah rm ${name}')!
|
||||
e.builders_load()!
|
||||
}
|
||||
|
||||
@@ -28,37 +28,6 @@ pub mut:
|
||||
command string
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct ContainerCreateArgs {
|
||||
name string
|
||||
hostname string
|
||||
forwarded_ports []string // ["80:9000/tcp", "1000, 10000/udp"]
|
||||
mounted_volumes []string // ["/root:/root", ]
|
||||
env map[string]string // map of environment variables that will be passed to the container
|
||||
privileged bool
|
||||
remove_when_done bool = true // remove the container when it shuts down
|
||||
pub mut:
|
||||
image_repo string
|
||||
image_tag string
|
||||
command string = '/bin/bash'
|
||||
}
|
||||
|
||||
// TODO: implement
|
||||
|
||||
// import a container into an image, run podman container with it
|
||||
// image_repo examples ['myimage', 'myimage:latest']
|
||||
// if ContainerCreateArgs contains a name, container will be created and restarted
|
||||
// pub fn (mut e CEngine) container_import(path string, mut args ContainerCreateArgs) !&Container {
|
||||
// mut image := args.image_repo
|
||||
// if args.image_tag != '' {
|
||||
// image = image + ':${args.image_tag}'
|
||||
// }
|
||||
|
||||
// exec(cmd: 'herocontainers import ${path} ${image}', stdout: false)!
|
||||
// // make sure we start from loaded image
|
||||
// return e.container_create(args)
|
||||
// }
|
||||
|
||||
// create/start container (first need to get a herocontainerscontainer before we can start)
|
||||
pub fn (mut container Container) start() ! {
|
||||
exec(cmd: 'podman start ${container.id}')!
|
||||
|
||||
218
lib/virt/herocontainers/container_create.v
Normal file
@@ -0,0 +1,218 @@
|
||||
module herocontainers

import time
import freeflowuniverse.herolib.osal { exec }
import freeflowuniverse.herolib.data.ipaddress
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.virt.utils
import freeflowuniverse.herolib.ui.console

// info see https://docs.podman.io/en/latest/markdown/podman-run.1.html

@[params]
pub struct ContainerCreateArgs {
    name             string
    hostname         string
    forwarded_ports  []string          // ["80:9000/tcp", "1000, 10000/udp"]
    mounted_volumes  []string          // ["/root:/root", ]
    env              map[string]string // map of environment variables that will be passed to the container
    privileged       bool
    remove_when_done bool = true // remove the container when it shuts down
    // Resource limits
    memory             string // Memory limit (e.g. "100m", "2g")
    memory_reservation string // Memory soft limit
    memory_swap        string // Memory + swap limit
    cpus               f64    // Number of CPUs (e.g. 1.5)
    cpu_shares         int    // CPU shares (relative weight)
    cpu_period         int    // CPU CFS period in microseconds (default: 100000)
    cpu_quota          int    // CPU CFS quota in microseconds (e.g. 50000 for 0.5 CPU)
    cpuset_cpus        string // CPUs in which to allow execution (e.g. "0-3", "1,3")
    // Network configuration
    network         string   // Network mode (bridge, host, none, container:id)
    network_aliases []string // Add network-scoped aliases
    exposed_ports   []string // Ports to expose without publishing (e.g. "80/tcp", "53/udp")
    // DNS configuration
    dns_servers []string // Set custom DNS servers
    dns_options []string // Set custom DNS options
    dns_search  []string // Set custom DNS search domains
    // Device configuration
    devices             []string // Host devices to add (e.g. "/dev/sdc:/dev/xvdc:rwm")
    device_cgroup_rules []string // Add rules to cgroup allowed devices list
    // Runtime configuration
    detach      bool = true // Run container in background
    attach      []string    // Attach to STDIN, STDOUT, and/or STDERR
    interactive bool        // Keep STDIN open even if not attached (-i)
    // Storage configuration
    rootfs          string   // Use directory as container's root filesystem
    mounts          []string // Mount filesystem (type=bind,src=,dst=,etc)
    volumes         []string // Bind mount a volume (alternative to mounted_volumes)
    published_ports []string // Publish container ports to host (alternative to forwarded_ports)
pub mut:
    image_repo string
    image_tag  string
    command    string = '/bin/bash'
}

// create a new container from an image
pub fn (mut e CEngine) container_create(args_ ContainerCreateArgs) !&Container {
    mut args := args_

    mut cmd := 'podman run --systemd=false'

    // Handle detach/attach options
    if args.detach {
        cmd += ' -d'
    }
    for stream in args.attach {
        cmd += ' -a ${stream}'
    }

    if args.name != '' {
        cmd += ' --name ${texttools.name_fix(args.name)}'
    }

    if args.hostname != '' {
        cmd += ' --hostname ${args.hostname}'
    }

    if args.privileged {
        cmd += ' --privileged'
    }

    if args.remove_when_done {
        cmd += ' --rm'
    }

    // Handle interactive mode
    if args.interactive {
        cmd += ' -i'
    }

    // Handle rootfs
    if args.rootfs != '' {
        cmd += ' --rootfs ${args.rootfs}'
    }

    // Add mount points
    for mount in args.mounts {
        cmd += ' --mount ${mount}'
    }

    // Add volumes (--volume syntax)
    for volume in args.volumes {
        cmd += ' --volume ${volume}'
    }

    // Add published ports (--publish syntax)
    for port in args.published_ports {
        cmd += ' --publish ${port}'
    }

    // Add resource limits
    if args.memory != '' {
        cmd += ' --memory ${args.memory}'
    }

    if args.memory_reservation != '' {
        cmd += ' --memory-reservation ${args.memory_reservation}'
    }

    if args.memory_swap != '' {
        cmd += ' --memory-swap ${args.memory_swap}'
    }

    if args.cpus > 0 {
        cmd += ' --cpus ${args.cpus}'
    }

    if args.cpu_shares > 0 {
        cmd += ' --cpu-shares ${args.cpu_shares}'
    }

    if args.cpu_period > 0 {
        cmd += ' --cpu-period ${args.cpu_period}'
    }

    if args.cpu_quota > 0 {
        cmd += ' --cpu-quota ${args.cpu_quota}'
    }

    if args.cpuset_cpus != '' {
        cmd += ' --cpuset-cpus ${args.cpuset_cpus}'
    }

    // Add network configuration
    if args.network != '' {
        cmd += ' --network ${args.network}'
    }

    // Add network aliases
    for alias in args.network_aliases {
        cmd += ' --network-alias ${alias}'
    }

    // Add exposed ports
    for port in args.exposed_ports {
        cmd += ' --expose ${port}'
    }

    // Add devices
    for device in args.devices {
        cmd += ' --device ${device}'
    }

    // Add device cgroup rules
    for rule in args.device_cgroup_rules {
        cmd += ' --device-cgroup-rule ${rule}'
    }

    // Add DNS configuration
    for server in args.dns_servers {
        cmd += ' --dns ${server}'
    }

    for opt in args.dns_options {
        cmd += ' --dns-option ${opt}'
    }

    for search in args.dns_search {
        cmd += ' --dns-search ${search}'
    }

    // Add port forwarding
    for port in args.forwarded_ports {
        cmd += ' -p ${port}'
    }

    // Add volume mounts
    for volume in args.mounted_volumes {
        cmd += ' -v ${volume}'
    }

    // Add environment variables
    for key, value in args.env {
        cmd += ' -e ${key}=${value}'
    }

    // Add image name and tag
    mut image_name := args.image_repo
    if args.image_tag != '' {
        image_name += ':${args.image_tag}'
    }
    cmd += ' ${image_name}'

    // Add command if specified
    if args.command != '' {
        cmd += ' ${args.command}'
    }

    // Create the container
    mut ljob := exec(cmd: cmd, stdout: false)!
    container_id := ljob.output.trim_space()

    // Reload containers to get the new one
    e.load()!

    // Return the newly created container
    return e.container_get(name: args.name, id: container_id)!
}
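As a quick illustration (not part of the diff): with only an image set, the builder above always starts from `podman run --systemd=false`, adds `-d` (detach defaults to true) and `--rm` (remove_when_done defaults to true), then the image and the default command `/bin/bash`. A hypothetical minimal invocation therefore looks like this; `ubuntu` and `24.04` are example values for image_repo / image_tag, not project defaults:

```bash
# Sketch of the command container_create() assembles when only defaults apply.
podman run --systemd=false -d --rm ubuntu:24.04 /bin/bash
```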
@@ -62,8 +62,53 @@ buildah run --terminal --env TERM=xterm base_go_rust /bin/bash

to check disk usage inside the container:

```bash
pacman -Su ncdu
apt install ncdu
ncdu
```

## create container

```go
import freeflowuniverse.herolib.virt.herocontainers
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.builder

// interactive means it will ask for login/passwd

console.print_header("Get a container.")

mut e := herocontainers.new()!

// info see https://docs.podman.io/en/latest/markdown/podman-run.1.html

mut c := e.container_create(
    name:       'mycontainer'
    image_repo: 'ubuntu'
    // Resource limits
    memory: '1g'
    cpus:   0.5
    // Network config
    network:         'bridge'
    network_aliases: ['myapp', 'api']
    // DNS config
    dns_servers: ['8.8.8.8', '8.8.4.4']
    dns_search:  ['example.com']
    interactive: true // Keep STDIN open
    mounts: [
        'type=bind,src=/data,dst=/container/data,ro=true',
    ]
    volumes: [
        '/config:/etc/myapp:ro',
    ]
    published_ports: [
        '127.0.0.1:8080:80',
    ]
)!
```
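For reference (not part of the repository), the call above maps onto a single podman invocation. Following the flag order in container_create(), it comes out roughly as:

```bash
# Approximate command assembled by container_create() for the example above;
# -d, --rm and /bin/bash come from the struct defaults.
podman run --systemd=false -d --name mycontainer --rm -i \
  --mount type=bind,src=/data,dst=/container/data,ro=true \
  --volume /config:/etc/myapp:ro \
  --publish 127.0.0.1:8080:80 \
  --memory 1g --cpus 0.5 \
  --network bridge --network-alias myapp --network-alias api \
  --dns 8.8.8.8 --dns 8.8.4.4 --dns-search example.com \
  ubuntu /bin/bash
```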
@@ -42,8 +42,8 @@ pub mut:
    base_url       string @[json: 'baseUrl']
    image          string
    metadata       MainMetadata
    build_dest     string @[json: 'buildDest']
    build_dest_dev string @[json: 'buildDestDev']
    build_dest     []string @[json: 'buildDest']
    build_dest_dev []string @[json: 'buildDestDev']
}

// Navbar config structures
@@ -80,8 +80,37 @@ pub fn load_config(cfg_dir string) !Config {
    footer := json.decode(Footer, footer_content)!

    // Load and parse main config
    main_content := os.read_file(os.join_path(cfg_dir, 'main.json'))!
    main := json.decode(Main, main_content)!
    main_config_path := os.join_path(cfg_dir, 'main.json')
    main_content := os.read_file(main_config_path)!
    main := json.decode(Main, main_content) or {
        eprintln("${main_config_path} is not in the right format please fix.")
        println('

## EXAMPLE OF A GOOD ONE:

- note the list for buildDest and buildDestDev
- note its the full path where the html is pushed too

{
  "title": "ThreeFold Web4",
  "tagline": "ThreeFold Web4",
  "favicon": "img/favicon.png",
  "url": "https://docs.threefold.io",
  "url_home": "docs/introduction",
  "baseUrl": "/",
  "image": "img/tf_graph.png",
  "metadata": {
    "description": "ThreeFold is laying the foundation for a geo aware Web 4, the next generation of the Internet.",
    "image": "https://threefold.info/kristof/img/tf_graph.png",
    "title": "ThreeFold Docs"
  },
  "buildDest": ["root@info.ourworld.tf:/root/hero/www/info/tfgrid4"],
  "buildDestDev": ["root@info.ourworld.tf:/root/hero/www/infodev/tfgrid4"]
}
')
        exit(99)
    }

    // Load and parse navbar config
    navbar_content := os.read_file(os.join_path(cfg_dir, 'navbar.json'))!
@@ -30,24 +30,12 @@ pub mut:
    nameshort string
    path      string
    url       string
    // publish_path string
    publish_path  string
    build_path    string
    production    bool
    watch_changes bool = true
    update        bool
}

pub fn (mut f DocusaurusFactory) build_dev(args_ DSiteNewArgs) !&DocSite {
    mut s := f.add(args_)!
    s.generate()!
    osal.exec(
        cmd: '
        cd ${s.path_build.path}
        bash build_dev.sh
        '
        retry: 0
    )!
    return s
    deploykey string
}

pub fn (mut f DocusaurusFactory) build(args_ DSiteNewArgs) !&DocSite {
@@ -63,6 +51,35 @@ pub fn (mut f DocusaurusFactory) build(args_ DSiteNewArgs) !&DocSite {
    return s
}

pub fn (mut f DocusaurusFactory) build_dev_publish(args_ DSiteNewArgs) !&DocSite {
    mut s := f.add(args_)!
    s.generate()!
    osal.exec(
        cmd: '
        cd ${s.path_build.path}
        bash build_dev_publish.sh
        '
        retry: 0
    )!
    return s
}

pub fn (mut f DocusaurusFactory) build_publish(args_ DSiteNewArgs) !&DocSite {
    mut s := f.add(args_)!
    s.generate()!

    osal.exec(
        cmd: '
        cd ${s.path_build.path}
        bash build_publish.sh
        '
        retry: 0
    )!
    return s
}

pub fn (mut f DocusaurusFactory) dev(args_ DSiteNewArgs) !&DocSite {
    mut s := f.add(args_)!
@@ -126,8 +143,10 @@ pub fn (mut f DocusaurusFactory) add(args_ DSiteNewArgs) !&DocSite {
    // if args.publish_path.len == 0 {
    // 	args.publish_path = '${f.path_publish.path}/${args.name}'

    // coderoot:"${os.home_dir()}/hero/var/publishcode"
    mut gs := gittools.new(ssh_key_path: args.deploykey)!

    if args.url.len > 0 {
        mut gs := gittools.new()!
        args.path = gs.get_path(url: args.url)!
    }
@@ -135,7 +154,6 @@ pub fn (mut f DocusaurusFactory) add(args_ DSiteNewArgs) !&DocSite {
        return error("Can't get path from docusaurus site, its not specified.")
    }

    mut gs := gittools.new()!
    mut r := gs.get_repo(
        url:  'https://github.com/freeflowuniverse/docusaurus_template.git'
        pull: args.update
@@ -204,6 +222,7 @@ pub fn (mut site DocSite) error(args ErrorArgs) {

pub fn (mut site DocSite) generate() ! {
    console.print_header(' site generate: ${site.name} on ${site.path_build.path}')
    console.print_header(' site source on ${site.path_src.path}')
    site.template_install()!
    // osal.exec(
    // 	cmd: '
@@ -258,9 +277,12 @@ fn (mut site DocSite) template_install() ! {

    cfg := site.config

    profile_include := osal.profile_path_source()!

    develop := $tmpl('templates/develop.sh')
    build := $tmpl('templates/build.sh')
    build_dev := $tmpl('templates/build_dev.sh')
    build_dev_publish := $tmpl('templates/build_dev_publish.sh')
    build_publish := $tmpl('templates/build_publish.sh')

    mut develop_ := site.path_build.file_get_new('develop.sh')!
    develop_.template_write(develop, true)!
@@ -270,9 +292,13 @@ fn (mut site DocSite) template_install() ! {
    build_.template_write(build, true)!
    build_.chmod(0o700)!

    mut build_dev_ := site.path_build.file_get_new('build_dev.sh')!
    build_dev_.template_write(build_dev, true)!
    build_dev_.chmod(0o700)!
    mut build_publish_ := site.path_build.file_get_new('build_publish.sh')!
    build_publish_.template_write(build_publish, true)!
    build_publish_.chmod(0o700)!

    mut build_dev_publish_ := site.path_build.file_get_new('build_dev_publish.sh')!
    build_dev_publish_.template_write(build_dev_publish, true)!
    build_dev_publish_.chmod(0o700)!

    mut develop2_ := site.path_src.file_get_new('develop.sh')!
    develop2_.template_write(develop, true)!
@@ -282,7 +308,5 @@ fn (mut site DocSite) template_install() ! {
    build2_.template_write(build, true)!
    build2_.chmod(0o700)!

    mut build_dev2_ := site.path_src.file_get_new('build_dev.sh')!
    build_dev2_.template_write(build_dev, true)!
    build_dev2_.chmod(0o700)!
}
@@ -3,6 +3,7 @@ module docusaurus
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.web.bun
import os

fn (mut site DocusaurusFactory) template_install(update bool) ! {
    mut gs := gittools.new()!
@@ -24,6 +25,8 @@ fn (mut site DocusaurusFactory) template_install(update bool) ! {

    osal.exec(
        cmd: '
        ${osal.profile_path_source_and()!}
        export PATH=/tmp/docusaurus_build/node_modules/.bin:${os.home_dir()}/.bun/bin/:$PATH
        cd ${site.path_build.path}
        bun install
        '
@@ -1,18 +1,22 @@
#!/bin/bash

set -e
set -ex

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "${script_dir}"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "${script_dir}"

echo "Docs directory: $script_dir"

cd ${site.path_build.path}

export PATH=/tmp/docusaurus_build/node_modules/.bin:$PATH
export PATH=/tmp/docusaurus_build/node_modules/.bin:${HOME}/.bun/bin/:$PATH

rm -rf ${site.path_build.path}/build/

${profile_include}

bun docusaurus build

rsync -rv --delete ${site.path_build.path}/build/ ${cfg.main.build_dest.trim_right("/")}/${cfg.main.name.trim_right("/")}/
mkdir -p ${site.args.publish_path.trim_right("/")}
echo SYNC TO ${site.args.publish_path.trim_right("/")}
rsync -rv --delete ${site.path_build.path}/build/ ${site.args.publish_path.trim_right("/")}/
@@ -1,19 +0,0 @@
#!/bin/bash

set -e

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "${script_dir}"

echo "Docs directory: $script_dir"

cd ${site.path_build.path}

export PATH=/tmp/docusaurus_build/node_modules/.bin:$PATH

rm -rf ${site.path_build.path}/build/

bun docusaurus build

rsync -rv --delete ${site.path_build.path}/build/ ${cfg.main.build_dest_dev.trim_right("/")}/${cfg.main.name.trim_right("/")}/
23 lib/web/docusaurus/templates/build_dev_publish.sh Executable file
@@ -0,0 +1,23 @@
#!/bin/bash

set -e

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "${script_dir}"

echo "Docs directory: $script_dir"

cd ${site.path_build.path}

export PATH=/tmp/docusaurus_build/node_modules/.bin:${HOME}/.bun/bin/:$PATH

rm -rf ${site.path_build.path}/build/

${profile_include}

bun docusaurus build

@for dest in cfg.main.build_dest_dev
rsync -rv --delete ${site.path_build.path}/build/ ${dest.trim_right("/")}/
@end
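A note on the template above (not part of the diff): `@for ... @end` is a V template directive, so `template_write` emits one rsync line per entry of `cfg.main.build_dest_dev`. Assuming the single-entry `buildDestDev` from the example main.json shown earlier, and assuming the build path is /tmp/docusaurus_build (a guess based on the PATH export), the rendered script would contain roughly:

```bash
# Hypothetical rendered output for buildDestDev = ["root@info.ourworld.tf:/root/hero/www/infodev/tfgrid4"]
rsync -rv --delete /tmp/docusaurus_build/build/ root@info.ourworld.tf:/root/hero/www/infodev/tfgrid4/
```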
Some files were not shown because too many files have changed in this diff.