Merge branch 'development' into development_installers

2025-02-19 05:08:41 +03:00
56 changed files with 1639 additions and 416 deletions

cli/.gitignore vendored
View File

@@ -1 +1,3 @@
hero
compile
compile_upload

View File

@@ -51,7 +51,7 @@ fn do() ! {
mut cmd := Command{
name: 'hero'
description: 'Your HERO toolset.'
version: '1.0.7'
version: '1.0.13'
}
// herocmds.cmd_run_add_flags(mut cmd)

View File

@@ -0,0 +1,2 @@
bizmodel
dest

View File

@@ -0,0 +1,36 @@
#!/usr/bin/env -S v -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
//#!/usr/bin/env -S v -cg -enable-globals run
import freeflowuniverse.herolib.data.doctree
import freeflowuniverse.herolib.biz.bizmodel
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.core.playcmds
import freeflowuniverse.herolib.web.mdbook
import os
const wikipath = os.dir(@FILE) + '/wiki'
const summarypath = os.dir(@FILE) + '/wiki/summary.md'
// execute the actions so we have the info populated
// mut plb:=playbook.new(path: wikipath)!
// playcmds.run(mut plb,false)!
buildpath := '${os.home_dir()}/hero/var/mdbuild/bizmodel'
// just run the doctree & mdbook and it should
// load the doctree, these are all collections
mut tree := doctree.new(name: 'bizmodel')!
tree.scan(path: wikipath)!
tree.export(dest: buildpath, reset: true)!
// mut bm:=bizmodel.get("test")!
// println(bm)
mut mdbooks := mdbook.get()!
mdbooks.generate(
name: 'bizmodel'
summary_path: summarypath
doctree_path: buildpath
title: 'bizmodel example'
)!
mdbook.book_open('bizmodel')!

View File

@@ -0,0 +1,10 @@
## Loader instructions
This will make sure we load the appropriate bizmodel.
```js
!!bizmodel.load name:'default' url:'https://github.com/freeflowuniverse/herolib/tree/development/bizmodel/example/data'
```

View File

@@ -0,0 +1 @@
name:bizmodel_example

View File

@@ -0,0 +1,10 @@
![](img/ms1bmodel.png)
# bizmodel
OurWorld has developed a tool to generate and keep business models up to date.
Our aim is to make it easy for OurWorld to track changes in planning across multiple projects and even aggregate them. Because the input for such a plan is text (as you can see in this ebook), it is easy to see how the modelling and parameters change over time.
This is a very flexible tool which will be extended for budgeting, cashflow management, shareholder tables, ...

View File

@@ -0,0 +1,4 @@
# Debug
Some tools and info to help debug the bizmodel simulator.

View File

@@ -0,0 +1,8 @@
# HR Overview
!!!bizmodel.employees_wiki bizname:'test'
> note: an Nr People value like 0:5,20:5 means 5 people in month 0 (the start) and 5 people in month 20

View File

@@ -0,0 +1,5 @@
# CTO
!!!bizmodel.employee_wiki bizname:'test' name:'despiegk'
!!wiki.include page:cto_description.md

View File

@@ -0,0 +1,3 @@
## CTO Description
This is a page to test nested includes.

View File

@@ -0,0 +1 @@
ms1bmodel.png

Binary file not shown.

New image added (Size: 1.8 MiB).

View File

@@ -0,0 +1,52 @@
# This is our business model planner
## P&L Overview
<!-- period is in months, 3 means every quarter -->
!!!spreadsheet.graph_bar_row rowname:revenue_total unit:million title:'A Title' title_sub:'Sub' sheetname:'bizmodel_test'
Unit is in Million USD.
!!!spreadsheet.graph_bar_row rowname:revenue_total unit:million sheetname:'bizmodel_test'
!!!spreadsheet.graph_line_row rowname:revenue_total unit:million sheetname:'bizmodel_test'
!!!spreadsheet.graph_pie_row rowname:revenue_total unit:million size:'80%' sheetname:'bizmodel_test'
## FUNDING
!!!spreadsheet.sheet_wiki includefilter:'funding' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'REVENUE' includefilter:rev sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'Revenue Total' includefilter:'revtotal' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'REVENUE' includefilter:'revtotal2' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'COGS' includefilter:'cogs' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'Margin' includefilter:'margin' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'HR Teams' includefilter:'hrnr' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'HR Costs' includefilter:'hrcost' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'COSTS' includefilter:'ocost' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'HR Costs' includefilter:'hrcost' sheetname:'bizmodel_test'
!!!spreadsheet.sheet_wiki title:'P&L Overview' includefilter:'pl' sheetname:'bizmodel_test'
## Some Details
> shows how we can report per month
!!!spreadsheet.sheet_wiki includefilter:'pl' period_months:1 sheetname:'bizmodel_test'

View File

@@ -0,0 +1,31 @@
# Cost Center Params
Costs can be grouped in cost centers, which can then be used for further processing, e.g. transactions between companies.
```js
!!bizmodel.costcenter_define bizname:'test'
name:'tfdmcc'
descr:'TFDMCC executes on near source agreement for TFTech'
min_month:'10000USD'
max_month:'100000USD'
end_date:'1/1/2026' //when does agreement stop
!!bizmodel.costcenter_define bizname:'test'
name:'cs_tftech'
descr:'Nearsource agreement for TFTech towards Codescalers'
min_month:'10000USD'
max_month:'100000USD'
end_date:'1/1/2026'
!!bizmodel.costcenter_define bizname:'test'
name:'cs_tfcloud'
descr:'Nearsource agreement for TFCloud towards Codescalers'
min_month:'10000USD'
max_month:'100000USD'
end_date:'1/1/2026'
```

View File

@@ -0,0 +1,39 @@
# Generic Overhead Costs
Possible parameters:
- name
- descr: description of the cost
- cost: 'month:amount,month:amount, ...', no extrapolation
- cost_growth: 'month:amount,month:amount, ...' or just a nr; will extrapolate
- type: travel, admin, legal, varia, office
- cost_percent_revenue: e.g. 4%, makes sure the cost is at least 4% of revenue
- indexation: e.g. 2%
Other financial flows can be mentioned here as well.
```js
!!bizmodel.cost_define bizname:'test'
name:'rental'
descr:'Office Rental in BE.'
cost:'5000'
indexation:'2%'
type:'office'
!!bizmodel.cost_define bizname:'test'
name:'oneoff'
descr:'Event in Z.'
cost_one:'3:50000'
type:'event'
!!bizmodel.cost_define bizname:'test'
name:'cloud'
descr:'Datacenter and Cloud Costs'
cost:'2000eur'
cost_percent_revenue:'2%'
type:'cloud'
```

View File

@@ -0,0 +1,20 @@
# Department Params
```js
!!bizmodel.department_define bizname:'test'
name:'ops'
title:'Operations'
order:5
!!bizmodel.department_define bizname:'test'
name:'coordination'
title:'Coordination'
order:1
!!bizmodel.department_define bizname:'test'
name:'engineering'
title:'Engineering'
order:4
```

View File

@@ -0,0 +1,29 @@
# Funding Params
Possible parameters:
- name, e.g. for a specific person
- descr: description of the funding
- investment: 'month:amount,month:amount, ...'
- type: loan or capital
Other financial flows can be mentioned here as well.
```js
!!bizmodel.funding_define bizname:'test'
name:'our_investor'
descr:'A fantastic super investor.'
investment:'3:1000000EUR'
type:'capital'
!!bizmodel.funding_define bizname:'test'
name:'a_founder'
descr:'Together Are Strong'
investment:'2000000'
type:'loan'
```

View File

@@ -0,0 +1,73 @@
# HR Params
## Engineering
Possible parameters:
- descr, description of the function (e.g. master architect)
- cost, any currency, e.g. 1000usd
  - in case the cost changes over time, e.g. 1:10000USD,20:20000USD,60:30000USD
- indexation, e.g. 2%
- department
- name, e.g. for a specific person
- nrpeople: how many people per month, in growth-over-time notation, e.g. 1:10,60:20 means 10 in month 1 growing to 20 in month 60
- cost_percent_revenue: e.g. 4%, makes sure the cost is at least 4% of revenue
```js
!!bizmodel.employee_define bizname:'test'
sid:2
descr:'Senior Engineer'
cost:'1:12000,12:14000' //cost is always per person
department:'engineering'
nrpeople:'0:5,20:5'
!!bizmodel.employee_define bizname:'test'
name:'despiegk'
title: 'CTO and crazy inventor.'
sid:3
descr:'CTO'
cost:'12000EUR' //the salary is the cost independent of the fulltime status
indexation:'10%'
department:'coordination'
page:'cto.md'
fulltime: "50%" //100% means full time
!!bizmodel.employee_define bizname:'test'
descr:'Senior Architect'
cost:'10000USD' indexation:'5%'
department:'engineering'
nrpeople:'0:5,20:10'
!!bizmodel.employee_define bizname:'test'
descr:'Junior Engineer'
cost:'4000USD' indexation:'5%'
department:'engineering'
nrpeople:'0:5,20:10'
```
## Operations
```js
!!bizmodel.employee_define bizname:'test'
descr:'Ops Manager'
cost:'1:8000,12:14000'
department:'ops'
!!bizmodel.employee_define bizname:'test'
descr:'Support Junior'
cost:'2000EUR' indexation:'5%'
department:'ops'
nrpeople:'7:5,18:10'
cost_percent_revenue:'1%'
!!bizmodel.employee_define bizname:'test'
descr:'Support Senior'
cost:'5000EUR' indexation:'5%'
department:'ops'
nrpeople:'3:5,20:10'
cost_percent_revenue:'1%'
costcenter:'tfdmcc:25,cs_tfcloud:75'
generate_page:'../employees/support_senior.md'
```

View File

@@ -0,0 +1,14 @@
# Bizmodel Params
In this section we can find all the parameters for the bizmodel.
## How to use and read
The params are defined in the different instruction files, e.g. revenue_params.md.
Often you will see something like `revenue_growth:'10:1000,20:1100'`; this reads as: in month 10 it is 1000, in month 20 it is 1100.
The software will extrapolate the values in between.
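For illustration, a minimal sketch (not the actual herolib parser) of how such a growth string could be expanded, assuming linear interpolation between the listed months and holding the boundary values outside them:
```v
// expand a growth string like '10:1000,20:1100' into one value per month
// NOTE: illustrative only; the real extrapolation lives in herolib's spreadsheet module
fn expand(growth string, months int) []f64 {
	mut points := map[int]f64{}
	for part in growth.split(',') {
		kv := part.split(':')
		points[kv[0].int()] = kv[1].f64()
	}
	mut keys := points.keys()
	keys.sort()
	mut res := []f64{len: months}
	for m in 0 .. months {
		mut val := points[keys.first()] // before the first point: hold the first value
		for i in 0 .. keys.len - 1 {
			a := keys[i]
			b := keys[i + 1]
			if m >= a && m <= b {
				val = points[a] + (points[b] - points[a]) * f64(m - a) / f64(b - a)
			}
		}
		if m >= keys.last() {
			val = points[keys.last()] // after the last point: hold the last value
		}
		res[m] = val
	}
	return res
}
```
With `revenue_growth:'10:1000,20:1100'`, month 15 would come out at 1050 under this assumption.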

View File

@@ -0,0 +1,64 @@
# Revenue Params
## Revenue Items (non recurring)
This company is a cloud company ...
```js
!!bizmodel.revenue_define bizname:'test'
descr:'OEM Deals'
revenue_time:'10:1000000EUR,15:3333,20:1200000'
cogs_perc: '1:5%,20:10%'
!!bizmodel.revenue_define bizname:'test'
descr:'License Deals'
revenue_growth:'10:1000,20:1100'
cogs_perc: '10%'
rev_delay_month: 1
!!bizmodel.revenue_define bizname:'test'
descr:'3NODE License Sales 1 Time'
//means revenue is 100 month 1, 200 month 60
revenue_item:'1:100,60:200'
revenue_nr:'10:1000,24:2000,60:40000'
cogs_perc: '10%'
rev_delay_month: 1
```
## Revenue Items Recurring
Possible parameters:
- name, e.g. for a specific project
- descr, description of the revenue line item
- revenue_setup, revenue for 1 item, e.g. '1000usd'
- revenue_monthly, revenue per month for 1 item
- revenue_setup_delay, how many months before revenue comes in after sales
- revenue_monthly_delay, how many months before monthly revenue starts
- cogs_setup, cost of goods for 1 item at setup
- cogs_setup_perc: percentage of COGS for setup (can change over time), e.g. 0:50%
- cogs_monthly, cost of goods for the monthly per 1 item
- cogs_monthly_perc: percentage of COGS for monthly (can change over time), e.g. 0:5%,12:10%
- nr_sold: how many we sell per month (in growth format, e.g. 10:100,20:200)
- nr_months: for how many months the revenue recurs
If the currency is not specified, it is always USD.
```js
!!bizmodel.revenue_recurring_define bizname:'test'
name: '3node_lic'
descr:'3NODE License Sales Recurring Basic'
revenue_setup:'1:100,60:50'
// revenue_setup:'5'
revenue_monthly_delay:3
revenue_monthly:'1:1,60:1'
// cogs_setup:'1:0'
cogs_setup_perc:'50%'
revenue_setup_delay:1
cogs_monthly_perc:'50%'
nr_sold:'10:1000,24:2000,60:40000'
// 60 is the default
nr_months:60
```

View File

@@ -0,0 +1,13 @@
## Revenue
Overview of achieved revenue.
Unit is in Million USD.
!!!spreadsheet.sheet_wiki title:'REVENUE' includefilter:rev sheetname:'bizmodel_test'
!!!spreadsheet.graph_bar_row rowname:revenue_total unit:million sheetname:'bizmodel_test'
!!!spreadsheet.graph_line_row rowname:revenue_total unit:million sheetname:'bizmodel_test'
!!!spreadsheet.graph_pie_row rowname:revenue_total unit:million size:'80%' sheetname:'bizmodel_test'

View File

@@ -0,0 +1,13 @@
- [bizmodel](bizmodel_example/bizmodel.md)
- [Revenue](bizmodel_example/revenue.md)
- [Result](bizmodel_example/overview.md)
- [parameters](bizmodel_example/params.md)
  - [revenue_params](bizmodel_example/params/revenue_params.md)
  - [funding_params](bizmodel_example/params/funding_params.md)
  - [hr_params](bizmodel_example/params/hr_params.md)
  - [costs_params](bizmodel_example/params/costs_params.md)
- [rows overview](bizmodel_example/rows_overview.md)
- [employees](bizmodel_example/employees.md)
- [debug](bizmodel_example/debug.md)
- [worksheet](bizmodel_example/worksheet.md)

View File

@@ -0,0 +1,4 @@
# Overview of the rows in the biz model sheet
!!!spreadsheet.sheet_wiki sheetname:'bizmodel_test'

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env -S v -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
// #!/usr/bin/env -S v -cg -enable-globals run
import freeflowuniverse.herolib.data.doctree
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.biz.bizmodel
import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.core.playcmds
import freeflowuniverse.herolib.web.mdbook
import freeflowuniverse.herolib.biz.spreadsheet
import os
const name = 'tf9_budget'
const wikipath = '${os.home_dir()}/code/git.ourworld.tf/ourworld_holding/info_ourworld/collections/${name}'
const summarypath = '${wikipath}/summary.md'
// mut sh := spreadsheet.sheet_new(name: 'test2') or { panic(err) }
// println(sh)
// sh.row_new(descr: 'this is a description', name: 'something', growth: '0:100aed,55:1000eur')!
// println(sh)
// println(sh.wiki()!)
// exit(0)
// execute the actions so we have the info populated
// mut plb:=playbook.new(path: wikipath)!
// playcmds.run(mut plb,false)!
buildpath := '${os.home_dir()}/hero/var/mdbuild/bizmodel'
// just run the doctree & mdbook and it should
// load the doctree, these are all collections
mut tree := doctree.new(name: name)!
tree.scan(path: wikipath)!
tree.export(dest: buildpath, reset: true)!
// mut bm:=bizmodel.get("test")!
// println(bm)
mut mdbooks := mdbook.get()!
mdbooks.generate(
name: 'bizmodel'
summary_path: summarypath
doctree_path: buildpath
title: 'bizmodel ${name}'
)!
mdbook.book_open('bizmodel')!

View File

@@ -0,0 +1,12 @@
need to find where the manual is
- [manual](bizmodel_example/configuration.md)
- [widgets](bizmodel_example/widgets.md)
  - [graph_bar_row](bizmodel_example/graph_bar_row.md)
  - [sheet_tables](bizmodel_example/sheet_tables.md)
  - [widget_args](bizmodel_example/widget_args.md)
- [params](bizmodel_example/configuration.md)
  - [revenue params](bizmodel_example/revenue_params.md)
  - [funding params](bizmodel_example/funding_params.md)
  - [hr params](bizmodel_example/hr_params.md)
  - [costs params](bizmodel_example/costs_params.md)

View File

@@ -0,0 +1,11 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.biz.investortool
import freeflowuniverse.herolib.core.playbook
import os
mut plbook := playbook.new(
path: '${os.home_dir()}/code/git.ourworld.tf/ourworld_holding/investorstool/output'
)!
mut it := investortool.play(mut plbook)!
it.check()!

View File

@@ -0,0 +1,60 @@
#!/usr/bin/env -S v -n -w -cg -d use_openssl -enable-globals run
import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer
import freeflowuniverse.herolib.osal.coredns
import freeflowuniverse.herolib.core.playbook
// coredns_installer.delete()!
mut installer := coredns_installer.get()!
// coredns_installer.fix()!
installer.start()!
mut script := "
!!dns.a_record
sub_domain: 'host1'
ip: '1.2.3.4'
ttl: 300
!!dns.aaaa_record
sub_domain: 'host1'
ip: '2001:db8::1'
ttl: 300
!!dns.mx_record
sub_domain: '*'
host: 'mail.example.com'
preference: 10
ttl: 300
!!dns.txt_record
sub_domain: '*'
text: 'v=spf1 mx ~all'
ttl: 300
!!dns.srv_record
service: 'ssh'
protocol: 'tcp'
host: 'host1'
target: 'sip.example.com'
port: 5060
priority: 10
weight: 100
ttl: 300
!!dns.ns_record
host: 'ns1.example.com'
ttl: 300
!!dns.soa_record
mbox: 'hostmaster.example.com'
ns: 'ns1.example.com'
refresh: 44
retry: 55
expire: 66
minttl: 100
ttl: 300
"
mut plbook := playbook.new(text: script)!
mut set := coredns.play_dns(mut plbook)!
set.set(key_prefix: 'dns:', domain: 'heroexample.com')!
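// Hypothetical read-back check (not in the original script): inspect what
// set() stored. Assumes redisclient exposes hget, the counterpart of the
// hset used by DNSRecordSet.set in records.v; the key layout there is
// '<key_prefix><domain>.' hashed by sub_domain. Requires at the top of
// the script: import freeflowuniverse.herolib.core.redisclient
mut redis := redisclient.core_get()!
stored := redis.hget('dns:heroexample.com.', 'host1')!
println(stored) // JSON-encoded Record holding the A/AAAA entries defined above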

View File

@@ -4,7 +4,7 @@ set -e
os_name="$(uname -s)"
arch_name="$(uname -m)"
version='1.0.7'
version='1.0.13'
# Base URL for GitHub releases

View File

@@ -181,7 +181,7 @@ function os_update {
fi
#apt install apt-transport-https ca-certificates curl software-properties-common -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes
package_install "apt-transport-https ca-certificates curl wget software-properties-common tmux"
package_install "rclone rsync mc redis-server screen net-tools git dnsutils htop ca-certificates screen lsb-release binutils pkg-config"
package_install "rclone rsync mc redis-server screen net-tools git dnsutils htop ca-certificates screen lsb-release binutils pkg-config libssl-dev iproute2"
elif [[ "${OSNAME}" == "darwin"* ]]; then
if command -v brew >/dev/null 2>&1; then

View File

@@ -124,9 +124,4 @@ fn test_curr() {
console.print_debug(sh.rows['something'].cells[0])
assert sh.rows['something']!.cells[0].val == 25.0
assert sh.rows['something']!.cells[60 - 1].val == 900.0
// TODO: we need to create tests for it
console.print_debug(sh)
panic('test1')
}

View File

@@ -0,0 +1,152 @@
module livekit
// import time
// import rand
// import crypto.hmac
// import crypto.sha256
// import encoding.base64
// import json
// // Define AccessTokenOptions struct
// pub struct AccessTokenOptions {
// pub mut:
// ttl int | string // TTL in seconds or a time span (e.g., '2d', '5h')
// name string // Display name for the participant
// identity string // Identity of the user
// metadata string // Custom metadata to be passed to participants
// }
// // Struct representing grants
// pub struct ClaimGrants {
// pub mut:
// video VideoGrant
// iss string
// exp i64
// nbf int
// sub string
// name string
// }
// // VideoGrant struct placeholder
// pub struct VideoGrant {
// pub mut:
// room string
// room_join bool @[json: 'roomJoin']
// can_publish bool @[json: 'canPublish']
// can_publish_data bool @[json: 'canPublishData']
// can_subscribe bool @[json: 'canSubscribe']
// }
// // SIPGrant struct placeholder
// struct SIPGrant {}
// // AccessToken class
// pub struct AccessToken {
// mut:
// api_key string
// api_secret string
// grants ClaimGrants
// identity string
// ttl int | string
// }
// // Constructor for AccessToken
// pub fn new_access_token(api_key string, api_secret string, options AccessTokenOptions) !AccessToken {
// if api_key == '' || api_secret == '' {
// return error('API key and API secret must be set')
// }
// ttl := if options.ttl is int { options.ttl } else { 21600 } // Default TTL of 6 hours (21600 seconds)
// return AccessToken{
// api_key: api_key
// api_secret: api_secret
// identity: options.identity
// ttl: ttl
// grants: ClaimGrants{
// exp: time.now().unix()+ttl
// iss: api_key
// sub: options.name
// name: options.name
// }
// }
// }
// // Method to add a video grant to the token
// pub fn (mut token AccessToken) add_video_grant(grant VideoGrant) {
// token.grants.video = grant
// }
// // Method to generate a JWT token
// pub fn (token AccessToken) to_jwt() !string {
// // Create JWT payload
// payload := json.encode(token.grants)
// println('payload: ${payload}')
// // Create JWT header
// header := '{"alg":"HS256","typ":"JWT"}'
// // Encode header and payload in base64
// header_encoded := base64.url_encode_str(header)
// payload_encoded := base64.url_encode_str(payload)
// // Create the unsigned token
// unsigned_token := '${header_encoded}.${payload_encoded}'
// // Create the HMAC-SHA256 signature
// signature := hmac.new(token.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, sha256.block_size)
// // Encode the signature in base64
// signature_encoded := base64.url_encode(signature)
// // Create the final JWT
// jwt := '${unsigned_token}.${signature_encoded}'
// return jwt
// }
// // TokenVerifier class
// pub struct TokenVerifier {
// api_key string
// api_secret string
// }
// // Constructor for TokenVerifier
// pub fn new_token_verifier(api_key string, api_secret string) !TokenVerifier {
// if api_key == '' || api_secret == '' {
// return error('API key and API secret must be set')
// }
// return TokenVerifier{
// api_key: api_key
// api_secret: api_secret
// }
// }
// // Method to verify the JWT token
// pub fn (verifier TokenVerifier) verify(token string) !ClaimGrants {
// // Split the token into parts
// parts := token.split('.')
// if parts.len != 3 {
// return error('Invalid token')
// }
// // Decode header, payload, and signature
// payload_encoded := parts[1]
// signature_encoded := parts[2]
// // Recompute the HMAC-SHA256 signature
// unsigned_token := '${parts[0]}.${parts[1]}'
// expected_signature := hmac.new(verifier.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, sha256.block_size)
// expected_signature_encoded := base64.url_encode(expected_signature)
// // Verify the signature
// if signature_encoded != expected_signature_encoded {
// return error('Invalid token signature')
// }
// // Decode the payload
// payload_json := base64.url_decode_str(payload_encoded)
// // Parse and return the claims as ClaimGrants
// return json.decode(ClaimGrants, payload_json)
// }

View File

@@ -0,0 +1,199 @@
module livekit
import net.http
import json
// // pub struct Client {
// // pub:
// // host string
// // token string
// // }
// // pub struct Room {
// // pub mut:
// // sid string
// // name string
// // empty_timeout string
// // max_participants string
// // creation_time string
// // turn_password string
// // metadata string
// // num_participants u32
// // active_recording bool
// // }
// pub struct ParticipantInfo {
// pub mut:
// sid string
// identity string
// name string
// state string
// tracks []TrackInfo
// metadata string
// joined_at i64
// permission ParticipantPermission
// is_publisher bool
// }
// pub struct TrackInfo {
// pub mut:
// sid string
// typ string @[json: 'type']
// source string
// name string
// mime_type string
// muted bool
// width u32
// height u32
// simulcast bool
// disable_dtx bool
// layers []VideoLayer
// }
// pub struct ParticipantPermission {
// pub mut:
// can_subscribe bool
// can_publish bool
// can_publish_data bool
// }
// pub struct VideoLayer {
// pub mut:
// quality string
// width u32
// height u32
// }
// // Helper method to make POST requests to LiveKit API
// fn (client Client) make_post_request(url string, body string) !http.Response {
// mut headers := http.new_header()
// headers.add_custom('Authorization', 'Bearer ${client.token}')!
// headers.add_custom('Content-Type', 'application/json')!
// req := http.Request{
// method: http.Method.post
// url: url
// data: body
// header: headers
// }
// return req.do()!
// }
// pub struct CreateRoomArgs {
// pub:
// name string
// empty_timeout u32
// max_participants u32
// metadata string
// }
// // RoomService API methods
// pub fn (client Client) create_room(args CreateRoomArgs) !Room {
// body := json.encode(args)
// url := '${client.host}/twirp/livekit.RoomService/CreateRoom'
// response := client.make_post_request(url, body)!
// return json.decode(Room, response.body)!
// }
// // pub fn (client Client) list_rooms(names []string) ![]Room {
// // body := json.encode({
// // 'names': names
// // })
// // url := '${client.host}/twirp/livekit.RoomService/ListRooms'
// // response := client.make_post_request(url, body)!
// // return json.decode([]Room, response.body)!
// // }
// pub fn (client Client) delete_room(room_name string) ! {
// body := json.encode({
// 'room': room_name
// })
// url := '${client.host}/twirp/livekit.RoomService/DeleteRoom'
// _ := client.make_post_request(url, body)!
// }
// pub fn (client Client) list_participants(room_name string) ![]ParticipantInfo {
// body := json.encode({
// 'room': room_name
// })
// url := '${client.host}/twirp/livekit.RoomService/ListParticipants'
// response := client.make_post_request(url, body)!
// return json.decode([]ParticipantInfo, response.body)!
// }
// pub fn (client Client) get_participant(room_name string, identity string) !ParticipantInfo {
// body := json.encode({
// 'room': room_name
// 'identity': identity
// })
// url := '${client.host}/twirp/livekit.RoomService/GetParticipant'
// response := client.make_post_request(url, body)!
// return json.decode(ParticipantInfo, response.body)!
// }
// pub fn (client Client) remove_participant(room_name string, identity string) ! {
// body := json.encode({
// 'room': room_name
// 'identity': identity
// })
// url := '${client.host}/twirp/livekit.RoomService/RemoveParticipant'
// _ := client.make_post_request(url, body)!
// }
// pub struct MutePublishedTrackArgs {
// pub:
// room_name string
// identity string
// track_sid string
// muted bool
// }
// pub fn (client Client) mute_published_track(args MutePublishedTrackArgs) ! {
// body := json.encode(args)
// url := '${client.host}/twirp/livekit.RoomService/MutePublishedTrack'
// _ := client.make_post_request(url, body)!
// }
// pub struct UpdateParticipantArgs {
// pub:
// room_name string @[json: 'room']
// identity string
// metadata string
// permission ParticipantPermission
// }
// pub fn (client Client) update_participant(args UpdateParticipantArgs) ! {
// body := json.encode(args)
// url := '${client.host}/twirp/livekit.RoomService/UpdateParticipant'
// _ := client.make_post_request(url, body)!
// }
// pub struct UpdateRoomMetadataArgs {
// pub:
// room_name string @[json: 'room']
// metadata string
// }
// pub fn (client Client) update_room_metadata(args UpdateRoomMetadataArgs) ! {
// body := json.encode(args)
// url := '${client.host}/twirp/livekit.RoomService/UpdateRoomMetadata'
// _ := client.make_post_request(url, body)!
// }
// pub struct SendDataArgs {
// pub:
// room_name string @[json: 'room']
// data []u8
// kind string
// destination_identities []string
// }
// pub fn (client Client) send_data(args SendDataArgs) ! {
// body := json.encode(args)
// url := '${client.host}/twirp/livekit.RoomService/SendData'
// _ := client.make_post_request(url, body)!
// }

View File

@@ -29,19 +29,36 @@ pub fn cmd_docusaurus(mut cmdroot Command) {
description: 'Url where docusaurus source is.'
})
cmd_run.add_flag(Flag{
flag: .string
required: false
name: 'deploykey'
abbrev: 'dk'
// default: ''
description: 'Path of SSH Key used to deploy.'
})
cmd_run.add_flag(Flag{
flag: .string
required: false
name: 'publish'
// default: ''
description: 'Path where to publish.'
})
cmd_run.add_flag(Flag{
flag: .bool
required: false
name: 'build'
abbrev: 'b'
name: 'buildpublish'
abbrev: 'bp'
description: 'build and publish.'
})
cmd_run.add_flag(Flag{
flag: .bool
required: false
name: 'builddev'
abbrev: 'bd'
name: 'builddevpublish'
abbrev: 'bpd'
description: 'build dev version and publish.'
})
@@ -49,7 +66,6 @@ pub fn cmd_docusaurus(mut cmdroot Command) {
flag: .bool
required: false
name: 'update'
abbrev: 'p'
description: 'update your environment, the template and the repo you are working on (git pull).'
})
@@ -67,6 +83,8 @@ pub fn cmd_docusaurus(mut cmdroot Command) {
fn cmd_docusaurus_execute(cmd Command) ! {
mut update := cmd.flags.get_bool('update') or { false }
mut url := cmd.flags.get_string('url') or { '' }
mut publish_path := cmd.flags.get_string('publish') or { '' }
mut deploykey := cmd.flags.get_string('deploykey') or { '' }
// mut path := cmd.flags.get_string('path') or { '' }
// if path == '' {
@@ -74,8 +92,8 @@ fn cmd_docusaurus_execute(cmd Command) ! {
// }
// path = path.replace('~', os.home_dir())
mut build := cmd.flags.get_bool('build') or { false }
mut builddev := cmd.flags.get_bool('builddev') or { false }
mut buildpublish := cmd.flags.get_bool('buildpublish') or { false }
mut builddevpublish := cmd.flags.get_bool('builddevpublish') or { false }
mut dev := cmd.flags.get_bool('dev') or { false }
// if build== false && build== false && build== false {
@@ -85,27 +103,39 @@ fn cmd_docusaurus_execute(cmd Command) ! {
mut docs := docusaurus.new(update: update)!
if build {
// Create a new docusaurus site
if publish_path.len > 0 {
_ := docs.build(
url: url
update: update
url: url
update: update
publish_path: publish_path
deploykey: deploykey
)!
}
if builddev {
if buildpublish {
// Create a new docusaurus site
_ := docs.build_dev(
url: url
update: update
_ := docs.build_publish(
url: url
update: update
deploykey: deploykey
)!
}
if builddevpublish {
// Create a new docusaurus site
_ := docs.build_dev_publish(
url: url
update: update
deploykey: deploykey
)!
}
if dev {
// Create a new docusaurus site
_ := docs.dev(
url: url
update: update
url: url
update: update
deploykey: deploykey
)!
}
}

View File

@@ -20,6 +20,7 @@ pub mut:
log bool = true // If true, logs git commands/statements
debug bool = true
ssh_key_name string // name of ssh key to be used when loading the gitstructure
ssh_key_path string
reload bool
}
@@ -35,6 +36,7 @@ pub fn new(args_ GitStructureArgsNew) !&GitStructure {
log: args.log
debug: args.debug
ssh_key_name: args.ssh_key_name
ssh_key_path: args.ssh_key_path
}
return get(coderoot: args.coderoot, reload: args.reload, cfg: cfg)
@@ -77,7 +79,21 @@ pub fn get(args_ GitStructureArgGet) !&GitStructure {
coderoot: pathlib.get_dir(path: args.coderoot, create: true)!
}
mut cfg := args.cfg or {
mut cfg_ := GitStructureConfig{
coderoot: 'SKIP'
}
cfg_
}
if cfg.coderoot != 'SKIP' {
gs.config_ = cfg
gs.config_save()!
// println(gs.config()!)
}
gs.config()! // will load the config, don't remove
gs.load(false)!
if gs.repos.keys().len == 0 || args.reload {

View File

@@ -14,6 +14,7 @@ pub mut:
log bool = true // If true, logs git commands/statements
debug bool = true
ssh_key_name string
ssh_key_path string
}
// GitStructure holds information about repositories within a specific code root.
@@ -233,6 +234,6 @@ pub fn (mut self GitStructure) config_reset() ! {
pub fn (mut self GitStructure) config_save() ! {
// Retrieve the configuration from Redis.
mut redis := redis_get()
datajson := json.encode(self.config)
datajson := json.encode(self.config()!)
redis.set('${self.cache_key()}:config', datajson)!
}

View File

@@ -34,7 +34,17 @@ pub fn (mut gitstructure GitStructure) clone(args GitCloneArgs) !&GitRepo {
extra = '--depth 1 --no-single-branch '
}
cmd := 'cd ${parent_dir} && git clone ${extra} ${repo.get_http_url()!} ${repo.name}'
cfg := gitstructure.config()!
mut cmd := 'cd ${parent_dir} && git clone ${extra} ${repo.get_http_url()!} ${repo.name}'
mut sshkey_include := ''
if cfg.ssh_key_path.len > 0 {
sshkey_include = "GIT_SSH_COMMAND=\"ssh -i ${cfg.ssh_key_path}\" "
cmd = 'cd ${parent_dir} && ${sshkey_include}git clone ${extra} ${repo.get_ssh_url()!} ${repo.name}'
}
console.print_debug(cmd)
result := os.execute(cmd)
if result.exit_code != 0 {
return error('Cannot clone the repository due to: \n${result.output}')

View File

@@ -24,7 +24,6 @@ fn startupcmd() ![]zinit.ZProcessNewArgs {
}
fn running() !bool {
mut installer := get()!
mut conn := httpconnection.new(name: 'coredns', url: 'http://localhost:3334')!
r := conn.get(prefix: 'health')!
if r.trim_space() == 'OK' {
@@ -51,7 +50,7 @@ fn stop_post() ! {
// checks if a certain version or above is installed
fn installed() !bool {
res := os.execute('${osal.profile_path_source_and()!} coredns version')
res := os.execute('/bin/bash -c "coredns --version"')
if res.exit_code != 0 {
return false
}
@@ -73,39 +72,11 @@ fn ulist_get() !ulist.UList {
// uploads to S3 server if configured
fn upload() ! {
// installers.upload(
// cmdname: 'coredns'
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/coredns'
// )!
}
fn install() ! {
console.print_header('install coredns')
build()! // because we need the plugins
// mut url := ''
// if core.is_linux_arm()! {
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_linux_arm64.tgz'
// } else if core.is_linux_intel()! {
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_linux_amd64.tgz'
// } else if core.is_osx_arm()! {
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_darwin_arm64.tgz'
// } else if core.is_osx_intel()! {
// url = 'https://github.com/coredns/coredns/releases/download/v${version}/coredns_${version}_darwin_amd64.tgz'
// } else {
// return error('unsported platform')
// }
// mut dest := osal.download(
// url: url
// minsize_kb: 13000
// expand_dir: '/tmp/coredns'
// )!
// mut binpath := dest.file_get('coredns')!
// osal.cmd_add(
// cmdname: 'coredns'
// source: binpath.path
// )!
}
fn build() ! {
@@ -132,10 +103,7 @@ fn build() ! {
mut path := pathlib.get_file(path: '${gitpath}/plugin.cfg', create: true)!
path.write(pluginsfile)!
cmd := '
cd ${gitpath}
make
'
cmd := 'bash -c "cd ${gitpath} && make"'
osal.execute_stdout(cmd)!
// now copy to the default bin path
@@ -148,33 +116,12 @@ fn build() ! {
}
fn destroy() ! {
// mut systemdfactory := systemd.new()!
// systemdfactory.destroy("zinit")!
for zprocess in startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.delete(zprocess.name) or { return error('failed to delete coredns process: ${err}') }
}
// osal.process_kill_recursive(name:'zinit')!
// osal.cmd_delete('zinit')!
// osal.package_remove('
// podman
// conmon
// buildah
// skopeo
// runc
// ')!
// //will remove all paths where go/bin is found
// osal.profile_path_add_remove(paths2delete:"go/bin")!
// osal.rm("
// podman
// conmon
// buildah
// skopeo
// runc
// /var/lib/containers
// /var/lib/podman
// /var/lib/buildah
// /tmp/podman
// /tmp/conmon
// ")!
osal.execute_silent('sudo rm /usr/local/bin/coredns') or {
return error('failed to delete coredns bin: ${err}')
}
}

View File

@@ -41,9 +41,10 @@ pub fn configure() ! {
mut path := pathlib.get_file(path: args.config_path, create: true)!
path.write(mycorefile)!
if args.example {
example_configure()!
}
// this doesn't work for local machines, needs to be updated
// if args.example {
// example_configure()!
// }
}
pub fn example_configure() ! {

View File

@@ -58,11 +58,12 @@ transfer:transfer
hosts:hosts
file:file
secondary:secondary
# etcd:etcd
redis:github.com/codysnider/coredns-redis
loop:loop
forward:forward
erratic:erratic
whoami:whoami
on:github.com/coredns/caddy/onevent
sign:sign
view:view
redis:github.com/codysnider/coredns-redis
view:view

View File

@@ -71,6 +71,7 @@ fn install() ! {
os.mv('${expand_dir}/go', go_dest)!
os.rmdir_all(expand_dir)!
osal.profile_path_add_remove(paths2add: '${go_dest}/bin')!
os.setenv('PATH', '${go_dest}/bin:${os.getenv('PATH')}', true)
}
fn build() ! {}

View File

@@ -22,7 +22,7 @@ fn installed() !bool {
if r.len != 1 {
return error("couldn't parse bun version.\n${res.output}")
}
println(' ${texttools.version(version)} <= ${texttools.version(r[0])}')
// println(' ${texttools.version(version)} <= ${texttools.version(r[0])}')
if texttools.version(version) <= texttools.version(r[0]) {
return true
}

View File

@@ -1,7 +1,75 @@
// Input parameter structs for each record type
@[params]
struct SRVRecord {
module coredns
import freeflowuniverse.herolib.core.redisclient
// // Input parameter structs for each record type
// DNSRecordSet represents a set of DNS records
struct DNSRecordSet {
pub mut:
redis ?&redisclient.Redis
records map[string]Record
}
pub struct Record {
pub mut:
a ?[]A_Record
aaaa ?[]AAAA_Record
txt ?[]TXT_Record
cname ?[]CNAME_Record
ns ?[]NS_Record
mx ?[]MX_Record
srv ?[]SRV_Record
caa ?[]CAA_Record
soa ?SOA_Record
}
@[params]
pub struct A_Record {
pub:
ip string @[required]
ttl int = 300
}
@[params]
pub struct AAAA_Record {
pub:
ip string @[required]
ttl int = 300
}
@[params]
pub struct TXT_Record {
pub:
text string @[required]
ttl int = 300
}
@[params]
pub struct CNAME_Record {
pub:
host string
ttl int = 300
}
@[params]
pub struct NS_Record {
pub:
host string @[required]
ttl int = 300
}
@[params]
pub struct MX_Record {
pub:
host string @[required]
preference int = 10
ttl int = 300
}
@[params]
pub struct SRV_Record {
pub:
target string @[required]
port int @[required]
priority int = 10
@@ -10,46 +78,16 @@ pub mut:
}
@[params]
struct TXTRecord {
pub mut:
text string @[required]
ttl int = 300
pub struct CAA_Record {
pub:
flag u8
tag string
value string
}
@[params]
struct MXRecord {
pub mut:
host string @[required]
preference int = 10
ttl int = 300
}
@[params]
struct ARecord {
pub mut:
name string @[required]
ip string @[required]
ttl int = 300
}
@[params]
struct AAAARecord {
pub mut:
name string @[required]
ip string @[required]
ttl int = 300
}
@[params]
struct NSRecord {
pub mut:
host string @[required]
ttl int = 300
}
@[params]
struct SOARecord {
pub mut:
pub struct SOA_Record {
pub:
mbox string @[required]
ns string @[required]
refresh int = 44
@@ -58,16 +96,3 @@ pub mut:
minttl int = 100
ttl int = 300
}
// DNSRecordSet represents a set of DNS records
struct DNSRecordSet {
pub mut:
srv []SRVRecord
txt []TXTRecord
mx []MXRecord
a []ARecord
aaaa []AAAARecord
ns []NSRecord
soa ?SOARecord
redis ?&redisclient.Redis
}

View File

@@ -15,20 +15,21 @@ pub fn play_dns(mut plbook playbook.PlayBook) !DNSRecordSet {
match action.name {
'a_record' {
recordset.add_a(
name: p.get('name')!
ip: p.get('ip')!
ttl: p.get_int_default('ttl', 300)!
sub_domain: p.get_default('sub_domain', '@')!
ip: p.get('ip')!
ttl: p.get_int_default('ttl', 300)!
)
}
'aaaa_record' {
recordset.add_aaaa(
name: p.get('name')!
ip: p.get('ip')!
ttl: p.get_int_default('ttl', 300)!
sub_domain: p.get_default('sub_domain', '@')!
ip: p.get('ip')!
ttl: p.get_int_default('ttl', 300)!
)
}
'mx_record' {
recordset.add_mx(
sub_domain: p.get_default('sub_domain', '@')!
host: p.get('host')!
preference: p.get_int_default('preference', 10)!
ttl: p.get_int_default('ttl', 300)!
@@ -36,12 +37,16 @@ pub fn play_dns(mut plbook playbook.PlayBook) !DNSRecordSet {
}
'txt_record' {
recordset.add_txt(
text: p.get('text')!
ttl: p.get_int_default('ttl', 300)!
sub_domain: p.get_default('sub_domain', '@')!
text: p.get('text')!
ttl: p.get_int_default('ttl', 300)!
)
}
'srv_record' {
recordset.add_srv(
host: p.get('host')!
protocol: p.get('protocol')!
service: p.get('service')!
target: p.get('target')!
port: p.get_int('port')!
priority: p.get_int_default('priority', 10)!
@@ -51,8 +56,9 @@ pub fn play_dns(mut plbook playbook.PlayBook) !DNSRecordSet {
}
'ns_record' {
recordset.add_ns(
host: p.get('host')!
ttl: p.get_int_default('ttl', 300)!
sub_domain: p.get_default('sub_domain', '@')!
host: p.get('host')!
ttl: p.get_int_default('ttl', 300)!
)
}
'soa_record' {
@@ -79,25 +85,30 @@ pub fn play_dns(mut plbook playbook.PlayBook) !DNSRecordSet {
// Example usage:
/*
!!dns.a_record
name: 'host1'
sub_domain: 'host1'
ip: '1.2.3.4'
ttl: 300
!!dns.aaaa_record
name: 'host1'
sub_domain: 'host1'
ip: '2001:db8::1'
ttl: 300
!!dns.mx_record
sub_domain: '*'
host: 'mail.example.com'
preference: 10
ttl: 300
!!dns.txt_record
sub_domain: '*'
text: 'v=spf1 mx ~all'
ttl: 300
!!dns.srv_record
service: 'ssh'
protocol: 'tcp'
host: 'host1'
target: 'sip.example.com'
port: 5060
priority: 10

View File

@@ -1,205 +1,166 @@
module coredns
import json
import freeflowuniverse.herolib.core.redisclient
import x.json2
// new_dns_record_set creates a new DNSRecordSet
pub fn new_dns_record_set() DNSRecordSet {
return DNSRecordSet{
srv: []SRVRecord{}
txt: []TXTRecord{}
mx: []MXRecord{}
a: []ARecord{}
aaaa: []AAAARecord{}
ns: []NSRecord{}
}
return DNSRecordSet{}
}
pub struct AddSRVRecordArgs {
SRV_Record
pub:
service string @[required]
protocol string @[required]
host string @[required]
}
// add_srv adds an SRV record to the set
pub fn (mut rs DNSRecordSet) add_srv(args SRVRecord) {
rs.srv << SRVRecord{
target: args.target
port: args.port
priority: args.priority
weight: args.weight
ttl: args.ttl
}
pub fn (mut rs DNSRecordSet) add_srv(args AddSRVRecordArgs) {
key := '_${args.service}._${args.protocol}.${args.host}'
mut rec := rs.records[key] or { Record{} }
if mut v := rec.srv {
v << args.SRV_Record
} else {
rec.srv = [args.SRV_Record]
}
rs.records[key] = rec
}
pub struct AddTXTRecordArgs {
TXT_Record
pub:
sub_domain string = '@'
}
// add_txt adds a TXT record to the set
pub fn (mut rs DNSRecordSet) add_txt(args TXTRecord) {
rs.txt << TXTRecord{
text: args.text
ttl: args.ttl
}
pub fn (mut rs DNSRecordSet) add_txt(args AddTXTRecordArgs) {
mut rec := rs.records[args.sub_domain] or { Record{} }
if mut v := rec.txt {
v << args.TXT_Record
} else {
rec.txt = [args.TXT_Record]
}
rs.records[args.sub_domain] = rec
}
pub struct AddMXRecordArgs {
MX_Record
pub:
sub_domain string = '@'
}
// add_mx adds an MX record to the set
pub fn (mut rs DNSRecordSet) add_mx(args MXRecord) {
rs.mx << MXRecord{
host: args.host
preference: args.preference
ttl: args.ttl
}
pub fn (mut rs DNSRecordSet) add_mx(args AddMXRecordArgs) {
mut rec := rs.records[args.sub_domain] or { Record{} }
if mut v := rec.mx {
v << args.MX_Record
} else {
rec.mx = [args.MX_Record]
}
rs.records[args.sub_domain] = rec
}
pub struct AddARecordArgs {
A_Record
pub:
sub_domain string = '@'
}
// add_a adds an A record to the set
pub fn (mut rs DNSRecordSet) add_a(args ARecord) {
rs.a << ARecord{
name: args.name
ip: args.ip
ttl: args.ttl
}
pub fn (mut rs DNSRecordSet) add_a(args AddARecordArgs) {
mut rec := rs.records[args.sub_domain] or { Record{} }
if mut v := rec.a {
v << args.A_Record
} else {
rec.a = [args.A_Record]
}
rs.records[args.sub_domain] = rec
}
pub struct AddAAAARecordArgs {
AAAA_Record
pub:
sub_domain string = '@'
}
// add_aaaa adds an AAAA record to the set
pub fn (mut rs DNSRecordSet) add_aaaa(args AAAARecord) {
rs.aaaa << AAAARecord{
name: args.name
ip: args.ip
ttl: args.ttl
}
pub fn (mut rs DNSRecordSet) add_aaaa(args AddAAAARecordArgs) {
mut rec := rs.records[args.sub_domain] or { Record{} }
if mut v := rec.aaaa {
v << args.AAAA_Record
} else {
rec.aaaa = [args.AAAA_Record]
}
rs.records[args.sub_domain] = rec
}
pub struct AddNSRecordArgs {
NS_Record
pub:
sub_domain string = '@'
}
// add_ns adds an NS record to the set
pub fn (mut rs DNSRecordSet) add_ns(args NSRecord) {
rs.ns << NSRecord{
host: args.host
ttl: args.ttl
}
pub fn (mut rs DNSRecordSet) add_ns(args AddNSRecordArgs) {
mut rec := rs.records[args.sub_domain] or { Record{} }
if mut v := rec.ns {
v << args.NS_Record
} else {
rec.ns = [args.NS_Record]
}
rs.records[args.sub_domain] = rec
}
// set_soa sets the SOA record for the set
pub fn (mut rs DNSRecordSet) set_soa(args SOARecord) {
rs.soa = SOARecord{
mbox: args.mbox
ns: args.ns
refresh: args.refresh
retry: args.retry
expire: args.expire
minttl: args.minttl
ttl: args.ttl
}
pub fn (mut rs DNSRecordSet) set_soa(args SOA_Record) {
mut rec := rs.records['@'] or { Record{} }
rec.soa = args
rs.records['@'] = rec
}
pub struct SetArgs {
pub:
domain string
key_prefix string
}
// populate_redis populates Redis with the DNS records
//domain e.g. example.com. (not sure the . is at end)
pub fn (rs DNSRecordSet) set(domain string) ! {
mut redis := rs.redis or {redisclient.core_get()!}
// domain e.g. example.com. (not sure the . is at end)
pub fn (mut rs DNSRecordSet) set(args SetArgs) ! {
mut redis := rs.redis or {
r := redisclient.core_get()!
rs.redis = r
r
}
// Store SRV records
for srv in rs.srv {
key := '_ssh._tcp.host1'
value := json.encode({
'srv': {
'ttl': srv.ttl
'target': srv.target
'port': srv.port
'priority': srv.priority
'weight': srv.weight
}
})
redis.hset(domain, key, value)!
}
// Store TXT and MX records for wildcard
if rs.txt.len > 0 || rs.mx.len > 0 {
mut records := map[string]map[string]json.Any{}
if rs.txt.len > 0 {
records['txt'] = {
'text': rs.txt[0].text
'ttl': "${rs.txt[0].ttl}"
}
}
if rs.mx.len > 0 {
records['mx'] = {
'host': rs.mx[0].host
'priority': rs.mx[0].preference
'ttl': rs.mx[0].ttl
}
}
redis.hset(domain, '*', json.encode(records))!
}
// Store A records
for a in rs.a {
value := json.encode({
'a': {
'ip4': a.ip
'ttl': "${a.ttl}"
}
})
redis.hset(domain, a.name, value)!
}
// Store AAAA records
for aaaa in rs.aaaa {
value := json.encode({
'aaaa': {
'ip6': aaaa.ip
'ttl': aaaa.ttl
}
})
redis.hset(domain, aaaa.name, value)!
}
// Store NS records
if rs.ns.len > 0 {
mut ns_records := []map[string]json.Any{}
for ns in rs.ns {
ns_records << {
'host': ns.host
'ttl': ns.ttl
}
}
value := json.encode({
'ns': ns_records
})
redis.hset(domain, 'subdel', value)!
}
// Store SOA and root NS records at @
if soa := rs.soa {
mut root_records := map[string]json.Any{}
root_records['soa'] = {
'ttl': soa.ttl
'minttl': soa.minttl
'mbox': soa.mbox
'ns': soa.ns
'refresh': soa.refresh
'retry': soa.retry
'expire': soa.expire
}
if rs.ns.len > 0 {
mut ns_records := []map[string]json.Any{}
for ns in rs.ns {
ns_records << {
'host': ns.host
'ttl': ns.ttl
}
}
root_records['ns'] = ns_records
}
redis.hset(domain, '@', json.encode(root_records))!
}
key := '${args.key_prefix}${args.domain}.'
for field, val in rs.records {
redis.hset(key, field, json2.encode(val))!
}
}
pub fn (mut rs DNSRecordSet) example() ! {
// Create and populate DNS records
rs.set_soa(mbox: 'hostmaster.example.net.', ns: 'ns1.example.net.')
rs.add_srv(target: 'tcp.example.com.', port: 123)
rs.add_txt(text: 'this is a wildcard')
rs.add_mx(host: 'host1.example.net.')
rs.add_a(name: 'host1', ip: '5.5.5.5')
rs.add_aaaa(name: 'host1', ip: '2001:db8::1')
rs.add_txt(text: 'this is not a wildcard')
rs.add_ns(host: 'ns1.subdel.example.net.')
rs.add_ns(host: 'ns2.subdel.example.net.')
rs.add_ns(host: 'ns1.example.net.')
rs.add_ns(host: 'ns2.example.net.')
// Store records in Redis
rs.set("example.com")!
}
// Create and populate DNS records
rs.set_soa(mbox: 'hostmaster.example.net.', ns: 'ns1.example.net.')
rs.add_srv(service: 'ssh', protocol: 'tcp', host: 'host1', target: 'tcp.example.com.', port: 123)
rs.add_txt(sub_domain: '*', text: 'this is a wildcard')
rs.add_mx(sub_domain: '*', host: 'host1.example.net.')
rs.add_a(sub_domain: 'host1', ip: '5.5.5.5')
rs.add_aaaa(sub_domain: 'host1', ip: '2001:db8::1')
rs.add_txt(sub_domain: 'sub.*', text: 'this is not a wildcard')
rs.add_ns(sub_domain: 'subdel', host: 'ns1.subdel.example.net.')
rs.add_ns(sub_domain: 'subdel', host: 'ns2.subdel.example.net.')
rs.add_ns(host: 'ns1.example.net.')
rs.add_ns(host: 'ns2.example.net.')
// Store records in Redis
rs.set(domain: 'example.com')!
}

View File

@@ -110,10 +110,10 @@ pub fn ipaddr_pub_get() !string {
return public_ip
}
//also check the address is on local interface
// also check the address is on local interface
pub fn ipaddr_pub_get_check() !string {
// Check if the public IP matches any local interface
public_ip := ipaddr_pub_get_check()!
public_ip := ipaddr_pub_get()!
if !is_ip_on_local_interface(public_ip)! {
return error('Public IP ${public_ip} is NOT bound to any local interface (possibly behind a NAT firewall).')
}
@@ -123,7 +123,7 @@ pub fn ipaddr_pub_get_check() !string {
// Check if the public IP matches any of the local network interfaces
pub fn is_ip_on_local_interface(public_ip string) !bool {
interfaces := exec(cmd: 'ip addr show', stdout: false) or {
return error('Failed to enumerate network interfaces.')
return error('Failed to enumerate network interfaces: ${err}')
}
lines := interfaces.output.split('\n')

View File

@@ -8,15 +8,15 @@ pub mut:
name string @[required] // Name of the router
rule string @[required] // Routing rule (e.g., "Host(`example.com`)")
service string @[required] // Name of the service to forward to
middlewares []string // List of middleware names to apply
priority int = 0 // Route priority
tls bool // Enable TLS for this router
middlewares []string // List of middleware names to apply
priority int = 0 // Route priority
tls bool // Enable TLS for this router
}
@[params]
struct ServiceConfig {
pub mut:
name string @[required] // Name of the service
name string @[required] // Name of the service
load_balancer LoadBalancerConfig @[required] // Load balancer configuration
}
@@ -35,17 +35,17 @@ pub mut:
@[params]
struct MiddlewareConfig {
pub mut:
name string @[required] // Name of the middleware
typ string @[required] // Type of middleware (e.g., "basicAuth", "stripPrefix")
name string @[required] // Name of the middleware
typ string @[required] // Type of middleware (e.g., "basicAuth", "stripPrefix")
settings map[string]string // Middleware-specific settings
}
@[params]
struct TLSConfig {
pub mut:
domain string @[required] // Domain for the certificate
cert_file string @[required] // Path to certificate file
key_file string @[required] // Path to private key file
domain string @[required] // Domain for the certificate
cert_file string @[required] // Path to certificate file
key_file string @[required] // Path to private key file
}
// TraefikConfig represents a complete Traefik configuration

View File

@@ -6,29 +6,29 @@ import freeflowuniverse.herolib.core.redisclient
// new_traefik_config creates a new TraefikConfig
pub fn new_traefik_config() TraefikConfig {
return TraefikConfig{
routers: []RouteConfig{}
services: []ServiceConfig{}
routers: []RouteConfig{}
services: []ServiceConfig{}
middlewares: []MiddlewareConfig{}
tls: []TLSConfig{}
tls: []TLSConfig{}
}
}
// add_route adds a route configuration
pub fn (mut tc TraefikConfig) add_route(args RouteConfig) {
tc.routers << RouteConfig{
name: args.name
rule: args.rule
service: args.service
name: args.name
rule: args.rule
service: args.service
middlewares: args.middlewares
priority: args.priority
tls: args.tls
priority: args.priority
tls: args.tls
}
}
// add_service adds a service configuration
pub fn (mut tc TraefikConfig) add_service(args ServiceConfig) {
tc.services << ServiceConfig{
name: args.name
name: args.name
load_balancer: args.load_balancer
}
}
@@ -36,8 +36,8 @@ pub fn (mut tc TraefikConfig) add_service(args ServiceConfig) {
// add_middleware adds a middleware configuration
pub fn (mut tc TraefikConfig) add_middleware(args MiddlewareConfig) {
tc.middlewares << MiddlewareConfig{
name: args.name
typ: args.typ
name: args.name
typ: args.typ
settings: args.settings
}
}
@@ -45,9 +45,9 @@ pub fn (mut tc TraefikConfig) add_middleware(args MiddlewareConfig) {
// add_tls adds a TLS configuration
pub fn (mut tc TraefikConfig) add_tls(args TLSConfig) {
tc.tls << TLSConfig{
domain: args.domain
domain: args.domain
cert_file: args.cert_file
key_file: args.key_file
key_file: args.key_file
}
}
@@ -58,23 +58,23 @@ pub fn (tc TraefikConfig) set() ! {
// Store router configurations
for router in tc.routers {
base_key := 'traefik/http/routers/${router.name}'
// Set router rule
redis.set('${base_key}/rule', router.rule)!
// Set service
redis.set('${base_key}/service', router.service)!
// Set middlewares if any
if router.middlewares.len > 0 {
redis.set('${base_key}/middlewares', json.encode(router.middlewares))!
}
// Set priority if non-zero
if router.priority != 0 {
redis.set('${base_key}/priority', router.priority.str())!
}
// Set TLS if enabled
if router.tls {
redis.set('${base_key}/tls', 'true')!
@@ -84,11 +84,13 @@ pub fn (tc TraefikConfig) set() ! {
// Store service configurations
for service in tc.services {
base_key := 'traefik/http/services/${service.name}'
// Set load balancer servers
mut servers := []map[string]string{}
for server in service.load_balancer.servers {
servers << {'url': server.url}
servers << {
'url': server.url
}
}
redis.set('${base_key}/loadbalancer/servers', json.encode(servers))!
}
@@ -96,7 +98,7 @@ pub fn (tc TraefikConfig) set() ! {
// Store middleware configurations
for middleware in tc.middlewares {
base_key := 'traefik/http/middlewares/${middleware.name}'
// Set middleware type
redis.set('${base_key}/${middleware.typ}', json.encode(middleware.settings))!
}
@@ -106,7 +108,7 @@ pub fn (tc TraefikConfig) set() ! {
base_key := 'traefik/tls/certificates'
cert_config := {
'certFile': tls.cert_file
'keyFile': tls.key_file
'keyFile': tls.key_file
}
redis.hset(base_key, tls.domain, json.encode(cert_config))!
}
@@ -116,38 +118,42 @@ pub fn (tc TraefikConfig) set() ! {
pub fn (mut tc TraefikConfig) example() ! {
// Add a basic router with service
tc.add_route(
name: 'my-router'
rule: 'Host(`example.com`)'
service: 'my-service'
name: 'my-router'
rule: 'Host(`example.com`)'
service: 'my-service'
middlewares: ['auth']
tls: true
tls: true
)
// Add the corresponding service
tc.add_service(
name: 'my-service'
name: 'my-service'
load_balancer: LoadBalancerConfig{
servers: [
ServerConfig{url: 'http://localhost:8080'},
ServerConfig{url: 'http://localhost:8081'}
ServerConfig{
url: 'http://localhost:8080'
},
ServerConfig{
url: 'http://localhost:8081'
},
]
}
)
// Add a basic auth middleware
tc.add_middleware(
name: 'auth'
typ: 'basicAuth'
name: 'auth'
typ: 'basicAuth'
settings: {
'users': '["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"]'
'users': '["test:${apr1}${H6uskkkW}${IgXLP6ewTrSuBkTrqE8wj}/"]'
}
)
// Add TLS configuration
tc.add_tls(
domain: 'example.com'
domain: 'example.com'
cert_file: '/path/to/cert.pem'
key_file: '/path/to/key.pem'
key_file: '/path/to/key.pem'
)
// Store configuration in Redis

View File

@@ -0,0 +1,38 @@
module authentication
import log
// // Creates and updates, authenticates email authentication sessions
// @[noinit]
// struct MemoryBackend {
// mut:
// sessions map[string]AuthSession
// logger &log.Logger = &log.Logger(&log.Log{
// level: .info
// })
// }
// // factory for
// pub fn new_memory_backend() !MemoryBackend {
// return MemoryBackend{}
// }
// fn (mut backend MemoryBackend) create_auth_session(session AuthSession) ! {
// backend.sessions[session.email] = session
// }
// fn (backend MemoryBackend) read_auth_session(email string) ?AuthSession {
// return backend.sessions[email] or { return none }
// }
// fn (mut backend MemoryBackend) update_auth_session(session AuthSession) ! {
// backend.sessions[session.email] = session
// }
// fn (mut backend MemoryBackend) set_session_authenticated(email string) ! {
// backend.sessions[email].authenticated = true
// }
// fn (mut backend MemoryBackend) delete_auth_session(email string) ! {
// backend.sessions.delete(email)
// }

View File

@@ -42,8 +42,8 @@ pub mut:
base_url string @[json: 'baseUrl']
image string
metadata MainMetadata
build_dest string @[json: 'buildDest']
build_dest_dev string @[json: 'buildDestDev']
build_dest []string @[json: 'buildDest']
build_dest_dev []string @[json: 'buildDestDev']
}
// Navbar config structures
@@ -80,8 +80,37 @@ pub fn load_config(cfg_dir string) !Config {
footer := json.decode(Footer, footer_content)!
// Load and parse main config
main_content := os.read_file(os.join_path(cfg_dir, 'main.json'))!
main := json.decode(Main, main_content)!
main_config_path := os.join_path(cfg_dir, 'main.json')
main_content := os.read_file(main_config_path)!
main := json.decode(Main, main_content) or {
eprintln('${main_config_path} is not in the right format please fix.')
println('
## EXAMPLE OF A GOOD ONE:
- note the list for buildDest and buildDestDev
- note it is the full path where the html is pushed to
{
"title": "ThreeFold Web4",
"tagline": "ThreeFold Web4",
"favicon": "img/favicon.png",
"url": "https://docs.threefold.io",
"url_home": "docs/introduction",
"baseUrl": "/",
"image": "img/tf_graph.png",
"metadata": {
"description": "ThreeFold is laying the foundation for a geo aware Web 4, the next generation of the Internet.",
"image": "https://threefold.info/kristof/img/tf_graph.png",
"title": "ThreeFold Docs"
},
"buildDest":["root@info.ourworld.tf:/root/hero/www/info/tfgrid4"],
"buildDestDev":["root@info.ourworld.tf:/root/hero/www/infodev/tfgrid4"]
}
')
exit(99)
}
// Load and parse navbar config
navbar_content := os.read_file(os.join_path(cfg_dir, 'navbar.json'))!

View File

@@ -26,28 +26,16 @@ pub mut:
@[params]
pub struct DSiteNewArgs {
pub mut:
name string
nameshort string
path string
url string
// publish_path string
name string
nameshort string
path string
url string
publish_path string
build_path string
production bool
watch_changes bool = true
update bool
}
pub fn (mut f DocusaurusFactory) build_dev(args_ DSiteNewArgs) !&DocSite {
mut s := f.add(args_)!
s.generate()!
osal.exec(
cmd: '
cd ${s.path_build.path}
bash build_dev.sh
'
retry: 0
)!
return s
deploykey string
}
pub fn (mut f DocusaurusFactory) build(args_ DSiteNewArgs) !&DocSite {
@@ -63,6 +51,33 @@ pub fn (mut f DocusaurusFactory) build(args_ DSiteNewArgs) !&DocSite {
return s
}
pub fn (mut f DocusaurusFactory) build_dev_publish(args_ DSiteNewArgs) !&DocSite {
mut s := f.add(args_)!
s.generate()!
osal.exec(
cmd: '
cd ${s.path_build.path}
bash build_dev_publish.sh
'
retry: 0
)!
return s
}
pub fn (mut f DocusaurusFactory) build_publish(args_ DSiteNewArgs) !&DocSite {
mut s := f.add(args_)!
s.generate()!
osal.exec(
cmd: '
cd ${s.path_build.path}
bash build_publish.sh
'
retry: 0
)!
return s
}
pub fn (mut f DocusaurusFactory) dev(args_ DSiteNewArgs) !&DocSite {
mut s := f.add(args_)!
@@ -126,8 +141,10 @@ pub fn (mut f DocusaurusFactory) add(args_ DSiteNewArgs) !&DocSite {
// if args.publish_path.len == 0 {
// args.publish_path = '${f.path_publish.path}/${args.name}'
// coderoot:"${os.home_dir()}/hero/var/publishcode"
mut gs := gittools.new(ssh_key_path: args.deploykey)!
if args.url.len > 0 {
mut gs := gittools.new()!
args.path = gs.get_path(url: args.url)!
}
@@ -135,7 +152,6 @@ pub fn (mut f DocusaurusFactory) add(args_ DSiteNewArgs) !&DocSite {
return error("Can't get path from docusaurus site, its not specified.")
}
mut gs := gittools.new()!
mut r := gs.get_repo(
url: 'https://github.com/freeflowuniverse/docusaurus_template.git'
pull: args.update
@@ -204,6 +220,7 @@ pub fn (mut site DocSite) error(args ErrorArgs) {
pub fn (mut site DocSite) generate() ! {
console.print_header(' site generate: ${site.name} on ${site.path_build.path}')
console.print_header(' site source on ${site.path_src.path}')
site.template_install()!
// osal.exec(
// cmd: '
@@ -262,7 +279,8 @@ fn (mut site DocSite) template_install() ! {
develop := $tmpl('templates/develop.sh')
build := $tmpl('templates/build.sh')
build_dev := $tmpl('templates/build_dev.sh')
build_dev_publish := $tmpl('templates/build_dev_publish.sh')
build_publish := $tmpl('templates/build_publish.sh')
mut develop_ := site.path_build.file_get_new('develop.sh')!
develop_.template_write(develop, true)!
@@ -272,9 +290,13 @@ fn (mut site DocSite) template_install() ! {
build_.template_write(build, true)!
build_.chmod(0o700)!
mut build_dev_ := site.path_build.file_get_new('build_dev.sh')!
build_dev_.template_write(build_dev, true)!
build_dev_.chmod(0o700)!
mut build_publish_ := site.path_build.file_get_new('build_publish.sh')!
build_publish_.template_write(build_publish, true)!
build_publish_.chmod(0o700)!
mut build_dev_publish_ := site.path_build.file_get_new('build_dev_publish.sh')!
build_dev_publish_.template_write(build_dev_publish, true)!
build_dev_publish_.chmod(0o700)!
mut develop2_ := site.path_src.file_get_new('develop.sh')!
develop2_.template_write(develop, true)!
@@ -283,8 +305,4 @@ fn (mut site DocSite) template_install() ! {
mut build2_ := site.path_src.file_get_new('build.sh')!
build2_.template_write(build, true)!
build2_.chmod(0o700)!
mut build_dev2_ := site.path_src.file_get_new('build_dev.sh')!
build_dev2_.template_write(build_dev, true)!
build_dev2_.chmod(0o700)!
}

View File

@@ -1,9 +1,9 @@
#!/bin/bash
set -e
set -ex
script_dir="??(cd "??(dirname "??{BASH_SOURCE[0]}")" && pwd)"
cd "??{script_dir}"
script_dir="???cd "???dirname "??{BASH_SOURCE[0]}")" && pwd)"
cd "???script_dir}"
echo "Docs directory: ??script_dir"
@@ -17,4 +17,6 @@ ${profile_include}
bun docusaurus build
rsync -rv --delete ${site.path_build.path}/build/ ${cfg.main.build_dest.trim_right("/")}/${cfg.main.name.trim_right("/")}/
mkdir -p ${site.args.publish_path.trim_right("/")}
echo SYNC TO ${site.args.publish_path.trim_right("/")}
rsync -rv --delete ${site.path_build.path}/build/ ${site.args.publish_path.trim_right("/")}/

View File

@@ -2,7 +2,7 @@
set -e
script_dir="??(cd "??(dirname "??{BASH_SOURCE[0]}")" && pwd)"
script_dir="???cd "???dirname "??{BASH_SOURCE[0]}")" && pwd)"
cd "??{script_dir}"
@@ -18,4 +18,6 @@ ${profile_include}
bun docusaurus build
rsync -rv --delete ${site.path_build.path}/build/ ${cfg.main.build_dest_dev.trim_right("/")}/${cfg.main.name.trim_right("/")}/
@for dest in cfg.main.build_dest_dev
rsync -rv --delete ${site.path_build.path}/build/ ${dest.trim_right("/")}/
@end

View File

@@ -0,0 +1,22 @@
#!/bin/bash
set -ex
script_dir="???cd "???dirname "??{BASH_SOURCE[0]}")" && pwd)"
cd "??{script_dir}"
echo "Docs directory: ??script_dir"
cd ${site.path_build.path}
export PATH=/tmp/docusaurus_build/node_modules/.bin:\${HOME}/.bun/bin/:\$PATH
rm -rf ${site.path_build.path}/build/
${profile_include}
bun docusaurus build
@for dest in cfg.main.build_dest
rsync -rv --delete ${site.path_build.path}/build/ ${dest.trim_right("/")}/
@end

View File

@@ -2,7 +2,7 @@
set -e
script_dir="??(cd "??(dirname "??{BASH_SOURCE[0]}")" && pwd)"
script_dir="???cd "???dirname "??{BASH_SOURCE[0]}")" && pwd)"
cd "??{script_dir}"
echo "Docs directory: ??script_dir"

View File

@@ -0,0 +1,191 @@
#!/bin/bash
set -euo pipefail
###############################################################################
# WARNING: THIS SCRIPT ERASES DATA!
#
# This script will:
# 1. Identify all internal (non-removable) SSD and NVMe disks, excluding the
# live USB from which Ubuntu is booted.
# 2. Wipe their partition tables.
# 3. On the first detected disk, create:
# - a 1GB EFI partition (formatted FAT32, flagged as ESP)
# - a ~19GB partition for Ubuntu root (formatted ext4)
# - if any space remains, a partition covering the rest (formatted btrfs)
# 4. On every other disk, create one partition spanning the entire disk and
# format it as btrfs.
#
# Double-check that you want to wipe all these disks BEFORE you run this script.
###############################################################################
# Ensure the script is run as root.
if [ "$EUID" -ne 0 ]; then
echo "This script must be run as root."
exit 1
fi
# Helper: given a device like /dev/sda1 or /dev/nvme0n1p1, return the base device.
get_base_device() {
local dev="$1"
if [[ "$dev" =~ ^/dev/nvme.*p[0-9]+$ ]]; then
# For NVMe devices, remove the trailing 'pX'
echo "$dev" | sed -E 's/p[0-9]+$//'
else
# For /dev/sdX type devices, remove trailing numbers
echo "$dev" | sed -E 's/[0-9]+$//'
fi
}
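# e.g. get_base_device /dev/nvme0n1p2 -> /dev/nvme0n1
#      get_base_device /dev/sda3      -> /dev/sda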
# Helper: given a disk (e.g. /dev/sda or /dev/nvme0n1) and a partition number,
# print the proper partition name.
get_partition_name() {
local disk="$1"
local partnum="$2"
if [[ "$disk" =~ nvme ]]; then
echo "${disk}p${partnum}"
else
echo "${disk}${partnum}"
fi
}
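# e.g. get_partition_name /dev/sda 2     -> /dev/sda2
#      get_partition_name /dev/nvme0n1 2 -> /dev/nvme0n1p2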
# Determine the boot device (i.e. the device from which the live system is running)
boot_dev_full=$(findmnt -n -o SOURCE /)
boot_disk=$(get_base_device "$boot_dev_full")
echo "Detected boot device (from /): $boot_dev_full"
echo "Base boot disk (will be used for Ubuntu install): $boot_disk"
# Now, enumerate candidate target disks.
# We will scan /sys/block for devices starting with "sd" or "nvme".
target_disks=()
# Loop over sd* and nvme* disks.
for dev_path in /sys/block/sd* /sys/block/nvme*; do
[ -e "$dev_path" ] || continue
disk_name=$(basename "$dev_path")
disk="/dev/$disk_name"
# Skip removable devices (e.g. USB sticks)
if [ "$(cat "$dev_path/removable")" -ne 0 ]; then
continue
fi
# Skip disks that are rotational (i.e. likely HDD) if you want only SSD/NVMe.
# (Usually SSD/NVMe have rotational=0.)
if [ -f "$dev_path/queue/rotational" ]; then
if [ "$(cat "$dev_path/queue/rotational")" -ne 0 ]; then
continue
fi
fi
# Add disk to list.
target_disks+=("$disk")
done
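# Note: only sd* and nvme* device names are scanned above; virtio (vd*) or
# eMMC (mmcblk*) disks would not be picked up.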
# Ensure the boot disk is in our list. (It will be partitioned for Ubuntu.)
if [[ ! " ${target_disks[@]} " =~ " ${boot_disk} " ]]; then
# Check if boot_disk qualifies (non-removable and non-rotational)
disk_dir="/sys/block/$(basename "$boot_disk")"
if [ -f "$disk_dir/removable" ] && [ "$(cat "$disk_dir/removable")" -eq 0 ]; then
if [ -f "$disk_dir/queue/rotational" ] && [ "$(cat "$disk_dir/queue/rotational")" -eq 0 ]; then
target_disks=("$boot_disk" "${target_disks[@]}")
fi
fi
fi
if [ "${#target_disks[@]}" -eq 0 ]; then
echo "No qualifying internal SSD/NVMe disks found."
exit 1
fi
echo
echo "The following disks will be wiped and re-partitioned:"
for disk in "${target_disks[@]}"; do
echo " $disk"
done
echo
read -p "ARE YOU SURE YOU WANT TO PROCEED? This will permanently erase all data on these disks (type 'yes' to continue): " answer
if [ "$answer" != "yes" ]; then
echo "Aborting."
exit 1
fi
###############################################################################
# Wipe all target disks.
###############################################################################
for disk in "${target_disks[@]}"; do
echo "Wiping partition table on $disk..."
sgdisk --zap-all "$disk"
# Overwrite beginning of disk (optional but recommended)
dd if=/dev/zero of="$disk" bs=512 count=2048 status=none
# Overwrite end of disk (ignoring errors if size is too small)
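# (GPT stores a backup header in the final sectors, so it must be cleared too)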
total_sectors=$(blockdev --getsz "$disk")
dd if=/dev/zero of="$disk" bs=512 count=2048 seek=$(( total_sectors - 2048 )) status=none 2>/dev/null || true
done
###############################################################################
# Partition the FIRST disk for Ubuntu installation.
###############################################################################
boot_install_disk="${target_disks[0]}"
echo
echo "Partitioning boot/install disk: $boot_install_disk"
parted -s "$boot_install_disk" mklabel gpt
# Create EFI partition: from 1MiB to 1025MiB (~1GB).
parted -s "$boot_install_disk" mkpart ESP fat32 1MiB 1025MiB
parted -s "$boot_install_disk" set 1 esp on
# Create root partition: from 1025MiB to 21025MiB (~20GB total for install).
parted -s "$boot_install_disk" mkpart primary ext4 1025MiB 21025MiB
# Determine if there's any space left.
disk_size_bytes=$(blockdev --getsize64 "$boot_install_disk")
# Calculate 21025MiB in bytes.
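# (1MiB alignment gap + 1024MiB EFI + 20000MiB root = 21025MiB)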
min_install_bytes=$((21025 * 1024 * 1024))
if [ "$disk_size_bytes" -gt "$min_install_bytes" ]; then
echo "Creating additional partition on $boot_install_disk for btrfs (using remaining space)..."
parted -s "$boot_install_disk" mkpart primary btrfs 21025MiB 100%
boot_disk_partitions=(1 2 3)
else
boot_disk_partitions=(1 2)
fi
# Format the partitions on the boot/install disk.
efi_part=$(get_partition_name "$boot_install_disk" 1)
root_part=$(get_partition_name "$boot_install_disk" 2)
echo "Formatting EFI partition ($efi_part) as FAT32..."
mkfs.fat -F32 "$efi_part"
echo "Formatting root partition ($root_part) as ext4..."
mkfs.ext4 -F "$root_part"
# If a third partition exists, format it as btrfs.
if [ "${boot_disk_partitions[2]:-}" ]; then
btrfs_part=$(get_partition_name "$boot_install_disk" 3)
echo "Formatting extra partition ($btrfs_part) as btrfs..."
mkfs.btrfs -f "$btrfs_part"
fi
###############################################################################
# Partition all OTHER target disks entirely as btrfs.
###############################################################################
if [ "${#target_disks[@]}" -gt 1 ]; then
echo
echo "Partitioning remaining disks for btrfs:"
for disk in "${target_disks[@]:1}"; do
echo "Processing disk $disk..."
parted -s "$disk" mklabel gpt
parted -s "$disk" mkpart primary btrfs 1MiB 100%
# Determine the partition name (e.g. /dev/sdb1 or /dev/nvme0n1p1).
if [[ "$disk" =~ nvme ]]; then
part="${disk}p1"
else
part="${disk}1"
fi
echo "Formatting $part as btrfs..."
mkfs.btrfs -f "$part"
done
fi
echo
echo "All operations complete. Ubuntu install partitions and btrfs volumes have been created."