Merge pull request #219 from Incubaid/development_hetzner

feat: Improve Ubuntu installation and SSH execution
This commit is contained in:
Omdanii
2025-12-01 13:03:24 +02:00
committed by GitHub
55 changed files with 3001 additions and 2339 deletions

View File

@@ -40,4 +40,3 @@ RUN /tmp/install_herolib.vsh && \
ENTRYPOINT ["/bin/bash"]
CMD ["/bin/bash"]

View File

@@ -0,0 +1,3 @@
export HETZNER_USER="your-robot-username"
export HETZNER_PASSWORD="your-password"
export HETZNER_SSHKEY_NAME="your-ssh-key-name"

View File

@@ -1,35 +1,34 @@
#!/usr/bin/env hero
// # Configure HetznerManager: replace with your own credentials, server IDs, SSH key name, and other parameters
// !!hetznermanager.configure
// name:"main"
// user:"krist"
// whitelist:"2111181, 2392178, 2545053, 2542166, 2550508, 2550378,2550253"
// password:"wontsethere"
// sshkey:"kristof"
!!hetznermanager.configure
user:"user_name"
whitelist:"server_id"
password:"password"
sshkey:"ssh_key_name"
// !!hetznermanager.server_rescue
// server_name: 'kristof21' // The name of the server to manage (or use `id`)
// wait: true // Wait for the operation to complete
// hero_install: true // Automatically install Herolib in the rescue system
!!hetznermanager.server_rescue
server_name: 'server_name' // The name of the server to manage (or use `id`)
wait: true // Wait for the operation to complete
hero_install: true // Automatically install Herolib in the rescue system
// # Reset a server
// !!hetznermanager.server_reset
// instance: 'main'
// server_name: 'your-server-name'
// wait: true
!!hetznermanager.server_reset
instance: 'main'
server_name: 'server_name'
wait: true
// # Add a new SSH key to your Hetzner account
// !!hetznermanager.key_create
// instance: 'main'
// key_name: 'my-laptop-key'
// data: 'ssh-rsa AAAA...'
!!hetznermanager.key_create
instance: 'main'
key_name: 'ssh_key_name'
data: 'ssh-rsa AAAA...'
// Install Ubuntu 24.04 on a server
!!hetznermanager.ubuntu_install
server_name: 'kristof2'
server_name: 'server_name'
wait: true
hero_install: true // Install Herolib on the new OS

View File

@@ -8,23 +8,33 @@ import time
import os
import incubaid.herolib.core.playcmds
name := 'kristof1'
// Server-specific configuration
const server_name = 'kristof1'
const server_whitelist = '2521602'
user := os.environ()['HETZNER_USER'] or {
// Load credentials from environment variables
// Source hetzner_env.sh before running: source examples/virt/hetzner/hetzner_env.sh
hetzner_user := os.environ()['HETZNER_USER'] or {
println('HETZNER_USER not set')
exit(1)
}
passwd := os.environ()['HETZNER_PASSWORD'] or {
hetzner_passwd := os.environ()['HETZNER_PASSWORD'] or {
println('HETZNER_PASSWORD not set')
exit(1)
}
hetzner_sshkey_name := os.environ()['HETZNER_SSHKEY_NAME'] or {
println('HETZNER_SSHKEY_NAME not set')
exit(1)
}
hs := '
!!hetznermanager.configure
user:"${user}"
whitelist:"2521602,2555487,2573047"
password:"${passwd}"
sshkey:"kristof"
user:"${hetzner_user}"
whitelist:"${server_whitelist}"
password:"${hetzner_passwd}"
sshkey:"${hetzner_sshkey_name}"
'
println(hs)
@@ -42,7 +52,7 @@ mut cl := hetznermanager.get()!
println(cl.servers_list()!)
mut serverinfo := cl.server_info_get(name: name)!
mut serverinfo := cl.server_info_get(name: server_name)!
println(serverinfo)
@@ -55,7 +65,7 @@ println(serverinfo)
// console.print_header('SSH login')
cl.ubuntu_install(name: name, wait: true, hero_install: true)!
cl.ubuntu_install(name: server_name, wait: true, hero_install: true)!
// cl.ubuntu_install(name: 'kristof20', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550378, name: 'kristof21', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550508, name: 'kristof22', wait: true, hero_install: true)!

View File

@@ -8,62 +8,47 @@ import time
import os
import incubaid.herolib.core.playcmds
name := 'kristof2'
// Server-specific configuration
const server_name = 'kristof2'
const server_whitelist = '2555487'
user := os.environ()['HETZNER_USER'] or {
// Load credentials from environment variables
// Source hetzner_env.sh before running: source examples/virt/hetzner/hetzner_env.sh
hetzner_user := os.environ()['HETZNER_USER'] or {
println('HETZNER_USER not set')
exit(1)
}
passwd := os.environ()['HETZNER_PASSWORD'] or {
hetzner_passwd := os.environ()['HETZNER_PASSWORD'] or {
println('HETZNER_PASSWORD not set')
exit(1)
}
hs := '
hetzner_sshkey_name := os.environ()['HETZNER_SSHKEY_NAME'] or {
println('HETZNER_SSHKEY_NAME not set')
exit(1)
}
hero_script := '
!!hetznermanager.configure
user:"${user}"
whitelist:"2521602,2555487"
password:"${passwd}"
sshkey:"kristof"
user:"${hetzner_user}"
whitelist:"${server_whitelist}"
password:"${hetzner_passwd}"
sshkey:"${hetzner_sshkey_name}"
'
println(hs)
playcmds.run(heroscript: hero_script)!
mut hetznermanager_ := hetznermanager.get()!
playcmds.run(heroscript: hs)!
mut serverinfo := hetznermanager_.server_info_get(name: server_name)!
console.print_header('Hetzner Test.')
println('${server_name} ${serverinfo.server_ip}')
mut cl := hetznermanager.get()!
// println(cl)
hetznermanager_.server_rescue(name: server_name, wait: true, hero_install: true)!
mut keys := hetznermanager_.keys_get()!
// for i in 0 .. 5 {
// println('test cache, first time slow then fast')
// }
println(cl.servers_list()!)
mut serverinfo := cl.server_info_get(name: name)!
println(serverinfo)
// cl.server_reset(name: 'kristof2', wait: true)!
cl.server_rescue(name: name, wait: true, hero_install: true)!
mut ks := cl.keys_get()!
println(ks)
console.print_header('SSH login')
mut b := builder.new()!
mut n := b.node_new(ipaddr: serverinfo.server_ip)!
// this will put hero in debug mode on the system
// n.hero_install(compile: true)!
hetznermanager_.ubuntu_install(name: server_name, wait: true, hero_install: true)!
n.shell('')!
cl.ubuntu_install(name: name, wait: true, hero_install: true)!
// cl.ubuntu_install(name: 'kristof20', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550378, name: 'kristof21', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550508, name: 'kristof22', wait: true, hero_install: true)!
// cl.ubuntu_install(id: 2550253, name: 'kristof23', wait: true, hero_install: true)!

View File

@@ -8,23 +8,33 @@ import time
import os
import incubaid.herolib.core.playcmds
name := 'kristof3'
// Server-specific configuration
const server_name = 'kristof3'
const server_whitelist = '2573047'
user := os.environ()['HETZNER_USER'] or {
// Load credentials from environment variables
// Source hetzner_env.sh before running: source examples/virt/hetzner/hetzner_env.sh
hetzner_user := os.environ()['HETZNER_USER'] or {
println('HETZNER_USER not set')
exit(1)
}
passwd := os.environ()['HETZNER_PASSWORD'] or {
hetzner_passwd := os.environ()['HETZNER_PASSWORD'] or {
println('HETZNER_PASSWORD not set')
exit(1)
}
hetzner_sshkey_name := os.environ()['HETZNER_SSHKEY_NAME'] or {
println('HETZNER_SSHKEY_NAME not set')
exit(1)
}
hs := '
!!hetznermanager.configure
user:"${user}"
whitelist:"2521602,2555487,2573047"
password:"${passwd}"
sshkey:"kristof"
user:"${hetzner_user}"
whitelist:"${server_whitelist}"
password:"${hetzner_passwd}"
sshkey:"${hetzner_sshkey_name}"
'
println(hs)
@@ -42,7 +52,7 @@ mut cl := hetznermanager.get()!
println(cl.servers_list()!)
mut serverinfo := cl.server_info_get(name: name)!
mut serverinfo := cl.server_info_get(name: server_name)!
println(serverinfo)
@@ -55,7 +65,7 @@ println(serverinfo)
// console.print_header('SSH login')
cl.ubuntu_install(name: name, wait: true, hero_install: true)!
cl.ubuntu_install(name: server_name, wait: true, hero_install: true)!
// cl.ubuntu_install(name: 'kristof20', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550378, name: 'kristof21', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550508, name: 'kristof22', wait: true, hero_install: true)!

View File

@@ -8,23 +8,33 @@ import time
import os
import incubaid.herolib.core.playcmds
name := 'test1'
// Server-specific configuration
const server_name = 'test1'
const server_whitelist = '2575034'
user := os.environ()['HETZNER_USER'] or {
// Load credentials from environment variables
// Source hetzner_env.sh before running: source examples/virt/hetzner/hetzner_env.sh
hetzner_user := os.environ()['HETZNER_USER'] or {
println('HETZNER_USER not set')
exit(1)
}
passwd := os.environ()['HETZNER_PASSWORD'] or {
hetzner_passwd := os.environ()['HETZNER_PASSWORD'] or {
println('HETZNER_PASSWORD not set')
exit(1)
}
hetzner_sshkey_name := os.environ()['HETZNER_SSHKEY_NAME'] or {
println('HETZNER_SSHKEY_NAME not set')
exit(1)
}
hs := '
!!hetznermanager.configure
user:"${user}"
whitelist:"2575034"
password:"${passwd}"
sshkey:"kristof"
user:"${hetzner_user}"
whitelist:"${server_whitelist}"
password:"${hetzner_passwd}"
sshkey:"${hetzner_sshkey_name}"
'
println(hs)
@@ -42,7 +52,7 @@ mut cl := hetznermanager.get()!
println(cl.servers_list()!)
mut serverinfo := cl.server_info_get(name: name)!
mut serverinfo := cl.server_info_get(name: server_name)!
println(serverinfo)
@@ -55,7 +65,7 @@ println(serverinfo)
// console.print_header('SSH login')
cl.ubuntu_install(name: name, wait: true, hero_install: true)!
cl.ubuntu_install(name: server_name, wait: true, hero_install: true)!
// cl.ubuntu_install(name: 'kristof20', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550378, name: 'kristof21', wait: true, hero_install: true)!
// cl.ubuntu_install(id:2550508, name: 'kristof22', wait: true, hero_install: true)!

View File

@@ -1,22 +1,31 @@
# Hetzner Examples
## to get started
## Quick Start
These scripts run from your own computer or from a VM you develop on.
### 1. Configure Environment Variables
Make sure you have hero_secrets loaded
Copy `hetzner_env.sh` and fill in your credentials:
```bash
hero git pull https://git.threefold.info/despiegk/hero_secrets
source ~/code/git.ourworld.tf/despiegk/hero_secrets/mysecrets.sh
export HETZNER_USER="your-robot-username" # Hetzner Robot API username
export HETZNER_PASSWORD="your-password" # Hetzner Robot API password
export HETZNER_SSHKEY_NAME="my-key" # Name of SSH key registered in Hetzner
```
## To install e.g. test1
Each script has its own server name and whitelist ID defined at the top.
```
~/code/github/incubaid/herolib/examples/virt/hetzner/hetzner_test1.vsh
### 2. Run a Script
```bash
source hetzner_env.sh
./hetzner_kristof2.vsh
```
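The per-script configuration mentioned above sits at the top of each `.vsh` file, for example (values from `hetzner_kristof2.vsh` as shown in this PR):
```v
// Server-specific configuration (edit per script)
const server_name = 'kristof2'
const server_whitelist = '2555487'
```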
keys available:
## SSH Keys
The `HETZNER_SSHKEY_NAME` must be the **name** of an SSH key already registered in your Hetzner Robot account.
Available keys in our Hetzner account:
- hossnys (RSA 2048)
- Jan De Landtsheer (ED25519 256)
@@ -24,17 +33,25 @@ keys available:
- kristof (ED25519 256)
- maxime (ED25519 256)
you can select another key in the script
To add a new key, use `key_create` in your script or the Hetzner Robot web interface.
> still to do, support our example key which is installed using mysecrets.sh
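To check which key names are registered from a script, the examples in this PR call `keys_get` on the configured client. A minimal sketch; the import path is an assumption and may differ in your herolib checkout:
```v
// Sketch only: assumes hetznermanager is already configured (see above)
// and that this import path matches your herolib layout.
import incubaid.herolib.clients.hetznermanager

fn main() {
	mut cl := hetznermanager.get() or { panic(err) }
	keys := cl.keys_get() or { panic(err) }
	println(keys)
}
```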
## Alternative: Using hero_secrets
## hetzner troubleshoot info
get the login passwd from:
https://robot.hetzner.com/preferences/index
You can also use the shared secrets repository:
```bash
curl -u "#ws+JdQtGCdL:..." https://robot-ws.your-server.de/server
hero git pull https://git.threefold.info/despiegk/hero_secrets
source ~/code/git.ourworld.tf/despiegk/hero_secrets/mysecrets.sh
```
## Troubleshooting
### Get Robot API credentials
Get your login credentials from: https://robot.hetzner.com/preferences/index
### Test API access
```bash
curl -u "your-username:your-password" https://robot-ws.your-server.de/server
```

View File

@@ -67,7 +67,9 @@ pub fn (mut node Node) hero_install(args HeroInstallArgs) ! {
todo << 'bash /tmp/install_v.sh --herolib '
}
}
node.exec_interactive(todo.join('\n'))!
// Use exec instead of exec_interactive since user interaction is not needed
// exec_interactive uses shell mode which replaces the process and never returns
node.exec(cmd: todo.join('\n'), stdout: true)!
}
@[params]

View File

@@ -99,8 +99,11 @@ pub fn (mut executor ExecutorLocal) download(args SyncArgs) ! {
}
pub fn (mut executor ExecutorLocal) shell(cmd string) ! {
// Note: os.execvp replaces the current process and never returns.
// This is intentional - shell() is designed to hand over control to the shell.
// Do not put shell() before any other code that needs to execute.
if cmd.len > 0 {
os.execvp('/bin/bash', ["-c '${cmd}'"])!
os.execvp('/bin/bash', ['-c', cmd])!
} else {
os.execvp('/bin/bash', [])!
}
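The switch above matters because each element of the argv array reaches the program as exactly one argument. A minimal standalone sketch, using only V's standard library:
```v
import os

fn main() {
	cmd := "echo it's fixed"
	// Old variant: os.execvp('/bin/bash', ["-c '${cmd}'"]) packed the option and
	// a re-quoted command into one argv element, so the new shell re-parsed the
	// embedded quotes; any single quote inside cmd broke it.
	// Fixed variant: '-c' and the command travel as two separate argv elements,
	// so cmd reaches bash verbatim. execvp replaces this process on success.
	os.execvp('/bin/bash', ['-c', cmd]) or { panic(err) }
}
```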

View File

@@ -235,11 +235,12 @@ pub fn (mut executor ExecutorSSH) info() map[string]string {
// forwarding ssh traffic to certain container
pub fn (mut executor ExecutorSSH) shell(cmd string) ! {
mut args := ['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null',
'${executor.user}@${executor.ipaddr.addr}', '-p', '${executor.ipaddr.port}']
if cmd.len > 0 {
panic('TODO IMPLEMENT SHELL EXEC OVER SSH')
args << cmd
}
os.execvp('ssh', ['-o StrictHostKeyChecking=no', '${executor.user}@${executor.ipaddr.addr}',
'-p ${executor.ipaddr.port}'])!
os.execvp('ssh', args)!
}
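For illustration, the argv the fixed `shell()` assembles, with hypothetical connection details (the real values come from the executor's fields):
```v
fn main() {
	// Hypothetical values; the real ones come from the executor fields.
	user := 'root'
	addr := '203.0.113.10'
	port := 2222
	cmd := 'uptime'
	mut args := ['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null',
		'${user}@${addr}', '-p', '${port}']
	if cmd.len > 0 {
		args << cmd
	}
	// ssh parses each element as one argument; keeping each flag and its value
	// as separate elements avoids relying on how ssh parses packed tokens.
	println(args)
}
```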
pub fn (mut executor ExecutorSSH) list(path string) ![]string {

View File

@@ -20,6 +20,7 @@ import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
import incubaid.herolib.installers.virt.podman
import incubaid.herolib.installers.virt.kubernetes_installer
import incubaid.herolib.installers.infra.gitea
import incubaid.herolib.builder
@@ -80,6 +81,7 @@ pub fn run(args_ PlayArgs) ! {
herolib.play(mut plbook)!
vlang.play(mut plbook)!
podman.play(mut plbook)!
kubernetes_installer.play(mut plbook)!
gitea.play(mut plbook)!
giteaclient.play(mut plbook)!

View File

@@ -1,177 +0,0 @@
module atlas
import incubaid.herolib.core.pathlib
import os
import json
const test_base = '/tmp/atlas_test'
// Test recursive export with chained cross-collection links
// Setup: Collection A links to B, Collection B links to C
// Expected: When exporting A, it should include pages from B and C
fn test_export_recursive_links() {
// Create 3 collections with chained links
col_a_path := '${test_base}/recursive_export/col_a'
col_b_path := '${test_base}/recursive_export/col_b'
col_c_path := '${test_base}/recursive_export/col_c'
os.mkdir_all(col_a_path)!
os.mkdir_all(col_b_path)!
os.mkdir_all(col_c_path)!
// Collection A: links to B
mut cfile_a := pathlib.get_file(path: '${col_a_path}/.collection', create: true)!
cfile_a.write('name:col_a')!
mut page_a := pathlib.get_file(path: '${col_a_path}/page_a.md', create: true)!
page_a.write('# Page A\\n\\nThis is page A.\\n\\n[Link to Page B](col_b:page_b)')!
// Collection B: links to C
mut cfile_b := pathlib.get_file(path: '${col_b_path}/.collection', create: true)!
cfile_b.write('name:col_b')!
mut page_b := pathlib.get_file(path: '${col_b_path}/page_b.md', create: true)!
page_b.write('# Page B\\n\\nThis is page B with link to C.\\n\\n[Link to Page C](col_c:page_c)')!
// Collection C: final page
mut cfile_c := pathlib.get_file(path: '${col_c_path}/.collection', create: true)!
cfile_c.write('name:col_c')!
mut page_c := pathlib.get_file(path: '${col_c_path}/page_c.md', create: true)!
page_c.write('# Page C\\n\\nThis is the final page in the chain.')!
// Create Atlas and add all collections
mut a := new()!
a.add_collection(mut pathlib.get_dir(path: col_a_path)!)!
a.add_collection(mut pathlib.get_dir(path: col_b_path)!)!
a.add_collection(mut pathlib.get_dir(path: col_c_path)!)!
// Validate links before export to populate page.links
a.validate_links()!
// Export
export_path := '${test_base}/export_recursive'
a.export(destination: export_path)!
// ===== VERIFICATION PHASE =====
// 1. Verify directory structure exists
assert os.exists('${export_path}/content'), 'Export content directory should exist'
assert os.exists('${export_path}/content/col_a'), 'Collection col_a directory should exist'
assert os.exists('${export_path}/meta'), 'Export meta directory should exist'
// 2. Verify all pages exist in col_a export directory
// Note: Exported pages from other collections go to col_a directory
assert os.exists('${export_path}/content/col_a/page_a.md'), 'page_a.md should be exported'
assert os.exists('${export_path}/content/col_a/page_b.md'), 'page_b.md from col_b should be included'
assert os.exists('${export_path}/content/col_a/page_c.md'), 'page_c.md from col_c should be included'
// 3. Verify page content is correct
content_a := os.read_file('${export_path}/content/col_a/page_a.md')!
assert content_a.contains('# Page A'), 'page_a content should have title'
assert content_a.contains('This is page A'), 'page_a content should have expected text'
assert content_a.contains('[Link to Page B]'), 'page_a should have link to page_b'
content_b := os.read_file('${export_path}/content/col_a/page_b.md')!
assert content_b.contains('# Page B'), 'page_b content should have title'
assert content_b.contains('This is page B'), 'page_b content should have expected text'
assert content_b.contains('[Link to Page C]'), 'page_b should have link to page_c'
content_c := os.read_file('${export_path}/content/col_a/page_c.md')!
assert content_c.contains('# Page C'), 'page_c content should have title'
assert content_c.contains('This is the final page'), 'page_c content should have expected text'
// 4. Verify metadata exists and is valid
assert os.exists('${export_path}/meta/col_a.json'), 'Metadata file for col_a should exist'
meta_content := os.read_file('${export_path}/meta/col_a.json')!
assert meta_content.len > 0, 'Metadata file should not be empty'
// // Parse metadata JSON and verify structure
// mut meta := json.decode(map[string]map[string]interface{}, meta_content) or {
// panic('Failed to parse metadata JSON: ${err}')
// }
// assert meta.len > 0, 'Metadata should have content'
// assert meta['name'] != none, 'Metadata should have name field'
// 5. Verify that pages from B and C are NOT exported to separate col_b and col_c directories
// (they should only be in col_a directory)
meta_col_b_exists := os.exists('${export_path}/meta/col_b.json')
meta_col_c_exists := os.exists('${export_path}/meta/col_c.json')
assert !meta_col_b_exists, 'col_b metadata should not exist (pages copied to col_a)'
assert !meta_col_c_exists, 'col_c metadata should not exist (pages copied to col_a)'
// 6. Verify the recursive depth worked
// All three pages should be accessible through the exported col_a
assert os.exists('${export_path}/content/col_a/page_a.md'), 'Level 1 page should exist'
assert os.exists('${export_path}/content/col_a/page_b.md'), 'Level 2 page (via A->B) should exist'
assert os.exists('${export_path}/content/col_a/page_c.md'), 'Level 3 page (via A->B->C) should exist'
// 7. Verify that the link chain is properly documented
// page_a links to page_b, page_b links to page_c
// The links should be preserved in the exported content
page_a_content := os.read_file('${export_path}/content/col_a/page_a.md')!
page_b_content := os.read_file('${export_path}/content/col_a/page_b.md')!
page_c_content := os.read_file('${export_path}/content/col_a/page_c.md')!
// Links are preserved with collection:page format
assert page_a_content.contains('col_b:page_b') || page_a_content.contains('page_b'), 'page_a should reference page_b'
assert page_b_content.contains('col_c:page_c') || page_b_content.contains('page_c'), 'page_b should reference page_c'
println('✓ Recursive cross-collection export test passed')
println(' - All 3 pages exported to col_a directory (A -> B -> C)')
println(' - Content verified for all pages')
println(' - Metadata validated')
println(' - Link chain preserved')
}
// Test recursive export with cross-collection images
// Setup: Collection A links to image in Collection B
// Expected: Image should be copied to col_a export directory
fn test_export_recursive_with_images() {
col_a_path := '${test_base}/recursive_img/col_a'
col_b_path := '${test_base}/recursive_img/col_b'
os.mkdir_all(col_a_path)!
os.mkdir_all(col_b_path)!
os.mkdir_all('${col_a_path}/img')!
os.mkdir_all('${col_b_path}/img')!
// Collection A with local image
mut cfile_a := pathlib.get_file(path: '${col_a_path}/.collection', create: true)!
cfile_a.write('name:col_a')!
mut page_a := pathlib.get_file(path: '${col_a_path}/page_a.md', create: true)!
page_a.write('# Page A\\n\\n![Local Image](local.png)\\n\\n[Link to B](col_b:page_b)')!
// Create local image
os.write_file('${col_a_path}/img/local.png', 'fake png data')!
// Collection B with image and linked page
mut cfile_b := pathlib.get_file(path: '${col_b_path}/.collection', create: true)!
cfile_b.write('name:col_b')!
mut page_b := pathlib.get_file(path: '${col_b_path}/page_b.md', create: true)!
page_b.write('# Page B\\n\\n![B Image](b_image.jpg)')!
// Create image in collection B
os.write_file('${col_b_path}/img/b_image.jpg', 'fake jpg data')!
// Create Atlas
mut a := new()!
a.add_collection(mut pathlib.get_dir(path: col_a_path)!)!
a.add_collection(mut pathlib.get_dir(path: col_b_path)!)!
// Validate and export
a.validate_links()!
export_path := '${test_base}/export_recursive_img'
a.export(destination: export_path)!
// Verify pages exported
assert os.exists('${export_path}/content/col_a/page_a.md'), 'page_a should exist'
assert os.exists('${export_path}/content/col_a/page_b.md'), 'page_b from col_b should be included'
// Verify images exported to col_a image directory
assert os.exists('${export_path}/content/col_a/img/local.png'), 'Local image should exist'
assert os.exists('${export_path}/content/col_a/img/b_image.jpg'), 'Image from cross-collection reference should be copied'
println('✓ Recursive cross-collection with images test passed')
}

View File

@@ -2,7 +2,6 @@ module atlas
import incubaid.herolib.core.pathlib
import os
import json
const test_base = '/tmp/atlas_test'
@@ -382,3 +381,48 @@ fn test_get_edit_url() {
// Assert the URLs are correct
// assert edit_url == 'https://github.com/test/repo/edit/main/test_page.md'
}
fn test_export_recursive_links() {
// Create 3 collections with chained links
col_a_path := '${test_base}/recursive_export/col_a'
col_b_path := '${test_base}/recursive_export/col_b'
col_c_path := '${test_base}/recursive_export/col_c'
os.mkdir_all(col_a_path)!
os.mkdir_all(col_b_path)!
os.mkdir_all(col_c_path)!
// Collection A
mut cfile_a := pathlib.get_file(path: '${col_a_path}/.collection', create: true)!
cfile_a.write('name:col_a')!
mut page_a := pathlib.get_file(path: '${col_a_path}/page_a.md', create: true)!
page_a.write('# Page A\n\n[Link to B](col_b:page_b)')!
// Collection B
mut cfile_b := pathlib.get_file(path: '${col_b_path}/.collection', create: true)!
cfile_b.write('name:col_b')!
mut page_b := pathlib.get_file(path: '${col_b_path}/page_b.md', create: true)!
page_b.write('# Page B\n\n[Link to C](col_c:page_c)')!
// Collection C
mut cfile_c := pathlib.get_file(path: '${col_c_path}/.collection', create: true)!
cfile_c.write('name:col_c')!
mut page_c := pathlib.get_file(path: '${col_c_path}/page_c.md', create: true)!
page_c.write('# Page C\n\nFinal content')!
// Export
mut a := new()!
a.add_collection(mut pathlib.get_dir(path: col_a_path)!)!
a.add_collection(mut pathlib.get_dir(path: col_b_path)!)!
a.add_collection(mut pathlib.get_dir(path: col_c_path)!)!
export_path := '${test_base}/export_recursive'
a.export(destination: export_path)!
// Verify all pages were exported
assert os.exists('${export_path}/content/col_a/page_a.md')
assert os.exists('${export_path}/content/col_a/page_b.md') // From Collection B
assert os.exists('${export_path}/content/col_a/page_c.md') // From Collection C
// TODO: test not complete
}

View File

@@ -17,8 +17,8 @@ AtlasClient provides methods to:
```v
import incubaid.herolib.web.atlas_client
// Create client, exports will be in ~/hero/var/atlas_export by default
mut client := atlas_client.new()!
// Create client
mut client := atlas_client.new(export_dir: '${os.home_dir()}/hero/var/atlas_export')!
// List collections
collections := client.list_collections()!

View File

@@ -247,6 +247,20 @@ pub fn (mut c AtlasClient) get_collection_metadata(collection_name string) !Coll
return metadata
}
// get_page_links returns the links found in a page by reading the metadata
pub fn (mut c AtlasClient) get_page_links(collection_name string, page_name string) ![]LinkMetadata {
// Get collection metadata
metadata := c.get_collection_metadata(collection_name)!
// Apply name normalization to page name
fixed_page_name := texttools.name_fix_no_ext(page_name)
// Find the page in metadata
if fixed_page_name in metadata.pages {
return metadata.pages[fixed_page_name].links
}
return error('page_not_found: Page "${page_name}" not found in collection metadata, for collection: "${collection_name}"')
}
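A minimal usage sketch for the new method; `docs` and `intro` are placeholder collection/page names, and the export directory follows the client README in this PR:
```v
import incubaid.herolib.web.atlas_client
import os

fn main() {
	mut client := atlas_client.new(export_dir: '${os.home_dir()}/hero/var/atlas_export') or {
		panic(err)
	}
	// 'docs' and 'intro' are placeholder names for an exported collection/page.
	links := client.get_page_links('docs', 'intro') or { panic(err) }
	for link in links {
		println('${link.target_collection_name}:${link.target_item_name}')
	}
}
```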
// get_collection_errors returns the errors for a collection from metadata
pub fn (mut c AtlasClient) get_collection_errors(collection_name string) ![]ErrorMetadata {
metadata := c.get_collection_metadata(collection_name)!
@@ -259,30 +273,6 @@ pub fn (mut c AtlasClient) has_errors(collection_name string) bool {
return errors.len > 0
}
pub fn (mut c AtlasClient) copy_pages(collection_name string, page_name string, destination_path string) ! {
// Get page links from metadata
links := c.get_page_links(collection_name, page_name)!
// Create img subdirectory
mut img_dest := pathlib.get_dir(path: '${destination_path}', create: true)!
// Copy only image links
for link in links {
if link.file_type != .page {
continue
}
if link.status == .external {
continue
}
// Get image path and copy
img_path := c.get_page_path(link.target_collection_name, link.target_item_name)!
mut src := pathlib.get_file(path: img_path)!
src.copy(dest: '${img_dest.path}/${src.name_fix_keepext()}')!
console.print_debug(' ********. Copied page: ${src.path} to ${img_dest.path}/${src.name_fix_keepext()}')
}
}
pub fn (mut c AtlasClient) copy_images(collection_name string, page_name string, destination_path string) ! {
// Get page links from metadata
links := c.get_page_links(collection_name, page_name)!

View File

@@ -1,119 +0,0 @@
module client
import incubaid.herolib.core.pathlib
import incubaid.herolib.core.texttools
import incubaid.herolib.ui.console
import os
import json
import incubaid.herolib.core.redisclient
// get_page_links returns all links found in a page and pages linked to it (recursive)
// This includes transitive links through page-to-page references
// External links, files, and images do not recurse further
pub fn (mut c AtlasClient) get_page_links(collection_name string, page_name string) ![]LinkMetadata {
mut visited := map[string]bool{}
mut all_links := []LinkMetadata{}
c.collect_page_links_recursive(collection_name, page_name, mut visited, mut all_links)!
return all_links
}
// collect_page_links_recursive is the internal recursive implementation
// It traverses all linked pages and collects all links found
//
// Thread safety: Each call to get_page_links gets its own visited map
// Circular references are prevented by tracking visited pages
//
// Link types behavior:
// - .page links: Recursively traverse to get links from the target page
// - .file and .image links: Included in results but not recursively expanded
// - .external links: Included in results but not recursively expanded
fn (mut c AtlasClient) collect_page_links_recursive(collection_name string, page_name string, mut visited map[string]bool, mut all_links []LinkMetadata) ! {
// Create unique key for cycle detection
page_key := '${collection_name}:${page_name}'
// Prevent infinite loops on circular page references
// Example: Page A -> Page B -> Page A
if page_key in visited {
return
}
visited[page_key] = true
// Get collection metadata
metadata := c.get_collection_metadata(collection_name)!
fixed_page_name := texttools.name_fix_no_ext(page_name)
// Find the page in metadata
if fixed_page_name !in metadata.pages {
return error('page_not_found: Page "${page_name}" not found in collection metadata, for collection: "${collection_name}"')
}
page_meta := metadata.pages[fixed_page_name]
// Add all direct links from this page to the result
// This includes: pages, files, images, and external links
all_links << page_meta.links
// Recursively traverse only page-to-page links
for link in page_meta.links {
// Only recursively process links to other pages within the atlas
// Skip external links (http, https, mailto, etc.)
// Skip file and image links (these don't have "contained" links)
if link.file_type != .page || link.status == .external {
continue
}
// Recursively collect links from the target page
c.collect_page_links_recursive(link.target_collection_name, link.target_item_name, mut visited, mut all_links) or {
// If we encounter an error (e.g., target page doesn't exist in metadata),
// we continue processing other links rather than failing completely
// This provides graceful degradation for broken link references
continue
}
}
}
// get_image_links returns all image links found in a page and related pages (recursive)
// This is a convenience function that filters get_page_links to only image links
pub fn (mut c AtlasClient) get_image_links(collection_name string, page_name string) ![]LinkMetadata {
all_links := c.get_page_links(collection_name, page_name)!
mut image_links := []LinkMetadata{}
for link in all_links {
if link.file_type == .image {
image_links << link
}
}
return image_links
}
// get_file_links returns all file links (non-image) found in a page and related pages (recursive)
// This is a convenience function that filters get_page_links to only file links
pub fn (mut c AtlasClient) get_file_links(collection_name string, page_name string) ![]LinkMetadata {
all_links := c.get_page_links(collection_name, page_name)!
mut file_links := []LinkMetadata{}
for link in all_links {
if link.file_type == .file {
file_links << link
}
}
return file_links
}
// get_page_link_targets returns all page-to-page link targets found in a page and related pages
// This is a convenience function that filters get_page_links to only page links
pub fn (mut c AtlasClient) get_page_link_targets(collection_name string, page_name string) ![]LinkMetadata {
all_links := c.get_page_links(collection_name, page_name)!
mut page_links := []LinkMetadata{}
for link in all_links {
if link.file_type == .page && link.status != .external {
page_links << link
}
}
return page_links
}

View File

@@ -7,7 +7,7 @@ import json
@[params]
pub struct ExportArgs {
pub mut:
destination string @[required]
destination string @[required]
reset bool = true
include bool = true
redis bool = true
@@ -90,44 +90,6 @@ pub fn (mut c Collection) export(args CollectionExportArgs) ! {
c.collect_cross_collection_references(mut page, mut cross_collection_pages, mut
cross_collection_files, mut processed_cross_pages)!
// println('------- ${c.name} ${page.key()}')
// if page.key() == 'geoaware:solution' && c.name == 'mycelium_nodes_tiers' {
// println(cross_collection_pages)
// println(cross_collection_files)
// // println(processed_cross_pages)
// $dbg;
// }
// copy the pages to the right exported path
for _, mut ref_page in cross_collection_pages {
mut src_file := ref_page.path()!
mut subdir_path := pathlib.get_dir(
path: '${col_dir.path}'
create: true
)!
mut dest_path := '${subdir_path.path}/${ref_page.name}.md'
src_file.copy(dest: dest_path)!
// println(dest_path)
// $dbg;
}
// copy the files to the right exported path
for _, mut ref_file in cross_collection_files {
mut src_file2 := ref_file.path()!
// Determine subdirectory based on file type
mut subdir := if ref_file.is_image() { 'img' } else { 'files' }
// Ensure subdirectory exists
mut subdir_path := pathlib.get_dir(
path: '${col_dir.path}/${subdir}'
create: true
)!
mut dest_path := '${subdir_path.path}/${ref_file.name}'
mut dest_file2 := pathlib.get_file(path: dest_path, create: true)!
src_file2.copy(dest: dest_file2.path)!
}
processed_local_pages[page.name] = true
// Redis operations...
@@ -155,6 +117,65 @@ pub fn (mut c Collection) export(args CollectionExportArgs) ! {
mut dest_file := pathlib.get_file(path: dest_path, create: true)!
src_file.copy(dest: dest_file.path)!
}
// Second pass: copy all collected cross-collection pages and process their links recursively
// Keep iterating until no new cross-collection references are found
for {
mut found_new_references := false
// Process all cross-collection pages we haven't processed yet
for page_key, mut ref_page in cross_collection_pages {
if page_key in processed_cross_pages {
continue // Already processed this page's links
}
// Mark as processed to avoid infinite loops
processed_cross_pages[page_key] = true
found_new_references = true
// Get the referenced page content with includes processed
ref_content := ref_page.content_with_fixed_links(
include: args.include
cross_collection: true
export_mode: true
)!
// Write the referenced page to this collection's directory
mut dest_file := pathlib.get_file(
path: '${col_dir.path}/${ref_page.name}.md'
create: true
)!
dest_file.write(ref_content)!
// CRITICAL: Recursively process links in this cross-collection page
// This ensures we get pages/files/images referenced by ref_page
c.collect_cross_collection_references(mut ref_page, mut cross_collection_pages, mut
cross_collection_files, mut processed_cross_pages)!
}
// If we didn't find any new references, we're done with the recursive pass
if !found_new_references {
break
}
}
// Third pass: copy ALL collected cross-collection referenced files/images
for _, mut ref_file in cross_collection_files {
mut src_file := ref_file.path()!
// Determine subdirectory based on file type
mut subdir := if ref_file.is_image() { 'img' } else { 'files' }
// Ensure subdirectory exists
mut subdir_path := pathlib.get_dir(
path: '${col_dir.path}/${subdir}'
create: true
)!
mut dest_path := '${subdir_path.path}/${ref_file.name}'
mut dest_file := pathlib.get_file(path: dest_path, create: true)!
src_file.copy(dest: dest_file.path)!
}
}
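The second pass above is a plain fixed-point sweep: keep rescanning the collected map until a sweep discovers nothing new. A standalone sketch of the pattern with hypothetical data (not the Atlas types):
```v
fn main() {
	// page -> pages it references (hypothetical link graph)
	graph := {
		'a': ['b']
		'b': ['c']
		'c': []string{}
	}
	mut collected := {
		'b': true // direct cross-collection reference from the root page
	}
	mut processed := map[string]bool{}
	for {
		mut found_new := false
		// keys() returns a snapshot, so mutating collected inside is safe
		for page in collected.keys() {
			if page in processed {
				continue
			}
			processed[page] = true
			found_new = true
			for dep in graph[page] {
				collected[dep] = true // transitive reference discovered
			}
		}
		if !found_new {
			break
		}
	}
	println(collected.keys()) // ['b', 'c']
}
```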
// Helper function to recursively collect cross-collection references
@@ -163,17 +184,6 @@ fn (mut c Collection) collect_cross_collection_references(mut page Page,
mut all_cross_pages map[string]&Page,
mut all_cross_files map[string]&File,
mut processed_pages map[string]bool) ! {
page_key := page.key()
// If we've already processed this page, skip it (prevents infinite loops with cycles)
if page_key in processed_pages {
return
}
// Mark this page as processed BEFORE recursing (prevents infinite loops with circular references)
processed_pages[page_key] = true
// Process all links in the current page
// Use cached links from validation (before transformation) to preserve collection info
for mut link in page.links {
if link.status != .found {
@@ -182,19 +192,15 @@ fn (mut c Collection) collect_cross_collection_references(mut page Page,
is_local := link.target_collection_name == c.name
// Collect cross-collection page references and recursively process them
// Collect cross-collection page references
if link.file_type == .page && !is_local {
page_ref := '${link.target_collection_name}:${link.target_item_name}'
page_key := '${link.target_collection_name}:${link.target_item_name}'
// Only add if not already collected
if page_ref !in all_cross_pages {
if page_key !in all_cross_pages {
mut target_page := link.target_page()!
all_cross_pages[page_ref] = target_page
// Recursively process the target page's links to find more cross-collection references
// This ensures we collect ALL transitive cross-collection page and file references
c.collect_cross_collection_references(mut target_page, mut all_cross_pages, mut
all_cross_files, mut processed_pages)!
all_cross_pages[page_key] = target_page
// Don't mark as processed yet - we'll do that when we actually process its links
}
}

View File

@@ -0,0 +1,15 @@
In atlas/:
- Check the format of groups; see content/groups. Groups now end with .group.
- Check how include works, so one group can include another group as defined; this only works within the same folder.
- In the atlas scan function, add a scan_groups function that finds groups; only do this for the collection named groups.
- Do not add the groups collection to the atlas; it is a system collection.
- Build the groups and add them to the atlas.
- Give clear instructions to the coding agent on how to write the code (see the sketch below).
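A rough sketch of the scan step described above, using only V's standard library; `scan_groups` and its signature are placeholders derived from the notes, not an existing Atlas API:
```v
import os

// Find group definition files (ending in .group) under a collection directory.
fn scan_groups(path string) []string {
	return os.walk_ext(path, '.group')
}

fn main() {
	println(scan_groups('/tmp/collections/groups'))
}
```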

View File

@@ -0,0 +1,4 @@
- First, find all pages
- Then, for each page, find all links

View File

@@ -33,7 +33,7 @@ put in .hero file and execute with hero, or put a shebang line on top of the .hero script
!!atlas.scan git_url:"https://git.ourworld.tf/tfgrid/docs_tfgrid4/src/branch/main/collections/tests"
!!atlas.export
!!atlas.export destination: '/tmp/atlas_export'
```

View File

@@ -22,8 +22,8 @@ pub mut:
recursive bool
pull bool
reload bool // means reload the info into the cache
script bool = true // run non interactive
reset bool = true // means we will lose changes (only relevant for clone, pull)
script bool // run non interactive
reset bool // means we will lose changes (only relevant for clone, pull)
}
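With these defaults flipped, non-interactive mode and destructive resets become opt-in. A sketch of an explicit call; the import path and `gittools.get()` are assumptions about herolib's layout:
```v
// Sketch only: adjust the import path and factory call to your checkout.
import incubaid.herolib.develop.gittools

fn main() {
	mut gs := gittools.get() or { panic(err) }
	// script and reset now default to false, so a non-interactive,
	// change-discarding pull must request both explicitly.
	result := gs.do(cmd: 'pull', reset: true, script: true) or { panic(err) }
	println(result)
}
```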
// do group actions on repo
@@ -38,14 +38,12 @@ pub mut:
// url string
// pull bool
// reload bool //means reload the info into the cache
// script bool = true // run non interactive
// reset bool = true // means we will lose changes (only relevant for clone, pull)
// script bool // run non interactive
// reset bool// means we will lose changes (only relevant for clone, pull)
//```
pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string {
mut args := args_
console.print_debug('git do ${args.cmd}')
// println(args)
// $dbg;
if args.path.len > 0 && args.url.len > 0 {
panic('bug')
@@ -99,7 +97,9 @@ pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string {
provider: args.provider
)!
if repos.len < 4 || args.cmd in 'pull,push,commit,delete'.split(',') {
// println(repos.map(it.name))
if repos.len < 4 || args.cmd in 'pull,push,commit'.split(',') {
args.reload = true
}

View File

@@ -19,7 +19,7 @@ pub fn (mut repo GitRepo) status_update(args StatusUpdateArgs) ! {
}
if args.reset || repo.last_load == 0 {
// console.print_debug('${repo.name} : Cache get')
// console.print_debug('${repo.name} : Cache Get')
repo.cache_get()!
}
@@ -30,6 +30,8 @@ pub fn (mut repo GitRepo) status_update(args StatusUpdateArgs) ! {
// Decide if a full load is needed.
if args.reset || repo.last_load == 0
|| current_time - repo.last_load >= repo.config.remote_check_period {
// console.print_debug("reload ${repo.name}:\n args reset:${args.reset}\n lastload:${repo.last_load}\n currtime-lastload:${current_time- repo.last_load}\n period:${repo.config.remote_check_period}")
// $dbg;
repo.load_internal() or {
// Persist the error state to the cache
console.print_stderr('Failed to load repository ${repo.name} at ${repo.path()}: ${err}')
@@ -51,7 +53,8 @@ fn (mut repo GitRepo) load_internal() ! {
repo.exec('fetch --all') or {
repo.status.error = 'Failed to fetch updates: ${err}'
return error('Failed to fetch updates for ${repo.name} at ${repo.path()}: ${err}. Please check network connection and repository access.')
console.print_stderr('Failed to fetch updates for ${repo.name} at ${repo.path()}: ${err}. \nPlease check git repo source, network connection and repository access.')
return
}
repo.load_branches()!
repo.load_tags()!

View File

@@ -1,217 +0,0 @@
Need to install the following:
#!/bin/bash
set -euo pipefail
EXTRA_ARGS=""
log_info() {
echo '[INFO] ' "$@"
}
log_fatal() {
echo '[ERROR] ' "$@" >&2
exit 1
}
source_env_file() {
local env_file="${1:-}"
if [ ! -f "$env_file" ]; then
log_fatal "Environment file not found: $env_file"
fi
set -a
source "$env_file"
set +a
}
check_root() {
if [ "$EUID" -ne 0 ]; then
log_fatal "This script must be run as root"
fi
}
install_deps() {
log_info "Updating package lists..."
if ! apt-get update -qq > /dev/null 2>&1; then
log_fatal "Failed to update package lists"
fi
if ! command -v curl &> /dev/null; then
log_info "Installing curl..."
apt-get install -y -qq curl > /dev/null 2>&1 || log_fatal "Failed to install curl"
fi
if ! command -v ip &> /dev/null; then
log_info "Installing iproute2 for ip command..."
apt-get install -y -qq iproute2 > /dev/null 2>&1 || log_fatal "Failed to install iproute2"
fi
if ! command -v k3s &> /dev/null; then
log_info "Installing k3s..."
if ! curl -fsSL -o /usr/local/bin/k3s https://github.com/k3s-io/k3s/releases/download/v1.33.1+k3s1/k3s 2>/dev/null; then
log_fatal "Failed to download k3s"
fi
chmod +x /usr/local/bin/k3s
fi
if ! command -v kubectl &> /dev/null; then
log_info "Installing kubectl..."
if ! curl -fsSL -o /usr/local/bin/kubectl https://dl.k8s.io/release/v1.33.1/bin/linux/amd64/kubectl 2>/dev/null; then
log_fatal "Failed to download kubectl"
fi
chmod +x /usr/local/bin/kubectl
fi
}
get_iface_ipv6() {
local iface="$1"
# Step 1: Find the next-hop for 400::/7
local route_line
route_line=$(ip -6 route | grep "^400::/7.*dev ${iface}" || true)
if [ -z "$route_line" ]; then
log_fatal "No 400::/7 route found via interface ${iface}"
fi
# Extract next-hop IPv6
local nexthop
nexthop=$(echo "$route_line" | awk '{for(i=1;i<=NF;i++) if ($i=="via") print $(i+1)}')
local prefix
prefix=$(echo "$nexthop" | cut -d':' -f1-4)
# Step 3: Get global IPv6 addresses and match subnet
local ipv6_list
ipv6_list=$(ip -6 addr show dev "$iface" scope global | awk '/inet6/ {print $2}' | cut -d'/' -f1)
local ip ip_prefix
for ip in $ipv6_list; do
ip_prefix=$(echo "$ip" | cut -d':' -f1-4)
if [ "$ip_prefix" = "$prefix" ]; then
echo "$ip"
return 0
fi
done
log_fatal "No global IPv6 address found on ${iface} matching prefix ${prefix}"
}
prepare_args() {
log_info "Preparing k3s arguments..."
if [ -z "${K3S_FLANNEL_IFACE:-}" ]; then
log_fatal "K3S_FLANNEL_IFACE not set, it should be your mycelium interface"
else
local ipv6
ipv6=$(get_iface_ipv6 "$K3S_FLANNEL_IFACE")
EXTRA_ARGS="$EXTRA_ARGS --node-ip=$ipv6"
fi
if [ -n "${K3S_DATA_DIR:-}" ]; then
log_info "k3s data-dir set to: $K3S_DATA_DIR"
if [ -d "/var/lib/rancher/k3s" ] && [ -n "$(ls -A /var/lib/rancher/k3s 2>/dev/null)" ]; then
cp -r /var/lib/rancher/k3s/* $K3S_DATA_DIR && rm -rf /var/lib/rancher/k3s
fi
EXTRA_ARGS="$EXTRA_ARGS --data-dir $K3S_DATA_DIR --kubelet-arg=root-dir=$K3S_DATA_DIR/kubelet"
fi
if [[ "${MASTER:-}" = "true" ]]; then
EXTRA_ARGS="$EXTRA_ARGS --cluster-cidr=2001:cafe:42::/56"
EXTRA_ARGS="$EXTRA_ARGS --service-cidr=2001:cafe:43::/112"
EXTRA_ARGS="$EXTRA_ARGS --flannel-ipv6-masq"
fi
if [ -z "${K3S_URL:-}" ]; then
# Add additional SANs for planetary network IP, public IPv4, and public IPv6
# https://github.com/threefoldtech/tf-images/issues/98
local ifaces=( "tun0" "eth1" "eth2" )
for iface in "${ifaces[@]}"
do
# Check if interface exists before querying
if ! ip addr show "$iface" &>/dev/null; then
continue
fi
local addrs
addrs=$(ip addr show "$iface" 2>/dev/null | grep -E "inet |inet6 " | grep "global" | cut -d '/' -f1 | awk '{print $2}' || true)
local addr
for addr in $addrs
do
# Validate the IP address by trying to route to it
if ip route get "$addr" &>/dev/null; then
EXTRA_ARGS="$EXTRA_ARGS --tls-san $addr"
fi
done
done
if [ "${HA:-}" = "true" ]; then
EXTRA_ARGS="$EXTRA_ARGS --cluster-init"
fi
else
if [ -z "${K3S_TOKEN:-}" ]; then
log_fatal "K3S_TOKEN must be set when K3S_URL is specified (joining a cluster)"
fi
fi
}
patch_manifests() {
log_info "Patching manifests..."
dir="${K3S_DATA_DIR:-/var/lib/rancher/k3s}"
manifest="$dir/server/manifests/tfgw-crd.yaml"
# If K3S_URL is set, remove the manifest and exit. It is an agent node.
if [[ -n "${K3S_URL:-}" ]]; then
rm -f "$manifest"
log_info "Agent node detected, removed manifest: $manifest"
exit 0
fi
# If K3S_URL is not set, patch the manifest. It is a server node.
[[ ! -f "$manifest" ]] && echo "Manifest not found: $manifest" >&2 && exit 1
sed -i \
-e "s|\${MNEMONIC}|${MNEMONIC:-}|g" \
-e "s|\${NETWORK}|${NETWORK:-}|g" \
-e "s|\${TOKEN}|${TOKEN:-}|g" \
"$manifest"
}
run_node() {
if [ -z "${K3S_URL:-}" ]; then
log_info "Starting k3s server (initializing new cluster)..."
log_info "Command: k3s server --flannel-iface $K3S_FLANNEL_IFACE $EXTRA_ARGS"
exec k3s server --flannel-iface "$K3S_FLANNEL_IFACE" $EXTRA_ARGS 2>&1
elif [ "${MASTER:-}" = "true" ]; then
log_info "Starting k3s server (joining existing cluster as master)..."
log_info "Command: k3s server --server $K3S_URL --flannel-iface $K3S_FLANNEL_IFACE $EXTRA_ARGS"
exec k3s server --server "$K3S_URL" --flannel-iface "$K3S_FLANNEL_IFACE" $EXTRA_ARGS 2>&1
else
log_info "Starting k3s agent (joining existing cluster as worker)..."
log_info "Command: k3s agent --server $K3S_URL --flannel-iface $K3S_FLANNEL_IFACE $EXTRA_ARGS"
exec k3s agent --server "$K3S_URL" --flannel-iface "$K3S_FLANNEL_IFACE" $EXTRA_ARGS 2>&1
fi
}
main() {
source_env_file "${1:-}"
check_root
install_deps
prepare_args
patch_manifests
run_node
}
main "$@"
INSTRUCTIONS: USE HEROLIB AS MUCH AS POSSIBLE e.g. SAL

View File

@@ -2,119 +2,537 @@ module kubernetes_installer
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core
import incubaid.herolib.core.pathlib
import incubaid.herolib.installers.ulist
import incubaid.herolib.osal.startupmanager
import os
//////////////////// following actions are not specific to instance of the object
//////////////////// STARTUP COMMAND ////////////////////
// checks if kubectl is installed and meets minimum version requirement
fn installed() !bool {
fn (self &KubernetesInstaller) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Get Mycelium IPv6 address
ipv6 := self.get_mycelium_ipv6()!
// Build K3s command based on node type
mut cmd := ''
mut extra_args := '--node-ip=${ipv6} --flannel-iface ${self.mycelium_interface}'
// Add data directory if specified
if self.data_dir != '' {
extra_args += ' --data-dir ${self.data_dir} --kubelet-arg=root-dir=${self.data_dir}/kubelet'
}
// Add token
if self.token != '' {
extra_args += ' --token ${self.token}'
}
if self.is_master {
// Master node configuration
extra_args += ' --cluster-cidr=2001:cafe:42::/56 --service-cidr=2001:cafe:43::/112 --flannel-ipv6-masq'
if self.is_first_master {
// First master: initialize cluster
cmd = 'k3s server --cluster-init ${extra_args}'
} else {
// Additional master: join existing cluster
if self.master_url == '' {
return error('master_url is required for joining as additional master')
}
cmd = 'k3s server --server ${self.master_url} ${extra_args}'
}
} else {
// Worker node: join as agent
if self.master_url == '' {
return error('master_url is required for worker nodes')
}
cmd = 'k3s agent --server ${self.master_url} ${extra_args}'
}
res << startupmanager.ZProcessNewArgs{
name: 'k3s_${self.name}'
startuptype: .systemd
cmd: cmd
env: {
'HOME': os.home_dir()
}
}
return res
}
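`get_mycelium_ipv6` is not shown in this hunk; the deleted bash helper earlier in this PR derived the address by comparing the first four colon-separated groups of each global IPv6 on the interface against the 400::/7 next-hop. A rough standalone sketch of that matching in V (hypothetical addresses; real `ip -6 route`/`ip -6 addr` parsing omitted):
```v
// Return the address sharing its first four groups with the next-hop prefix.
// Sketch only; parsing of `ip` command output is left out.
fn match_by_prefix(nexthop string, addrs []string) ?string {
	prefix := nexthop.split(':')#[..4].join(':')
	for addr in addrs {
		if addr.split(':')#[..4].join(':') == prefix {
			return addr
		}
	}
	return none
}

fn main() {
	addrs := ['5af:33:12::1', '42a:f5c1:0:1::2']
	println(match_by_prefix('42a:f5c1:0:1::1', addrs) or { 'no match' })
}
```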
//////////////////// RUNNING CHECK ////////////////////
fn running() !bool {
// Check if k3s process is running
res := osal.exec(cmd: 'pgrep -f "k3s (server|agent)"', stdout: false, raise_error: false)!
if res.exit_code == 0 {
// K3s process is running, that's enough for basic check
// We don't check kubectl connectivity here as it might not be ready immediately
// and could hang if kubeconfig is not properly configured
return true
}
return false
}
//////////////////// OS CHECK ////////////////////
fn check_ubuntu() ! {
// Check if running on Ubuntu
if !core.is_linux()! {
return error('K3s installer requires Linux. Current OS is not supported.')
}
// Check /etc/os-release for Ubuntu
content := os.read_file('/etc/os-release') or {
return error('Could not read /etc/os-release. Is this Ubuntu?')
}
if !content.contains('Ubuntu') && !content.contains('ubuntu') {
return error('This installer requires Ubuntu. Current OS is not Ubuntu.')
}
console.print_debug('OS check passed: Running on Ubuntu')
}
//////////////////// DEPENDENCY INSTALLATION ////////////////////
fn install_deps(k3s_version string) ! {
console.print_header('Installing dependencies...')
// Check and install curl
if !osal.cmd_exists('curl') {
console.print_header('Installing curl...')
osal.package_install('curl')!
}
// Check and install iproute2 (for ip command)
if !osal.cmd_exists('ip') {
console.print_header('Installing iproute2...')
osal.package_install('iproute2')!
}
// Install K3s binary
if !osal.cmd_exists('k3s') {
console.print_header('Installing K3s ${k3s_version}...')
k3s_url := 'https://github.com/k3s-io/k3s/releases/download/${k3s_version}+k3s1/k3s'
osal.download(
url: k3s_url
dest: '/tmp/k3s'
)!
// Make it executable and move to /usr/local/bin
osal.exec(cmd: 'chmod +x /tmp/k3s')!
osal.cmd_add(
cmdname: 'k3s'
source: '/tmp/k3s'
)!
}
// Install kubectl
if !osal.cmd_exists('kubectl') {
return false
console.print_header('Installing kubectl...')
// Extract version number from k3s_version (e.g., v1.33.1)
kubectl_version := k3s_version
kubectl_url := 'https://dl.k8s.io/release/${kubectl_version}/bin/linux/amd64/kubectl'
osal.download(
url: kubectl_url
dest: '/tmp/kubectl'
)!
osal.exec(cmd: 'chmod +x /tmp/kubectl')!
osal.cmd_add(
cmdname: 'kubectl'
source: '/tmp/kubectl'
)!
}
res := os.execute('${osal.profile_path_source_and()!} kubectl version --client --output=json')
if res.exit_code != 0 {
// Try older kubectl version command format
res2 := os.execute('${osal.profile_path_source_and()!} kubectl version --client --short')
if res2.exit_code != 0 {
return false
}
// Parse version from output like "Client Version: v1.31.0"
lines := res2.output.split_into_lines().filter(it.contains('Client Version'))
if lines.len == 0 {
return false
}
version_str := lines[0].all_after('v').trim_space()
if texttools.version(version) <= texttools.version(version_str) {
return true
}
return false
console.print_header('All dependencies installed successfully')
}
// For newer kubectl versions with JSON output
// Just check if kubectl exists and runs - version checking is optional
return true
//////////////////// INSTALLATION ACTIONS ////////////////////
fn installed() !bool {
return osal.cmd_exists('k3s') && osal.cmd_exists('kubectl')
}
// get the Upload List of the files
// Install first master node
pub fn (mut self KubernetesInstaller) install_master() ! {
console.print_header('Installing K3s as first master node')
// Check OS
check_ubuntu()!
// Set flags
self.is_master = true
self.is_first_master = true
// Install dependencies
install_deps(self.k3s_version)!
// Ensure data directory exists
osal.dir_ensure(self.data_dir)!
// Save configuration
set(self)!
console.print_header('K3s first master installation completed')
console.print_header('Token: ${self.token}')
console.print_header('To start K3s, run: kubernetes_installer.start')
// Generate join script
join_script := self.generate_join_script()!
console.print_header('Join script generated. Save this for other nodes:\n${join_script}')
}
// Join as additional master
pub fn (mut self KubernetesInstaller) join_master() ! {
console.print_header('Joining K3s cluster as additional master')
// Check OS
check_ubuntu()!
// Validate required fields
if self.token == '' {
return error('token is required to join cluster')
}
if self.master_url == '' {
return error('master_url is required to join cluster')
}
// Set flags
self.is_master = true
self.is_first_master = false
// Install dependencies
install_deps(self.k3s_version)!
// Ensure data directory exists
osal.dir_ensure(self.data_dir)!
// Save configuration
set(self)!
console.print_header('K3s additional master installation completed')
console.print_header('To start K3s, run: kubernetes_installer.start')
}
// Install worker node
pub fn (mut self KubernetesInstaller) install_worker() ! {
console.print_header('Installing K3s as worker node')
// Check OS
check_ubuntu()!
// Validate required fields
if self.token == '' {
return error('token is required to join cluster')
}
if self.master_url == '' {
return error('master_url is required to join cluster')
}
// Set flags
self.is_master = false
self.is_first_master = false
// Install dependencies
install_deps(self.k3s_version)!
// Ensure data directory exists
osal.dir_ensure(self.data_dir)!
// Save configuration
set(self)!
console.print_header('K3s worker installation completed')
console.print_header('To start K3s, run: kubernetes_installer.start')
}
//////////////////// UTILITY FUNCTIONS ////////////////////
// Get kubeconfig content
pub fn (self &KubernetesInstaller) get_kubeconfig() !string {
kubeconfig_path := self.kubeconfig_path()
mut kubeconfig_file := pathlib.get_file(path: kubeconfig_path) or {
return error('Kubeconfig not found at ${kubeconfig_path}. Is K3s running?')
}
if !kubeconfig_file.exists() {
return error('Kubeconfig not found at ${kubeconfig_path}. Is K3s running?')
}
return kubeconfig_file.read()!
}
// Generate join script for other nodes
pub fn (self &KubernetesInstaller) generate_join_script() !string {
if !self.is_first_master {
return error('Can only generate join script from first master node')
}
// Get Mycelium IPv6 of this master
master_ipv6 := self.get_mycelium_ipv6()!
master_url := 'https://[${master_ipv6}]:6443'
mut script := '#!/usr/bin/env hero
// ============================================================================
// K3s Cluster Join Script
// Generated from master node: ${self.node_name}
// ============================================================================
// Section 1: Join as Additional Master (HA)
// Uncomment to join as additional master node
/*
!!kubernetes_installer.configure
name:\'k3s_master_2\'
k3s_version:\'${self.k3s_version}\'
data_dir:\'${self.data_dir}\'
node_name:\'master-2\'
mycelium_interface:\'${self.mycelium_interface}\'
token:\'${self.token}\'
master_url:\'${master_url}\'
!!kubernetes_installer.join_master name:\'k3s_master_2\'
!!kubernetes_installer.start name:\'k3s_master_2\'
*/
// Section 2: Join as Worker Node
// Uncomment to join as worker node
/*
!!kubernetes_installer.configure
name:\'k3s_worker_1\'
k3s_version:\'${self.k3s_version}\'
data_dir:\'${self.data_dir}\'
node_name:\'worker-1\'
mycelium_interface:\'${self.mycelium_interface}\'
token:\'${self.token}\'
master_url:\'${master_url}\'
!!kubernetes_installer.install_worker name:\'k3s_worker_1\'
!!kubernetes_installer.start name:\'k3s_worker_1\'
*/
'
return script
}
//////////////////// CLEANUP ////////////////////
fn destroy() ! {
console.print_header('Destroying K3s installation')
// Get configuration to find data directory
// Try to get from current configuration, otherwise use common paths
mut data_dirs := []string{}
if cfg := get() {
data_dirs << cfg.data_dir
console.print_debug('Found configured data directory: ${cfg.data_dir}')
} else {
console.print_debug('No configuration found, will clean up common K3s paths')
}
// Always add common K3s directories to ensure complete cleanup
data_dirs << '/var/lib/rancher/k3s'
data_dirs << '/root/hero/var/k3s'
// CRITICAL: Complete systemd service deletion FIRST before any other cleanup
// This prevents the service from auto-restarting during cleanup
// Step 1: Stop and delete ALL k3s systemd services using startupmanager
console.print_header('Stopping and removing systemd services...')
// Get systemd startup manager
mut sm := startupmanager_get(.systemd) or {
console.print_debug('Failed to get systemd manager: ${err}')
return error('Could not get systemd manager: ${err}')
}
// List all k3s services
all_services := sm.list() or {
console.print_debug('Failed to list services: ${err}')
[]string{}
}
// Filter and delete k3s services
for service_name in all_services {
if service_name.starts_with('k3s_') {
console.print_debug('Deleting systemd service: ${service_name}')
// Use startupmanager.delete() which properly stops, disables, and removes the service
sm.delete(service_name) or {
console.print_debug('Failed to delete service ${service_name}: ${err}')
}
}
}
console.print_header('✓ Systemd services removed')
// Step 2: Kill any remaining K3s processes
console.print_header('Killing any remaining K3s processes...')
osal.exec(cmd: 'killall -9 k3s 2>/dev/null || true', stdout: false, raise_error: false) or {
console.print_debug('No k3s processes to kill or killall failed')
}
// Wait for processes to fully terminate
osal.exec(cmd: 'sleep 2', stdout: false) or {}
// Step 3: Unmount kubelet mounts (before network cleanup)
cleanup_mounts()!
// Step 4: Clean up network interfaces (after processes are stopped)
cleanup_network()!
// Step 5: Remove data directories
console.print_header('Removing data directories...')
// Remove all K3s data directories (deduplicated)
mut cleaned_dirs := map[string]bool{}
for data_dir in data_dirs {
if data_dir != '' && data_dir !in cleaned_dirs {
cleaned_dirs[data_dir] = true
console.print_debug('Removing data directory: ${data_dir}')
osal.exec(cmd: 'rm -rf ${data_dir}', stdout: false, raise_error: false) or {
console.print_debug('Failed to remove ${data_dir}: ${err}')
}
}
}
// Also remove /etc/rancher which K3s creates
console.print_debug('Removing /etc/rancher')
osal.exec(cmd: 'rm -rf /etc/rancher', stdout: false, raise_error: false) or {}
// Step 6: Clean up CNI
console.print_header('Cleaning up CNI directories...')
osal.exec(cmd: 'rm -rf /var/lib/cni/', stdout: false, raise_error: false) or {}
// Step 7: Clean up iptables rules
console.print_header('Cleaning up iptables rules')
osal.exec(
cmd: 'iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore'
stdout: false
raise_error: false
) or {}
osal.exec(
cmd: 'ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore'
stdout: false
raise_error: false
) or {}
console.print_header('K3s destruction completed')
}
fn cleanup_network() ! {
console.print_header('Cleaning up network interfaces')
// Remove interfaces that are slaves of cni0
// Get the list first, then delete one by one
if veth_result := osal.exec(
cmd: 'ip link show | grep "master cni0" | awk -F: \'{print $2}\' | xargs'
stdout: false
raise_error: false
) {
if veth_result.output.trim_space() != '' {
veth_interfaces := veth_result.output.trim_space().split(' ')
for veth in veth_interfaces {
veth_trimmed := veth.trim_space()
if veth_trimmed != '' {
console.print_debug('Deleting veth interface: ${veth_trimmed}')
osal.exec(cmd: 'ip link delete ${veth_trimmed}', stdout: false, raise_error: false) or {
console.print_debug('Failed to delete ${veth_trimmed}, continuing...')
}
}
}
}
} else {
console.print_debug('No veth interfaces found or error getting list')
}
// Remove CNI-related interfaces
interfaces := ['cni0', 'flannel.1', 'flannel-v6.1', 'kube-ipvs0', 'flannel-wg', 'flannel-wg-v6']
for iface in interfaces {
console.print_debug('Deleting interface: ${iface}')
// Use timeout to prevent hanging, and redirect stderr to avoid blocking
osal.exec(cmd: 'timeout 5 ip link delete ${iface} 2>/dev/null || true', stdout: false, raise_error: false) or {
console.print_debug('Interface ${iface} not found or already deleted')
}
}
// Remove CNI namespaces
if ns_result := osal.exec(
cmd: 'ip netns show | grep cni- | xargs'
stdout: false
raise_error: false
) {
if ns_result.output.trim_space() != '' {
namespaces := ns_result.output.trim_space().split(' ')
for ns in namespaces {
ns_trimmed := ns.trim_space()
if ns_trimmed != '' {
console.print_debug('Deleting namespace: ${ns_trimmed}')
osal.exec(cmd: 'ip netns delete ${ns_trimmed}', stdout: false, raise_error: false) or {
console.print_debug('Failed to delete namespace ${ns_trimmed}')
}
}
}
}
} else {
console.print_debug('No CNI namespaces found')
}
}
fn cleanup_mounts() ! {
console.print_header('Cleaning up mounts')
// Unmount and remove kubelet directories
paths := ['/run/k3s', '/var/lib/kubelet/pods', '/var/lib/kubelet/plugins', '/run/netns/cni-']
for path in paths {
// Find all mounts under this path and unmount them
if mount_result := osal.exec(
cmd: 'mount | grep "${path}" | awk \'{print $3}\' | sort -r'
stdout: false
raise_error: false
) {
if mount_result.output.trim_space() != '' {
mount_points := mount_result.output.split_into_lines()
for mount_point in mount_points {
mp_trimmed := mount_point.trim_space()
if mp_trimmed != '' {
console.print_debug('Unmounting: ${mp_trimmed}')
osal.exec(cmd: 'umount -f ${mp_trimmed}', stdout: false, raise_error: false) or {
console.print_debug('Failed to unmount ${mp_trimmed}')
}
}
}
}
} else {
console.print_debug('No mounts found for ${path}')
}
// Remove the directory
console.print_debug('Removing directory: ${path}')
osal.exec(cmd: 'rm -rf ${path}', stdout: false, raise_error: false) or {}
}
}
//////////////////// GENERIC INSTALLER FUNCTIONS ////////////////////
fn ulist_get() !ulist.UList {
return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
// Not applicable for kubectl
// Not applicable for K3s
}
fn install() ! {
console.print_header('install kubectl')
mut url := ''
mut dest_path := '/tmp/kubectl'
// Determine download URL based on platform
if core.is_linux_arm()! {
url = 'https://dl.k8s.io/release/v${version}/bin/linux/arm64/kubectl'
} else if core.is_linux_intel()! {
url = 'https://dl.k8s.io/release/v${version}/bin/linux/amd64/kubectl'
} else if core.is_osx_arm()! {
url = 'https://dl.k8s.io/release/v${version}/bin/darwin/arm64/kubectl'
} else if core.is_osx_intel()! {
url = 'https://dl.k8s.io/release/v${version}/bin/darwin/amd64/kubectl'
} else {
return error('unsupported platform for kubectl installation')
}
console.print_header('downloading kubectl from ${url}')
// Download kubectl binary
osal.download(
url: url
// minsize_kb: 40000 // kubectl is ~45MB
dest: dest_path
)!
// Make it executable
os.chmod(dest_path, 0o755)!
// Install to system
osal.cmd_add(
cmdname: 'kubectl'
source: dest_path
)!
// Create .kube directory with proper permissions
kube_dir := os.join_path(os.home_dir(), '.kube')
if !os.exists(kube_dir) {
console.print_header('creating ${kube_dir} directory')
os.mkdir_all(kube_dir)!
os.chmod(kube_dir, 0o700)! // read/write/execute for owner only
console.print_header('${kube_dir} directory created with permissions 0700')
} else {
// Ensure correct permissions even if directory exists
os.chmod(kube_dir, 0o700)!
console.print_header('${kube_dir} directory permissions set to 0700')
}
console.print_header('kubectl installed successfully')
}
fn destroy() ! {
console.print_header('destroy kubectl')
if !installed()! {
console.print_header('kubectl is not installed')
return
}
// Remove kubectl command
osal.cmd_delete('kubectl')!
// Clean up any temporary files
osal.rm('/tmp/kubectl')!
console.print_header('kubectl destruction completed')
return error('Use install_master, join_master, or install_worker instead of generic install')
}

View File

@@ -4,6 +4,9 @@ import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.osal.core as osal
import time
__global (
kubernetes_installer_global map[string]&KubernetesInstaller
@@ -125,22 +128,70 @@ pub fn play(mut plbook PlayBook) ! {
}
mut install_actions := plbook.find(filter: 'kubernetes_installer.configure')!
if install_actions.len > 0 {
return error("can't configure kubernetes_installer, because no configuration allowed for this installer.")
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'kubernetes_installer.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install'] {
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut k8s_obj := get(name: name, create: true)!
console.print_debug('action object:\n${k8s_obj}')
if other_action.name in ['destroy', 'install', 'build'] {
if other_action.name == 'destroy' || reset {
console.print_debug('install action kubernetes_installer.destroy')
destroy()!
k8s_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action kubernetes_installer.install')
install()!
k8s_obj.install(reset: reset)!
}
}
if other_action.name in ['start', 'stop', 'restart'] {
if other_action.name == 'start' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.restart()!
}
}
// K3s-specific actions
if other_action.name in ['install_master', 'join_master', 'install_worker'] {
if other_action.name == 'install_master' {
console.print_debug('install action kubernetes_installer.install_master')
k8s_obj.install_master()!
}
if other_action.name == 'join_master' {
console.print_debug('install action kubernetes_installer.join_master')
k8s_obj.join_master()!
}
if other_action.name == 'install_worker' {
console.print_debug('install action kubernetes_installer.install_worker')
k8s_obj.install_worker()!
}
}
if other_action.name == 'get_kubeconfig' {
console.print_debug('install action kubernetes_installer.get_kubeconfig')
kubeconfig := k8s_obj.get_kubeconfig()!
console.print_header('Kubeconfig:\n${kubeconfig}')
}
if other_action.name == 'generate_join_script' {
console.print_debug('install action kubernetes_installer.generate_join_script')
script := k8s_obj.generate_join_script()!
console.print_header('Join Script:\n${script}')
}
other_action.done = true
}
}
@@ -149,12 +200,107 @@ pub fn play(mut plbook PlayBook) ! {
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
match cat {
.screen {
console.print_debug("installer: kubernetes_installer' startupmanager get screen")
return startupmanager.get(.screen)!
}
.zinit {
console.print_debug("installer: kubernetes_installer' startupmanager get zinit")
return startupmanager.get(.zinit)!
}
.systemd {
console.print_debug("installer: kubernetes_installer' startupmanager get systemd")
return startupmanager.get(.systemd)!
}
else {
console.print_debug("installer: kubernetes_installer' startupmanager get auto")
return startupmanager.get(.auto)!
}
}
}
// load from disk and make sure it is properly initialized
pub fn (mut self KubernetesInstaller) reload() ! {
switch(self.name)
self = obj_init(self)!
}
pub fn (mut self KubernetesInstaller) start() ! {
switch(self.name)
if self.running()! {
return
}
console.print_header('installer: kubernetes_installer start')
if !installed()! {
return error('K3s is not installed. Please run install_master, join_master, or install_worker first.')
}
// Ensure data directory exists
osal.dir_ensure(self.data_dir)!
// Create manifests directory for auto-apply
manifests_dir := '${self.data_dir}/server/manifests'
osal.dir_ensure(manifests_dir)!
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
console.print_debug('installer: kubernetes_installer starting with ${zprocess.startuptype}...')
sm.new(zprocess)!
sm.start(zprocess.name)!
}
for _ in 0 .. 50 {
if self.running()! {
return
}
time.sleep(100 * time.millisecond)
}
return error('kubernetes_installer did not start properly.')
}
pub fn (mut self KubernetesInstaller) install_start(args InstallArgs) ! {
switch(self.name)
self.install(args)!
self.start()!
}
pub fn (mut self KubernetesInstaller) stop() ! {
switch(self.name)
for zprocess in self.startupcmd()! {
mut sm := startupmanager_get(zprocess.startuptype)!
sm.stop(zprocess.name)!
}
}
pub fn (mut self KubernetesInstaller) restart() ! {
switch(self.name)
self.stop()!
self.start()!
}
pub fn (mut self KubernetesInstaller) running() !bool {
switch(self.name)
// walk over the generic processes, if not running return
for zprocess in self.startupcmd()! {
if zprocess.startuptype != .screen {
mut sm := startupmanager_get(zprocess.startuptype)!
r := sm.running(zprocess.name)!
if r == false {
return false
}
}
}
return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
@@ -170,6 +316,7 @@ pub fn (mut self KubernetesInstaller) install(args InstallArgs) ! {
pub fn (mut self KubernetesInstaller) destroy() ! {
switch(self.name)
self.stop() or {}
destroy()!
}

View File

@@ -1,27 +1,203 @@
module kubernetes_installer
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import os
import rand
pub const version = '1.31.0'
pub const version = 'v1.33.1'
const singleton = true
const default = true
// Kubernetes installer - handles kubectl installation
// K3s installer - handles K3s cluster installation with Mycelium IPv6 networking
@[heap]
pub struct KubernetesInstaller {
pub mut:
name string = 'default'
// K3s version to install
k3s_version string = version
// Data directory for K3s (default: ~/hero/var/k3s)
data_dir string
// Unique node name/identifier
node_name string
// Mycelium interface name (auto-detected if not specified)
mycelium_interface string
// Cluster token for authentication (auto-generated if empty)
token string
// Master URL for joining cluster (e.g., 'https://[ipv6]:6443')
master_url string
// Node IPv6 address (auto-detected from Mycelium if empty)
node_ip string
// Is this a master/control-plane node?
is_master bool
// Is this the first master (uses --cluster-init)?
is_first_master bool
}
// your checking & initialization code if needed
fn obj_init(mycfg_ KubernetesInstaller) !KubernetesInstaller {
mut mycfg := mycfg_
// Set default data directory if not provided
if mycfg.data_dir == '' {
mycfg.data_dir = os.join_path(os.home_dir(), 'hero/var/k3s')
}
// Expand home directory in data_dir if it contains ~
if mycfg.data_dir.starts_with('~') {
mycfg.data_dir = mycfg.data_dir.replace_once('~', os.home_dir())
}
// Set default node name if not provided
if mycfg.node_name == '' {
hostname := os.execute('hostname').output.trim_space()
mycfg.node_name = if hostname != '' { hostname } else { 'k3s-node-${rand.hex(4)}' }
}
// Auto-detect Mycelium interface if not provided
if mycfg.mycelium_interface == '' {
mycfg.mycelium_interface = detect_mycelium_interface()!
}
// Generate token if not provided and this is the first master
if mycfg.token == '' && mycfg.is_first_master {
// Generate a secure random token
mycfg.token = rand.hex(32)
}
// Note: Validation of token/master_url is done in the specific action functions
// (join_master, install_worker) where the context is clear
return mycfg
}
// Get path to kubeconfig file
pub fn (self &KubernetesInstaller) kubeconfig_path() string {
return '${self.data_dir}/server/cred/admin.kubeconfig'
}
// Get Mycelium IPv6 address from interface
pub fn (self &KubernetesInstaller) get_mycelium_ipv6() !string {
// If node_ip is already set, use it
if self.node_ip != '' {
return self.node_ip
}
// Otherwise, detect from Mycelium interface
return get_mycelium_ipv6_from_interface(self.mycelium_interface)!
}
// Auto-detect Mycelium interface by finding 400::/7 route
fn detect_mycelium_interface() !string {
// Find all 400::/7 routes
route_result := osal.exec(
cmd: 'ip -6 route | grep "^400::/7"'
stdout: false
raise_error: false
)!
if route_result.exit_code != 0 || route_result.output.trim_space() == '' {
return error('No Mycelium interface found (no 400::/7 route detected). Please ensure Mycelium is installed and running.')
}
// Parse interface name from route (format: "400::/7 dev <interface> ...")
route_line := route_result.output.trim_space()
parts := route_line.split(' ')
for i, part in parts {
if part == 'dev' && i + 1 < parts.len {
iface := parts[i + 1]
return iface
}
}
return error('Could not parse Mycelium interface from route output: ${route_line}')
}
// Helper function to detect Mycelium IPv6 from interface
fn get_mycelium_ipv6_from_interface(iface string) !string {
// Step 1: Find the 400::/7 route via the interface
route_result := osal.exec(
cmd: 'ip -6 route | grep "^400::/7.*dev ${iface}"'
stdout: false
) or { return error('No 400::/7 route found via interface ${iface}') }
route_line := route_result.output.trim_space()
if route_line == '' {
return error('No 400::/7 route found via interface ${iface}')
}
// Step 2: Get all global IPv6 addresses on the interface
addr_result := osal.exec(
cmd: 'ip -6 addr show dev ${iface} scope global | grep inet6 | awk \'{print $2}\' | cut -d/ -f1'
stdout: false
)!
ipv6_list := addr_result.output.split_into_lines()
// Check if route has a next-hop (via keyword)
parts := route_line.split(' ')
mut nexthop := ''
for i, part in parts {
if part == 'via' && i + 1 < parts.len {
nexthop = parts[i + 1]
break
}
}
if nexthop != '' {
// Route has a next-hop: match by prefix (first 4 segments)
prefix_parts := nexthop.split(':')
if prefix_parts.len < 4 {
return error('Invalid IPv6 next-hop format: ${nexthop}')
}
prefix := prefix_parts[0..4].join(':')
// Step 3: Match the one with the same prefix
for ip in ipv6_list {
ip_trimmed := ip.trim_space()
if ip_trimmed == '' {
continue
}
ip_parts := ip_trimmed.split(':')
if ip_parts.len >= 4 {
ip_prefix := ip_parts[0..4].join(':')
if ip_prefix == prefix {
return ip_trimmed
}
}
}
return error('No global IPv6 address found on ${iface} matching prefix ${prefix}')
} else {
// Direct route (no via): return the first IPv6 address in 400::/7 range
for ip in ipv6_list {
ip_trimmed := ip.trim_space()
if ip_trimmed == '' {
continue
}
// Check if IP is in 400::/7 range (starts with 4 or 5)
if ip_trimmed.starts_with('4') || ip_trimmed.starts_with('5') {
return ip_trimmed
}
}
return error('No global IPv6 address found on ${iface} in 400::/7 range')
}
}
// called before start if done
fn configure() ! {
// No configuration needed for kubectl
mut cfg := get()!
// Ensure data directory exists
osal.dir_ensure(cfg.data_dir)!
// Create manifests directory for auto-apply
manifests_dir := '${cfg.data_dir}/server/manifests'
osal.dir_ensure(manifests_dir)!
}
/////////////NORMALLY NO NEED TO TOUCH

View File

@@ -1,3 +0,0 @@
https://github.com/codescalers/kubecloud/blob/master/k3s/native_guide/k3s_killall.sh
still need to implement this

View File

@@ -1,44 +1,224 @@
# kubernetes_installer
# K3s Installer
Complete K3s cluster installer with multi-master HA support, worker nodes, and Mycelium IPv6 networking.
## Features
To get started
- **Multi-Master HA**: Install multiple master nodes with `--cluster-init`
- **Worker Nodes**: Add worker nodes to the cluster
- **Mycelium IPv6**: Automatic detection of Mycelium IPv6 addresses from the 400::/7 range
- **Lifecycle Management**: Start, stop, restart K3s via startupmanager (systemd/zinit/screen)
- **Join Scripts**: Auto-generate heroscripts for joining additional nodes
- **Complete Cleanup**: Destroy removes all K3s components, network interfaces, and data
## Quick Start
### Install First Master
```v
import incubaid.herolib.installers.something.kubernetes_installer as kubernetes_installer_installer
import incubaid.herolib.installers.virt.kubernetes_installer
heroscript := "
!!kubernetes_installer.configure name:'test'
password: '1234'
port: 7701
!!kubernetes_installer.configure
name:'k3s_master_1'
k3s_version:'v1.33.1'
node_name:'master-1'
mycelium_interface:'mycelium0'
!!kubernetes_installer.start name:'test' reset:1
!!kubernetes_installer.install_master name:'k3s_master_1'
!!kubernetes_installer.start name:'k3s_master_1'
"
kubernetes_installer_installer.play(heroscript=heroscript)!
//or we can call the default and do a start with reset
//mut installer:= kubernetes_installer_installer.get()!
//installer.start(reset:true)!
kubernetes_installer.play(heroscript: heroscript)!
```
## example heroscript
### Join Additional Master (HA)
```hero
```v
heroscript := "
!!kubernetes_installer.configure
homedir: '/home/user/kubernetes_installer'
username: 'admin'
password: 'secretpassword'
title: 'Some Title'
host: 'localhost'
port: 8888
name:'k3s_master_2'
node_name:'master-2'
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
!!kubernetes_installer.join_master name:'k3s_master_2'
!!kubernetes_installer.start name:'k3s_master_2'
"
kubernetes_installer.play(heroscript: heroscript)!
```
### Install Worker Node
```v
heroscript := "
!!kubernetes_installer.configure
name:'k3s_worker_1'
node_name:'worker-1'
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
!!kubernetes_installer.install_worker name:'k3s_worker_1'
!!kubernetes_installer.start name:'k3s_worker_1'
"
kubernetes_installer.play(heroscript: heroscript)!
```
## Configuration Options
| Field | Type | Default | Description |
|-------|------|---------|-------------|
| `name` | string | 'default' | Instance name |
| `k3s_version` | string | 'v1.33.1' | K3s version to install |
| `data_dir` | string | '~/hero/var/k3s' | Data directory for K3s |
| `node_name` | string | hostname | Unique node identifier |
| `mycelium_interface` | string | auto-detected | Mycelium interface name (auto-detected from 400::/7 route) |
| `token` | string | auto-generated | Cluster authentication token |
| `master_url` | string | - | Master URL for joining (e.g., 'https://[ipv6]:6443') |
| `node_ip` | string | auto-detected | Node IPv6 (auto-detected from Mycelium) |
## Actions
### Installation Actions
- `install_master` - Install first master node (generates token, uses --cluster-init)
- `join_master` - Join as additional master (requires token + master_url)
- `install_worker` - Install worker node (requires token + master_url)
### Lifecycle Actions
- `start` - Start K3s via startupmanager
- `stop` - Stop K3s
- `restart` - Restart K3s
- `destroy` - Complete cleanup (removes all K3s components)
### Utility Actions
- `get_kubeconfig` - Get kubeconfig content
- `generate_join_script` - Generate heroscript for joining nodes
## Requirements
- **OS**: Ubuntu (installer checks and fails on non-Ubuntu systems)
- **Mycelium**: Must be installed and running with interface in 400::/7 range
- **Root Access**: Required for installing system packages and managing network
## How It Works
### Mycelium IPv6 Detection
The installer automatically detects your Mycelium IPv6 address by:
1. Finding the 400::/7 route via the Mycelium interface
2. Extracting the next-hop IPv6 and getting the prefix (first 4 segments)
3. Matching global IPv6 addresses on the interface with the same prefix
4. Using the matched IPv6 for K3s `--node-ip`
This ensures K3s binds to the correct Mycelium IPv6 even if the server has other IPv6 addresses.
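You can reproduce the detection by hand; a rough manual equivalent of steps 1–3, assuming the interface is `mycelium0`:
```bash
# Step 1: find the 400::/7 route (note any next-hop after "via")
ip -6 route | grep '^400::/7'
# Step 2: list global IPv6 addresses on the interface
ip -6 addr show dev mycelium0 scope global | grep inet6 | awk '{print $2}' | cut -d/ -f1
# Step 3: pick the address whose first four segments match the next-hop prefix
# (for a direct route with no "via", the first address in 400::/7, i.e. starting with 4 or 5)
```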
### Cluster Setup
**First Master:**
- Uses `--cluster-init` flag
- Auto-generates secure token
- Configures IPv6 CIDRs: cluster=2001:cafe:42::/56, service=2001:cafe:43::/112
- Generates join script for other nodes
**Additional Masters:**
- Joins with `--server <master_url>`
- Requires token and master_url from first master
- Provides HA for control plane
**Workers:**
- Joins as agent with `--server <master_url>`
- Requires token and master_url from first master
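For orientation, these modes map roughly to the following K3s invocations (a sketch only; the installer composes the exact flags, and the placeholders are illustrative):
```bash
# First master: initialize the cluster with IPv6 CIDRs
k3s server --cluster-init --node-ip <MYCELIUM_IPV6> \
  --cluster-cidr 2001:cafe:42::/56 --service-cidr 2001:cafe:43::/112
# Additional master: join the existing control plane
k3s server --server 'https://[<MASTER_IPV6>]:6443' --token <TOKEN> --node-ip <MYCELIUM_IPV6>
# Worker: join as agent
k3s agent --server 'https://[<MASTER_IPV6>]:6443' --token <TOKEN> --node-ip <MYCELIUM_IPV6>
```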
### Cleanup
The `destroy` action performs complete cleanup:
- Stops K3s process
- Removes network interfaces (cni0, flannel.*, etc.)
- Unmounts kubelet mounts
- Removes data directory
- Cleans up iptables/ip6tables rules
- Removes CNI namespaces
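If `destroy` is interrupted, a manual fallback mirroring those steps looks roughly like this (paths per the defaults above):
```bash
killall -9 k3s 2>/dev/null || true
# Remove CNI/flannel interfaces created by K3s
for i in cni0 flannel.1 flannel-v6.1 kube-ipvs0 flannel-wg flannel-wg-v6; do
  ip link delete "$i" 2>/dev/null || true
done
# Unmount kubelet/K3s mounts (deepest first), then remove data
mount | grep -E '/run/k3s|/var/lib/kubelet' | awk '{print $3}' | sort -r | xargs -r -n1 umount -f
rm -rf /var/lib/rancher/k3s ~/hero/var/k3s /etc/rancher /var/lib/cni
```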
## Example Workflow
1. **Install first master on server1:**
```bash
hero run templates/examples.heroscript
# Note the token and IPv6 address displayed
```
2. **Join additional master on server2:**
```bash
# Edit examples.heroscript Section 2 with token and master_url
hero run templates/examples.heroscript
```
3. **Add worker on server3:**
```bash
# Edit examples.heroscript Section 3 with token and master_url
hero run templates/examples.heroscript
```
4. **Verify cluster:**
```bash
kubectl get nodes
kubectl get pods --all-namespaces
```
## Kubeconfig
The kubeconfig is located at: `<data_dir>/server/cred/admin.kubeconfig`
To use kubectl:
```bash
export KUBECONFIG=~/hero/var/k3s/server/cred/admin.kubeconfig
kubectl get nodes
```
Or copy to default location:
```bash
mkdir -p ~/.kube
cp ~/hero/var/k3s/server/cred/admin.kubeconfig ~/.kube/config
```
## Troubleshooting
**K3s won't start:**
- Check if Mycelium is running: `ip -6 addr show mycelium0`
- Verify 400::/7 route exists: `ip -6 route | grep 400::/7`
- Check logs: `journalctl -u k3s_* -f`
**Can't join cluster:**
- Verify token matches first master
- Ensure master_url uses correct IPv6 in brackets: `https://[ipv6]:6443`
- Check network connectivity over Mycelium: `ping6 <master_ipv6>`
**Cleanup issues:**
- Run destroy with sudo if needed
- Manually check for remaining processes: `pgrep -f k3s`
- Check for remaining mounts: `mount | grep k3s`
## See Also
- [K3s Documentation](https://docs.k3s.io/)
- [Mycelium Documentation](https://github.com/threefoldtech/mycelium)
- [Example Heroscript](templates/examples.heroscript)

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env hero
// ============================================================================
// K3s Cluster Installation Examples
// ============================================================================
//
// This file contains examples for installing K3s clusters with Mycelium IPv6
// networking. Choose the appropriate section based on your node type.
//
// Prerequisites:
// - Ubuntu OS
// - Mycelium installed and running
// - Mycelium interface (default: mycelium0)
// ============================================================================
// ============================================================================
// SECTION 1: Install First Master Node
// ============================================================================
// This creates the initial master node and initializes the cluster.
// The token will be auto-generated and displayed for use with other nodes.
!!kubernetes_installer.configure
name:'k3s_master_1'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'master-1'
// mycelium_interface:'mycelium0' // Optional: auto-detected if not specified
// Install as first master (will generate token and use --cluster-init)
!!kubernetes_installer.install_master name:'k3s_master_1'
// Start K3s
!!kubernetes_installer.start name:'k3s_master_1'
// Get kubeconfig (optional - to verify installation)
// !!kubernetes_installer.get_kubeconfig name:'k3s_master_1'
// Generate join script for other nodes (optional)
// !!kubernetes_installer.generate_join_script name:'k3s_master_1'
// ============================================================================
// SECTION 2: Join as Additional Master (HA Setup)
// ============================================================================
// Use this to add more master nodes for high availability.
// You MUST have the token and master_url from the first master.
/*
!!kubernetes_installer.configure
name:'k3s_master_2'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'master-2'
// mycelium_interface:'mycelium0' // Optional: auto-detected if not specified
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
// Join as additional master
!!kubernetes_installer.join_master name:'k3s_master_2'
// Start K3s
!!kubernetes_installer.start name:'k3s_master_2'
*/
// ============================================================================
// SECTION 3: Install Worker Node
// ============================================================================
// Use this to add worker nodes to the cluster.
// You MUST have the token and master_url from the first master.
/*
!!kubernetes_installer.configure
name:'k3s_worker_1'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'worker-1'
// mycelium_interface:'mycelium0' // Optional: auto-detected if not specified
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
// Install as worker
!!kubernetes_installer.install_worker name:'k3s_worker_1'
// Start K3s
!!kubernetes_installer.start name:'k3s_worker_1'
*/
// ============================================================================
// SECTION 4: Lifecycle Management
// ============================================================================
// Common operations for managing K3s
// Stop K3s
// !!kubernetes_installer.stop name:'k3s_master_1'
// Restart K3s
// !!kubernetes_installer.restart name:'k3s_master_1'
// Get kubeconfig
// !!kubernetes_installer.get_kubeconfig name:'k3s_master_1'
// Destroy K3s (complete cleanup)
// !!kubernetes_installer.destroy name:'k3s_master_1'
// ============================================================================
// NOTES:
// ============================================================================
// 1. Replace <TOKEN_FROM_FIRST_MASTER> with the actual token displayed after
// installing the first master
// 2. Replace <MASTER_IPV6> with the Mycelium IPv6 address of the first master
// 3. The data_dir defaults to ~/hero/var/k3s if not specified
// 4. The mycelium_interface defaults to 'mycelium0' if not specified
// 5. The k3s_version defaults to 'v1.33.1' if not specified
// 6. After installation, use kubectl to manage your cluster:
// - kubectl get nodes
// - kubectl get pods --all-namespaces
// 7. The kubeconfig is located at: <data_dir>/server/cred/admin.kubeconfig

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playcmds
import incubaid.herolib.ui.console
// ============================================================================
// K3s Join Additional Master (HA Setup)
// ============================================================================
// This script shows how to join an additional master node to an existing
// K3s cluster for high availability.
//
// Prerequisites:
// 1. First master must be running
// 2. You need the token from the first master
// 3. You need the master URL (IPv6 address and port)
// ============================================================================
console.print_header('='.repeat(80))
console.print_header('K3s Join Additional Master Node')
console.print_header('='.repeat(80))
// IMPORTANT: Replace these values with your actual cluster information
// You can get these from the first master's join script or by running:
// !!kubernetes_installer.generate_join_script name:"k3s_master_1"
master_token := 'YOUR_CLUSTER_TOKEN_HERE' // Get from first master
master_url := 'https://[YOUR_MASTER_IPV6]:6443' // First master's IPv6 address
join_master_script := '
!!kubernetes_installer.configure
name:"k3s_master_2"
k3s_version:"v1.33.1"
data_dir:"~/hero/var/k3s"
node_name:"master-2"
mycelium_interface:"mycelium"
token:"${master_token}"
master_url:"${master_url}"
!!kubernetes_installer.join_master name:"k3s_master_2"
!!kubernetes_installer.start name:"k3s_master_2"
'
console.print_header('⚠️ Before running, make sure to:')
console.print_header(' 1. Update master_token with your cluster token')
console.print_header(' 2. Update master_url with your first master IPv6')
console.print_header(' 3. Ensure first master is running')
console.print_header('')
// Uncomment the line below to actually run the join
// playcmds.run(heroscript: join_master_script)!
console.print_header('✅ Script ready. Uncomment playcmds.run() to execute.')
console.print_header('='.repeat(80))

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playcmds
import incubaid.herolib.ui.console
// ============================================================================
// K3s Join Worker Node
// ============================================================================
// This script shows how to join a worker node to an existing K3s cluster.
//
// Prerequisites:
// 1. At least one master must be running
// 2. You need the token from the master
// 3. You need the master URL (IPv6 address and port)
// ============================================================================
console.print_header('='.repeat(80))
console.print_header('K3s Join Worker Node')
console.print_header('='.repeat(80))
// IMPORTANT: Replace these values with your actual cluster information
// You can get these from the master's join script or by running:
// !!kubernetes_installer.generate_join_script name:"k3s_master_1"
master_token := 'YOUR_CLUSTER_TOKEN_HERE' // Get from master
master_url := 'https://[YOUR_MASTER_IPV6]:6443' // Master's IPv6 address
join_worker_script := '
!!kubernetes_installer.configure
name:"k3s_worker_1"
k3s_version:"v1.33.1"
data_dir:"~/hero/var/k3s"
node_name:"worker-1"
mycelium_interface:"mycelium"
token:"${master_token}"
master_url:"${master_url}"
!!kubernetes_installer.install_worker name:"k3s_worker_1"
!!kubernetes_installer.start name:"k3s_worker_1"
'
console.print_header('⚠️ Before running, make sure to:')
console.print_header(' 1. Update master_token with your cluster token')
console.print_header(' 2. Update master_url with your master IPv6')
console.print_header(' 3. Ensure master is running')
console.print_header('')
// Uncomment the line below to actually run the join
// playcmds.run(heroscript: join_worker_script)!
console.print_header('✅ Script ready. Uncomment playcmds.run() to execute.')
console.print_header('='.repeat(80))

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playcmds
import incubaid.herolib.ui.console
console.print_header('='.repeat(80))
console.print_header('K3s Install/Uninstall Lifecycle Test')
console.print_header('='.repeat(80))
// ============================================================================
// PHASE 1: Install Master
// ============================================================================
console.print_header('\n📦 PHASE 1: Installing K3s Master')
install_script := '
!!kubernetes_installer.configure
name:"k3s_test"
node_name:"test-master"
!!kubernetes_installer.install_master name:"k3s_test"
!!kubernetes_installer.start name:"k3s_test"
'
playcmds.run(heroscript: install_script)!
console.print_header('✅ Installation completed!')
// ============================================================================
// PHASE 2: Uninstall
// ============================================================================
console.print_header('\n🧹 PHASE 2: Uninstalling K3s')
uninstall_script := '
!!kubernetes_installer.configure
name:"k3s_test"
!!kubernetes_installer.destroy name:"k3s_test"
'
playcmds.run(heroscript: uninstall_script)!
console.print_header('✅ Uninstallation completed!')
console.print_header('\n' + '='.repeat(80))
console.print_header('✅ FULL LIFECYCLE TEST COMPLETED!')
console.print_header('='.repeat(80))

View File

@@ -4,15 +4,55 @@ This module provides a V client for interacting with Hetzner's Robot API, allowi
## 1. Configuration
Before using the module, you need to configure at least one client instance with your Hetzner Robot credentials. This is done using the `hetznermanager.configure` action in HeroScript. It's recommended to store your password in an environment variable for security.
Before using the module, you need to configure at least one client instance with your Hetzner Robot credentials. It's recommended to store your credentials in environment variables for security.
### 1.1 Environment Variables
Create an environment file (e.g., `hetzner_env.sh`) with your credentials:
```bash
export HETZNER_USER="your-robot-username" # Hetzner Robot API username
export HETZNER_PASSWORD="your-password" # Hetzner Robot API password
export HETZNER_SSHKEY_NAME="my-key" # Name of SSH key registered in Hetzner (NOT the key content)
```
Each script defines its own server name and whitelist at the top of the file.
Source the env file before running your scripts:
```bash
source hetzner_env.sh
./your_script.vsh
```
### 1.2 SSH Key Configuration
**Important:** The `sshkey` parameter expects the **name** of an SSH key already registered in your Hetzner Robot account, not the actual key content.
To register a new SSH key with Hetzner, use `key_create`:
```hs
!!hetznermanager.key_create
key_name: 'my-laptop-key'
data: 'ssh-ed25519 AAAAC3...' # The actual public key content
```
Once registered, you can reference the key by name in `configure`:
```hs
!!hetznermanager.configure
sshkey: 'my-laptop-key' # Reference the registered key by name
```
### 1.3 HeroScript Configuration
```hs
!!hetznermanager.configure
name:"main"
user:"<your_robot_username>"
user:"${HETZNER_USER}"
password:"${HETZNER_PASSWORD}"
whitelist:"2111181, 2392178" // Optional: comma-separated list of server IDs to operate on
sshkey: "name of sshkey as used with hetzner"
whitelist:"1234567" // Server ID(s) specific to your script
sshkey:"${HETZNER_SSHKEY_NAME}"
```
## 2. Usage
@@ -61,7 +101,7 @@ HeroScript provides a simple, declarative way to execute server operations. You
* `user` (string): Hetzner Robot username.
* `password` (string): Hetzner Robot password.
* `whitelist` (string, optional): Comma-separated list of server IDs to restrict operations to.
* `sshkey` (string, optional): Default public SSH key to deploy in rescue mode.
* `sshkey` (string, optional): **Name** of an SSH key registered in your Hetzner account (not the key content).
* `!!hetznermanager.server_rescue`: Activates the rescue system.
* `instance` (string, optional): The client instance to use (defaults to 'default').
* `server_name` or `id` (string/int): Identifies the target server.

View File

@@ -1,12 +1,16 @@
module hetznermanager
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
import incubaid.herolib.osal.core as osal
import incubaid.herolib.builder
import os
// Ubuntu installation timeout constants
const install_timeout_seconds = 600 // 10 minutes max for installation
const install_poll_interval_seconds = 5 // Check installation status every 5 seconds
const install_progress_interval = 6 // Show progress every 6 polls (30 seconds)
// ///////////////////////////RESCUE
pub struct RescueInfo {
@@ -51,19 +55,29 @@ fn (mut h HetznerManager) server_rescue_internal(args_ ServerRescueArgs) !Server
if serverinfo.rescue && !args.reset {
if osal.ssh_test(address: serverinfo.server_ip, port: 22)! == .ok {
console.print_debug('test server ${serverinfo.server_name} is in rescue mode?')
console.print_debug('test server ${serverinfo.server_name} - checking if actually in rescue mode...')
mut b := builder.new()!
mut n := b.node_new(ipaddr: serverinfo.server_ip)!
res := n.exec(cmd: 'ls /root/.oldroot/nfs/install/installimage', stdout: false) or {
'ERROR'
}
if res.contains('nfs/install/installimage') {
// Check if the server is actually in rescue mode using file_exists
if n.file_exists('/root/.oldroot/nfs/install/installimage') {
console.print_debug('server ${serverinfo.server_name} is in rescue mode')
return serverinfo
}
// Server is reachable but not in rescue mode - check if it's running Ubuntu
// This happens when the API reports rescue=true but the server already booted into the installed OS
if n.platform == .ubuntu {
console.print_debug('server ${serverinfo.server_name} is already running Ubuntu, not in rescue mode')
} else {
console.print_debug('server ${serverinfo.server_name} is running ${n.platform}, not in rescue mode')
}
// Server is not in rescue mode - the rescue flag in API is stale
serverinfo.rescue = false
} else {
// SSH not reachable - server might be rebooting or in unknown state
serverinfo.rescue = false
}
}
// only do it if its not in rescue yet
if serverinfo.rescue == false || args.reset {
@@ -132,16 +146,48 @@ pub mut:
hero_install bool
hero_install_compile bool
raid bool
install_timeout int = install_timeout_seconds // timeout in seconds for installation
reinstall bool // if true, always reinstall even if Ubuntu is already running
}
pub fn (mut h HetznerManager) ubuntu_install(args ServerInstallArgs) !&builder.Node {
h.check_whitelist(name: args.name, id: args.id)!
mut serverinfo := h.server_rescue(
mut serverinfo := h.server_info_get(id: args.id, name: args.name)!
// Check if Ubuntu is already installed and running (skip reinstallation unless forced)
if !args.reinstall {
if osal.ssh_test(address: serverinfo.server_ip, port: 22)! == .ok {
mut b := builder.new()!
mut n := b.node_new(ipaddr: serverinfo.server_ip)!
// Check if server is running Ubuntu and NOT in rescue mode using Node's methods
is_rescue := n.file_exists('/root/.oldroot/nfs/install/installimage')
if n.platform == .ubuntu && !is_rescue {
console.print_debug('server ${serverinfo.server_name} is already running Ubuntu, skipping installation')
// Still install hero if requested
if args.hero_install {
n.exec_silent('apt update && apt install -y mc redis libpq5 libpq-dev')!
n.hero_install(compile: args.hero_install_compile)!
}
return n
}
}
}
// Server needs Ubuntu installation - go into rescue mode
serverinfo = h.server_rescue(
id: args.id
name: args.name
wait: true
)!
// Get the SSH key data to copy to the installed system
mykey := h.key_get(h.sshkey)!
ssh_pubkey := mykey.data
mut b := builder.new()!
mut n := b.node_new(ipaddr: serverinfo.server_ip)!
@@ -155,11 +201,15 @@ pub fn (mut h HetznerManager) ubuntu_install(args ServerInstallArgs) !&builder.N
rstr = '-r yes -l 1 '
}
n.exec(
cmd: '
set -ex
// Write the installation script to the server
// We run it with nohup in the background to avoid SSH timeout during long installations
install_script := '#!/bin/bash
set -e
echo "go into install mode, try to install ubuntu 24.04"
# Cleanup any previous installation state
rm -f /tmp/install_complete /tmp/install_failed
if [ -d /sys/firmware/efi ]; then
echo "UEFI system detected → need ESP"
PARTS="/boot/efi:esp:256M,swap:swap:4G,/boot:ext3:1024M,/:btrfs:all"
@@ -168,13 +218,89 @@ pub fn (mut h HetznerManager) ubuntu_install(args ServerInstallArgs) !&builder.N
PARTS="swap:swap:4G,/boot:ext3:1024M,/:btrfs:all"
fi
# installimage invocation
/root/.oldroot/nfs/install/installimage -a -n "${args.name}" ${rstr} -i /root/.oldroot/nfs/images/Ubuntu-2404-noble-amd64-base.tar.gz -f yes -t yes -p "\$PARTS"
# installimage invocation with error handling
if ! /root/.oldroot/nfs/install/installimage -a -n "${args.name}" ${rstr} -i /root/.oldroot/nfs/images/Ubuntu-2404-noble-amd64-base.tar.gz -f yes -t yes -p "\$PARTS"; then
echo "INSTALL_FAILED" > /tmp/install_failed
echo "installimage failed, check /root/debug.txt for details"
exit 1
fi
# Copy SSH key to the installed system before rebooting
# After installimage, the new system is mounted at /mnt
echo "Copying SSH key to installed system..."
mkdir -p /mnt/root/.ssh
chmod 700 /mnt/root/.ssh
echo "${ssh_pubkey}" > /mnt/root/.ssh/authorized_keys
chmod 600 /mnt/root/.ssh/authorized_keys
echo "SSH key copied successfully"
# Mark installation as complete before rebooting
# sync to ensure marker file is written to disk before reboot
echo "INSTALL_COMPLETE" > /tmp/install_complete
sync
reboot
'
n.file_write('/tmp/ubuntu_install.sh', install_script)!
// Start the installation in background using nohup to avoid SSH timeout
// The script will run independently of the SSH session
n.exec(
cmd: 'chmod +x /tmp/ubuntu_install.sh && nohup /tmp/ubuntu_install.sh > /tmp/install.log 2>&1 &'
stdout: false
)!
console.print_debug('Installation script started in background, waiting for completion...')
// Poll for completion by checking if the marker file exists or if the server goes down (reboot)
max_iterations := args.install_timeout / install_poll_interval_seconds
mut install_complete := false
for i := 0; i < max_iterations; i++ {
time.sleep(install_poll_interval_seconds * time.second)
// Check if server is still up and installation status
result := n.exec(
cmd: 'cat /tmp/install_failed 2>/dev/null && echo "FAILED" || (cat /tmp/install_complete 2>/dev/null || echo "NOT_COMPLETE")'
stdout: false
) or {
// SSH connection failed - server might be rebooting after successful installation
console.print_debug('SSH connection lost - server is likely rebooting after installation')
install_complete = true
break
}
// Check for installation failure
if result.contains('INSTALL_FAILED') || result.contains('FAILED') {
// Try to get error details from install log
error_log := n.exec(
cmd: 'tail -20 /tmp/install.log 2>/dev/null || cat /root/debug.txt 2>/dev/null || echo "No error details available"'
stdout: false
) or { 'Could not retrieve error details' }
return error('Installation failed: ${error_log.trim_space()}')
}
if result.contains('INSTALL_COMPLETE') {
console.print_debug('Installation complete, server should reboot soon')
install_complete = true
break
}
// Show progress at configured interval
if i % install_progress_interval == 0 {
// Try to get the last line of the install log for progress
log_tail := n.exec(
cmd: 'tail -3 /tmp/install.log 2>/dev/null || echo "waiting..."'
stdout: false
) or { 'waiting...' }
console.print_debug('Installation in progress: ${log_tail.trim_space()}')
}
}
if !install_complete {
return error('Installation timed out after ${args.install_timeout} seconds')
}
os.execute_opt('ssh-keygen -R ${serverinfo.server_ip}')!
console.print_debug('server ${serverinfo.server_name} now has Ubuntu installed and should be restarting.')
@@ -187,15 +313,20 @@ pub fn (mut h HetznerManager) ubuntu_install(args ServerInstallArgs) !&builder.N
console.print_debug('server ${serverinfo.server_name} is reachable over ping, now trying ssh.')
// wait 20 sec to make sure ssh is there
osal.ssh_wait(address: serverinfo.server_ip, timeout: 20)!
// wait 20 seconds to make sure ssh is there (timeout is in milliseconds)
osal.ssh_wait(address: serverinfo.server_ip, timeout: 20000)!
console.print_debug('server ${serverinfo.server_name} is reachable over ssh, now installing hero if requested.')
// Create a new node connection to the freshly installed Ubuntu system
// The old 'n' was connected to the rescue system which no longer exists after reboot
mut b2 := builder.new()!
mut n2 := b2.node_new(ipaddr: serverinfo.server_ip)!
if args.hero_install {
n.exec_silent('apt update && apt install -y mc redis libpq5 libpq-dev')!
n.hero_install(compile: args.hero_install_compile)!
n2.exec_silent('apt update && apt install -y mc redis libpq5 libpq-dev')!
n2.hero_install(compile: args.hero_install_compile)!
}
return n
return n2
}

View File

@@ -72,8 +72,8 @@ pub mut:
host string = 'localhost'
port int = 3000
open bool = true // whether to open the browser automatically
watch_changes bool // whether to watch for changes in docs and rebuild automatically
skip_generate bool // whether to skip generation (useful when docs are pre-generated, e.g., from atlas)
watch_changes bool = false // whether to watch for changes in docs and rebuild automatically
skip_generate bool = false // whether to skip generation (useful when docs are pre-generated, e.g., from atlas)
}
pub fn (mut s DocSite) open(args DevArgs) ! {

View File

@@ -142,10 +142,6 @@ fn (mut generator SiteGenerator) page_generate(args_ Page) ! {
pagefile.write(c)!
generator.client.copy_pages(collection_name, page_name, pagefile.path_dir()) or {
generator.error("Couldn't copy pages for page:'${page_name}' in collection:'${collection_name}'\nERROR:${err}")!
return
}
generator.client.copy_images(collection_name, page_name, pagefile.path_dir()) or {
generator.error("Couldn't copy images for page:'${page_name}' in collection:'${collection_name}'\nERROR:${err}")!
return

View File

@@ -0,0 +1,30 @@
module docusaurus
pub interface IDocClient {
mut:
// Path methods - get absolute paths to resources
get_page_path(collection_name string, page_name string) !string
get_file_path(collection_name string, file_name string) !string
get_image_path(collection_name string, image_name string) !string
// Existence checks - verify if resources exist
page_exists(collection_name string, page_name string) bool
file_exists(collection_name string, file_name string) bool
image_exists(collection_name string, image_name string) bool
// Content retrieval
get_page_content(collection_name string, page_name string) !string
// Listing methods - enumerate resources
list_collections() ![]string
list_pages(collection_name string) ![]string
list_files(collection_name string) ![]string
list_images(collection_name string) ![]string
list_pages_map() !map[string][]string
list_markdown() !string
// Image operations
// get_page_paths(collection_name string, page_name string) !(string, []string)
copy_images(collection_name string, page_name string, destination_path string) !
copy_files(collection_name string, page_name string, destination_path string) !
}
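// Usage sketch (hypothetical helper, not part of the interface contract): any
// client implementing IDocClient can be driven generically, e.g. to dump all pages.
fn example_list_all(mut c IDocClient) ! {
	for col in c.list_collections()! {
		for page in c.list_pages(col)! {
			content := c.get_page_content(col, page)!
			println('${col}:${page} (${content.len} bytes)')
		}
	}
}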

View File

@@ -0,0 +1,536 @@
# AI Instructions for Site Module HeroScript
This document provides comprehensive instructions for AI agents working with the Site module's HeroScript format.
## HeroScript Format Overview
HeroScript is a declarative configuration language with the following characteristics:
### Basic Syntax
```heroscript
!!actor.action
param1: "value1"
param2: "value2"
multiline_param: "
This is a multiline value.
It can span multiple lines.
"
arg1 arg2 // Arguments without keys
```
**Key Rules:**
1. Actions start with `!!` followed by `actor.action` format
2. Parameters are indented and use `key: "value"` or `key: value` format
3. Values with spaces must be quoted
4. Multiline values are supported with quotes
5. Arguments without keys are space-separated
6. Comments start with `//`
## Site Module Actions
### 1. Site Configuration (`!!site.config`)
**Purpose:** Define the main site configuration including title, description, and metadata.
**Required Parameters:**
- `name`: Site identifier (will be normalized to snake_case)
**Optional Parameters:**
- `title`: Site title (default: "Documentation Site")
- `description`: Site description
- `tagline`: Site tagline
- `favicon`: Path to favicon (default: "img/favicon.png")
- `image`: Default site image (default: "img/tf_graph.png")
- `copyright`: Copyright text
- `url`: Main site URL
- `base_url`: Base URL path (default: "/")
- `url_home`: Home page path
**Example:**
```heroscript
!!site.config
name: "my_documentation"
title: "My Documentation Site"
description: "Comprehensive technical documentation"
tagline: "Learn everything you need"
url: "https://docs.example.com"
base_url: "/"
```
**AI Guidelines:**
- Always include `name` parameter
- Use descriptive titles and descriptions
- Ensure URLs are properly formatted with protocol
### 2. Metadata Configuration (`!!site.config_meta`)
**Purpose:** Override specific metadata for SEO purposes.
**Optional Parameters:**
- `title`: SEO-specific title (overrides site.config title for meta tags)
- `image`: SEO-specific image (overrides site.config image for og:image)
- `description`: SEO-specific description
**Example:**
```heroscript
!!site.config_meta
title: "My Docs - Complete Guide"
image: "img/social-preview.png"
description: "The ultimate guide to using our platform"
```
**AI Guidelines:**
- Use only when SEO metadata needs to differ from main config
- Keep titles concise for social media sharing
- Use high-quality images for social previews
### 3. Navigation Bar (`!!site.navbar` or `!!site.menu`)
**Purpose:** Configure the main navigation bar.
**Optional Parameters:**
- `title`: Navigation title (defaults to site.config title)
- `logo_alt`: Logo alt text
- `logo_src`: Logo image path
- `logo_src_dark`: Dark mode logo path
**Example:**
```heroscript
!!site.navbar
title: "My Site"
logo_alt: "My Site Logo"
logo_src: "img/logo.svg"
logo_src_dark: "img/logo-dark.svg"
```
**AI Guidelines:**
- Use `!!site.navbar` for modern syntax (preferred)
- `!!site.menu` is supported for backward compatibility
- Provide both light and dark logos when possible
### 4. Navigation Items (`!!site.navbar_item` or `!!site.menu_item`)
**Purpose:** Add items to the navigation bar.
**Required Parameters (one of):**
- `to`: Internal link path
- `href`: External URL
**Optional Parameters:**
- `label`: Display text (required in practice)
- `position`: "left" or "right" (default: "right")
**Example:**
```heroscript
!!site.navbar_item
label: "Documentation"
to: "docs/intro"
position: "left"
!!site.navbar_item
label: "GitHub"
href: "https://github.com/myorg/repo"
position: "right"
```
**AI Guidelines:**
- Use `to` for internal navigation
- Use `href` for external links
- Position important items on the left, secondary items on the right
### 5. Footer Configuration (`!!site.footer`)
**Purpose:** Configure footer styling.
**Optional Parameters:**
- `style`: "dark" or "light" (default: "dark")
**Example:**
```heroscript
!!site.footer
style: "dark"
```
### 6. Footer Items (`!!site.footer_item`)
**Purpose:** Add links to the footer, grouped by title.
**Required Parameters:**
- `title`: Group title (items with same title are grouped together)
- `label`: Link text
**Required Parameters (one of):**
- `to`: Internal link path
- `href`: External URL
**Example:**
```heroscript
!!site.footer_item
title: "Docs"
label: "Introduction"
to: "intro"
!!site.footer_item
title: "Docs"
label: "API Reference"
to: "api"
!!site.footer_item
title: "Community"
label: "Discord"
href: "https://discord.gg/example"
```
**AI Guidelines:**
- Group related links under the same title
- Use consistent title names across related items
- Provide both internal and external links as appropriate
### 7. Page Categories (`!!site.page_category`)
**Purpose:** Create a section/category to organize pages.
**Required Parameters:**
- `name`: Category identifier (snake_case)
**Optional Parameters:**
- `label`: Display name (auto-generated from name if not provided)
- `position`: Manual sort order (auto-incremented if not specified)
- `path`: URL path segment (defaults to normalized label)
**Example:**
```heroscript
!!site.page_category
name: "getting_started"
label: "Getting Started"
position: 100
!!site.page_category
name: "advanced_topics"
label: "Advanced Topics"
```
**AI Guidelines:**
- Use descriptive snake_case names
- Let label be auto-generated when possible (name_fix converts to Title Case)
- Categories persist for all subsequent pages until a new category is declared
- Position values should leave gaps (100, 200, 300) for future insertions
### 8. Pages (`!!site.page`)
**Purpose:** Define individual pages in the site.
**Required Parameters:**
- `src`: Source reference as `collection:page_name` (required for first page in a collection)
**Optional Parameters:**
- `name`: Page identifier (extracted from src if not provided)
- `title`: Page title (extracted from markdown if not provided)
- `description`: Page description for metadata
- `slug`: Custom URL slug
- `position`: Manual sort order (auto-incremented if not specified)
- `draft`: Mark as draft (default: false)
- `hide_title`: Hide title in rendering (default: false)
- `path`: Custom path (defaults to current category name)
- `category`: Override current category
- `title_nr`: Title numbering level
**Example:**
```heroscript
!!site.page src: "docs:introduction"
description: "Introduction to the platform"
slug: "/"
!!site.page src: "quickstart"
description: "Get started in 5 minutes"
!!site.page src: "installation"
title: "Installation Guide"
description: "How to install and configure"
position: 10
```
**AI Guidelines:**
- **Collection Persistence:** Specify collection once (e.g., `docs:introduction`), then subsequent pages only need page name (e.g., `quickstart`)
- **Category Persistence:** Pages belong to the most recently declared category
- **Title Extraction:** Prefer extracting titles from markdown files
- **Position Management:** Use automatic positioning unless specific order is required
- **Description Required:** Always provide descriptions for SEO
- **Slug Usage:** Use slug for special pages like homepage (`slug: "/"`)
### 9. Import External Content (`!!site.import`)
**Purpose:** Import content from external sources.
**Optional Parameters:**
- `name`: Import identifier
- `url`: Git URL or HTTP URL
- `path`: Local file system path
- `dest`: Destination path in site
- `replace`: Comma-separated key:value pairs for variable replacement
- `visible`: Whether imported content is visible (default: true)
**Example:**
```heroscript
!!site.import
url: "https://github.com/example/docs"
dest: "external"
replace: "VERSION:1.0.0,PROJECT:MyProject"
visible: true
```
**AI Guidelines:**
- Use for shared documentation across multiple sites
- Replace variables using `${VARIABLE}` syntax in source content
- Set `visible: false` for imported templates or partials
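For example, with the `replace` mapping above, `${VARIABLE}` placeholders in the imported files are substituted before the content lands in the site (content shown is illustrative):
```
Source line (external repo):  Welcome to ${PROJECT} version ${VERSION}.
After import:                 Welcome to MyProject version 1.0.0.
```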
### 10. Publish Destinations (`!!site.publish` and `!!site.publish_dev`)
**Purpose:** Define where to publish the built site.
**Optional Parameters:**
- `path`: File system path or URL
- `ssh_name`: SSH connection name for remote deployment
**Example:**
```heroscript
!!site.publish
path: "/var/www/html/docs"
ssh_name: "production_server"
!!site.publish_dev
path: "/tmp/docs-preview"
```
**AI Guidelines:**
- Use `!!site.publish` for production deployments
- Use `!!site.publish_dev` for development/preview deployments
- Can specify multiple destinations
## File Organization Best Practices
### Naming Convention
Use numeric prefixes to control execution order:
```
0_config.heroscript # Site configuration
1_navigation.heroscript # Menu and footer
2_intro.heroscript # Introduction pages
3_guides.heroscript # User guides
4_reference.heroscript # API reference
```
**AI Guidelines:**
- Always use numeric prefixes (0_, 1_, 2_, etc.)
- Leave gaps in numbering (0, 10, 20) for future insertions
- Group related configurations in the same file
- Process order matters: config → navigation → pages
### Execution Order Rules
1. **Configuration First:** `!!site.config` must be processed before other actions
2. **Categories Before Pages:** Declare `!!site.page_category` before pages in that category
3. **Collection Persistence:** First page in a collection must specify `collection:page_name`
4. **Category Persistence:** Pages inherit the most recent category declaration
## Common Patterns
### Pattern 1: Simple Documentation Site
```heroscript
!!site.config
    name: "simple_docs"
    title: "Simple Documentation"

!!site.navbar
    title: "Simple Docs"

!!site.page src: "docs:index"
    description: "Welcome page"
    slug: "/"

!!site.page src: "getting-started"
    description: "Getting started guide"

!!site.page src: "api"
    description: "API reference"
```
### Pattern 2: Multi-Section Documentation
```heroscript
!!site.config
    name: "multi_section_docs"
    title: "Complete Documentation"

!!site.page_category
    name: "introduction"
    label: "Introduction"

!!site.page src: "docs:welcome"
    description: "Welcome to our documentation"

!!site.page src: "overview"
    description: "Platform overview"

!!site.page_category
    name: "tutorials"
    label: "Tutorials"

!!site.page src: "tutorial_basics"
    description: "Basic tutorial"

!!site.page src: "tutorial_advanced"
    description: "Advanced tutorial"
```
### Pattern 3: Complex Site with External Links
```heroscript
!!site.config
    name: "complex_site"
    title: "Complex Documentation Site"
    url: "https://docs.example.com"

!!site.navbar
    title: "My Platform"
    logo_src: "img/logo.svg"

!!site.navbar_item
    label: "Docs"
    to: "docs/intro"
    position: "left"

!!site.navbar_item
    label: "API"
    to: "api"
    position: "left"

!!site.navbar_item
    label: "GitHub"
    href: "https://github.com/example/repo"
    position: "right"

!!site.footer
    style: "dark"

!!site.footer_item
    title: "Documentation"
    label: "Getting Started"
    to: "docs/intro"

!!site.footer_item
    title: "Community"
    label: "Discord"
    href: "https://discord.gg/example"

!!site.page_category
    name: "getting_started"

!!site.page src: "docs:introduction"
    description: "Introduction to the platform"
    slug: "/"

!!site.page src: "installation"
    description: "Installation guide"
```
## Error Prevention
### Common Mistakes to Avoid
1. **Missing Collection on First Page:**
```heroscript
// WRONG - no collection specified
!!site.page src: "introduction"

// CORRECT
!!site.page src: "docs:introduction"
```
2. **Category Without Name:**
```heroscript
// WRONG - missing name
!!site.page_category
    label: "Getting Started"

// CORRECT
!!site.page_category
    name: "getting_started"
    label: "Getting Started"
```
3. **Missing Description:**
```heroscript
// WRONG - no description
!!site.page src: "docs:intro"

// CORRECT
!!site.page src: "docs:intro"
    description: "Introduction to the platform"
```
4. **Incorrect File Ordering:**
```
# WRONG - pages before config
pages.heroscript
config.heroscript
# CORRECT - config first
0_config.heroscript
1_pages.heroscript
```
## Validation Checklist
When generating HeroScript for the Site module, verify:
- [ ] `!!site.config` includes `name` parameter
- [ ] All pages have `description` parameter
- [ ] First page in each collection specifies `collection:page_name`
- [ ] Categories are declared before their pages
- [ ] Files use numeric prefixes for ordering
- [ ] Navigation items have either `to` or `href`
- [ ] Footer items are grouped by `title`
- [ ] External URLs include protocol (https://)
- [ ] Paths don't have trailing slashes unless intentional
- [ ] Draft pages are marked with `draft: true`
## Integration with V Code
When working with the Site module in V code:
```v
import incubaid.herolib.web.site
import incubaid.herolib.core.playbook
// Process HeroScript files
mut plbook := playbook.new(path: '/path/to/heroscripts')!
site.play(mut plbook)!
// Access configured site
mut mysite := site.get(name: 'my_site')!
// Iterate through pages
for page in mysite.pages {
    println('Page: ${page.name} - ${page.description}')
}

// Iterate through sections
for section in mysite.sections {
    println('Section: ${section.label}')
}
```
## Summary
The Site module's HeroScript format provides a declarative way to configure websites with:
- Clear separation of concerns (config, navigation, content)
- Automatic ordering and organization
- Collection and category persistence for reduced repetition
- Flexible metadata and SEO configuration
- Support for both internal and external content
Always follow the execution order rules, use numeric file prefixes, and provide complete metadata for best results.

View File

@@ -3,7 +3,7 @@ module site
import incubaid.herolib.core.texttools
__global (
mywebsites map[string]&Site
websites map[string]&Site
)
@[params]
@@ -15,13 +15,7 @@ pub mut:
pub fn new(args FactoryArgs) !&Site {
name := texttools.name_fix(args.name)
// Check if a site with this name already exists
if name in mywebsites {
// Return the existing site instead of creating a new one
return get(name: name)!
}
mywebsites[name] = &Site{
websites[name] = &Site{
siteconfig: SiteConfig{
name: name
}
@@ -31,17 +25,18 @@ pub fn new(args FactoryArgs) !&Site {
pub fn get(args FactoryArgs) !&Site {
name := texttools.name_fix(args.name)
mut sc := mywebsites[name] or { return error('siteconfig with name "${name}" does not exist') }
mut sc := websites[name] or { return error('siteconfig with name "${name}" does not exist') }
return sc
}
pub fn exists(args FactoryArgs) bool {
name := texttools.name_fix(args.name)
return name in mywebsites
mut sc := websites[name] or { return false }
return true
}
pub fn default() !&Site {
if mywebsites.len == 0 {
if websites.len == 0 {
return new(name: 'default')!
}
return get()!
@@ -49,5 +44,5 @@ pub fn default() !&Site {
// list returns all site names that have been created
pub fn list() []string {
return mywebsites.keys()
return websites.keys()
}

View File

@@ -1,143 +0,0 @@
module site
import json
// Top-level config
pub struct NavConfig {
pub mut:
my_sidebar []NavItem
// myTopbar []NavItem //not used yet
// myFooter []NavItem //not used yet
}
// -------- Variant Type --------
pub type NavItem = NavDoc | NavCat | NavLink
// --------- DOC ITEM ----------
pub struct NavDoc {
pub:
id string // is the page id
label string
}
// --------- CATEGORY ----------
pub struct NavCat {
pub mut:
label string
collapsible bool
collapsed bool
items []NavItem
}
// --------- LINK ----------
pub struct NavLink {
pub:
label string
href string
description string
}
// -------- JSON SERIALIZATION --------
// NavItemJson is used for JSON export with type discrimination
pub struct NavItemJson {
pub mut:
type_field string @[json: 'type']
// For doc
id string @[omitempty]
label string @[omitempty]
// For link
href string @[omitempty]
description string @[omitempty]
// For category
collapsible bool
collapsed bool
items []NavItemJson @[omitempty]
}
// Convert a single NavItem to JSON-serializable format
fn nav_item_to_json(item NavItem) !NavItemJson {
return match item {
NavDoc {
NavItemJson{
type_field: 'doc'
id: item.id
label: item.label
collapsible: false
collapsed: false
}
}
NavLink {
NavItemJson{
type_field: 'link'
label: item.label
href: item.href
description: item.description
collapsible: false
collapsed: false
}
}
NavCat {
mut json_items := []NavItemJson{}
for sub_item in item.items {
json_items << nav_item_to_json(sub_item)!
}
NavItemJson{
type_field: 'category'
label: item.label
collapsible: item.collapsible
collapsed: item.collapsed
items: json_items
}
}
}
}
// Convert entire NavConfig sidebar to JSON string
fn (nc NavConfig) sidebar_to_json() !string {
mut result := []NavItemJson{}
for item in nc.my_sidebar {
result << nav_item_to_json(item)!
}
return json.encode_pretty(result)
}
// // Convert entire NavConfig topbar to JSON-serializable array
// fn (nc NavConfig) topbar_to_json() ![]NavItemJson {
// mut result := []NavItemJson{}
// for item in nc.myTopbar {
// result << nav_item_to_json(item)!
// }
// return result
// }
// // Convert entire NavConfig footer to JSON-serializable array
// fn (nc NavConfig) footer_to_json() ![]NavItemJson {
// mut result := []NavItemJson{}
// for item in nc.myFooter {
// result << nav_item_to_json(item)!
// }
// return result
// }
// // Export topbar as formatted JSON string
// pub fn (nc NavConfig) jsondump_topbar() !string {
// items := nc.topbar_to_json()!
// return json.encode_pretty(items)
// }
// // Export footer as formatted JSON string
// pub fn (nc NavConfig) jsondump_footer() !string {
// items := nc.footer_to_json()!
// return json.encode_pretty(items)
// }
// // Export all navigation as object with sidebar, topbar, footer
// pub fn (nc NavConfig) jsondump_all() !string {
// all_nav := map[string][]NavItemJson{
// 'sidebar': nc.sidebar_to_json()!
// 'topbar': nc.topbar_to_json()!
// 'footer': nc.footer_to_json()!
// }
// return json.encode_pretty(all_nav)
// }

View File

@@ -1,12 +1,16 @@
module site
// Page represents a single documentation page
pub struct Page {
pub mut:
id string // Unique identifier: "collection:page_name"
title string // Display title (optional, extracted from markdown if empty)
description string // Brief description for metadata
draft bool // Mark as draft (hidden from navigation)
hide_title bool // Hide the title when rendering
src string // Source reference (same as id in this format)
name string
title string
description string
draft bool
position int
hide_title bool
src string @[required] // always in format collection:page_name, can use the default collection if no : specified
path string @[required] // is without the page name, so just the path to the folder where the page is in
section_name string
title_nr int
slug string
}

View File

@@ -1,9 +0,0 @@
module site
@[heap]
pub struct Site {
pub mut:
pages map[string]Page // key: "collection:page_name"
nav NavConfig // Navigation sidebar configuration
siteconfig SiteConfig // Full site configuration
}

View File

@@ -0,0 +1,18 @@
module site
@[heap]
pub struct Site {
pub mut:
pages []Page
sections []Section
siteconfig SiteConfig
}
pub struct Section {
pub mut:
name string
position int
path string
label string
description string
}

View File

@@ -4,93 +4,222 @@ import os
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// Main entry point for processing site HeroScript
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'site.') {
return
}
console.print_header('Processing Site Configuration')
// ============================================================
// STEP 1: Initialize core site configuration
// ============================================================
console.print_item('Step 1: Loading site configuration')
mut config_action := plbook.ensure_once(filter: 'site.config')!
mut p := config_action.params
name := p.get_default('name', 'default')!
mut p := config_action.params
name := p.get_default('name', 'default')! // Use 'default' as fallback name
// configure the website
mut website := new(name: name)!
mut config := &website.siteconfig
// Load core configuration
config.name = texttools.name_fix(name)
config.title = p.get_default('title', 'Documentation Site')!
config.description = p.get_default('description', 'Comprehensive documentation built with Docusaurus.')!
config.tagline = p.get_default('tagline', 'Your awesome documentation')!
config.favicon = p.get_default('favicon', 'img/favicon.png')!
config.image = p.get_default('image', 'img/tf_graph.png')!
config.copyright = p.get_default('copyright', '© ${time.now().year} Example Organization')!
config.copyright = p.get_default('copyright', '© ' + time.now().year.str() +
' Example Organization')!
config.url = p.get_default('url', '')!
config.base_url = p.get_default('base_url', '/')!
config.url_home = p.get_default('url_home', '')!
config_action.done = true
// ============================================================
// STEP 2: Apply optional metadata overrides
// ============================================================
console.print_item('Step 2: Applying metadata overrides')
if plbook.exists_once(filter: 'site.config_meta') {
mut meta_action := plbook.get(filter: 'site.config_meta')!
// Process !!site.config_meta for specific metadata overrides
mut meta_action := plbook.ensure_once(filter: 'site.config_meta')!
mut p_meta := meta_action.params
// If 'title' is present in site.config_meta, it overrides. Otherwise, meta_title remains empty or uses site.config.title logic in docusaurus model.
config.meta_title = p_meta.get_default('title', config.title)!
// If 'image' is present in site.config_meta, it overrides. Otherwise, meta_image remains empty or uses site.config.image logic.
config.meta_image = p_meta.get_default('image', config.image)!
// If 'description' is present in site.config_meta, it overrides the main description
if p_meta.exists('description') {
config.description = p_meta.get('description')!
}
config_action.done = true // Mark the action as done
meta_action.done = true
}
// ============================================================
// STEP 3: Configure content imports
// ============================================================
console.print_item('Step 3: Configuring content imports')
play_imports(mut plbook, mut config)!
// ============================================================
// STEP 4: Configure navigation menu
// ============================================================
console.print_item('Step 4: Configuring navigation menu')
play_navbar(mut plbook, mut config)!
// ============================================================
// STEP 5: Configure footer
// ============================================================
console.print_item('Step 5: Configuring footer')
play_import(mut plbook, mut config)!
play_menu(mut plbook, mut config)!
play_footer(mut plbook, mut config)!
// ============================================================
// STEP 6: Configure announcement bar (optional)
// ============================================================
console.print_item('Step 6: Configuring announcement bar (if present)')
play_announcement(mut plbook, mut config)!
// ============================================================
// STEP 7: Configure publish destinations
// ============================================================
console.print_item('Step 7: Configuring publish destinations')
play_publishing(mut plbook, mut config)!
// ============================================================
// STEP 8: Build pages and navigation structure
// ============================================================
console.print_item('Step 8: Processing pages and building navigation')
play_publish(mut plbook, mut config)!
play_publish_dev(mut plbook, mut config)!
play_pages(mut plbook, mut website)!
console.print_green('Site configuration complete')
}
fn play_import(mut plbook PlayBook, mut config SiteConfig) ! {
mut import_actions := plbook.find(filter: 'site.import')!
// println('import_actions: ${import_actions}')
for mut action in import_actions {
mut p := action.params
mut replace_map := map[string]string{}
if replace_str := p.get_default('replace', '') {
parts := replace_str.split(',')
for part in parts {
kv := part.split(':')
if kv.len == 2 {
replace_map[kv[0].trim_space()] = kv[1].trim_space()
}
}
}
mut importpath := p.get_default('path', '')!
if importpath != '' {
if !importpath.starts_with('/') {
importpath = os.abs_path('${plbook.path}/${importpath}')
}
}
mut import_ := ImportItem{
name: p.get_default('name', '')!
url: p.get_default('url', '')!
path: importpath
dest: p.get_default('dest', '')!
replace: replace_map
visible: p.get_default_false('visible')
}
config.imports << import_
action.done = true // Mark the action as done
}
}
fn play_menu(mut plbook PlayBook, mut config SiteConfig) ! {
mut navbar_actions := plbook.find(filter: 'site.navbar')!
if navbar_actions.len > 0 {
for mut action in navbar_actions { // Should ideally be one, but loop for safety
mut p := action.params
config.menu.title = p.get_default('title', config.title)! // Use existing config.title as ultimate fallback
config.menu.logo_alt = p.get_default('logo_alt', '')!
config.menu.logo_src = p.get_default('logo_src', '')!
config.menu.logo_src_dark = p.get_default('logo_src_dark', '')!
action.done = true // Mark the action as done
}
} else {
// Fallback to site.menu for title if site.navbar is not found
mut menu_actions := plbook.find(filter: 'site.menu')!
for mut action in menu_actions {
mut p := action.params
config.menu.title = p.get_default('title', config.title)!
config.menu.logo_alt = p.get_default('logo_alt', '')!
config.menu.logo_src = p.get_default('logo_src', '')!
config.menu.logo_src_dark = p.get_default('logo_src_dark', '')!
action.done = true // Mark the action as done
}
}
mut menu_item_actions := plbook.find(filter: 'site.navbar_item')!
if menu_item_actions.len == 0 {
// Fallback to site.menu_item if site.navbar_item is not found
menu_item_actions = plbook.find(filter: 'site.menu_item')!
}
// Clear existing menu items to prevent duplication
config.menu.items = []MenuItem{}
for mut action in menu_item_actions {
mut p := action.params
mut item := MenuItem{
label: p.get_default('label', 'Documentation')!
href: p.get_default('href', '')!
to: p.get_default('to', '')!
position: p.get_default('position', 'right')!
}
config.menu.items << item
action.done = true // Mark the action as done
}
}
fn play_footer(mut plbook PlayBook, mut config SiteConfig) ! {
mut footer_actions := plbook.find(filter: 'site.footer')!
for mut action in footer_actions {
mut p := action.params
config.footer.style = p.get_default('style', 'dark')!
action.done = true // Mark the action as done
}
mut footer_item_actions := plbook.find(filter: 'site.footer_item')!
mut links_map := map[string][]FooterItem{}
// Clear existing footer links to prevent duplication
config.footer.links = []FooterLink{}
for mut action in footer_item_actions {
mut p := action.params
title := p.get_default('title', 'Docs')!
mut item := FooterItem{
label: p.get_default('label', 'Introduction')!
href: p.get_default('href', '')!
to: p.get_default('to', '')!
}
if title !in links_map {
links_map[title] = []FooterItem{}
}
links_map[title] << item
action.done = true // Mark the action as done
}
// Convert map to footer links array
for title, items in links_map {
config.footer.links << FooterLink{
title: title
items: items
}
}
}
fn play_announcement(mut plbook PlayBook, mut config SiteConfig) ! {
mut announcement_actions := plbook.find(filter: 'site.announcement')!
if announcement_actions.len > 0 {
// Only process the first announcement action
mut action := announcement_actions[0]
mut p := action.params
config.announcement = AnnouncementBar{
id: p.get_default('id', 'announcement')!
content: p.get_default('content', '')!
background_color: p.get_default('background_color', '#20232a')!
text_color: p.get_default('text_color', '#fff')!
is_closeable: p.get_default_true('is_closeable')
}
action.done = true // Mark the action as done
}
}
fn play_publish(mut plbook PlayBook, mut config SiteConfig) ! {
mut build_dest_actions := plbook.find(filter: 'site.publish')!
for mut action in build_dest_actions {
mut p := action.params
mut dest := BuildDest{
path: p.get_default('path', '')! // can be url
ssh_name: p.get_default('ssh_name', '')!
}
config.build_dest << dest
action.done = true // Mark the action as done
}
}
fn play_publish_dev(mut plbook PlayBook, mut config SiteConfig) ! {
mut build_dest_actions := plbook.find(filter: 'site.publish_dev')!
for mut action in build_dest_actions {
mut p := action.params
mut dest := BuildDest{
path: p.get_default('path', '')! // can be url
ssh_name: p.get_default('ssh_name', '')!
}
config.build_dest_dev << dest
action.done = true // Mark the action as done
}
}

View File

@@ -1,34 +0,0 @@
module site
import os
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// ============================================================
// ANNOUNCEMENT: Process announcement bar (optional)
// ============================================================
fn play_announcement(mut plbook PlayBook, mut config SiteConfig) ! {
mut announcement_actions := plbook.find(filter: 'site.announcement')!
if announcement_actions.len > 0 {
// Only process the first announcement action
mut action := announcement_actions[0]
mut p := action.params
content := p.get('content') or {
return error('!!site.announcement: must specify "content"')
}
config.announcement = AnnouncementBar{
id: p.get_default('id', 'announcement')!
content: content
background_color: p.get_default('background_color', '#20232a')!
text_color: p.get_default('text_color', '#fff')!
is_closeable: p.get_default_true('is_closeable')
}
action.done = true
}
}

View File

@@ -1,62 +0,0 @@
module site
import os
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// ============================================================
// FOOTER: Process footer configuration
// ============================================================
fn play_footer(mut plbook PlayBook, mut config SiteConfig) ! {
// Process footer style (optional)
mut footer_actions := plbook.find(filter: 'site.footer')!
for mut action in footer_actions {
mut p := action.params
config.footer.style = p.get_default('style', 'dark')!
action.done = true
}
// Process footer items (multiple)
mut footer_item_actions := plbook.find(filter: 'site.footer_item')!
mut links_map := map[string][]FooterItem{}
// Clear existing links to prevent duplication
config.footer.links = []FooterLink{}
for mut action in footer_item_actions {
mut p := action.params
title := p.get_default('title', 'Docs')!
label := p.get('label') or {
return error('!!site.footer_item: must specify "label"')
}
mut item := FooterItem{
label: label
href: p.get_default('href', '')!
to: p.get_default('to', '')!
}
// Validate that href or to is specified
if item.href.len == 0 && item.to.len == 0 {
return error('!!site.footer_item for "${label}": must specify either "href" or "to"')
}
if title !in links_map {
links_map[title] = []FooterItem{}
}
links_map[title] << item
action.done = true
}
// Convert map to footer links array
for title, items in links_map {
config.footer.links << FooterLink{
title: title
items: items
}
}
}

View File

@@ -1,51 +0,0 @@
module site
import os
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// ============================================================
// IMPORTS: Process content imports
// ============================================================
fn play_imports(mut plbook PlayBook, mut config SiteConfig) ! {
mut import_actions := plbook.find(filter: 'site.import')!
for mut action in import_actions {
mut p := action.params
// Parse replacement patterns (comma-separated key:value pairs)
mut replace_map := map[string]string{}
if replace_str := p.get_default('replace', '') {
parts := replace_str.split(',')
for part in parts {
kv := part.split(':')
if kv.len == 2 {
replace_map[kv[0].trim_space()] = kv[1].trim_space()
}
}
}
// Get path (can be relative to playbook path)
mut import_path := p.get_default('path', '')!
if import_path != '' {
if !import_path.starts_with('/') {
import_path = os.abs_path('${plbook.path}/${import_path}')
}
}
// Create import item
mut import_item := ImportItem{
name: p.get_default('name', '')!
url: p.get_default('url', '')!
path: import_path
dest: p.get_default('dest', '')!
replace: replace_map
visible: p.get_default_false('visible')
}
config.imports << import_item
action.done = true
}
}

View File

@@ -1,60 +0,0 @@
module site
import os
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// ============================================================
// NAVBAR: Process navigation menu
// ============================================================
fn play_navbar(mut plbook PlayBook, mut config SiteConfig) ! {
// Try 'site.navbar' first, then fallback to deprecated 'site.menu'
mut navbar_actions := plbook.find(filter: 'site.navbar')!
if navbar_actions.len == 0 {
navbar_actions = plbook.find(filter: 'site.menu')!
}
// Configure navbar metadata
if navbar_actions.len > 0 {
for mut action in navbar_actions {
mut p := action.params
config.menu.title = p.get_default('title', config.title)!
config.menu.logo_alt = p.get_default('logo_alt', '')!
config.menu.logo_src = p.get_default('logo_src', '')!
config.menu.logo_src_dark = p.get_default('logo_src_dark', '')!
action.done = true
}
}
// Process navbar items
mut navbar_item_actions := plbook.find(filter: 'site.navbar_item')!
if navbar_item_actions.len == 0 {
navbar_item_actions = plbook.find(filter: 'site.menu_item')!
}
// Clear existing items to prevent duplication
config.menu.items = []MenuItem{}
for mut action in navbar_item_actions {
mut p := action.params
label := p.get('label') or { return error('!!site.navbar_item: must specify "label"') }
mut item := MenuItem{
label: label
href: p.get_default('href', '')!
to: p.get_default('to', '')!
position: p.get_default('position', 'right')!
}
// Validate that at least href or to is specified
if item.href.len == 0 && item.to.len == 0 {
return error('!!site.navbar_item: must specify either "href" or "to" for label "${label}"')
}
config.menu.items << item
action.done = true
}
}

lib/web/site/play_page.v
View File

@@ -0,0 +1,135 @@
module site
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
// plays the sections & pages
fn play_pages(mut plbook PlayBook, mut site Site) ! {
// mut siteconfig := &site.siteconfig
// if only 1 doctree is specified, then we use that as the default doctree name
// mut doctreename := 'main' // Not used for now, keep commented for future doctree integration
// if plbook.exists(filter: 'site.doctree') {
// if plbook.exists_once(filter: 'site.doctree') {
// mut action := plbook.get(filter: 'site.doctree')!
// mut p := action.params
// doctreename = p.get('name') or { return error('need to specify name in site.doctree') }
// } else {
// return error("can't have more than one site.doctree")
// }
// }
mut section_current := Section{} // is the category
mut position_section := 1
mut position_category := 100 // Start categories at position 100
mut collection_current := '' // current collection we are working on
mut all_actions := plbook.find(filter: 'site.')!
for mut action in all_actions {
if action.done {
continue
}
mut p := action.params
if action.name == 'page_category' {
mut section := Section{}
section.name = p.get('name') or {
return error('need to specify name in site.page_category. Action: ${action}')
}
position_section = 1 // go back to default position for pages in the category
section.position = p.get_int_default('position', position_category)!
if section.position == position_category {
position_category += 100 // Increment for next category
}
section.label = p.get_default('label', texttools.name_fix_snake_to_pascal(section.name))!
section.path = p.get_default('path', texttools.name_fix(section.label))!
section.description = p.get_default('description', '')!
site.sections << section
action.done = true // Mark the action as done
section_current = section
continue // next action
}
if action.name == 'page' {
mut pagesrc := p.get_default('src', '')!
mut pagename := p.get_default('name', '')!
mut pagecollection := ''
if pagesrc.contains(':') {
pagecollection = pagesrc.split(':')[0]
pagename = pagesrc.split(':')[1]
} else {
if collection_current.len > 0 {
pagecollection = collection_current
pagename = pagesrc // use pagesrc as the page name
} else {
return error('need to specify collection in page.src path as collection:page_name or make sure someone before you did. Got src="${pagesrc}" with no collection set. Action: ${action}')
}
}
pagecollection = texttools.name_fix(pagecollection)
collection_current = pagecollection
pagename = texttools.name_fix_keepext(pagename)
if pagename.ends_with('.md') {
pagename = pagename.replace('.md', '')
}
if pagename == '' {
return error('need to specify name in page.src or specify in path as collection:page_name. Action: ${action}')
}
if pagecollection == '' {
return error('need to specify collection in page.src or specify in path as collection:page_name. Action: ${action}')
}
// recreate the pagepath
pagesrc = '${pagecollection}:${pagename}'
// get sectionname from category, page_category or section, if not specified use current section
section_name := p.get_default('category', p.get_default('page_category', p.get_default('section',
section_current.name)!)!)!
mut pagepath := p.get_default('path', section_current.path)!
pagepath = pagepath.trim_space().trim('/')
// Only apply name_fix if it's a simple name (no path separators)
// For paths like 'appendix/internet_today', preserve the structure
if !pagepath.contains('/') {
pagepath = texttools.name_fix(pagepath)
}
// Ensure pagepath ends with / to indicate it's a directory path
if pagepath.len > 0 && !pagepath.ends_with('/') {
pagepath += '/'
}
mut mypage := Page{
section_name: section_name
name: pagename
path: pagepath
src: pagesrc
}
mypage.position = p.get_int_default('position', 0)!
if mypage.position == 0 {
mypage.position = section_current.position + position_section
position_section += 1
}
mypage.title = p.get_default('title', '')!
mypage.description = p.get_default('description', '')!
mypage.slug = p.get_default('slug', '')!
mypage.draft = p.get_default_false('draft')
mypage.hide_title = p.get_default_false('hide_title')
mypage.title_nr = p.get_int_default('title_nr', 0)!
site.pages << mypage
action.done = true // Mark the action as done
}
// println(action)
// println(section_current)
// println(site.pages.last())
// $dbg;
}
}

View File

@@ -1,203 +0,0 @@
module site
import os
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// ============================================================
// Helper function: normalize name while preserving .md extension handling
// ============================================================
fn normalize_page_name(name string) string {
mut result := name
// Remove .md extension if present for processing
if result.ends_with('.md') {
result = result[0..result.len - 3]
}
// Apply name fixing
return texttools.name_fix(result)
}
// ============================================================
// Internal structure for tracking category information
// ============================================================
struct CategoryInfo {
pub mut:
name string
label string
position int
nav_items []NavItem
}
// ============================================================
// PAGES: Process pages and build navigation structure
// ============================================================
fn play_pages(mut plbook PlayBook, mut website Site) ! {
mut collection_current := '' // Track current collection for reuse
mut categories := map[string]CategoryInfo{} // Map of category name -> info
mut category_current := '' // Track current active category
mut root_nav_items := []NavItem{} // Root-level items (pages without category)
mut next_category_position := 100 // Auto-increment position for categories
// ============================================================
// PASS 1: Process all page and category actions
// ============================================================
mut all_actions := plbook.find(filter: 'site.')!
for mut action in all_actions {
if action.done {
continue
}
// ========== PAGE CATEGORY ==========
if action.name == 'page_category' {
mut p := action.params
category_name := p.get('name') or {
return error('!!site.page_category: must specify "name"')
}
category_name_fixed := texttools.name_fix(category_name)
// Get label (derive from name if not specified)
mut label := p.get_default('label', texttools.name_fix_snake_to_pascal(category_name_fixed))!
mut position := p.get_int_default('position', next_category_position)!
// Auto-increment position if using default
if position == next_category_position {
next_category_position += 100
}
// Create and store category info
categories[category_name_fixed] = CategoryInfo{
name: category_name_fixed
label: label
position: position
nav_items: []NavItem{}
}
category_current = category_name_fixed
console.print_item('Created page category: "${label}" (${category_name_fixed})')
action.done = true
continue
}
// ========== PAGE ==========
if action.name == 'page' {
mut p := action.params
mut page_src := p.get_default('src', '')!
mut page_collection := ''
mut page_name := ''
// Parse collection:page format from src
if page_src.contains(':') {
parts := page_src.split(':')
page_collection = texttools.name_fix(parts[0])
page_name = normalize_page_name(parts[1])
} else {
// Use previously specified collection if available
if collection_current.len > 0 {
page_collection = collection_current
page_name = normalize_page_name(page_src)
} else {
return error('!!site.page: must specify source as "collection:page_name" in "src".\nGot src="${page_src}" with no collection previously set.\nEither specify "collection:page_name" or define a collection first.')
}
}
// Validation
if page_name.len == 0 {
return error('!!site.page: could not extract valid page name from src="${page_src}"')
}
if page_collection.len == 0 {
return error('!!site.page: could not determine collection')
}
// Store collection for subsequent pages
collection_current = page_collection
// Build page ID
page_id := '${page_collection}:${page_name}'
// Get optional page metadata
page_title := p.get_default('title', '')!
page_description := p.get_default('description', '')!
page_draft := p.get_default_false('draft')
page_hide_title := p.get_default_false('hide_title')
// Create page
mut page := Page{
id: page_id
title: page_title
description: page_description
draft: page_draft
hide_title: page_hide_title
src: page_id
}
website.pages[page_id] = page
// Create navigation item
nav_doc := NavDoc{
id: page_id
label: if page_title.len > 0 { page_title } else { page_name }
}
// Add to appropriate category or root
if category_current.len > 0 {
if category_current in categories {
mut cat_info := categories[category_current]
cat_info.nav_items << nav_doc
categories[category_current] = cat_info
console.print_debug('Added page "${page_id}" to category "${category_current}"')
}
} else {
root_nav_items << nav_doc
console.print_debug('Added root page "${page_id}"')
}
action.done = true
continue
}
}
// ============================================================
// PASS 2: Build final navigation structure from categories
// ============================================================
console.print_item('Building navigation structure...')
mut final_nav_items := []NavItem{}
// Add root items first
for item in root_nav_items {
final_nav_items << item
}
// Sort categories by position and add them
mut sorted_categories := []CategoryInfo{}
for _, cat_info in categories {
sorted_categories << cat_info
}
// Sort by position
sorted_categories.sort(a.position < b.position)
// Convert categories to NavCat items and add to navigation
for cat_info in sorted_categories {
// Unwrap NavDoc items from cat_info.nav_items (they're already NavItem)
nav_cat := NavCat{
label: cat_info.label
collapsible: true
collapsed: false
items: cat_info.nav_items
}
final_nav_items << nav_cat
console.print_debug('Added category to nav: "${cat_info.label}" with ${cat_info.nav_items.len} items')
}
// Update website navigation
website.nav.my_sidebar = final_nav_items
console.print_green('Navigation structure built with ${website.pages.len} pages in ${categories.len} categories')
}

View File

@@ -1,46 +0,0 @@
module site
import os
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// ============================================================
// PUBLISHING: Configure build and publish destinations
// ============================================================
fn play_publishing(mut plbook PlayBook, mut config SiteConfig) ! {
// Production publish destinations
mut build_dest_actions := plbook.find(filter: 'site.publish')!
for mut action in build_dest_actions {
mut p := action.params
path := p.get('path') or {
return error('!!site.publish: must specify "path"')
}
mut dest := BuildDest{
path: path
ssh_name: p.get_default('ssh_name', '')!
}
config.build_dest << dest
action.done = true
}
// Development publish destinations
mut build_dest_dev_actions := plbook.find(filter: 'site.publish_dev')!
for mut action in build_dest_dev_actions {
mut p := action.params
path := p.get('path') or {
return error('!!site.publish_dev: must specify "path"')
}
mut dest := BuildDest{
path: path
ssh_name: p.get_default('ssh_name', '')!
}
config.build_dest_dev << dest
action.done = true
}
}

View File

@@ -2,83 +2,43 @@
The Site module provides a structured way to define website configurations, navigation menus, pages, and sections using HeroScript. It's designed to work with static site generators like Docusaurus.
## Purpose
The Site module allows you to:
- Define website structure and configuration in a declarative way using HeroScript
- Organize pages into sections/categories
- Configure navigation menus and footers
- Manage page metadata (title, description, slug, etc.)
- Support multiple content collections
- Define build and publish destinations
## Quick Start
### Minimal HeroScript Example
```heroscript
!!site.config
name: "my_docs"
title: "My Documentation"
!!site.page src: "docs:introduction"
title: "Getting Started"
!!site.page src: "setup"
title: "Installation"
```
### Processing with V Code
```v
#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playbook
import incubaid.herolib.develop.gittools
import incubaid.herolib.web.site
import incubaid.herolib.ui.console
import incubaid.herolib.core.playcmds
// Process HeroScript file
mut plbook := playbook.new(path: './site_config.heroscript')!
// Clone or use existing repository with HeroScript files
mysitepath := gittools.path(
git_url: 'https://git.ourworld.tf/tfgrid/docs_tfgrid4/src/branch/main/ebooks/tech'
git_pull: true
)!
// Execute site configuration
site.play(mut plbook)!
// Process all HeroScript files in the path
playcmds.run(heroscript_path: mysitepath.path)!
// Access the configured site
mut mysite := site.get(name: 'my_docs')!
// Print available pages
pages_map := mysite.list_pages()
for page_id, _ in pages_map {
console.print_item('Page: ${page_id}')
}
println('Site has ${mysite.pages.len} pages')
// Get the configured site
mut mysite := site.get(name: 'tfgrid_tech')!
println(mysite)
```
---
## Core Concepts
### Site
A website configuration that contains pages, navigation structure, and metadata.
### Page
A single page with:
- **ID**: `collection:page_name` format
- **Title**: Display name (optional - extracted from markdown if not provided)
- **Description**: SEO metadata
- **Draft**: Hidden from navigation if true
### Category (Section)
Groups related pages together in the navigation sidebar. Automatically collapsed/expandable.
### Collection
A logical group of pages. Pages reuse the collection once specified.
```heroscript
!!site.page src: "tech:intro" # Specifies collection "tech"
!!site.page src: "benefits" # Reuses collection "tech"
!!site.page src: "components" # Still uses collection "tech"
!!site.page src: "api:reference" # Switches to collection "api"
!!site.page src: "endpoints" # Uses collection "api"
```
---
## HeroScript Syntax
### 1. Site Configuration (Required)
### Basic Configuration
```heroscript
!!site.config
@@ -91,49 +51,20 @@ A logical group of pages. Pages reuse the collection once specified.
copyright: "© 2024 My Organization"
url: "https://docs.example.com"
base_url: "/"
url_home: "/docs"
```
**Parameters:**
- `name` - Internal site identifier (default: 'default')
- `title` - Main site title (shown in browser tab)
- `description` - Site description for SEO
- `tagline` - Short tagline/subtitle
- `favicon` - Path to favicon image
- `image` - Default OG image for social sharing
- `copyright` - Copyright notice
- `url` - Full site URL for Docusaurus
- `base_url` - Base URL path (e.g., "/" or "/docs/")
- `url_home` - Home page path
### 2. Metadata Overrides (Optional)
```heroscript
!!site.config_meta
title: "My Docs - Technical Reference"
image: "img/tech-og.png"
description: "Technical documentation and API reference"
```
Overrides specific metadata for SEO without changing core config.
### 3. Navigation Bar
### Navigation Menu
```heroscript
!!site.navbar
title: "My Documentation"
title: "My Site"
logo_alt: "Site Logo"
logo_src: "img/logo.svg"
logo_src_dark: "img/logo-dark.svg"
!!site.navbar_item
label: "Documentation"
to: "intro"
position: "left"
!!site.navbar_item
label: "API Reference"
to: "docs/api"
to: "docs/intro"
position: "left"
!!site.navbar_item
@@ -142,13 +73,7 @@ Overrides specific metadata for SEO without changing core config.
position: "right"
```
**Parameters:**
- `label` - Display text (required)
- `to` - Internal link
- `href` - External URL
- `position` - "left" or "right" in navbar
### 4. Footer Configuration
### Footer Configuration
```heroscript
!!site.footer
@@ -162,234 +87,242 @@ Overrides specific metadata for SEO without changing core config.
!!site.footer_item
title: "Docs"
label: "Getting Started"
to: "getting-started"
href: "https://docs.example.com/getting-started"
!!site.footer_item
title: "Community"
label: "Discord"
href: "https://discord.gg/example"
!!site.footer_item
title: "Legal"
label: "Privacy"
href: "https://example.com/privacy"
```
### 5. Announcement Bar (Optional)
## Page Organization
### Example 1: Simple Pages Without Categories
When you don't need categories, pages are added sequentially. The collection only needs to be specified once, then it's reused for subsequent pages.
```heroscript
!!site.announcement
id: "new-release"
content: "🎉 Version 2.0 is now available!"
background_color: "#20232a"
text_color: "#fff"
is_closeable: true
!!site.page src: "mycelium_tech:introduction"
description: "Introduction to ThreeFold Technology"
slug: "/"
!!site.page src: "vision"
description: "Our Vision for the Future Internet"
!!site.page src: "what"
description: "What ThreeFold is Building"
!!site.page src: "presentation"
description: "ThreeFold Technology Presentation"
!!site.page src: "status"
description: "Current Development Status"
```
### 6. Pages and Categories
**Key Points:**
#### Simple: Pages Without Categories
- First page specifies the collection as `mycelium_tech:introduction` (collection:page_name format)
- Subsequent pages only need the page name (e.g., `vision`) - the `mycelium_tech` collection is reused
- If `title` is not specified, it will be extracted from the markdown file itself
- Pages are ordered by their appearance in the HeroScript file
- `slug` can be used to customize the URL path (e.g., `"/"` for homepage)
```heroscript
!!site.page src: "guides:introduction"
title: "Getting Started"
description: "Introduction to the platform"
### Example 2: Pages with Categories
!!site.page src: "installation"
title: "Installation"
!!site.page src: "configuration"
title: "Configuration"
```
#### Advanced: Pages With Categories
Categories (sections) help organize pages into logical groups with their own navigation structure.
```heroscript
!!site.page_category
name: "basics"
label: "Getting Started"
name: "first_principle_thinking"
label: "First Principle Thinking"
!!site.page src: "guides:introduction"
title: "Introduction"
description: "Learn the basics"
!!site.page src: "first_principle_thinking:hardware_badly_used"
description: "Hardware is not used properly, why it is important to understand hardware"
!!site.page src: "installation"
title: "Installation"
!!site.page src: "internet_risk"
description: "Internet risk, how to mitigate it, and why it is important"
!!site.page src: "configuration"
title: "Configuration"
!!site.page_category
name: "advanced"
label: "Advanced Topics"
!!site.page src: "advanced:performance"
title: "Performance Tuning"
!!site.page src: "scaling"
title: "Scaling Guide"
!!site.page src: "onion_analogy"
description: "Compare onion with a computer, layers of abstraction"
```
**Page Parameters:**
- `src` - Source as `collection:page` (first page) or just `page_name` (reuse collection)
- `title` - Page title (optional, extracted from markdown if not provided)
- `description` - Page description
- `draft` - Hide from navigation (default: false)
- `hide_title` - Don't show title in page (default: false)
**Key Points:**
### 7. Content Imports
- `!!site.page_category` creates a new section/category
- `name` is the internal identifier (snake_case)
- `label` is the display name (automatically derived from `name` if not specified)
- Category name is converted to title case: `first_principle_thinking` → "First Principle Thinking"
- Once a category is defined, all subsequent pages belong to it until a new category is declared
- Collection persistence works the same: specify once (e.g., `first_principle_thinking:hardware_badly_used`), then reuse
### Example 3: Advanced Page Configuration
```heroscript
!!site.page_category
name: "components"
label: "System Components"
position: 100
!!site.page src: "mycelium_tech:mycelium"
title: "Mycelium Network"
description: "Peer-to-peer overlay network"
slug: "mycelium-network"
position: 1
draft: false
hide_title: false
!!site.page src: "fungistor"
title: "Fungistor Storage"
description: "Distributed storage system"
position: 2
```
**Available Page Parameters:**
- `src`: Source reference as `collection:page_name` (required for first page in collection)
- `title`: Page title (optional, extracted from markdown if not provided)
- `description`: Page description for metadata
- `slug`: Custom URL slug
- `position`: Manual ordering (auto-incremented if not specified)
- `draft`: Mark page as draft (default: false)
- `hide_title`: Hide the page title in rendering (default: false)
- `path`: Custom path for the page (defaults to category name)
- `category`: Override the current category for this page
## File Organization
HeroScript files should be organized with numeric prefixes to control execution order:
```
docs/
├── 0_config.heroscript # Site configuration
├── 1_menu.heroscript # Navigation and footer
├── 2_intro_pages.heroscript # Introduction pages
├── 3_tech_pages.heroscript # Technical documentation
└── 4_api_pages.heroscript # API reference
```
**Important:** Files are processed in alphabetical order, so use numeric prefixes (0_, 1_, 2_, etc.) to ensure correct execution sequence.
## Import External Content
```heroscript
!!site.import
url: "https://github.com/example/external-docs"
path: "/local/path/to/repo"
dest: "external"
replace: "PROJECT_NAME:My Project,VERSION:1.0.0"
visible: true
```
### 8. Publishing Destinations
## Publish Destinations
```heroscript
!!site.publish
path: "/var/www/html/docs"
ssh_name: "production"
ssh_name: "production_server"
!!site.publish_dev
path: "/tmp/docs-preview"
```
---
## Factory Methods
## Common Patterns
### Create or Get a Site
### Pattern 1: Multi-Section Technical Documentation
```v
import incubaid.herolib.web.site
```heroscript
!!site.config
name: "tech_docs"
title: "Technical Documentation"
// Create a new site
mut mysite := site.new(name: 'my_docs')!
!!site.page_category
name: "getting_started"
label: "Getting Started"
// Get an existing site
mut mysite := site.get(name: 'my_docs')!
!!site.page src: "docs:intro"
title: "Introduction"
// Get default site
mut mysite := site.default()!
!!site.page src: "installation"
title: "Installation"
// Check if site exists
if site.exists(name: 'my_docs') {
println('Site exists')
}
!!site.page_category
name: "concepts"
label: "Core Concepts"
!!site.page src: "concepts:architecture"
title: "Architecture"
!!site.page src: "components"
title: "Components"
!!site.page_category
name: "api"
label: "API Reference"
!!site.page src: "api:rest"
title: "REST API"
!!site.page src: "graphql"
title: "GraphQL"
// List all sites
sites := site.list()
println(sites)
```
### Pattern 2: Simple Blog/Knowledge Base
### Using with PlayBook
```heroscript
!!site.config
name: "blog"
title: "Knowledge Base"
```v
import incubaid.herolib.core.playbook
import incubaid.herolib.web.site
!!site.page src: "articles:first_post"
title: "Welcome to Our Blog"
// Create playbook from path
mut plbook := playbook.new(path: '/path/to/heroscripts')!
!!site.page src: "second_post"
title: "Understanding the Basics"
// Process site configuration
site.play(mut plbook)!
!!site.page src: "third_post"
title: "Advanced Techniques"
// Access the configured site
mut mysite := site.get(name: 'my_site')!
```
### Pattern 3: Project with External Imports
## Data Structures
```heroscript
!!site.config
name: "project_docs"
title: "Project Documentation"
### Site
!!site.import
url: "https://github.com/org/shared-docs"
dest: "shared"
visible: true
!!site.page_category
name: "product"
label: "Product Guide"
!!site.page src: "docs:overview"
title: "Overview"
!!site.page src: "features"
title: "Features"
!!site.page_category
name: "resources"
label: "Shared Resources"
!!site.page src: "shared:common"
title: "Common Patterns"
```v
pub struct Site {
pub mut:
pages []Page
sections []Section
siteconfig SiteConfig
}
```
---
### Page
## File Organization
Organize HeroScript files with numeric prefixes to control execution order:
```
docs/
├── 0_config.heroscript
│ └── !!site.config and !!site.config_meta
├── 1_menu.heroscript
│ └── !!site.navbar and !!site.footer
├── 2_pages.heroscript
│ └── !!site.page_category and !!site.page actions
└── 3_publish.heroscript
└── !!site.publish destinations
```v
pub struct Page {
pub mut:
name string // Page identifier
title string // Display title
description string // Page description
draft bool // Draft status
position int // Sort order
hide_title bool // Hide title in rendering
src string // Source as collection:page_name
path string // URL path (without page name)
section_name string // Category/section name
title_nr int // Title numbering level
slug string // Custom URL slug
}
```
**Why numeric prefixes?**
### Section
Files are processed in alphabetical order. Numeric prefixes ensure:
- Site config runs first
- Navigation menu configures before pages
- Pages build the final structure
- Publishing configured last
```v
pub struct Section {
pub mut:
name string // Internal identifier
position int // Sort order
path string // URL path
label string // Display name
}
```
---
## Best Practices
## Processing Order
1. **File Naming**: Use numeric prefixes (0_, 1_, 2_) to control execution order
2. **Collection Reuse**: Specify collection once, then reuse for subsequent pages
3. **Category Organization**: Group related pages under categories for better navigation
4. **Title Extraction**: Let titles be extracted from markdown files when possible
5. **Position Management**: Use automatic positioning unless you need specific ordering
6. **Description**: Always provide descriptions for better SEO and navigation
7. **Draft Status**: Use `draft: true` for work-in-progress pages
The Site module processes HeroScript in this strict order:
## Complete Example
1. Site Configuration
2. Metadata Overrides
3. Imports
4. Navigation
5. Footer
6. Announcement
7. Publishing
8. Pages & Categories
See `examples/web/site/site_example.vsh` for a complete working example.
Each stage depends on previous stages completing successfully.
For a real-world example, check: <https://git.ourworld.tf/tfgrid/docs_tfgrid4/src/branch/main/ebooks/tech>

View File

@@ -1,445 +0,0 @@
module site
import incubaid.herolib.core.playbook
import incubaid.herolib.web.site
import incubaid.herolib.ui.console
import os
// Big comprehensive HeroScript for testing
const test_heroscript = '
!!site.config
name: "test_docs"
title: "Test Documentation Site"
description: "A comprehensive test documentation site"
tagline: "Testing everything"
favicon: "img/favicon.png"
image: "img/test-og.png"
copyright: "© 2024 Test Organization"
url: "https://test.example.com"
base_url: "/"
url_home: "/docs"
!!site.config_meta
title: "Test Docs - Advanced"
image: "img/test-og-alternative.png"
description: "Advanced test documentation"
!!site.navbar
title: "Test Documentation"
logo_alt: "Test Logo"
logo_src: "img/logo.svg"
logo_src_dark: "img/logo-dark.svg"
!!site.navbar_item
label: "Getting Started"
to: "intro"
position: "left"
!!site.navbar_item
label: "API Reference"
to: "api"
position: "left"
!!site.navbar_item
label: "GitHub"
href: "https://github.com/example/test"
position: "right"
!!site.navbar_item
label: "Blog"
href: "https://blog.example.com"
position: "right"
!!site.footer
style: "dark"
!!site.footer_item
title: "Documentation"
label: "Introduction"
to: "intro"
!!site.footer_item
title: "Documentation"
label: "Getting Started"
to: "getting-started"
!!site.footer_item
title: "Documentation"
label: "Advanced Topics"
to: "advanced"
!!site.footer_item
title: "Community"
label: "Discord"
href: "https://discord.gg/example"
!!site.footer_item
title: "Community"
label: "Twitter"
href: "https://twitter.com/example"
!!site.footer_item
title: "Legal"
label: "Privacy Policy"
href: "https://example.com/privacy"
!!site.footer_item
title: "Legal"
label: "Terms of Service"
href: "https://example.com/terms"
!!site.announcement
id: "v2-release"
content: "🎉 Version 2.0 is now available! Check out the new features."
background_color: "#1a472a"
text_color: "#fff"
is_closeable: true
!!site.page_category
name: "getting_started"
label: "Getting Started"
position: 10
!!site.page src: "guides:introduction"
title: "Introduction to Test Docs"
description: "Learn what this project is about"
!!site.page src: "installation"
title: "Installation Guide"
description: "How to install and setup"
!!site.page src: "quick_start"
title: "Quick Start"
description: "5 minute quick start guide"
!!site.page_category
name: "concepts"
label: "Core Concepts"
position: 20
!!site.page src: "concepts:architecture"
title: "Architecture Overview"
description: "Understanding the system architecture"
!!site.page src: "components"
title: "Key Components"
description: "Learn about the main components"
!!site.page src: "workflow"
title: "Typical Workflow"
description: "How to use the system"
!!site.page_category
name: "api"
label: "API Reference"
position: 30
!!site.page src: "api:rest"
title: "REST API"
description: "Complete REST API reference"
!!site.page src: "graphql"
title: "GraphQL API"
description: "GraphQL API documentation"
!!site.page src: "webhooks"
title: "Webhooks"
description: "Webhook configuration and examples"
!!site.page_category
name: "advanced"
label: "Advanced Topics"
position: 40
!!site.page src: "advanced:performance"
title: "Performance Optimization"
description: "Tips for optimal performance"
!!site.page src: "scaling"
title: "Scaling Guide"
description: "How to scale the system"
!!site.page src: "security"
title: "Security Best Practices"
description: "Security considerations and best practices"
!!site.page src: "troubleshooting"
title: "Troubleshooting"
description: "Common issues and solutions"
draft: false
!!site.publish
path: "/var/www/html/docs"
ssh_name: "production-server"
!!site.publish_dev
path: "/tmp/docs-dev"
'
fn test_site1() ! {
console.print_header('Site Module Comprehensive Test')
console.lf()
// ========================================================
// TEST 1: Create playbook from heroscript
// ========================================================
console.print_item('TEST 1: Creating playbook from HeroScript')
mut plbook := playbook.new(text: test_heroscript)!
console.print_green(' Playbook created successfully')
console.lf()
// ========================================================
// TEST 2: Process site configuration
// ========================================================
console.print_item('TEST 2: Processing site.play()')
site.play(mut plbook)!
console.print_green(' Site configuration processed successfully')
console.lf()
// ========================================================
// TEST 3: Retrieve site and validate
// ========================================================
console.print_item('TEST 3: Retrieving configured site')
mut test_site := site.get(name: 'test_docs')!
console.print_green(' Site retrieved successfully')
console.lf()
// ========================================================
// TEST 4: Validate SiteConfig
// ========================================================
console.print_header('Validating SiteConfig')
mut config := &test_site.siteconfig
help_test_string('Site Name', config.name, 'test_docs')
help_test_string('Site Title', config.title, 'Test Documentation Site')
help_test_string('Site Description', config.description, 'A comprehensive test documentation site')
help_test_string('Site Tagline', config.tagline, 'Testing everything')
help_test_string('Copyright', config.copyright, '© 2024 Test Organization')
help_test_string('Base URL', config.base_url, '/')
help_test_string('URL Home', config.url_home, '/docs')
help_test_string('Meta Title', config.meta_title, 'Test Docs - Advanced')
help_test_string('Meta Image', config.meta_image, 'img/test-og-alternative.png')
assert config.build_dest.len == 1, 'Should have 1 production build destination'
console.print_green(' Production build dest: ${config.build_dest[0].path}')
assert config.build_dest_dev.len == 1, 'Should have 1 dev build destination'
console.print_green(' Dev build dest: ${config.build_dest_dev[0].path}')
console.lf()
// ========================================================
// TEST 5: Validate Menu Configuration
// ========================================================
console.print_header('Validating Menu Configuration')
mut menu := config.menu
help_test_string('Menu Title', menu.title, 'Test Documentation')
help_test_string('Menu Logo Alt', menu.logo_alt, 'Test Logo')
help_test_string('Menu Logo Src', menu.logo_src, 'img/logo.svg')
help_test_string('Menu Logo Src Dark', menu.logo_src_dark, 'img/logo-dark.svg')
assert menu.items.len == 4, 'Should have 4 navbar items, got ${menu.items.len}'
console.print_green(' Menu has 4 navbar items')
// Validate navbar items
help_test_navbar_item(menu.items[0], 'Getting Started', 'intro', '', 'left')
help_test_navbar_item(menu.items[1], 'API Reference', 'api', '', 'left')
help_test_navbar_item(menu.items[2], 'GitHub', '', 'https://github.com/example/test',
'right')
help_test_navbar_item(menu.items[3], 'Blog', '', 'https://blog.example.com', 'right')
console.lf()
// ========================================================
// TEST 6: Validate Footer Configuration
// ========================================================
console.print_header('Validating Footer Configuration')
mut footer := config.footer
help_test_string('Footer Style', footer.style, 'dark')
assert footer.links.len == 3, 'Should have 3 footer link groups, got ${footer.links.len}'
console.print_green(' Footer has 3 link groups')
// Validate footer structure
for link_group in footer.links {
console.print_item('Footer group: "${link_group.title}" has ${link_group.items.len} items')
}
// Detailed footer validation
mut doc_links := footer.links.filter(it.title == 'Documentation')
assert doc_links.len == 1, 'Should have 1 Documentation link group'
assert doc_links[0].items.len == 3, 'Documentation should have 3 items'
console.print_green(' Documentation footer: 3 items')
mut community_links := footer.links.filter(it.title == 'Community')
assert community_links.len == 1, 'Should have 1 Community link group'
assert community_links[0].items.len == 2, 'Community should have 2 items'
console.print_green(' Community footer: 2 items')
mut legal_links := footer.links.filter(it.title == 'Legal')
assert legal_links.len == 1, 'Should have 1 Legal link group'
assert legal_links[0].items.len == 2, 'Legal should have 2 items'
console.print_green(' Legal footer: 2 items')
console.lf()
// ========================================================
// TEST 7: Validate Announcement Bar
// ========================================================
console.print_header('Validating Announcement Bar')
mut announcement := config.announcement
help_test_string('Announcement ID', announcement.id, 'v2-release')
help_test_string('Announcement Content', announcement.content, '🎉 Version 2.0 is now available! Check out the new features.')
help_test_string('Announcement BG Color', announcement.background_color, '#1a472a')
help_test_string('Announcement Text Color', announcement.text_color, '#fff')
assert announcement.is_closeable, 'Announcement should be closeable'
console.print_green(' Announcement bar configured correctly')
console.lf()
// ========================================================
// TEST 8: Validate Pages
// ========================================================
console.print_header('Validating Pages')
mut pages := test_site.pages.clone()
assert pages.len == 13, 'Should have 13 pages, got ${pages.len}'
console.print_green(' Total pages: ${pages.len}')
// List and validate pages
mut page_ids := pages.keys()
page_ids.sort()
for page_id in page_ids {
mut page := pages[page_id]
console.print_debug(' Page: ${page_id} - "${page.title}"')
}
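// Page ids follow the "collection:page" form; pages declared with a bare
// src (e.g. "components") appear to inherit the collection of the
// preceding entry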
// Validate specific pages
assert 'guides:introduction' in pages, 'guides:introduction page not found'
console.print_green(' Found guides:introduction')
assert 'concepts:architecture' in pages, 'concepts:architecture page not found'
console.print_green(' Found concepts:architecture')
assert 'api:rest' in pages, 'api:rest page not found'
console.print_green(' Found api:rest')
console.lf()
// ========================================================
// TEST 9: Validate Navigation Structure
// ========================================================
console.print_header('Validating Navigation Structure')
mut sidebar := unsafe { test_site.nav.my_sidebar.clone() }
console.print_item('Navigation sidebar has ${sidebar.len} items')
// Count categories
mut category_count := 0
mut doc_count := 0
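// Sidebar entries are a sum type with three variants: NavCat (a category
// with nested items), NavDoc (a single page), and NavLink (an external URL)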
for item in sidebar {
match item {
site.NavCat {
category_count++
console.print_debug(' Category: "${item.label}" with ${item.items.len} sub-items')
}
site.NavDoc {
doc_count++
console.print_debug(' Doc: "${item.label}" (${item.id})')
}
site.NavLink {
console.print_debug(' Link: "${item.label}" -> ${item.href}')
}
}
}
assert category_count == 4, 'Should have 4 categories, got ${category_count}'
console.print_green(' Navigation has 4 categories')
// Validate category structure
for item in sidebar {
match item {
site.NavCat {
console.print_item('Category: "${item.label}"')
println(' Collapsible: ${item.collapsible}, Collapsed: ${item.collapsed}')
println(' Items: ${item.items.len}')
// Validate sub-items
for sub_item in item.items {
match sub_item {
site.NavDoc {
println(' - ${sub_item.label} (${sub_item.id})')
}
else {
println(' - Unexpected item type')
}
}
}
}
else {}
}
}
console.lf()
// ========================================================
// TEST 10: Validate Site Factory
// ========================================================
console.print_header('Validating Site Factory')
mut all_sites := site.list()
console.print_item('Total sites registered: ${all_sites.len}')
for site_name in all_sites {
console.print_debug(' - ${site_name}')
}
assert all_sites.contains('test_docs'), 'test_docs should be in sites list'
console.print_green(' test_docs found in factory')
assert site.exists(name: 'test_docs'), 'test_docs should exist'
console.print_green(' test_docs verified to exist')
console.lf()
// ========================================================
// FINAL SUMMARY
// ========================================================
console.print_header('Test Summary')
console.print_green(' All tests passed successfully!')
console.print_item('Site Name: ${config.name}')
console.print_item('Pages: ${pages.len}')
console.print_item('Navigation Categories: ${category_count}')
console.print_item('Navbar Items: ${menu.items.len}')
console.print_item('Footer Groups: ${footer.links.len}')
console.print_item('Announcement: Active')
console.print_item('Build Destinations: ${config.build_dest.len} prod, ${config.build_dest_dev.len} dev')
console.lf()
console.print_green('All validations completed successfully!')
}
// ============================================================
// Helper Functions for Testing
// ============================================================
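// help_test_string compares an actual string against the expected value:
// it prints the match in green on success and panics with a labelled
// message on mismatch, so a failing field is easy to spot in the output.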
fn help_test_string(label string, actual string, expected string) {
if actual == expected {
console.print_green(' ${label}: "${actual}"')
} else {
console.print_stderr(' ${label}: expected "${expected}", got "${actual}"')
panic('Test failed: ${label}')
}
}
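// help_test_navbar_item asserts a navbar entry's label, internal route (to),
// external href, and position in a single call; exactly one of to/href is
// expected to be non-empty.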
fn help_test_navbar_item(item site.MenuItem, label string, to string, href string, position string) {
assert item.label == label, 'Expected label "${label}", got "${item.label}"'
assert item.to == to, 'Expected to "${to}", got "${item.to}"'
assert item.href == href, 'Expected href "${href}", got "${item.href}"'
assert item.position == position, 'Expected position "${position}", got "${item.position}"'
console.print_green(' Navbar item: "${label}"')
}
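// To run this file on its own, something like `v test <path-to-this-file>`
// should work (the exact path depends on where the test lives in the repo).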

View File

@@ -4,28 +4,18 @@ set -e
os_name="$(uname -s)"
arch_name="$(uname -m)"
version='1.0.38'
# Detect Linux distribution type
linux_type=""
if [[ "$os_name" == "Linux" ]]; then
if [ -f /etc/os-release ]; then
linux_type="$(. /etc/os-release && echo "$ID")"
fi
fi
# Base URL for GitHub releases (uses 'latest' to always get the most recent version)
base_url="https://github.com/incubaid/herolib/releases/latest/download"
# Base URL for GitHub releases
base_url="https://github.com/incubaid/herolib/releases/download/v${version}"
# Select the URL based on the platform. For Linux we have a single static binary
# Select the URL based on the platform
# Always use musl for Linux (static binary, works everywhere)
if [[ "$os_name" == "Linux" && "$arch_name" == "x86_64" ]]; then
url="$base_url/hero-x86_64-linux-musl"
elif [[ "$os_name" == "Linux" && "$arch_name" == "aarch64" ]]; then
url="$base_url/hero-aarch64-linux-musl"
elif [[ "$os_name" == "Darwin" && "$arch_name" == "arm64" ]]; then
url="$base_url/hero-aarch64-apple-darwin"
# elif [[ "$os_name" == "Darwin" && "$arch_name" == "x86_64" ]]; then
# url="$base_url/hero-x86_64-apple-darwin"
else
echo "Unsupported platform: $os_name $arch_name"
exit 1
@@ -45,7 +35,7 @@ if [ ! -z "$existing_hero" ]; then
fi
fi
if [[ "${OSNAME}" == "darwin"* ]]; then
if [[ "$os_name" == "Darwin" ]]; then
# Check if /usr/local/bin/hero exists and remove it
if [ -f /usr/local/bin/hero ]; then
rm /usr/local/bin/hero || { echo "Error: Failed to remove existing hero binary"; exit 1; }
@@ -85,12 +75,16 @@ fi
if [ -z "$url" ]; then
echo "Could not find url to download."
echo $urls
exit 1
fi
zprofile="${HOME}/.zprofile"
hero_bin_path="${HOME}/hero/bin"
# Only modify .zprofile on macOS (where we install to ~/hero/bin)
if [[ "$os_name" == "Darwin" ]]; then
zprofile="${HOME}/.zprofile"
temp_file="$(mktemp)"
trap 'rm -f "$temp_file"' EXIT
# Check if ~/.zprofile exists
if [ -f "$zprofile" ]; then
@@ -107,8 +101,7 @@ fi
echo "export PATH=\$PATH:$hero_bin_path" >> "$temp_file"
# Replace the original .zprofile with the modified version
mv "$temp_file" "$zprofile"
# Ensure the temporary file is removed (in case of script interruption before mv)
trap 'rm -f "$temp_file"' EXIT
fi
# Output the selected URL
echo "Download URL for your platform: $url"
@@ -121,19 +114,19 @@ set -e
# Check if file size is greater than 2 MB
file_size=$(du -m /tmp/downloaded_file | cut -f1)
if [ "$file_size" -ge 2 ]; then
# Create the target directory if it doesn't exist
if [[ "$os_name" == "Darwin" ]]; then
# macOS: install to ~/hero/bin
mkdir -p ~/hero/bin
if [[ "$OSTYPE" == "darwin"* ]]; then
# Move and rename the file
mv /tmp/downloaded_file ~/hero/bin/hero
chmod +x ~/hero/bin/hero
export PATH=$PATH:$hero_bin_path
else
# Linux: install to /usr/local/bin
mv /tmp/downloaded_file /usr/local/bin/hero
chmod +x /usr/local/bin/hero
fi
echo "Hero installed properly"
export PATH=$PATH:$hero_bin_path
hero -version
else
echo "Downloaded file is less than 2 MB. Process aborted."